/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#ifndef _OTX_CRYPTODEV_HW_ACCESS_H_
#define _OTX_CRYPTODEV_HW_ACCESS_H_

#include <stdbool.h>

#include <rte_branch_prediction.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_memory.h>
#include <rte_prefetch.h>

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"
#include "cpt_pmd_logs.h"

#define CPT_INTR_POLL_INTERVAL_MS	(50)

/* Default command queue length */
#define DEFAULT_CMD_QCHUNKS		2
#define DEFAULT_CMD_QCHUNK_SIZE		1023
#define DEFAULT_CMD_QLEN \
		(DEFAULT_CMD_QCHUNK_SIZE * DEFAULT_CMD_QCHUNKS)
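
/*
 * With the defaults above the software command queue provides
 * DEFAULT_CMD_QCHUNKS * DEFAULT_CMD_QCHUNK_SIZE = 2 * 1023 = 2046
 * instruction slots. (The chunk size being one entry short of 1024 is
 * assumed to leave room for chunk-chaining metadata at the end of each
 * chunk; that rationale is not documented here.)
 */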

#define CPT_CSR_REG_BASE(cpt)		((cpt)->reg_base)

/* Read hw register */
#define CPT_READ_CSR(__hw_addr, __offset) \
	rte_read64_relaxed((uint8_t *)__hw_addr + __offset)

/* Write hw register */
#define CPT_WRITE_CSR(__hw_addr, __offset, __val) \
	rte_write64_relaxed((__val), ((uint8_t *)__hw_addr + __offset))
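
/*
 * Typical usage (see otx_cpt_write_vq_doorbell() below) combines the VF's
 * register base with one of the register offset macros defined further down:
 *
 *	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_DOORBELL(0, 0), val);
 *
 * The relaxed accessors do not order the access against earlier stores;
 * callers add the required barrier themselves (see otx_cpt_ring_dbell()).
 */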

/* CPT instance */
struct cpt_instance {
	uint32_t queue_id;
	uintptr_t rsvd;
	struct rte_mempool *sess_mp;
	struct rte_mempool *sess_mp_priv;
	struct cpt_qp_meta_info meta_info;
};

struct command_chunk {
	/** 128-byte aligned virtual address of the chunk */
	uint8_t *head;
	/** 128-byte aligned DMA address of the chunk */
	phys_addr_t dma_addr;
};

/**
 * Command queue structure
 */
struct command_queue {
	/** Command queue host write index */
	uint32_t idx;
	/** Index of the command chunk currently in use */
	uint32_t cchunk;
	/** Command queue head; instructions are inserted here */
	uint8_t *qhead;
	/** Command chunk array backing the queue */
	struct command_chunk chead[DEFAULT_CMD_QCHUNKS];
};
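
/*
 * The queue is filled one CPT_INST_SIZE slot at a time: get_cpt_inst()
 * returns the slot at qhead + idx * CPT_INST_SIZE, and mark_cpt_inst()
 * advances idx, switching qhead to the next chunk in chead[] once
 * DEFAULT_CMD_QCHUNK_SIZE slots of the current chunk have been used
 * (see the inline helpers below).
 */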

/**
 * CPT VF device structure
 */
struct cpt_vf {
	/** CPT instance */
	struct cpt_instance instance;
	/** Register start address */
	uint8_t *reg_base;
	/** Command queue information */
	struct command_queue cqueue;
	/** Pending queue information */
	struct pending_queue pqueue;

	/** The fields below are accessed only in the control path */

	/** Environment-specific pdev representing the PCI device */
	void *pdev;
	/** Calculated queue size */
	uint32_t qsize;
	/** Device index (0...CPT_MAX_VQ_NUM) */
	uint8_t  vfid;
	/** VF type from cpt_vf_type_t (SE_TYPE(2) or AE_TYPE(1)) */
	uint8_t  vftype;
	/** VF group (0 - 8) */
	uint8_t  vfgrp;
	/** Operating node: bits (46:44) in the BAR0 address */
	uint8_t  node;

	/** VF-PF mailbox communication */

	/** Set when the PF acks a mailbox request */
	bool pf_acked;
	/** Set when the PF nacks a mailbox request */
	bool pf_nacked;

	/** Device name */
	char dev_name[32];
} __rte_cache_aligned;

/*
 * CPT register map for 81xx
 */

/* VF registers */
#define CPTX_VQX_CTL(a, b)		(0x0000100ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_SADDR(a, b)		(0x0000200ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_DONE_WAIT(a, b)	(0x0000400ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_INPROG(a, b)		(0x0000410ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_DONE(a, b)		(0x0000420ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ACK(a, b)		(0x0000440ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_INT_W1S(a, b)	(0x0000460ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_INT_W1C(a, b)	(0x0000468ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ENA_W1S(a, b)	(0x0000470ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ENA_W1C(a, b)	(0x0000478ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_INT(a, b)		(0x0000500ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_INT_W1S(a, b)	(0x0000508ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_ENA_W1S(a, b)	(0x0000510ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_ENA_W1C(a, b)	(0x0000518ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DOORBELL(a, b)		(0x0000600ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VFX_PF_MBOXX(a, b, c)	(0x0001000ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b) + \
					 8ll * ((c) & 0x1))
147 /* VF HAL functions */
148 
149 void
150 otx_cpt_poll_misc(struct cpt_vf *cptvf);
151 
152 int
153 otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name);
154 
155 int
156 otx_cpt_deinit_device(void *dev);
157 
158 int
159 otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group,
160 		     struct cpt_instance **instance, uint16_t qp_id);
161 
162 int
163 otx_cpt_put_resource(struct cpt_instance *instance);
164 
165 int
166 otx_cpt_start_device(void *cptvf);
167 
168 void
169 otx_cpt_stop_device(void *cptvf);
170 
/* Write to VQX_DOORBELL register */
static __rte_always_inline void
otx_cpt_write_vq_doorbell(struct cpt_vf *cptvf, uint32_t val)
{
	cptx_vqx_doorbell_t vqx_dbell;

	vqx_dbell.u = 0;
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_DOORBELL(0, 0), vqx_dbell.u);
}

static __rte_always_inline uint32_t
otx_cpt_read_vq_doorbell(struct cpt_vf *cptvf)
{
	cptx_vqx_doorbell_t vqx_dbell;

	vqx_dbell.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				   CPTX_VQX_DOORBELL(0, 0));
	return vqx_dbell.s.dbell_cnt;
}

static __rte_always_inline void
otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	/* Memory barrier to flush pending writes */
	rte_smp_wmb();
	otx_cpt_write_vq_doorbell(cptvf, count);
}

static __rte_always_inline void *
get_cpt_inst(struct command_queue *cqueue)
{
	CPT_LOG_DP_DEBUG("CPT queue idx %u\n", cqueue->idx);
	return &cqueue->qhead[cqueue->idx * CPT_INST_SIZE];
}

static __rte_always_inline void
fill_cpt_inst(struct cpt_instance *instance, void *req)
{
	struct command_queue *cqueue;
	cpt_inst_s_t *cpt_ist_p;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;
	cqueue = &cptvf->cqueue;
	cpt_ist_p = get_cpt_inst(cqueue);
	rte_prefetch_non_temporal(cpt_ist_p);

	/* EI0, EI1, EI2, EI3 are already prepared */
	/* HW W0 */
	cpt_ist_p->u[0] = 0;
	/* HW W1 */
	cpt_ist_p->s8x.res_addr = user_req->comp_baddr;
	/* HW W2 */
	cpt_ist_p->u[2] = 0;
	/* HW W3 */
	cpt_ist_p->s8x.wq_ptr = 0;

	/* MC EI0 */
	cpt_ist_p->s8x.ei0 = user_req->ist.ei0;
	/* MC EI1 */
	cpt_ist_p->s8x.ei1 = user_req->ist.ei1;
	/* MC EI2 */
	cpt_ist_p->s8x.ei2 = user_req->ist.ei2;
	/* MC EI3 */
	cpt_ist_p->s8x.ei3 = user_req->ist.ei3;
}

static __rte_always_inline void
mark_cpt_inst(struct cpt_instance *instance)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct command_queue *queue = &cptvf->cqueue;
	if (unlikely(++queue->idx >= DEFAULT_CMD_QCHUNK_SIZE)) {
		uint32_t cchunk = queue->cchunk;
		MOD_INC(cchunk, DEFAULT_CMD_QCHUNKS);
		queue->qhead = queue->chead[cchunk].head;
		queue->idx = 0;
		queue->cchunk = cchunk;
	}
}
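
/*
 * Illustrative sketch only (hypothetical name, not part of the driver API):
 * the helpers above are meant to be combined in the enqueue fast path
 * roughly as follows.
 */
static __rte_always_inline void
otx_cpt_enqueue_one_sketch(struct cpt_instance *instance,
			   struct cpt_request_info *req)
{
	/* Copy the prepared instruction words into the current queue slot */
	fill_cpt_inst(instance, req);
	/* Advance the write index, rolling over to the next chunk if needed */
	mark_cpt_inst(instance);
	/* Barrier plus doorbell: tell hardware one new instruction is ready */
	otx_cpt_ring_dbell(instance, 1);
}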

static __rte_always_inline uint8_t
check_nb_command_id(struct cpt_request_info *user_req,
		struct cpt_instance *instance)
{
	uint8_t ret = ERR_REQ_PENDING;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	volatile cpt_res_s_t *cptres;

	cptres = (volatile cpt_res_s_t *)user_req->completion_addr;

	if (unlikely(cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE)) {
		/*
		 * Wait for some time for this command to get completed
		 * before timing out
		 */
		if (rte_get_timer_cycles() < user_req->time_out)
			return ret;
		/*
		 * TODO: See if alternate caddr can be used to not loop
		 * longer than needed.
		 */
		if ((cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE) &&
		    (user_req->extra_time < TIME_IN_RESET_COUNT)) {
			user_req->extra_time++;
			return ret;
		}

		if (cptres->s8x.compcode != CPT_8X_COMP_E_NOTDONE)
			goto complete;

		ret = ERR_REQ_TIMEOUT;
		CPT_LOG_DP_ERR("Request %p timed out", user_req);
		otx_cpt_poll_misc(cptvf);
		goto exit;
	}

complete:
	if (likely(cptres->s8x.compcode == CPT_8X_COMP_E_GOOD)) {
		ret = 0; /* success */
		if (unlikely((uint8_t)*user_req->alternate_caddr)) {
			ret = (uint8_t)*user_req->alternate_caddr;
			CPT_LOG_DP_ERR("Request %p : failed with microcode"
				" error, MC completion code : 0x%x", user_req,
				ret);
		}
		CPT_LOG_DP_DEBUG("MC status %.8x\n",
			   *((volatile uint32_t *)user_req->alternate_caddr));
		CPT_LOG_DP_DEBUG("HW status %.8x\n",
			   *((volatile uint32_t *)user_req->completion_addr));
	} else if ((cptres->s8x.compcode == CPT_8X_COMP_E_SWERR) ||
		   (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT)) {
		ret = (uint8_t)*user_req->alternate_caddr;
		if (!ret)
			ret = ERR_BAD_ALT_CCODE;
		CPT_LOG_DP_DEBUG("Request %p : failed with %s : err code :%x",
			   user_req,
			   (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT) ?
			   "DMA Fault" : "Software error", ret);
	} else {
		CPT_LOG_DP_ERR("Request %p : unexpected completion code %d",
			   user_req, cptres->s8x.compcode);
		ret = (uint8_t)*user_req->alternate_caddr;
	}

exit:
	return ret;
}
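
/*
 * Illustrative sketch only (hypothetical name): poll a single request until
 * check_nb_command_id() stops reporting ERR_REQ_PENDING. Loop termination
 * relies on the timeout handling inside check_nb_command_id().
 */
static __rte_always_inline uint8_t
otx_cpt_wait_for_req_sketch(struct cpt_instance *instance,
			    struct cpt_request_info *user_req)
{
	uint8_t ret;

	do {
		ret = check_nb_command_id(user_req, instance);
	} while (ret == ERR_REQ_PENDING);

	/* 0 on success, otherwise a completion/timeout error code */
	return ret;
}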

#endif /* _OTX_CRYPTODEV_HW_ACCESS_H_ */