xref: /dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.h (revision cf8a8a8f4896c0885d3996716f73513c4317e545)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#ifndef _OTX_CRYPTODEV_HW_ACCESS_H_
#define _OTX_CRYPTODEV_HW_ACCESS_H_

#include <stdbool.h>

#include <rte_branch_prediction.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_memory.h>
#include <rte_prefetch.h>

#include "otx_cryptodev.h"

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"
#include "cpt_pmd_logs.h"

#define CPT_INTR_POLL_INTERVAL_MS	(50)

/* Default command queue length */
#define DEFAULT_CMD_QCHUNKS		2
#define DEFAULT_CMD_QCHUNK_SIZE		1023
#define DEFAULT_CMD_QLEN \
		(DEFAULT_CMD_QCHUNK_SIZE * DEFAULT_CMD_QCHUNKS)
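
/*
 * Worked out, the defaults above give a virtual queue of
 * DEFAULT_CMD_QCHUNKS * DEFAULT_CMD_QCHUNK_SIZE = 2 * 1023 = 2046
 * instructions in total (DEFAULT_CMD_QLEN).
 */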

#define CPT_CSR_REG_BASE(cpt)		((cpt)->reg_base)

/* Read hw register */
#define CPT_READ_CSR(__hw_addr, __offset) \
	rte_read64_relaxed((uint8_t *)__hw_addr + __offset)

/* Write hw register */
#define CPT_WRITE_CSR(__hw_addr, __offset, __val) \
	rte_write64_relaxed((__val), ((uint8_t *)__hw_addr + __offset))
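
/*
 * Illustrative CSR access (a sketch, not an additional API): the helpers
 * above read/write a 64-bit CSR at a byte offset from the VF register base,
 * e.g. using the register macros defined further below:
 *
 *	uint64_t v = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
 *				  CPTX_VQX_DONE(0, 0));
 *	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_DONE_ACK(0, 0), v);
 *
 * Accesses are relaxed (rte_read64_relaxed/rte_write64_relaxed), so callers
 * that need ordering must add their own barriers (see otx_cpt_ring_dbell()).
 */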

/* cpt instance */
struct cpt_instance {
	uint32_t queue_id;
	uintptr_t rsvd;
	struct rte_mempool *sess_mp;
	struct rte_mempool *sess_mp_priv;
	struct cpt_qp_meta_info meta_info;
	uint8_t ca_enabled;
};

struct command_chunk {
	/** 128-byte aligned real_vaddr */
	uint8_t *head;
	/** 128-byte aligned real_dma_addr */
	phys_addr_t dma_addr;
};

/**
 * Command queue structure
 */
struct command_queue {
	/** Command queue host write idx */
	uint32_t idx;
	/** Command queue chunk */
	uint32_t cchunk;
	/** Command queue head; instructions are inserted here */
	uint8_t *qhead;
	/** Command chunk list head */
	struct command_chunk chead[DEFAULT_CMD_QCHUNKS];
};
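
/*
 * Layout sketch: the command queue is backed by DEFAULT_CMD_QCHUNKS buffers
 * of DEFAULT_CMD_QCHUNK_SIZE instructions each.  qhead caches the current
 * chunk's base address and idx is the slot within that chunk, so the next
 * free slot is effectively
 *
 *	&queue->chead[queue->cchunk].head[queue->idx * CPT_INST_SIZE]
 *
 * as computed by get_cpt_inst() and advanced by mark_cpt_inst() below.
 */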

/**
 * CPT VF device structure
 */
struct cpt_vf {
	/** CPT instance */
	struct cpt_instance instance;
	/** Register start address */
	uint8_t *reg_base;
	/** Command queue information */
	struct command_queue cqueue;
	/** Pending queue information */
	struct pending_queue pqueue;

	/** Below fields are accessed only in control path */

	/** Env specific pdev representing the pci dev */
	void *pdev;
	/** Calculated queue size */
	uint32_t qsize;
	/** Device index (0...CPT_MAX_VQ_NUM) */
	uint8_t  vfid;
	/** VF type (cpt_vf_type_t): SE_TYPE(2) or AE_TYPE(1) */
	uint8_t  vftype;
	/** VF group (0 - 8) */
	uint8_t  vfgrp;
	/** Operating node: Bits (46:44) in BAR0 address */
	uint8_t  node;

	/** VF-PF mailbox communication */

	/** Flag if acked */
	bool pf_acked;
	/** Flag if not acked */
	bool pf_nacked;

	/** Device name */
	char dev_name[32];
} __rte_cache_aligned;

/*
 * CPT Registers map for 81xx
 */

/* VF registers */
#define CPTX_VQX_CTL(a, b)		(0x0000100ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_SADDR(a, b)		(0x0000200ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_DONE_WAIT(a, b)	(0x0000400ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_INPROG(a, b)		(0x0000410ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_DONE(a, b)		(0x0000420ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ACK(a, b)		(0x0000440ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_INT_W1S(a, b)	(0x0000460ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_INT_W1C(a, b)	(0x0000468ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ENA_W1S(a, b)	(0x0000470ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ENA_W1C(a, b)	(0x0000478ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_INT(a, b)		(0x0000500ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_INT_W1S(a, b)	(0x0000508ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_ENA_W1S(a, b)	(0x0000510ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_ENA_W1C(a, b)	(0x0000518ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DOORBELL(a, b)		(0x0000600ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VFX_PF_MBOXX(a, b, c)	(0x0001000ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b) + \
					 8ll * ((c) & 0x1))
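
/*
 * Offset sketch: the data path below always passes block 'a' = 0 and queue
 * 'b' = 0, so e.g. CPTX_VQX_DOORBELL(0, 0) resolves to byte offset 0x600
 * from the per-VF register mapping held in reg_base; a hypothetical second
 * queue (b = 1) would land at 0x600 + 0x100000.
 */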

/* VF HAL functions */

void
otx_cpt_poll_misc(struct cpt_vf *cptvf);

int
otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name);

int
otx_cpt_deinit_device(void *dev);

int
otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group,
		     struct cpt_instance **instance, uint16_t qp_id);

int
otx_cpt_put_resource(struct cpt_instance *instance);

int
otx_cpt_start_device(void *cptvf);

void
otx_cpt_stop_device(void *cptvf);

/* Write to VQX_DOORBELL register */
static __rte_always_inline void
otx_cpt_write_vq_doorbell(struct cpt_vf *cptvf, uint32_t val)
{
	cptx_vqx_doorbell_t vqx_dbell;

	vqx_dbell.u = 0;
	/* Doorbell count is in 8-byte words; each instruction is 8 words */
	vqx_dbell.s.dbell_cnt = val * 8;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_DOORBELL(0, 0), vqx_dbell.u);
}

static __rte_always_inline uint32_t
otx_cpt_read_vq_doorbell(struct cpt_vf *cptvf)
{
	cptx_vqx_doorbell_t vqx_dbell;

	vqx_dbell.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				   CPTX_VQX_DOORBELL(0, 0));
	return vqx_dbell.s.dbell_cnt;
}

static __rte_always_inline void
otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	/* Memory barrier to flush pending writes */
	rte_smp_wmb();
	otx_cpt_write_vq_doorbell(cptvf, count);
}

static __rte_always_inline void *
get_cpt_inst(struct command_queue *cqueue)
{
	CPT_LOG_DP_DEBUG("CPT queue idx %u\n", cqueue->idx);
	return &cqueue->qhead[cqueue->idx * CPT_INST_SIZE];
}

static __rte_always_inline void
fill_cpt_inst(struct cpt_instance *instance, void *req, uint64_t ucmd_w3)
{
	struct command_queue *cqueue;
	cpt_inst_s_t *cpt_ist_p;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;
	cqueue = &cptvf->cqueue;
	cpt_ist_p = get_cpt_inst(cqueue);
	rte_prefetch_non_temporal(cpt_ist_p);

	/* EI0, EI1, EI2, EI3 are already prepared */
	/* HW W0 */
	cpt_ist_p->u[0] = 0;
	/* HW W1 */
	cpt_ist_p->s8x.res_addr = user_req->comp_baddr;
	/* HW W2 */
	cpt_ist_p->u[2] = 0;
	/* HW W3 */
	cpt_ist_p->s8x.wq_ptr = 0;

	/* MC EI0 */
	cpt_ist_p->s8x.ei0 = user_req->ist.ei0;
	/* MC EI1 */
	cpt_ist_p->s8x.ei1 = user_req->ist.ei1;
	/* MC EI2 */
	cpt_ist_p->s8x.ei2 = user_req->ist.ei2;
	/* MC EI3 */
	cpt_ist_p->s8x.ei3 = ucmd_w3;
}

static __rte_always_inline void
mark_cpt_inst(struct cpt_instance *instance)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct command_queue *queue = &cptvf->cqueue;
	if (unlikely(++queue->idx >= DEFAULT_CMD_QCHUNK_SIZE)) {
		uint32_t cchunk = queue->cchunk;
		MOD_INC(cchunk, DEFAULT_CMD_QCHUNKS);
		queue->qhead = queue->chead[cchunk].head;
		queue->idx = 0;
		queue->cchunk = cchunk;
	}
}
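
/*
 * A typical enqueue sequence built from the helpers above could look like
 * the sketch below ('req' and 'ucmd_w3' are assumed to have been prepared
 * by the caller; pending-queue bookkeeping is omitted):
 *
 *	fill_cpt_inst(instance, req, ucmd_w3);
 *	mark_cpt_inst(instance);
 *	otx_cpt_ring_dbell(instance, 1);
 *
 * fill_cpt_inst() writes the 8-word instruction into the current slot,
 * mark_cpt_inst() advances idx (switching chunks on wrap), and
 * otx_cpt_ring_dbell() issues the write barrier plus doorbell update.
 */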

static __rte_always_inline uint8_t
check_nb_command_id(struct cpt_request_info *user_req,
		struct cpt_instance *instance)
{
	uint8_t ret = ERR_REQ_PENDING;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	volatile cpt_res_s_t *cptres;

	cptres = (volatile cpt_res_s_t *)user_req->completion_addr;

	if (unlikely(cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE)) {
		/*
		 * Wait for some time for this command to get completed
		 * before timing out
		 */
		if (rte_get_timer_cycles() < user_req->time_out)
			return ret;
		/*
		 * TODO: See if alternate caddr can be used to not loop
		 * longer than needed.
		 */
		if ((cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE) &&
		    (user_req->extra_time < TIME_IN_RESET_COUNT)) {
			user_req->extra_time++;
			return ret;
		}

		if (cptres->s8x.compcode != CPT_8X_COMP_E_NOTDONE)
			goto complete;

		ret = ERR_REQ_TIMEOUT;
		CPT_LOG_DP_ERR("Request %p timed out", user_req);
		otx_cpt_poll_misc(cptvf);
		goto exit;
	}

complete:
	if (likely(cptres->s8x.compcode == CPT_8X_COMP_E_GOOD)) {
		ret = 0; /* success */
		if (unlikely((uint8_t)*user_req->alternate_caddr)) {
			ret = (uint8_t)*user_req->alternate_caddr;
			CPT_LOG_DP_ERR("Request %p : failed with microcode"
				" error, MC completion code : 0x%x", user_req,
				ret);
		}
		CPT_LOG_DP_DEBUG("MC status %.8x\n",
			   *((volatile uint32_t *)user_req->alternate_caddr));
		CPT_LOG_DP_DEBUG("HW status %.8x\n",
			   *((volatile uint32_t *)user_req->completion_addr));
	} else if ((cptres->s8x.compcode == CPT_8X_COMP_E_SWERR) ||
		   (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT)) {
		ret = (uint8_t)*user_req->alternate_caddr;
		if (!ret)
			ret = ERR_BAD_ALT_CCODE;
		CPT_LOG_DP_DEBUG("Request %p : failed with %s : err code :%x",
			   user_req,
			   (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT) ?
			   "DMA Fault" : "Software error", ret);
	} else {
		CPT_LOG_DP_ERR("Request %p : unexpected completion code %d",
			   user_req, cptres->s8x.compcode);
		ret = (uint8_t)*user_req->alternate_caddr;
	}

exit:
	return ret;
}
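
/*
 * Completion-poll sketch (illustrative only): a dequeue path could use
 * check_nb_command_id() roughly as follows, with 'user_req' saved at
 * enqueue time:
 *
 *	uint8_t res = check_nb_command_id(user_req, instance);
 *
 * A return of ERR_REQ_PENDING means the request is still in flight and
 * should be polled again later, 0 means success, and any other value is
 * ERR_REQ_TIMEOUT or an HW/microcode completion error code.
 */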

#endif /* _OTX_CRYPTODEV_HW_ACCESS_H_ */