/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#include <assert.h>
#include <string.h>
#include <unistd.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_mbox.h"

#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_hw_types.h"

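/*
 * Per-lcore cache size of the metabuf mempool; the pool itself is sized
 * so that every lcore can hold a full cache (see
 * otx_cpt_metabuf_mempool_create()).
 */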
#define METABUF_POOL_CACHE_SIZE 512

/*
 * VF HAL functions
 * A VF accesses its own BAR0/4 registers by passing VF number as 0;
 * the OS/PCI layer maps them accordingly.
 */

static int
otx_cpt_vf_init(struct cpt_vf *cptvf)
{
	int ret = 0;

	/* Check ready with PF */
	/* Get chip ID / device ID from PF if ready */
	ret = otx_cpt_check_pf_ready(cptvf);
	if (ret) {
		CPT_LOG_ERR("%s: PF not responding to READY msg",
			    cptvf->dev_name);
		ret = -EBUSY;
		goto exit;
	}

	CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);

exit:
	return ret;
}

/*
 * Read the MISC interrupt status of the VF
 *
 * @param cptvf cptvf structure
 */
static uint64_t
otx_cpt_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_MISC_INT(0, 0));
}

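/*
 * The clear helpers below share one pattern: CPTX_VQX_MISC_INT bits are
 * write-1-to-clear (W1C), so each helper reads the register, sets only
 * the bit being acknowledged and writes the value back, leaving any
 * other pending interrupt bits untouched.
 */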
/*
 * Clear mailbox interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_mbox_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear instruction NCB read error interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_irde_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear NCB result write response error interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_nwrp_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear software error interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_swerr_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear hardware error interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_hwerr_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.hwerr = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear translation fault interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_fault_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.fault = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear doorbell overflow interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_dovf_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/* Write to the VQX_CTL register */
static void
otx_cpt_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	cptx_vqx_ctl_t vqx_ctl;

	vqx_ctl.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				 CPTX_VQX_CTL(0, 0));
	vqx_ctl.s.ena = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

/* Write to the VQX_INPROG register */
static void
otx_cpt_write_vq_inprog(struct cpt_vf *cptvf, uint8_t val)
{
	cptx_vqx_inprog_t vqx_inprg;

	vqx_inprg.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				   CPTX_VQX_INPROG(0, 0));
	vqx_inprg.s.inflight = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

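/*
 * The two VQX_DONE_WAIT fields below control completion-interrupt
 * coalescing: hardware signals DONE once NUM_WAIT results are pending
 * or once TIME_WAIT expires, whichever happens first.
 */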
/* Write to the NUM_WAIT field of the VQX_DONE_WAIT register */
static void
otx_cpt_write_vq_done_numwait(struct cpt_vf *cptvf, uint32_t val)
{
	cptx_vqx_done_wait_t vqx_dwait;

	vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				   CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.num_wait = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
}

/* Write to the TIME_WAIT field of the VQX_DONE_WAIT register */
static void
otx_cpt_write_vq_done_timewait(struct cpt_vf *cptvf, uint16_t val)
{
	cptx_vqx_done_wait_t vqx_dwait;

	vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				   CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.time_wait = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
}

/* Write to the VQX_SADDR register */
static void
otx_cpt_write_vq_saddr(struct cpt_vf *cptvf, uint64_t val)
{
	cptx_vqx_saddr_t vqx_saddr;

	vqx_saddr.u = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

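/*
 * Program the VQ CSRs: disable the queue, reset the doorbell and
 * in-flight counters, point SADDR at the first instruction chunk, set
 * the completion coalescing thresholds and re-enable the queue.
 */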
static void
otx_cpt_vfvq_init(struct cpt_vf *cptvf)
{
	uint64_t base_addr = 0;

	/* Disable the VQ */
	otx_cpt_write_vq_ctl(cptvf, 0);

	/* Reset the doorbell */
	otx_cpt_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	otx_cpt_write_vq_inprog(cptvf, 0);

	/* Write VQ SADDR */
	base_addr = (uint64_t)(cptvf->cqueue.chead[0].dma_addr);
	otx_cpt_write_vq_saddr(cptvf, base_addr);

	/* Configure timer hold / completion coalescing */
	otx_cpt_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	otx_cpt_write_vq_done_numwait(cptvf, CPT_COUNT_THOLD);

	/* Enable the VQ */
	otx_cpt_write_vq_ctl(cptvf, 1);
}

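/*
 * Bring up the VF's virtual queue. The VQ length and group binding are
 * negotiated with the PF over the mailbox, while the VQ CSRs themselves
 * are programmed locally in otx_cpt_vfvq_init().
 */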
static int
cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)
{
	int err;

	/* Convey VQ LEN to PF */
	err = otx_cpt_send_vq_size_msg(cptvf);
	if (err) {
		CPT_LOG_ERR("%s: PF not responding to QLEN msg",
			    cptvf->dev_name);
		err = -EBUSY;
		goto cleanup;
	}

	/* CPT VF device initialization */
	otx_cpt_vfvq_init(cptvf);

	/* Send msg to PF to assign current Q to required group */
	cptvf->vfgrp = group;
	err = otx_cpt_send_vf_grp_msg(cptvf, group);
	if (err) {
		CPT_LOG_ERR("%s: PF not responding to VF_GRP msg",
			    cptvf->dev_name);
		err = -EBUSY;
		goto cleanup;
	}

	CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
	return 0;

cleanup:
	return err;
}

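/*
 * Poll and acknowledge pending MISC interrupts of the VF. Besides the
 * interrupt path, this is called from otx_cpt_hw_init() and
 * otx_cpt_deinit_device() to drain pending mailbox messages.
 */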
void
otx_cpt_poll_misc(struct cpt_vf *cptvf)
{
	uint64_t intr;

	intr = otx_cpt_read_vf_misc_intr_status(cptvf);

	if (!intr)
		return;

	/* Check for MISC interrupt types */
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		CPT_LOG_DP_DEBUG("%s: Mailbox interrupt 0x%lx on CPT VF %d",
				 cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
		otx_cpt_handle_mbox_intr(cptvf);
		otx_cpt_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		otx_cpt_clear_irde_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Instruction NCB read error interrupt "
				 "0x%lx on CPT VF %d", cptvf->dev_name,
				 (unsigned long)intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		otx_cpt_clear_nwrp_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: NCB response write error interrupt 0x%lx"
				 " on CPT VF %d", cptvf->dev_name,
				 (unsigned long)intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SWERR_MASK)) {
		otx_cpt_clear_swerr_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Software error interrupt 0x%lx on CPT VF "
				 "%d", cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_HWERR_MASK)) {
		otx_cpt_clear_hwerr_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Hardware error interrupt 0x%lx on CPT VF "
				 "%d", cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_FAULT_MASK)) {
		otx_cpt_clear_fault_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Translation fault interrupt 0x%lx on CPT VF "
				 "%d", cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		otx_cpt_clear_dovf_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Doorbell overflow interrupt 0x%lx on CPT VF "
				 "%d", cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
	} else
		CPT_LOG_DP_ERR("%s: Unhandled interrupt 0x%lx on CPT VF %d",
			       cptvf->dev_name, (unsigned long)intr,
			       cptvf->vfid);
}

int
otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name)
{
	memset(cptvf, 0, sizeof(struct cpt_vf));

	/* BAR0 base address */
	cptvf->reg_base = reg_base;

	/* Save device name */
	strlcpy(cptvf->dev_name, name, sizeof(cptvf->dev_name));

	cptvf->pdev = pdev;

	/* Clear any pending mbox messages */
	otx_cpt_poll_misc(cptvf);

	if (otx_cpt_vf_init(cptvf)) {
		CPT_LOG_ERR("Failed to initialize CPT VF device");
		return -1;
	}

	/* Get device type */
	if (otx_cpt_get_dev_type(cptvf)) {
		CPT_LOG_ERR("Failed to get device type");
		return -1;
	}

	return 0;
}

int
otx_cpt_deinit_device(void *dev)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;

	/* Do misc work one last time */
	otx_cpt_poll_misc(cptvf);

	return 0;
}

static int
otx_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
			       struct cpt_instance *instance, uint8_t qp_id,
			       unsigned int nb_elements)
{
	char mempool_name[RTE_MEMPOOL_NAMESIZE];
	struct cpt_qp_meta_info *meta_info;
	struct rte_mempool *pool;
	int max_mlen = 0;
	int sg_mlen = 0;
	int lb_mlen = 0;
	int mb_pool_sz;
	int ret;

	/*
	 * Calculate the required metabuf length. A 'crypto_octeontx'
	 * device is either symmetric or asymmetric.
	 */

	if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {

		/* Get meta len for scatter gather mode */
		sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

		/* Extra 32B reserved for future considerations */
		sg_mlen += 4 * sizeof(uint64_t);

		/* Get meta len for linear buffer (direct) mode */
		lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

		/* Extra 32B reserved for future considerations */
		lb_mlen += 4 * sizeof(uint64_t);

		/* Check max requirement for meta buffer */
		max_mlen = RTE_MAX(lb_mlen, sg_mlen);
	} else {

		/* Asymmetric device */

		/* Get meta len for asymmetric operations */
		max_mlen = cpt_pmd_ops_helper_asym_get_mlen();
	}

	/* Allocate mempool */

	snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx_cpt_mb_%u:%u",
		 dev->data->dev_id, qp_id);

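	/*
	 * Size the pool so that, in the worst case, every lcore can hold
	 * a full local cache of METABUF_POOL_CACHE_SIZE objects.
	 */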
	mb_pool_sz = RTE_MAX(nb_elements,
			     (METABUF_POOL_CACHE_SIZE * rte_lcore_count()));

	pool = rte_mempool_create_empty(mempool_name, mb_pool_sz, max_mlen,
					METABUF_POOL_CACHE_SIZE, 0,
					rte_socket_id(), 0);

	if (pool == NULL) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		return -rte_errno;
	}

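	/*
	 * Use the platform's default mempool ops (the same backend that
	 * mbuf pools use) instead of the generic ring-based ops.
	 */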
	ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
					 NULL);
	if (ret) {
		CPT_LOG_ERR("Could not set mempool ops");
		goto mempool_free;
	}

	ret = rte_mempool_populate_default(pool);
	if (ret <= 0) {
		CPT_LOG_ERR("Could not populate metabuf pool");
		/* Report a failure even when zero objects were populated */
		if (ret == 0)
			ret = -ENOMEM;
		goto mempool_free;
	}

	meta_info = &instance->meta_info;

	meta_info->pool = pool;
	meta_info->lb_mlen = lb_mlen;
	meta_info->sg_mlen = sg_mlen;

	return 0;

mempool_free:
	rte_mempool_free(pool);
	return ret;
}

static void
otx_cpt_metabuf_mempool_destroy(struct cpt_instance *instance)
{
	struct cpt_qp_meta_info *meta_info = &instance->meta_info;

	rte_mempool_free(meta_info->pool);

	meta_info->pool = NULL;
	meta_info->lb_mlen = 0;
	meta_info->sg_mlen = 0;
}

int
otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group,
		     struct cpt_instance **instance, uint16_t qp_id)
{
	int ret = -ENOENT, len, qlen, i;
	int chunk_len, chunks, chunk_size;
	struct cpt_vf *cptvf = dev->data->dev_private;
	struct cpt_instance *cpt_instance;
	struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
	struct command_chunk *chunk = NULL;
	uint8_t *mem;
	const struct rte_memzone *rz;
	uint64_t dma_addr = 0, alloc_len, used_len;
	uint64_t *next_ptr;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);

	CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name);

	cpt_instance = &cptvf->instance;

	memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue));
	memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));

	/* Chunks are fixed-size buffers */

	qlen = DEFAULT_CMD_QLEN;
	chunks = DEFAULT_CMD_QCHUNKS;
	chunk_len = DEFAULT_CMD_QCHUNK_SIZE;
	/* Chunk size includes 8 bytes of next-chunk pointer */
	chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;

	/* For command chunk structures */
	len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);

	/* For pending queue */
	len += qlen * RTE_ALIGN(sizeof(cptvf->pqueue.rid_queue[0]), 8);

	/* So that instruction queues start page-size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += chunks * RTE_ALIGN(chunk_size, 128);

	/* Wastage after instruction queues */
	len = RTE_ALIGN(len, pg_sz);
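	/*
	 * Resulting memzone layout (a sketch of the carving done below):
	 *
	 *   rz->addr -> +--------------------------------------+
	 *               | pending queue entries                |
	 *               +--------------------------------------+
	 *               | padding up to the next page boundary |
	 *               +--------------------------------------+ <- page aligned
	 *               | instruction queue chunks, each       |
	 *               | RTE_ALIGN(chunk_size, 128) bytes     |
	 *               +--------------------------------------+
	 *               | padding up to the next page boundary |
	 *               +--------------------------------------+
	 */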

	rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node,
					 RTE_MEMZONE_SIZE_HINT_ONLY |
					 RTE_MEMZONE_256MB,
					 RTE_CACHE_LINE_SIZE);
	if (!rz) {
		ret = -rte_errno;
		goto exit;
	}

	mem = rz->addr;
	dma_addr = rz->iova;
	alloc_len = len;

	memset(mem, 0, len);

	cpt_instance->rsvd = (uintptr_t)rz;

	ret = otx_cpt_metabuf_mempool_create(dev, cpt_instance, qp_id, qlen);
	if (ret) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		goto memzone_free;
	}

	/* Pending queue setup */
	cptvf->pqueue.rid_queue = (void **)mem;

	mem += qlen * RTE_ALIGN(sizeof(cptvf->pqueue.rid_queue[0]), 8);
	len -= qlen * RTE_ALIGN(sizeof(cptvf->pqueue.rid_queue[0]), 8);
	dma_addr += qlen * RTE_ALIGN(sizeof(cptvf->pqueue.rid_queue[0]), 8);

	/* Skip alignment wastage up to the page boundary */
	used_len = alloc_len - len;
	mem += RTE_ALIGN(used_len, pg_sz) - used_len;
	len -= RTE_ALIGN(used_len, pg_sz) - used_len;
	dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len;

	/* Init instruction queues */
	chunk_head = &cptvf->cqueue.chead[0];

	chunk_prev = NULL;
	for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) {
		int csize;

		chunk = &cptvf->cqueue.chead[i];
		chunk->head = mem;
		chunk->dma_addr = dma_addr;

		csize = RTE_ALIGN(chunk_size, 128);
		mem += csize;
		dma_addr += csize;
		len -= csize;

		/* Link the previous chunk to this one */
		if (chunk_prev) {
			next_ptr = (uint64_t *)(chunk_prev->head +
						chunk_size - 8);
			*next_ptr = (uint64_t)chunk->dma_addr;
		}
		chunk_prev = chunk;
	}
	/* Close the ring: the last chunk points back to the first */
	next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8);
	*next_ptr = (uint64_t)chunk_head->dma_addr;

	assert(!len);

	/* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */
	cptvf->qsize = chunk_size / 8;
	cptvf->cqueue.qhead = chunk_head->head;
	cptvf->cqueue.idx = 0;
	cptvf->cqueue.cchunk = 0;

	if (cpt_vq_init(cptvf, group)) {
		CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
			    cptvf->dev_name);
		ret = -EBUSY;
		goto mempool_destroy;
	}

	*instance = cpt_instance;

	CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);

	return 0;

mempool_destroy:
	otx_cpt_metabuf_mempool_destroy(cpt_instance);
memzone_free:
	rte_memzone_free(rz);
exit:
	*instance = NULL;
	return ret;
}

int
otx_cpt_put_resource(struct cpt_instance *instance)
{
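	/* The cast below relies on cpt_instance being the first member
	 * of struct cpt_vf.
	 */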
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct rte_memzone *rz;

	if (!cptvf) {
		CPT_LOG_ERR("Invalid CPTVF handle");
		return -EINVAL;
	}

	CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name);

	otx_cpt_metabuf_mempool_destroy(instance);

	rz = (struct rte_memzone *)instance->rsvd;
	rte_memzone_free(rz);
	return 0;
}

int
otx_cpt_start_device(void *dev)
{
	int rc;
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;

	rc = otx_cpt_send_vf_up(cptvf);
	if (rc) {
		CPT_LOG_ERR("Failed to mark CPT VF device %s UP, rc = %d",
			    cptvf->dev_name, rc);
		return -EFAULT;
	}

	return 0;
}

void
otx_cpt_stop_device(void *dev)
{
	int rc;
	uint32_t pending, retries = 5;
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;

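	/*
	 * The doorbell count tracks instructions queued but not yet
	 * consumed by hardware; poll until it drains or give up after
	 * roughly five seconds.
	 */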
	/* Wait for pending entries to complete */
	pending = otx_cpt_read_vq_doorbell(cptvf);
	while (pending) {
		CPT_LOG_DP_DEBUG("%s: Waiting for pending %u cmds to complete",
				 cptvf->dev_name, pending);
		sleep(1);
		pending = otx_cpt_read_vq_doorbell(cptvf);
		retries--;
		if (!retries)
			break;
	}

	if (!retries && pending) {
		CPT_LOG_ERR("%s: Timeout waiting for commands (%u)",
			    cptvf->dev_name, pending);
		return;
	}

	rc = otx_cpt_send_vf_down(cptvf);
	if (rc) {
		CPT_LOG_ERR("Failed to bring down vf %s, rc %d",
			    cptvf->dev_name, rc);
		return;
	}
}

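/*
 * Expected call order from the PMD, inferred from the functions above
 * (a sketch, not a normative contract):
 *
 *   otx_cpt_hw_init()       - probe time
 *   otx_cpt_get_resource()  - queue pair setup
 *   otx_cpt_start_device()  - device start
 *   otx_cpt_poll_misc()     - periodically, to service MISC interrupts
 *   otx_cpt_stop_device()   - device stop
 *   otx_cpt_put_resource()  - queue pair release
 *   otx_cpt_deinit_device() - removal
 */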