/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2024 Advanced Micro Devices, Inc.
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

#include "ionic_crypto.h"

static int
iocpt_op_config(struct rte_cryptodev *cdev,
		struct rte_cryptodev_config *config __rte_unused)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_configure(dev);

	return 0;
}

static int
iocpt_op_start(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	return iocpt_start(dev);
}

static void
iocpt_op_stop(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_stop(dev);
}

static int
iocpt_op_close(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_deinit(dev);

	return 0;
}

static void
iocpt_op_info_get(struct rte_cryptodev *cdev, struct rte_cryptodev_info *info)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	if (info == NULL)
		return;

	info->max_nb_queue_pairs = dev->max_qps;
	info->feature_flags = dev->features;
	info->capabilities = iocpt_get_caps(info->feature_flags);
	/* Reserve one session for watchdog */
	info->sym.max_nb_sessions = dev->max_sessions - 1;
	info->driver_id = dev->driver_id;
	info->min_mbuf_headroom_req = 0;
	info->min_mbuf_tailroom_req = 0;
}

static void
iocpt_op_stats_get(struct rte_cryptodev *cdev,
		struct rte_cryptodev_stats *stats)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_get_stats(dev, stats);
}

static void
iocpt_op_stats_reset(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_reset_stats(dev);
}

static int
iocpt_op_queue_release(struct rte_cryptodev *cdev, uint16_t queue_id)
{
	struct iocpt_crypto_q *cptq = cdev->data->queue_pairs[queue_id];

	IOCPT_PRINT(DEBUG, "queue_id %u", queue_id);

	assert(!(cptq->flags & IOCPT_Q_F_INITED));

	iocpt_cryptoq_free(cptq);

	cdev->data->queue_pairs[queue_id] = NULL;

	return 0;
}

static int
iocpt_op_queue_setup(struct rte_cryptodev *cdev, uint16_t queue_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct iocpt_dev *dev = cdev->data->dev_private;
	int err;

	if (cdev->data->queue_pairs[queue_id] != NULL)
		iocpt_op_queue_release(cdev, queue_id);

	if (qp_conf->nb_descriptors < (1 << IOCPT_QSIZE_MIN_LG2) ||
	    qp_conf->nb_descriptors > (1 << IOCPT_QSIZE_MAX_LG2)) {
		IOCPT_PRINT(ERR, "invalid nb_descriptors %u, use range %u..%u",
			qp_conf->nb_descriptors,
			1 << IOCPT_QSIZE_MIN_LG2, 1 << IOCPT_QSIZE_MAX_LG2);
		return -ERANGE;
	}

	IOCPT_PRINT(DEBUG, "queue_id %u", queue_id);

	err = iocpt_cryptoq_alloc(dev, socket_id, queue_id,
			qp_conf->nb_descriptors);
	if (err != 0)
		return err;

	cdev->data->queue_pairs[queue_id] = dev->cryptoqs[queue_id];

	return 0;
}

static unsigned int
iocpt_op_get_session_size(struct rte_cryptodev *cdev __rte_unused)
{
	return iocpt_session_size();
}

static inline int
iocpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
{
	if (xform->next != NULL) {
		IOCPT_PRINT(ERR, "chaining not supported");
		return -ENOTSUP;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
		IOCPT_PRINT(ERR, "xform->type %d not supported", xform->type);
		return -ENOTSUP;
	}

	return 0;
}

static __rte_always_inline int
iocpt_fill_sess_aead(struct rte_crypto_sym_xform *xform,
		struct iocpt_session_priv *priv)
{
	struct rte_crypto_aead_xform *aead_form = &xform->aead;

	if (aead_form->algo != RTE_CRYPTO_AEAD_AES_GCM) {
		IOCPT_PRINT(ERR, "Unknown algo");
		return -EINVAL;
	}
	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		priv->op = IOCPT_DESC_OPCODE_GCM_AEAD_ENCRYPT;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
		priv->op = IOCPT_DESC_OPCODE_GCM_AEAD_DECRYPT;
	} else {
		IOCPT_PRINT(ERR, "Unknown cipher operation");
		return -1;
	}

	if (aead_form->key.length < IOCPT_SESS_KEY_LEN_MIN ||
	    aead_form->key.length > IOCPT_SESS_KEY_LEN_MAX_SYMM) {
		IOCPT_PRINT(ERR, "Invalid cipher keylen %u",
			aead_form->key.length);
		return -1;
	}
	priv->key_len = aead_form->key.length;
	memcpy(priv->key, aead_form->key.data, priv->key_len);

	priv->type = IOCPT_SESS_AEAD_AES_GCM;
	priv->iv_offset = aead_form->iv.offset;
	priv->iv_length = aead_form->iv.length;
	priv->digest_length = aead_form->digest_length;
	priv->aad_length = aead_form->aad_length;

	return 0;
}

static int
iocpt_session_cfg(struct iocpt_dev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_crypto_sym_xform *chain;
	struct iocpt_session_priv *priv = NULL;

	if (iocpt_is_algo_supported(xform) < 0)
		return -ENOTSUP;

	if (unlikely(sess == NULL)) {
		IOCPT_PRINT(ERR, "invalid session");
		return -EINVAL;
	}

	priv = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	priv->dev = dev;

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (iocpt_fill_sess_aead(chain, priv))
				return -EIO;
			break;
		default:
			IOCPT_PRINT(ERR, "invalid crypto xform type %d",
				chain->type);
			return -ENOTSUP;
		}
		chain = chain->next;
	}

	return iocpt_session_init(priv);
}

static int
iocpt_op_session_cfg(struct rte_cryptodev *cdev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	return iocpt_session_cfg(dev, xform, sess);
}

static void
iocpt_session_clear(struct rte_cryptodev_sym_session *sess)
{
	iocpt_session_deinit(CRYPTODEV_GET_SYM_SESS_PRIV(sess));
}

static void
iocpt_op_session_clear(struct rte_cryptodev *cdev __rte_unused,
		struct rte_cryptodev_sym_session *sess)
{
	iocpt_session_clear(sess);
}

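/* Write one scatter-gather element in the device's little-endian layout */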
static inline void
iocpt_fill_sge(struct iocpt_crypto_sg_elem *arr, uint8_t idx,
		uint64_t addr, uint16_t len)
{
	arr[idx].addr = rte_cpu_to_le_64(addr);
	arr[idx].len = rte_cpu_to_le_16(len);
}

static __rte_always_inline int
iocpt_enq_one_aead(struct iocpt_crypto_q *cptq,
		struct iocpt_session_priv *priv, struct rte_crypto_op *op)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_crypto_desc *desc, *desc_base = q->base;
	struct iocpt_crypto_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct iocpt_crypto_sg_elem *src, *dst;
	rte_iova_t aad_addr, digest_addr, iv_addr, seg_addr;
	uint32_t data_len, data_offset, seg_len;
	uint8_t nsge_src = 0, nsge_dst = 0, flags = 0;
	struct rte_mbuf *m;

	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];
	src = sg_desc->src_elems;
	dst = sg_desc->dst_elems;

	/* Fill the first SGE with the IV / Nonce */
	iv_addr = rte_crypto_op_ctophys_offset(op, priv->iv_offset);
	iocpt_fill_sge(src, nsge_src++, iv_addr, priv->iv_length);

	/* Fill the second SGE with the AAD, if applicable */
	if (priv->aad_length > 0) {
		aad_addr = sym_op->aead.aad.phys_addr;
		iocpt_fill_sge(src, nsge_src++, aad_addr, priv->aad_length);
		flags |= IOCPT_DESC_F_AAD_VALID;
	}

	m = sym_op->m_src;
	data_len = sym_op->aead.data.length;

	/* Fast-forward through mbuf chain to account for data offset */
	data_offset = sym_op->aead.data.offset;
	while (m != NULL && data_offset >= m->data_len) {
		data_offset -= m->data_len;
		m = m->next;
	}

	/* Fill the next SGEs with the payload segments */
	while (m != NULL && data_len > 0) {
		seg_addr = rte_mbuf_data_iova(m) + data_offset;
		seg_len = RTE_MIN(m->data_len - data_offset, data_len);
		data_offset = 0;
		data_len -= seg_len;

		/* Use -1 to save room for digest */
		if (nsge_src >= IOCPT_CRYPTO_MAX_SG_ELEMS - 1)
			return -ERANGE;

		iocpt_fill_sge(src, nsge_src++, seg_addr, seg_len);

		m = m->next;
	}

	/* AEAD AES-GCM: digest == authentication tag */
	digest_addr = sym_op->aead.digest.phys_addr;
	iocpt_fill_sge(src, nsge_src++, digest_addr, priv->digest_length);

	/* Process Out-Of-Place destination SGL */
	if (sym_op->m_dst != NULL) {
		/* Put the AAD here, too */
		if (priv->aad_length > 0)
			iocpt_fill_sge(dst, nsge_dst++,
				sym_op->aead.aad.phys_addr, priv->aad_length);

		m = sym_op->m_dst;
		data_len = sym_op->aead.data.length;

		/* Fast-forward through chain to account for data offset */
		data_offset = sym_op->aead.data.offset;
		while (m != NULL && data_offset >= m->data_len) {
			data_offset -= m->data_len;
			m = m->next;
		}

		/* Fill in the SGEs with the payload segments */
		while (m != NULL && data_len > 0) {
			seg_addr = rte_mbuf_data_iova(m) + data_offset;
			seg_len = RTE_MIN(m->data_len - data_offset, data_len);
			data_offset = 0;
			data_len -= seg_len;

			if (nsge_dst >= IOCPT_CRYPTO_MAX_SG_ELEMS)
				return -ERANGE;

			iocpt_fill_sge(dst, nsge_dst++, seg_addr, seg_len);

			m = m->next;
		}
	}

	desc->opcode = priv->op;
	desc->flags = flags;
	desc->num_src_dst_sgs = iocpt_encode_nsge_src_dst(nsge_src, nsge_dst);
	desc->session_tag = rte_cpu_to_le_32(priv->index);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	q->info[q->head_idx] = op;
	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

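/* Post as many ops as will fit; stop at the first op that cannot be posted */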
static uint16_t
iocpt_enqueue_sym(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct iocpt_crypto_q *cptq = qp;
	struct rte_crypto_op *op;
	struct iocpt_session_priv *priv;
	struct rte_cryptodev_stats *stats = &cptq->stats;
	uint16_t avail, count;
	int err;

	avail = iocpt_q_space_avail(&cptq->q);
	if (unlikely(nb_ops > avail))
		nb_ops = avail;

	count = 0;
	while (likely(count < nb_ops)) {
		op = ops[count];

		if (unlikely(op->sess_type != RTE_CRYPTO_OP_WITH_SESSION)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}

		priv = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
		if (unlikely(priv == NULL)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		err = iocpt_enq_one_aead(cptq, priv, op);
		if (unlikely(err != 0)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			stats->enqueue_err_count++;
			break;
		}

		count++;
	}

	if (likely(count > 0)) {
		iocpt_q_flush(&cptq->q);

		/* Restart timer if ops are being enqueued */
		cptq->last_wdog_cycles = rte_get_timer_cycles();

		stats->enqueued_count += count;
	}

	return count;
}

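/*
 * Post a self-contained encrypt op on the reserved watchdog session so the
 * device revisits the queue if normal traffic appears to have stalled.
 */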
static void
iocpt_enqueue_wdog(struct iocpt_crypto_q *cptq)
{
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_crypto_desc *desc, *desc_base = q->base;
	struct iocpt_crypto_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct iocpt_crypto_sg_elem *src;
	struct rte_crypto_op *wdog_op;
	rte_iova_t iv_addr, pld_addr, tag_addr;
	uint8_t nsge_src = 0;
	uint16_t avail;

	avail = iocpt_q_space_avail(&cptq->q);
	if (avail < 1)
		goto out_flush;

	wdog_op = rte_zmalloc_socket("iocpt", sizeof(*wdog_op),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (wdog_op == NULL)
		goto out_flush;

	wdog_op->type = IOCPT_Q_WDOG_OP_TYPE;
	wdog_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];
	src = sg_desc->src_elems;

	/* Fill the first SGE with the IV / Nonce */
	iv_addr = rte_mem_virt2iova(cptq->wdog_iv);
	iocpt_fill_sge(src, nsge_src++, iv_addr, IOCPT_Q_WDOG_IV_LEN);

	/* Fill the second SGE with the payload segment */
	pld_addr = rte_mem_virt2iova(cptq->wdog_pld);
	iocpt_fill_sge(src, nsge_src++, pld_addr, IOCPT_Q_WDOG_PLD_LEN);

	/* AEAD AES-GCM: digest == authentication tag */
	tag_addr = rte_mem_virt2iova(cptq->wdog_tag);
	iocpt_fill_sge(src, nsge_src++, tag_addr, IOCPT_Q_WDOG_TAG_LEN);

	desc->opcode = IOCPT_DESC_OPCODE_GCM_AEAD_ENCRYPT;
	desc->flags = 0;
	desc->num_src_dst_sgs = iocpt_encode_nsge_src_dst(nsge_src, 0);
	desc->session_tag = rte_cpu_to_le_32(IOCPT_Q_WDOG_SESS_IDX);

	q->info[q->head_idx] = wdog_op;
	q->head_idx = Q_NEXT_TO_POST(q, 1);

	IOCPT_PRINT(DEBUG, "Queue %u wdog enq %p ops %"PRIu64,
		q->index, wdog_op, cptq->stats.enqueued_count);
	cptq->enqueued_wdogs++;

out_flush:
	iocpt_q_flush(q);
}

static uint16_t
iocpt_dequeue_sym(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct iocpt_crypto_q *cptq = qp;
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_cq *cq = &cptq->cq;
	struct rte_crypto_op *op;
	struct iocpt_crypto_comp *cq_desc_base = cq->base;
	volatile struct iocpt_crypto_comp *cq_desc;
	struct rte_cryptodev_stats *stats = &cptq->stats;
	uint64_t then, now, hz, delta;
	uint16_t count = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	/* First walk the CQ to update any completed op's status
	 * NB: These can arrive out of order!
	 */
	while ((cq_desc->color & 0x1) == cq->done_color) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (unlikely(cq->tail_idx == 0))
			cq->done_color = !cq->done_color;

		op = q->info[rte_le_to_cpu_16(cq_desc->comp_index)];

		/* Process returned CQ descriptor status */
		if (unlikely(cq_desc->status)) {
			switch (cq_desc->status) {
			case IOCPT_COMP_SYMM_AUTH_VERIFY_ERROR:
				op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
				break;
			case IOCPT_COMP_INVAL_OPCODE_ERROR:
			case IOCPT_COMP_UNSUPP_OPCODE_ERROR:
			case IOCPT_COMP_SYMM_SRC_SG_ERROR:
			case IOCPT_COMP_SYMM_DST_SG_ERROR:
			case IOCPT_COMP_SYMM_SRC_DST_LEN_MISMATCH:
			case IOCPT_COMP_SYMM_KEY_IDX_ERROR:
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				break;
			default:
				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
				break;
			}
		} else
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Next walk the SQ to pop off completed ops in-order */
	while (count < nb_ops) {
		op = q->info[q->tail_idx];

		/* No more completions */
		if (op == NULL ||
		    op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;

		/* Handle watchdog operations */
		if (unlikely(op->type == IOCPT_Q_WDOG_OP_TYPE)) {
			IOCPT_PRINT(DEBUG, "Queue %u wdog deq %p st %d",
				q->index, op, op->status);
			q->info[q->tail_idx] = NULL;
			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
			cptq->dequeued_wdogs++;
			rte_free(op);
			continue;
		}

		if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
			stats->dequeue_err_count++;

		ops[count] = op;
		q->info[q->tail_idx] = NULL;

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		count++;
	}

	if (!count) {
		/*
		 * Ring the doorbell again if no work was dequeued and work
		 * is still pending after the deadline.
		 */
		if (q->head_idx != q->tail_idx) {
			then = cptq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			delta = (now - then) * 1000;

			if (delta >= hz * IONIC_Q_WDOG_MS) {
				iocpt_enqueue_wdog(cptq);
				cptq->last_wdog_cycles = now;
			}
		}
	} else
		/* Restart timer if the queue is making progress */
		cptq->last_wdog_cycles = rte_get_timer_cycles();

	stats->dequeued_count += count;

	return count;
}

static struct rte_cryptodev_ops iocpt_ops = {
	.dev_configure = iocpt_op_config,
	.dev_start = iocpt_op_start,
	.dev_stop = iocpt_op_stop,
	.dev_close = iocpt_op_close,
	.dev_infos_get = iocpt_op_info_get,

	.stats_get = iocpt_op_stats_get,
	.stats_reset = iocpt_op_stats_reset,
	.queue_pair_setup = iocpt_op_queue_setup,
	.queue_pair_release = iocpt_op_queue_release,

	.sym_session_get_size = iocpt_op_get_session_size,
	.sym_session_configure = iocpt_op_session_cfg,
	.sym_session_clear = iocpt_op_session_clear,
};

int
iocpt_assign_ops(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	cdev->dev_ops = &iocpt_ops;
	cdev->feature_flags = dev->features;

	if (dev->features & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		cdev->enqueue_burst = iocpt_enqueue_sym;
		cdev->dequeue_burst = iocpt_dequeue_sym;
	}

	return 0;
}