/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */

#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <bus_pci_driver.h>
#include <rte_memory.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_os.h>

#include "mlx5_crypto_utils.h"
#include "mlx5_crypto.h"

#define MLX5_CRYPTO_DRIVER_NAME crypto_mlx5
#define MLX5_CRYPTO_LOG_NAME pmd.crypto.mlx5
#define MLX5_CRYPTO_MAX_QPS 128
#define MLX5_CRYPTO_MAX_SEGS 56

#define MLX5_CRYPTO_FEATURE_FLAGS(wrapped_mode) \
	(RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_HW_ACCELERATED | \
	 RTE_CRYPTODEV_FF_IN_PLACE_SGL | RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | \
	 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | \
	 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | \
	 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | \
	 (wrapped_mode ? RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY : 0) | \
	 RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS)

TAILQ_HEAD(mlx5_crypto_privs, mlx5_crypto_priv) mlx5_crypto_priv_list =
				TAILQ_HEAD_INITIALIZER(mlx5_crypto_priv_list);
static pthread_mutex_t priv_list_lock;

int mlx5_crypto_logtype;

uint8_t mlx5_crypto_driver_id;

const struct rte_cryptodev_capabilities mlx5_crypto_caps[] = {
	{		/* AES XTS */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_XTS,
				.block_size = 16,
				.key_size = {
					.min = 32,
					.max = 64,
					.increment = 32
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.dataunit_set =
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES |
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES |
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES,
			}, }
		}, }
	},
};

static const char mlx5_crypto_drv_name[] = RTE_STR(MLX5_CRYPTO_DRIVER_NAME);

static const struct rte_driver mlx5_drv = {
	.name = mlx5_crypto_drv_name,
	.alias = mlx5_crypto_drv_name
};

static struct cryptodev_driver mlx5_cryptodev_driver;

struct mlx5_crypto_session {
	uint32_t bs_bpt_eo_es;
	/**< bsf_size, bsf_p_type, encryption_order and encryption standard,
	 * saved in big endian format.
	 */
	uint32_t bsp_res;
	/**< crypto_block_size_pointer and reserved 24 bits saved in big
	 * endian format.
	 */
	uint32_t iv_offset:16;
	/**< Starting point for Initialisation Vector. */
	struct mlx5_crypto_dek *dek; /**< Pointer to dek struct. */
	uint32_t dek_id; /**< DEK ID */
} __rte_packed;

static void
mlx5_crypto_dev_infos_get(struct rte_cryptodev *dev,
			  struct rte_cryptodev_info *dev_info)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = mlx5_crypto_driver_id;
		dev_info->feature_flags =
			MLX5_CRYPTO_FEATURE_FLAGS(priv->is_wrapped_mode);
		dev_info->capabilities = mlx5_crypto_caps;
		dev_info->max_nb_queue_pairs = MLX5_CRYPTO_MAX_QPS;
		dev_info->min_mbuf_headroom_req = 0;
		dev_info->min_mbuf_tailroom_req = 0;
		dev_info->sym.max_nb_sessions = 0;
		/*
		 * A value of 0 means the device does not limit the number of
		 * sessions that can be used.
		 */
	}
}

static int
mlx5_crypto_dev_configure(struct rte_cryptodev *dev,
			  struct rte_cryptodev_config *config)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;

	if (config == NULL) {
		DRV_LOG(ERR, "Invalid crypto dev configure parameters.");
		return -EINVAL;
	}
	if ((config->ff_disable & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) != 0) {
		DRV_LOG(ERR,
			"Disabled symmetric crypto feature is not supported.");
		return -ENOTSUP;
	}
	if (mlx5_crypto_dek_setup(priv) != 0) {
		DRV_LOG(ERR, "DEK hash list creation failed.");
		return -ENOMEM;
	}
	priv->dev_config = *config;
	DRV_LOG(DEBUG, "Device %u was configured.", dev->data->dev_id);
	return 0;
}

static void
mlx5_crypto_dev_stop(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);
}

static int
mlx5_crypto_dev_start(struct rte_cryptodev *dev)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;

	return mlx5_dev_mempool_subscribe(priv->cdev);
}

static int
mlx5_crypto_dev_close(struct rte_cryptodev *dev)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;

	mlx5_crypto_dek_unset(priv);
	DRV_LOG(DEBUG, "Device %u was closed.", dev->data->dev_id);
	return 0;
}

static unsigned int
mlx5_crypto_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct mlx5_crypto_session);
}

static int
mlx5_crypto_sym_session_configure(struct rte_cryptodev *dev,
				  struct rte_crypto_sym_xform *xform,
				  struct rte_cryptodev_sym_session *session)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_crypto_session *sess_private_data =
		CRYPTODEV_GET_SYM_SESS_PRIV(session);
	struct rte_crypto_cipher_xform *cipher;
	uint8_t encryption_order;

	if (unlikely(xform->next != NULL)) {
		DRV_LOG(ERR, "Xform next is not supported.");
		return -ENOTSUP;
	}
	if (unlikely((xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) ||
		     (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_XTS))) {
		DRV_LOG(ERR, "Only AES-XTS algorithm is supported.");
		return -ENOTSUP;
	}
	cipher = &xform->cipher;
	sess_private_data->dek = mlx5_crypto_dek_prepare(priv, cipher);
	if (sess_private_data->dek == NULL) {
		DRV_LOG(ERR, "Failed to prepare dek.");
		return -ENOMEM;
	}
	if (cipher->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		encryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_MEMORY;
	else
		encryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_WIRE;
	sess_private_data->bs_bpt_eo_es = rte_cpu_to_be_32
			(MLX5_BSF_SIZE_64B << MLX5_BSF_SIZE_OFFSET |
			 MLX5_BSF_P_TYPE_CRYPTO << MLX5_BSF_P_TYPE_OFFSET |
			 encryption_order << MLX5_ENCRYPTION_ORDER_OFFSET |
			 MLX5_ENCRYPTION_STANDARD_AES_XTS);
	switch (xform->cipher.dataunit_len) {
	case 0:
		sess_private_data->bsp_res = 0;
		break;
	case 512:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_512B <<
					     MLX5_BLOCK_SIZE_OFFSET);
		break;
	case 4096:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_4096B <<
					     MLX5_BLOCK_SIZE_OFFSET);
		break;
	case 1048576:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_1MB <<
					     MLX5_BLOCK_SIZE_OFFSET);
		break;
	default:
		DRV_LOG(ERR, "Cipher data unit length is not supported.");
		return -ENOTSUP;
	}
	sess_private_data->iv_offset = cipher->iv.offset;
	sess_private_data->dek_id =
			rte_cpu_to_be_32(sess_private_data->dek->obj->id &
					 0xffffff);
	DRV_LOG(DEBUG, "Session %p was configured.", sess_private_data);
	return 0;
}
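
/*
 * Illustrative application-side sketch (not part of the driver): assuming the
 * DPDK 22.11+ session API, an AES-XTS transform matching the capability table
 * and the checks above could be built roughly as follows. The key buffer, IV
 * offset, device id and session mempool are placeholders.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_XTS,
 *			.key = { .data = xts_key, .length = 32 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *			.dataunit_len = 512,
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform,
 *						      session_pool);
 */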

static void
mlx5_crypto_sym_session_clear(struct rte_cryptodev *dev,
			      struct rte_cryptodev_sym_session *sess)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_crypto_session *spriv = CRYPTODEV_GET_SYM_SESS_PRIV(sess);

	if (unlikely(spriv == NULL)) {
		DRV_LOG(ERR, "Failed to get session %p private data.", sess);
		return;
	}
	mlx5_crypto_dek_destroy(priv, spriv->dek);
	DRV_LOG(DEBUG, "Session %p was cleared.", spriv);
}

static void
mlx5_crypto_indirect_mkeys_release(struct mlx5_crypto_qp *qp, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; i++)
		if (qp->mkey[i])
			claim_zero(mlx5_devx_cmd_destroy(qp->mkey[i]));
}

static void
mlx5_crypto_qp_release(struct mlx5_crypto_qp *qp)
{
	if (qp == NULL)
		return;
	mlx5_devx_qp_destroy(&qp->qp_obj);
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	mlx5_devx_cq_destroy(&qp->cq_obj);
	rte_free(qp);
}

static int
mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

	mlx5_crypto_indirect_mkeys_release(qp, qp->entries_n);
	mlx5_crypto_qp_release(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}

static __rte_noinline uint32_t
mlx5_crypto_get_block_size(struct rte_crypto_op *op)
{
	uint32_t bl = op->sym->cipher.data.length;

	switch (bl) {
	case (1 << 20):
		return RTE_BE32(MLX5_BLOCK_SIZE_1MB << MLX5_BLOCK_SIZE_OFFSET);
	case (1 << 12):
		return RTE_BE32(MLX5_BLOCK_SIZE_4096B <<
				MLX5_BLOCK_SIZE_OFFSET);
	case (1 << 9):
		return RTE_BE32(MLX5_BLOCK_SIZE_512B << MLX5_BLOCK_SIZE_OFFSET);
	default:
		DRV_LOG(ERR, "Unknown block size: %u.", bl);
		return UINT32_MAX;
	}
}

static __rte_always_inline uint32_t
mlx5_crypto_klm_set(struct mlx5_crypto_qp *qp, struct rte_mbuf *mbuf,
		    struct mlx5_wqe_dseg *klm, uint32_t offset,
		    uint32_t *remain)
{
	uint32_t data_len = (rte_pktmbuf_data_len(mbuf) - offset);
	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);

	if (data_len > *remain)
		data_len = *remain;
	*remain -= data_len;
	klm->bcount = rte_cpu_to_be_32(data_len);
	klm->pbuf = rte_cpu_to_be_64(addr);
	klm->lkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);
	return klm->lkey;
}

static __rte_always_inline uint32_t
mlx5_crypto_klms_set(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op,
		     struct rte_mbuf *mbuf, struct mlx5_wqe_dseg *klm)
{
	uint32_t remain_len = op->sym->cipher.data.length;
	uint32_t nb_segs = mbuf->nb_segs;
	uint32_t klm_n = 1u;

	/* First mbuf needs to take the cipher offset. */
	if (unlikely(mlx5_crypto_klm_set(qp, mbuf, klm,
		     op->sym->cipher.data.offset, &remain_len) == UINT32_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return 0;
	}
	while (remain_len) {
		nb_segs--;
		mbuf = mbuf->next;
		if (unlikely(mbuf == NULL || nb_segs == 0)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
		if (unlikely(mlx5_crypto_klm_set(qp, mbuf, ++klm, 0,
						 &remain_len) == UINT32_MAX)) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return 0;
		}
		klm_n++;
	}
	return klm_n;
}

static __rte_always_inline int
mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,
			 struct mlx5_crypto_qp *qp,
			 struct rte_crypto_op *op,
			 struct mlx5_umr_wqe *umr)
{
	struct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
	struct mlx5_wqe_cseg *cseg = &umr->ctr;
	struct mlx5_wqe_mkey_cseg *mkc = &umr->mkc;
	struct mlx5_wqe_dseg *klms = &umr->kseg[0];
	struct mlx5_wqe_umr_bsf_seg *bsf = ((struct mlx5_wqe_umr_bsf_seg *)
				      RTE_PTR_ADD(umr, priv->umr_wqe_size)) - 1;
	uint32_t ds;
	bool ipl = op->sym->m_dst == NULL || op->sym->m_dst == op->sym->m_src;
	/* Set UMR WQE. */
	uint32_t klm_n = mlx5_crypto_klms_set(qp, op,
				   ipl ? op->sym->m_src : op->sym->m_dst, klms);

	if (unlikely(klm_n == 0))
		return 0;
	bsf->bs_bpt_eo_es = sess->bs_bpt_eo_es;
	if (unlikely(!sess->bsp_res)) {
		bsf->bsp_res = mlx5_crypto_get_block_size(op);
		if (unlikely(bsf->bsp_res == UINT32_MAX)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	} else {
		bsf->bsp_res = sess->bsp_res;
	}
	bsf->raw_data_size = rte_cpu_to_be_32(op->sym->cipher.data.length);
	memcpy(bsf->xts_initial_tweak,
	       rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv_offset), 16);
	bsf->res_dp = sess->dek_id;
	mkc->len = rte_cpu_to_be_64(op->sym->cipher.data.length);
	cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) | MLX5_OPCODE_UMR);
	qp->db_pi += priv->umr_wqe_stride;
	/* Set RDMA_WRITE WQE. */
	cseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);
	klms = RTE_PTR_ADD(cseg, sizeof(struct mlx5_rdma_write_wqe));
	if (!ipl) {
		klm_n = mlx5_crypto_klms_set(qp, op, op->sym->m_src, klms);
		if (unlikely(klm_n == 0))
			return 0;
	} else {
		memcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);
	}
	ds = 2 + klm_n;
	cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
	cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
							MLX5_OPCODE_RDMA_WRITE);
	ds = RTE_ALIGN(ds, 4);
	qp->db_pi += ds >> 2;
	/* Set NOP WQE if needed. */
	if (priv->max_rdmar_ds > ds) {
		cseg += ds;
		ds = priv->max_rdmar_ds - ds;
		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
		cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
							       MLX5_OPCODE_NOP);
		qp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. */
	}
	qp->wqe = (uint8_t *)cseg;
	return 1;
}

static uint16_t
mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
			  uint16_t nb_ops)
{
	struct mlx5_crypto_qp *qp = queue_pair;
	struct mlx5_crypto_priv *priv = qp->priv;
	struct mlx5_umr_wqe *umr;
	struct rte_crypto_op *op;
	uint16_t mask = qp->entries_n - 1;
	uint16_t remain = qp->entries_n - (qp->pi - qp->ci);
	uint32_t idx;

	if (remain < nb_ops)
		nb_ops = remain;
	else
		remain = nb_ops;
	if (unlikely(remain == 0))
		return 0;
	do {
		idx = qp->pi & mask;
		op = *ops++;
		umr = RTE_PTR_ADD(qp->qp_obj.umem_buf,
			priv->wqe_set_size * idx);
		if (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) {
			qp->stats.enqueue_err_count++;
			if (remain != nb_ops) {
				qp->stats.enqueued_count -= remain;
				break;
			}
			return 0;
		}
		qp->ops[idx] = op;
		qp->pi++;
	} while (--remain);
	qp->stats.enqueued_count += nb_ops;
	mlx5_doorbell_ring(&priv->uar.bf_db, *(volatile uint64_t *)qp->wqe,
			   qp->db_pi, &qp->qp_obj.db_rec[MLX5_SND_DBR],
			   !priv->uar.dbnc);
	return nb_ops;
}

static __rte_noinline void
mlx5_crypto_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)
{
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							&qp->cq_obj.cqes[idx];

	op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	qp->stats.dequeue_err_count++;
	DRV_LOG(ERR, "CQE ERR:%x.", rte_be_to_cpu_32(cqe->syndrome));
}

static uint16_t
mlx5_crypto_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
			  uint16_t nb_ops)
{
	struct mlx5_crypto_qp *qp = queue_pair;
	volatile struct mlx5_cqe *restrict cqe;
	struct rte_crypto_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		op = qp->ops[idx];
		cqe = &qp->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (unlikely(ret != MLX5_CQE_STATUS_HW_OWN))
				mlx5_crypto_cqe_err_handle(qp, op);
			break;
		}
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		rte_io_wmb();
		qp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}
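
/*
 * Illustrative data-path sketch (not part of the driver): the two burst
 * handlers above are reached through the generic cryptodev API. A typical
 * application loop, with dev_id, qp_id and the op arrays as placeholders,
 * looks roughly like:
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	uint16_t done = 0;
 *
 *	while (done < n)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    deq_ops + done, n - done);
 */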

static void
mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
{
	uint32_t i;

	for (i = 0; i < qp->entries_n; i++) {
		struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->qp_obj.umem_buf,
			i * priv->wqe_set_size);
		struct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)
								     (cseg + 1);
		struct mlx5_wqe_umr_bsf_seg *bsf =
			(struct mlx5_wqe_umr_bsf_seg *)(RTE_PTR_ADD(cseg,
						       priv->umr_wqe_size)) - 1;
		struct mlx5_wqe_rseg *rseg;

		/* Init UMR WQE. */
		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) |
					 (priv->umr_wqe_size / MLX5_WSEG_SIZE));
		cseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
				       MLX5_COMP_MODE_OFFSET);
		cseg->misc = rte_cpu_to_be_32(qp->mkey[i]->id);
		ucseg->if_cf_toe_cq_res = RTE_BE32(1u << MLX5_UMRC_IF_OFFSET);
		ucseg->mkey_mask = RTE_BE64(1u << 0); /* Mkey length bit. */
		ucseg->ko_to_bs = rte_cpu_to_be_32
			((MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size) <<
			 MLX5_UMRC_KO_OFFSET) | (4 << MLX5_UMRC_TO_BS_OFFSET));
		bsf->keytag = priv->keytag;
		/* Init RDMA WRITE WQE. */
		cseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);
		cseg->flags = RTE_BE32((MLX5_COMP_ALWAYS <<
				      MLX5_COMP_MODE_OFFSET) |
				      MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE);
		rseg = (struct mlx5_wqe_rseg *)(cseg + 1);
		rseg->rkey = rte_cpu_to_be_32(qp->mkey[i]->id);
	}
}

static int
mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,
				  struct mlx5_crypto_qp *qp)
{
	struct mlx5_umr_wqe *umr;
	uint32_t i;
	struct mlx5_devx_mkey_attr attr = {
		.pd = priv->cdev->pdn,
		.umr_en = 1,
		.crypto_en = 1,
		.set_remote_rw = 1,
		.klm_num = MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size),
	};

	for (umr = (struct mlx5_umr_wqe *)qp->qp_obj.umem_buf, i = 0;
	   i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {
		attr.klm_array = (struct mlx5_klm *)&umr->kseg[0];
		qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->cdev->ctx, &attr);
		if (!qp->mkey[i])
			goto error;
	}
	return 0;
error:
	DRV_LOG(ERR, "Failed to allocate indirect mkey.");
	mlx5_crypto_indirect_mkeys_release(qp, i);
	return -1;
}

static int
mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			     const struct rte_cryptodev_qp_conf *qp_conf,
			     int socket_id)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_devx_qp_attr attr = {0};
	struct mlx5_crypto_qp *qp;
	uint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);
	uint32_t ret;
	uint32_t alloc_size = sizeof(*qp);
	uint32_t log_wqbb_n;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
	};

	if (dev->data->queue_pairs[qp_id] != NULL)
		mlx5_crypto_queue_pair_release(dev, qp_id);
	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += (sizeof(struct rte_crypto_op *) +
		       sizeof(struct mlx5_devx_obj *)) *
		       RTE_BIT32(log_nb_desc);
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate QP memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq_obj, log_nb_desc,
				&cq_attr, socket_id) != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	log_wqbb_n = rte_log2_u32(RTE_BIT32(log_nb_desc) *
				(priv->wqe_set_size / MLX5_SEND_WQE_BB));
	attr.pd = priv->cdev->pdn;
	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);
	attr.cqn = qp->cq_obj.cq->id;
	attr.num_of_receive_wqes = 0;
	attr.num_of_send_wqbbs = RTE_BIT32(log_wqbb_n);
	attr.ts_format =
		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp_obj,
					attr.num_of_send_wqbbs * MLX5_WQE_SIZE,
					&attr, socket_id);
	if (ret) {
		DRV_LOG(ERR, "Failed to create QP.");
		goto error;
	}
	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
			      priv->dev_config.socket_id) != 0) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/*
	 * In order to configure self-loopback, the remote QP ID passed to
	 * devx qp2rts is the ID of this same QP.
	 */
	if (mlx5_devx_qp2rts(&qp->qp_obj, qp->qp_obj.qp->id))
		goto error;
	qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),
							   RTE_CACHE_LINE_SIZE);
	qp->ops = (struct rte_crypto_op **)(qp->mkey + RTE_BIT32(log_nb_desc));
	qp->entries_n = 1 << log_nb_desc;
	if (mlx5_crypto_indirect_mkeys_prepare(priv, qp)) {
		DRV_LOG(ERR, "Cannot allocate indirect memory regions.");
		rte_errno = ENOMEM;
		goto error;
	}
	mlx5_crypto_qp_init(priv, qp);
	qp->priv = priv;
	dev->data->queue_pairs[qp_id] = qp;
	return 0;
error:
	mlx5_crypto_qp_release(qp);
	return -1;
}
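
/*
 * Illustrative control-path sketch (not part of the driver): an application
 * typically reaches the ops above in the following order. dev_id, nb_descs
 * and the session mempool are placeholders.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = nb_descs,
 *		.mp_session = session_pool,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *	rte_cryptodev_start(dev_id);
 */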

static void
mlx5_crypto_stats_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
mlx5_crypto_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

static struct rte_cryptodev_ops mlx5_crypto_ops = {
	.dev_configure			= mlx5_crypto_dev_configure,
	.dev_start			= mlx5_crypto_dev_start,
	.dev_stop			= mlx5_crypto_dev_stop,
	.dev_close			= mlx5_crypto_dev_close,
	.dev_infos_get			= mlx5_crypto_dev_infos_get,
	.stats_get			= mlx5_crypto_stats_get,
	.stats_reset			= mlx5_crypto_stats_reset,
	.queue_pair_setup		= mlx5_crypto_queue_pair_setup,
	.queue_pair_release		= mlx5_crypto_queue_pair_release,
	.sym_session_get_size		= mlx5_crypto_sym_session_get_size,
	.sym_session_configure		= mlx5_crypto_sym_session_configure,
	.sym_session_clear		= mlx5_crypto_sym_session_clear,
	.sym_get_raw_dp_ctx_size	= NULL,
	.sym_configure_raw_dp_ctx	= NULL,
};

static int
mlx5_crypto_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_crypto_devarg_params *devarg_prms = opaque;
	struct mlx5_devx_crypto_login_attr *attr = &devarg_prms->login_attr;
	unsigned long tmp;
	FILE *file;
	int ret;
	int i;

	if (strcmp(key, "wcs_file") == 0) {
		file = fopen(val, "rb");
		if (file == NULL) {
			rte_errno = ENOTSUP;
			return -rte_errno;
		}
		for (i = 0; i < MLX5_CRYPTO_CREDENTIAL_SIZE; i++) {
			ret = fscanf(file, "%02hhX", &attr->credential[i]);
			if (ret <= 0) {
				fclose(file);
				DRV_LOG(ERR,
					"Failed to read credential from file.");
				rte_errno = EINVAL;
				return -rte_errno;
			}
		}
		fclose(file);
		devarg_prms->login_devarg = true;
		return 0;
	}
	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -errno;
	}
	if (strcmp(key, "max_segs_num") == 0) {
		if (!tmp) {
			DRV_LOG(ERR, "max_segs_num must be greater than 0.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		devarg_prms->max_segs_num = (uint32_t)tmp;
	} else if (strcmp(key, "import_kek_id") == 0) {
		attr->session_import_kek_ptr = (uint32_t)tmp;
	} else if (strcmp(key, "credential_id") == 0) {
		attr->credential_pointer = (uint32_t)tmp;
	} else if (strcmp(key, "keytag") == 0) {
		devarg_prms->keytag = tmp;
	}
	return 0;
}

static int
mlx5_crypto_parse_devargs(struct mlx5_kvargs_ctrl *mkvlist,
			  struct mlx5_crypto_devarg_params *devarg_prms,
			  bool wrapped_mode)
{
	struct mlx5_devx_crypto_login_attr *attr = &devarg_prms->login_attr;
	const char **params = (const char *[]){
		"credential_id",
		"import_kek_id",
		"keytag",
		"max_segs_num",
		"wcs_file",
		NULL,
	};

	/* Default values. */
	attr->credential_pointer = 0;
	attr->session_import_kek_ptr = 0;
	devarg_prms->keytag = 0;
	devarg_prms->max_segs_num = 8;
	if (mkvlist == NULL) {
		if (!wrapped_mode)
			return 0;
		DRV_LOG(ERR,
			"No login devargs provided to enable crypto operations on the device.");
		rte_errno = EINVAL;
		return -1;
	}
	if (mlx5_kvargs_process(mkvlist, params, mlx5_crypto_args_check_handler,
				devarg_prms) != 0) {
		DRV_LOG(ERR, "Devargs handler function failed.");
		rte_errno = EINVAL;
		return -1;
	}
	if (devarg_prms->login_devarg == false && wrapped_mode) {
		DRV_LOG(ERR,
			"No login credential devarg provided to enable crypto operations while the device uses the wrapped import method.");
		rte_errno = EINVAL;
		return -1;
	}
	return 0;
}
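
/*
 * Illustrative devargs example (not part of the driver): the keys handled
 * above are passed through the EAL device arguments together with the mlx5
 * class selector. The PCI address, file path and values below are
 * placeholders:
 *
 *	-a 0000:04:00.0,class=crypto,wcs_file=/path/to/credentials,\
 *	   import_kek_id=0x1,credential_id=0x2,keytag=0x112233445566,max_segs_num=16
 */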

/*
 * Calculate the UMR WQE size and the RDMA Write WQE size with the
 * following limitations:
 *	- Each WQE size is a multiple of 64.
 *	- The sum of both UMR WQE and RDMA_WRITE WQE sizes is a power of 2.
 *	- The number of entries in the UMR WQE's KLM list is a multiple of 4.
 */
static void
mlx5_crypto_get_wqe_sizes(uint32_t segs_num, uint32_t *umr_size,
			uint32_t *rdmaw_size)
{
	uint32_t diff, wqe_set_size;

	*umr_size = MLX5_CRYPTO_UMR_WQE_STATIC_SIZE +
			RTE_ALIGN(segs_num, 4) *
			sizeof(struct mlx5_wqe_dseg);
	/* Make sure UMR WQE size is multiple of WQBB. */
	*umr_size = RTE_ALIGN(*umr_size, MLX5_SEND_WQE_BB);
	*rdmaw_size = sizeof(struct mlx5_rdma_write_wqe) +
			sizeof(struct mlx5_wqe_dseg) *
			(segs_num <= 2 ? 2 : 2 +
			RTE_ALIGN(segs_num - 2, 4));
	/* Make sure RDMA_WRITE WQE size is multiple of WQBB. */
	*rdmaw_size = RTE_ALIGN(*rdmaw_size, MLX5_SEND_WQE_BB);
	wqe_set_size = *rdmaw_size + *umr_size;
	diff = rte_align32pow2(wqe_set_size) - wqe_set_size;
	/* Make sure wqe_set size is power of 2. */
	if (diff)
		*umr_size += diff;
}
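
/*
 * Worked example (illustrative; the byte counts below are assumed round
 * numbers, not the real structure sizes): if the computed sizes were
 * umr_size = 192 and rdmaw_size = 128, then wqe_set_size = 320 and the next
 * power of 2 is 512, so diff = 192 is added to umr_size. The final set is
 * 384 + 128 = 512 bytes, i.e. each UMR + RDMA_WRITE set occupies a
 * power-of-2 stride in the send queue.
 */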

static uint8_t
mlx5_crypto_max_segs_num(uint16_t max_wqe_size)
{
	int klms_sizes = max_wqe_size - MLX5_CRYPTO_UMR_WQE_STATIC_SIZE;
	uint32_t max_segs_cap = RTE_ALIGN_FLOOR(klms_sizes, MLX5_SEND_WQE_BB) /
			sizeof(struct mlx5_wqe_dseg);

	MLX5_ASSERT(klms_sizes >= MLX5_SEND_WQE_BB);
	while (max_segs_cap) {
		uint32_t umr_wqe_size, rdmw_wqe_size;

		mlx5_crypto_get_wqe_sizes(max_segs_cap, &umr_wqe_size,
						&rdmw_wqe_size);
		if (umr_wqe_size <= max_wqe_size &&
				rdmw_wqe_size <= max_wqe_size)
			break;
		max_segs_cap -= 4;
	}
	return max_segs_cap;
}

static int
mlx5_crypto_configure_wqe_size(struct mlx5_crypto_priv *priv,
				uint16_t max_wqe_size, uint32_t max_segs_num)
{
	uint32_t rdmw_wqe_size, umr_wqe_size;

	mlx5_crypto_get_wqe_sizes(max_segs_num, &umr_wqe_size,
					&rdmw_wqe_size);
	priv->wqe_set_size = rdmw_wqe_size + umr_wqe_size;
	if (umr_wqe_size > max_wqe_size ||
				rdmw_wqe_size > max_wqe_size) {
		DRV_LOG(ERR, "Invalid max_segs_num: %u, should be %u or lower.",
			max_segs_num,
			mlx5_crypto_max_segs_num(max_wqe_size));
		rte_errno = EINVAL;
		return -EINVAL;
	}
	priv->umr_wqe_size = (uint16_t)umr_wqe_size;
	priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;
	priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);
	return 0;
}

static int
mlx5_crypto_dev_probe(struct mlx5_common_device *cdev,
		      struct mlx5_kvargs_ctrl *mkvlist)
{
	struct rte_cryptodev *crypto_dev;
	struct mlx5_devx_obj *login;
	struct mlx5_crypto_priv *priv;
	struct mlx5_crypto_devarg_params devarg_prms = { 0 };
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.private_data_size = sizeof(struct mlx5_crypto_priv),
		.socket_id = cdev->dev->numa_node,
		.max_nb_queue_pairs =
				RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
	};
	const char *ibdev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
	int ret;
	bool wrapped_mode;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(ERR, "Non-primary process type is not supported.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	if (!cdev->config.hca_attr.crypto || !cdev->config.hca_attr.aes_xts) {
		DRV_LOG(ERR, "Not enough capabilities to support crypto "
			"operations, maybe old FW/OFED version?");
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	wrapped_mode = !!cdev->config.hca_attr.crypto_wrapped_import_method;
	ret = mlx5_crypto_parse_devargs(mkvlist, &devarg_prms, wrapped_mode);
	if (ret) {
		DRV_LOG(ERR, "Failed to parse devargs.");
		return -rte_errno;
	}
	crypto_dev = rte_cryptodev_pmd_create(ibdev_name, cdev->dev,
					      &init_params);
	if (crypto_dev == NULL) {
		DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name);
		return -ENODEV;
	}
	DRV_LOG(INFO,
		"Crypto device %s was created successfully.", ibdev_name);
	crypto_dev->dev_ops = &mlx5_crypto_ops;
	crypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst;
	crypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst;
	crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS(wrapped_mode);
	crypto_dev->driver_id = mlx5_crypto_driver_id;
	priv = crypto_dev->data->dev_private;
	priv->cdev = cdev;
	priv->crypto_dev = crypto_dev;
	priv->is_wrapped_mode = wrapped_mode;
	if (mlx5_devx_uar_prepare(cdev, &priv->uar) != 0) {
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
		return -1;
	}
	if (wrapped_mode) {
		login = mlx5_devx_cmd_create_crypto_login_obj(cdev->ctx,
						      &devarg_prms.login_attr);
		if (login == NULL) {
			DRV_LOG(ERR, "Failed to configure login.");
			mlx5_devx_uar_release(&priv->uar);
			rte_cryptodev_pmd_destroy(priv->crypto_dev);
			return -rte_errno;
		}
		priv->login_obj = login;
	}
	ret = mlx5_crypto_configure_wqe_size(priv,
		cdev->config.hca_attr.max_wqe_sz_sq, devarg_prms.max_segs_num);
	if (ret) {
		claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
		mlx5_devx_uar_release(&priv->uar);
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
		return -1;
	}
	priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
	DRV_LOG(INFO, "Max number of segments: %u.",
		(unsigned int)RTE_MIN(
			MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size),
			(uint16_t)(priv->max_rdmar_ds - 2)));
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);

	rte_cryptodev_pmd_probing_finish(crypto_dev);

	return 0;
}

static int
mlx5_crypto_dev_remove(struct mlx5_common_device *cdev)
{
	struct mlx5_crypto_priv *priv = NULL;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
		if (priv->crypto_dev->device == cdev->dev)
			break;
	if (priv)
		TAILQ_REMOVE(&mlx5_crypto_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (priv) {
		claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
		mlx5_devx_uar_release(&priv->uar);
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
	}
	return 0;
}

static const struct rte_pci_id mlx5_crypto_pci_id_map[] = {
		{
			RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
					PCI_DEVICE_ID_MELLANOX_CONNECTX6)
		},
		{
			RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
					PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
		},
		{
			RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
					PCI_DEVICE_ID_MELLANOX_BLUEFIELD2)
		},
		{
			RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
					PCI_DEVICE_ID_MELLANOX_CONNECTX7)
		},
		{
			RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
					PCI_DEVICE_ID_MELLANOX_BLUEFIELD3)
		},
		{
			.vendor_id = 0
		}
};

static struct mlx5_class_driver mlx5_crypto_driver = {
	.drv_class = MLX5_CLASS_CRYPTO,
	.name = RTE_STR(MLX5_CRYPTO_DRIVER_NAME),
	.id_table = mlx5_crypto_pci_id_map,
	.probe = mlx5_crypto_dev_probe,
	.remove = mlx5_crypto_dev_remove,
};

RTE_INIT(rte_mlx5_crypto_init)
{
	pthread_mutex_init(&priv_list_lock, NULL);
	mlx5_common_init();
	if (mlx5_glue != NULL)
		mlx5_class_driver_register(&mlx5_crypto_driver);
}

RTE_PMD_REGISTER_CRYPTO_DRIVER(mlx5_cryptodev_driver, mlx5_drv,
			       mlx5_crypto_driver_id);

RTE_LOG_REGISTER_DEFAULT(mlx5_crypto_logtype, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_CRYPTO_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_CRYPTO_DRIVER_NAME, mlx5_crypto_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_CRYPTO_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");