/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include <cryptodev_pmd.h>
#include <rte_crypto.h>

#include "nitrox_sym.h"
#include "nitrox_device.h"
#include "nitrox_sym_capabilities.h"
#include "nitrox_qp.h"
#include "nitrox_sym_reqmgr.h"
#include "nitrox_sym_ctx.h"
#include "nitrox_logs.h"

#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
#define MC_MAC_MISMATCH_ERR_CODE 0x4c
#define NPS_PKT_IN_INSTR_SIZE 64
#define IV_FROM_DPTR 1
#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
#define FLEXI_CRYPTO_MAX_AAD_LEN 512
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
#define MAX_IV_LEN 16

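/* Per-device private data linking the created cryptodev with the
 * underlying NITROX device state.
 */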
struct nitrox_sym_device {
	struct rte_cryptodev *cdev;
	struct nitrox_device *ndev;
};

/* Cipher opcodes */
enum flexi_cipher {
	CIPHER_NULL = 0,
	CIPHER_3DES_CBC,
	CIPHER_3DES_ECB,
	CIPHER_AES_CBC,
	CIPHER_AES_ECB,
	CIPHER_AES_CFB,
	CIPHER_AES_CTR,
	CIPHER_AES_GCM,
	CIPHER_AES_XTS,
	CIPHER_AES_CCM,
	CIPHER_AES_CBC_CTS,
	CIPHER_AES_ECB_CTS,
	CIPHER_INVALID
};

/* Auth opcodes */
enum flexi_auth {
	AUTH_NULL = 0,
	AUTH_MD5,
	AUTH_SHA1,
	AUTH_SHA2_SHA224,
	AUTH_SHA2_SHA256,
	AUTH_SHA2_SHA384,
	AUTH_SHA2_SHA512,
	AUTH_GMAC,
	AUTH_INVALID
};

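/* The driver id is assigned when the PMD registers itself via
 * RTE_PMD_REGISTER_CRYPTO_DRIVER() at the bottom of this file.
 */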
uint8_t nitrox_sym_drv_id;
static const char nitrox_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_NITROX_PMD);
static const struct rte_driver nitrox_rte_sym_drv = {
	.name = nitrox_sym_drv_name,
	.alias = nitrox_sym_drv_name
};

static int nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev,
				     uint16_t qp_id);

static int
nitrox_sym_dev_config(struct rte_cryptodev *cdev,
		      struct rte_cryptodev_config *config)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (config->nb_queue_pairs > ndev->nr_queues) {
		NITROX_LOG_LINE(ERR, "Invalid queue pairs, max supported %d",
			   ndev->nr_queues);
		return -EINVAL;
	}

	return 0;
}

static int
nitrox_sym_dev_start(struct rte_cryptodev *cdev)
{
	/* SE cores initialization is done in PF */
	RTE_SET_USED(cdev);
	return 0;
}

static void
nitrox_sym_dev_stop(struct rte_cryptodev *cdev)
{
	/* SE cores cleanup is done in PF */
	RTE_SET_USED(cdev);
}

static int
nitrox_sym_dev_close(struct rte_cryptodev *cdev)
{
	int i, ret;

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = nitrox_sym_dev_qp_release(cdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static void
nitrox_sym_dev_info_get(struct rte_cryptodev *cdev,
			struct rte_cryptodev_info *info)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (!info)
		return;

	info->max_nb_queue_pairs = ndev->nr_queues;
	info->feature_flags = cdev->feature_flags;
	info->capabilities = nitrox_get_sym_capabilities();
	info->driver_id = nitrox_sym_drv_id;
	info->sym.max_nb_sessions = 0;
}

static void
nitrox_sym_dev_stats_get(struct rte_cryptodev *cdev,
			 struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
nitrox_sym_dev_stats_reset(struct rte_cryptodev *cdev)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

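/* Queue pair setup: validate the queue id, release any previous queue
 * pair at this id, then allocate the qp on the requested socket, program
 * the command ring (64-byte NPS packet-in instructions) and create the
 * software request pool used by the enqueue path.
 */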
static int
nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
			const struct rte_cryptodev_qp_conf *qp_conf,
			int socket_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp = NULL;
	int err;

	NITROX_LOG_LINE(DEBUG, "queue %d", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG_LINE(ERR, "queue %u invalid, max queues supported %d",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	if (cdev->data->queue_pairs[qp_id]) {
		err = nitrox_sym_dev_qp_release(cdev, qp_id);
		if (err)
			return err;
	}

	qp = rte_zmalloc_socket("nitrox PMD qp", sizeof(*qp),
				RTE_CACHE_LINE_SIZE,
				socket_id);
	if (!qp) {
		NITROX_LOG_LINE(ERR, "Failed to allocate nitrox qp");
		return -ENOMEM;
	}

	qp->type = NITROX_QUEUE_SE;
	qp->qno = qp_id;
	err = nitrox_qp_setup(qp, ndev->bar_addr, cdev->data->name,
			      qp_conf->nb_descriptors, NPS_PKT_IN_INSTR_SIZE,
			      socket_id);
	if (unlikely(err))
		goto qp_setup_err;

	qp->sr_mp = nitrox_sym_req_pool_create(cdev, qp->count, qp_id,
					       socket_id);
	if (unlikely(!qp->sr_mp)) {
		/* Without this, err would still hold 0 from the successful
		 * nitrox_qp_setup() call and the failure would be reported
		 * as success.
		 */
		err = -ENOMEM;
		goto req_pool_err;
	}

	cdev->data->queue_pairs[qp_id] = qp;
	NITROX_LOG_LINE(DEBUG, "queue %d setup done", qp_id);
	return 0;

req_pool_err:
	nitrox_qp_release(qp, ndev->bar_addr);
qp_setup_err:
	rte_free(qp);
	return err;
}

static int
nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp;
	int err;

	NITROX_LOG_LINE(DEBUG, "queue %d", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG_LINE(ERR, "queue %u invalid, max queues supported %d",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	qp = cdev->data->queue_pairs[qp_id];
	if (!qp) {
		NITROX_LOG_LINE(DEBUG, "queue %u already freed", qp_id);
		return 0;
	}

	if (!nitrox_qp_is_empty(qp)) {
		NITROX_LOG_LINE(ERR, "queue %d not empty", qp_id);
		return -EAGAIN;
	}

	cdev->data->queue_pairs[qp_id] = NULL;
	err = nitrox_qp_release(qp, ndev->bar_addr);
	nitrox_sym_req_pool_free(qp->sr_mp);
	rte_free(qp);
	NITROX_LOG_LINE(DEBUG, "queue %d release done", qp_id);
	return err;
}

static unsigned int
nitrox_sym_dev_sess_get_size(__rte_unused struct rte_cryptodev *cdev)
{
	return sizeof(struct nitrox_crypto_ctx);
}

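/* Only four transform chains are accepted: cipher only, cipher followed
 * by auth (encrypt + generate), auth followed by cipher (verify +
 * decrypt) and AEAD. Everything else fails session configuration.
 */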
static enum nitrox_chain
get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
{
	enum nitrox_chain res = NITROX_CHAIN_NOT_SUPPORTED;

	if (unlikely(xform == NULL))
		return res;

	switch (xform->type) {
	case RTE_CRYPTO_SYM_XFORM_AUTH:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_NOT_SUPPORTED;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
			    xform->next->cipher.op ==
			    RTE_CRYPTO_CIPHER_OP_DECRYPT) {
				res = NITROX_CHAIN_AUTH_CIPHER;
			} else {
				NITROX_LOG_LINE(ERR, "auth op %d, cipher op %d",
				    xform->auth.op, xform->next->cipher.op);
			}
		}
		break;
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_CIPHER_ONLY;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
			    xform->next->auth.op ==
			    RTE_CRYPTO_AUTH_OP_GENERATE) {
				res = NITROX_CHAIN_CIPHER_AUTH;
			} else {
				NITROX_LOG_LINE(ERR, "cipher op %d, auth op %d",
				    xform->cipher.op, xform->next->auth.op);
			}
		}
		break;
	case RTE_CRYPTO_SYM_XFORM_AEAD:
		res = NITROX_CHAIN_COMBINED;
		break;
	default:
		break;
	}

	return res;
}

static enum flexi_cipher
get_flexi_cipher_type(enum rte_crypto_cipher_algorithm algo, bool *is_aes)
{
	enum flexi_cipher type;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		type = CIPHER_AES_CBC;
		*is_aes = true;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		type = CIPHER_3DES_CBC;
		*is_aes = false;
		break;
	default:
		type = CIPHER_INVALID;
		NITROX_LOG_LINE(ERR, "Algorithm not supported %d", algo);
		break;
	}

	return type;
}

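/* Map the AES key size to the encoding expected in the flexi context
 * aes_keylen field (1 = 128 bit, 2 = 192 bit, 3 = 256 bit); non-AES
 * ciphers use 0.
 */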
static int
flexi_aes_keylen(size_t keylen, bool is_aes)
{
	int aes_keylen;

	if (!is_aes)
		return 0;

	switch (keylen) {
	case AES_KEYSIZE_128:
		aes_keylen = 1;
		break;
	case AES_KEYSIZE_192:
		aes_keylen = 2;
		break;
	case AES_KEYSIZE_256:
		aes_keylen = 3;
		break;
	default:
		NITROX_LOG_LINE(ERR, "Invalid keylen %zu", keylen);
		aes_keylen = -EINVAL;
		break;
	}

	return aes_keylen;
}

static bool
crypto_key_is_valid(struct rte_crypto_cipher_xform *xform,
		    struct flexi_crypto_context *fctx)
{
	if (unlikely(xform->key.length > sizeof(fctx->crypto.key))) {
		NITROX_LOG_LINE(ERR, "Invalid crypto key length %d",
			   xform->key.length);
		return false;
	}

	return true;
}

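/* The flexi context flags word is kept byte-swapped (big-endian, the
 * layout the SE hardware presumably expects); it is converted to CPU
 * order around each bit-field update below and swapped back afterwards.
 */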
static int
configure_cipher_ctx(struct rte_crypto_cipher_xform *xform,
		     struct nitrox_crypto_ctx *ctx)
{
	enum flexi_cipher type;
	bool cipher_is_aes = false;
	int aes_keylen;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_cipher_type(xform->algo, &cipher_is_aes);
	if (unlikely(type == CIPHER_INVALID))
		return -ENOTSUP;

	aes_keylen = flexi_aes_keylen(xform->key.length, cipher_is_aes);
	if (unlikely(aes_keylen < 0))
		return -EINVAL;

	if (unlikely(!cipher_is_aes && !crypto_key_is_valid(xform, fctx)))
		return -EINVAL;

	if (unlikely(xform->iv.length > MAX_IV_LEN))
		return -EINVAL;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.cipher_type = type;
	fctx->w0.aes_keylen = aes_keylen;
	fctx->w0.iv_source = IV_FROM_DPTR;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
	memcpy(fctx->crypto.key, xform->key.data, xform->key.length);

	ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
	ctx->req_op = (xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
	ctx->iv.offset = xform->iv.offset;
	ctx->iv.length = xform->iv.length;
	return 0;
}

static enum flexi_auth
get_flexi_auth_type(enum rte_crypto_auth_algorithm algo)
{
	enum flexi_auth type;

	switch (algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		type = AUTH_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		type = AUTH_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		type = AUTH_SHA2_SHA256;
		break;
	default:
		NITROX_LOG_LINE(ERR, "Algorithm not supported %d", algo);
		type = AUTH_INVALID;
		break;
	}

	return type;
}

static bool
auth_key_is_valid(const uint8_t *data, uint16_t length,
		  struct flexi_crypto_context *fctx)
{
	if (unlikely(!data && length)) {
		NITROX_LOG_LINE(ERR, "Invalid auth key");
		return false;
	}

	if (unlikely(length > sizeof(fctx->auth.opad))) {
		NITROX_LOG_LINE(ERR, "Invalid auth key length %d",
			   length);
		return false;
	}

	return true;
}

static int
configure_auth_ctx(struct rte_crypto_auth_xform *xform,
		   struct nitrox_crypto_ctx *ctx)
{
	enum flexi_auth type;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_auth_type(xform->algo);
	if (unlikely(type == AUTH_INVALID))
		return -ENOTSUP;

	if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
					fctx)))
		return -EINVAL;

	ctx->digest_length = xform->digest_length;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.hash_type = type;
	fctx->w0.auth_input_type = 1;
	fctx->w0.mac_len = xform->digest_length;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(&fctx->auth, 0, sizeof(fctx->auth));
	memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
	return 0;
}

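/* AEAD setup. For CCM the digest must be an even length between 4 and
 * 16 bytes, and the length-field size L = 15 - IV length must lie in
 * [2, 8], matching the CCM length-field constraints.
 */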
static int
configure_aead_ctx(struct rte_crypto_aead_xform *xform,
		   struct nitrox_crypto_ctx *ctx)
{
	int aes_keylen;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	if (unlikely(xform->aad_length > FLEXI_CRYPTO_MAX_AAD_LEN)) {
		NITROX_LOG_LINE(ERR, "AAD length %d not supported",
			   xform->aad_length);
		return -ENOTSUP;
	}

	if (unlikely(xform->algo != RTE_CRYPTO_AEAD_AES_GCM &&
		     xform->algo != RTE_CRYPTO_AEAD_AES_CCM))
		return -ENOTSUP;

	aes_keylen = flexi_aes_keylen(xform->key.length, true);
	if (unlikely(aes_keylen < 0))
		return -EINVAL;

	if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
					fctx)))
		return -EINVAL;

	if (unlikely(xform->iv.length > MAX_IV_LEN))
		return -EINVAL;

	if (xform->algo == RTE_CRYPTO_AEAD_AES_CCM) {
		int L;

		/* digest_length must be 4, 6, 8, 10, 12, 14, 16 bytes */
		if (unlikely(xform->digest_length < 4 ||
			     xform->digest_length > 16 ||
			     (xform->digest_length & 1) == 1)) {
			NITROX_LOG_LINE(ERR, "Invalid digest length %d",
				   xform->digest_length);
			return -EINVAL;
		}

		L = 15 - xform->iv.length;
		if (unlikely(L < 2 || L > 8)) {
			NITROX_LOG_LINE(ERR, "Invalid iv length %d",
				   xform->iv.length);
			return -EINVAL;
		}
	}

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.cipher_type = (xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ?
				CIPHER_AES_GCM : CIPHER_AES_CCM;
	fctx->w0.aes_keylen = aes_keylen;
	fctx->w0.iv_source = IV_FROM_DPTR;
	fctx->w0.hash_type = AUTH_NULL;
	fctx->w0.auth_input_type = 1;
	fctx->w0.mac_len = xform->digest_length;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
	memcpy(fctx->crypto.key, xform->key.data, xform->key.length);
	memset(&fctx->auth, 0, sizeof(fctx->auth));
	memcpy(fctx->auth.opad, xform->key.data, xform->key.length);

	ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
	ctx->req_op = (xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
	ctx->iv.offset = xform->iv.offset;
	ctx->iv.length = xform->iv.length;
	ctx->digest_length = xform->digest_length;
	ctx->aad_length = xform->aad_length;
	ctx->aead_algo = xform->algo;
	return 0;
}

static int
nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev __rte_unused,
			      struct rte_crypto_sym_xform *xform,
			      struct rte_cryptodev_sym_session *sess)
{
	struct nitrox_crypto_ctx *ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	int ret = -EINVAL;

	ctx->nitrox_chain = get_crypto_chain_order(xform);
	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_ONLY:
		cipher_xform = &xform->cipher;
		break;
	case NITROX_CHAIN_CIPHER_AUTH:
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		break;
	case NITROX_CHAIN_AUTH_CIPHER:
		auth_xform = &xform->auth;
		cipher_xform = &xform->next->cipher;
		break;
	case NITROX_CHAIN_COMBINED:
		aead_xform = &xform->aead;
		break;
	default:
		NITROX_LOG_LINE(ERR, "Crypto chain not supported");
		ret = -ENOTSUP;
		goto err;
	}

	if (cipher_xform && unlikely(configure_cipher_ctx(cipher_xform, ctx))) {
		NITROX_LOG_LINE(ERR, "Failed to configure cipher ctx");
		goto err;
	}

	if (auth_xform && unlikely(configure_auth_ctx(auth_xform, ctx))) {
		NITROX_LOG_LINE(ERR, "Failed to configure auth ctx");
		goto err;
	}

	if (aead_xform && unlikely(configure_aead_ctx(aead_xform, ctx))) {
		NITROX_LOG_LINE(ERR, "Failed to configure aead ctx");
		goto err;
	}

	ctx->iova = CRYPTODEV_GET_SYM_SESS_PRIV_IOVA(sess);
	return 0;
err:
	return ret;
}

static void
nitrox_sym_dev_sess_clear(struct rte_cryptodev *cdev __rte_unused,
			  struct rte_cryptodev_sym_session *sess __rte_unused)
{}

static struct nitrox_crypto_ctx *
get_crypto_ctx(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session))
			return CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
	}

	return NULL;
}

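/* Enqueue one op: look up the session context, take a software request
 * from the per-qp pool, build the SE instruction and post it on the
 * ring. The doorbell is rung once per burst by the caller.
 */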
static int
nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_crypto_op *op)
{
	struct nitrox_crypto_ctx *ctx;
	struct nitrox_softreq *sr;
	int err;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	ctx = get_crypto_ctx(op);
	if (unlikely(!ctx)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}

	if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr)))
		return -ENOMEM;

	err = nitrox_process_se_req(qp->qno, op, ctx, sr);
	if (unlikely(err)) {
		rte_mempool_put(qp->sr_mp, sr);
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return err;
	}

	nitrox_qp_enqueue(qp, nitrox_sym_instr_addr(sr), sr);
	return 0;
}

static uint16_t
nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t free_slots = 0;
	uint16_t cnt = 0;
	bool err = false;

	free_slots = nitrox_qp_free_count(qp);
	if (nb_ops > free_slots)
		nb_ops = free_slots;

	for (cnt = 0; cnt < nb_ops; cnt++) {
		if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) {
			err = true;
			break;
		}
	}

	nitrox_ring_dbell(qp, cnt);
	qp->stats.enqueued_count += cnt;
	if (unlikely(err))
		qp->stats.enqueue_err_count++;

	return cnt;
}

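/* Dequeue one completion. A negative status from nitrox_check_se_req()
 * means the request is still in flight (-EAGAIN); 0 is success, and the
 * microcode error 0x4c (MC_MAC_MISMATCH_ERR_CODE) is reported as an
 * authentication failure.
 */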
static int
nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
{
	struct nitrox_softreq *sr;
	int ret;
	struct rte_crypto_op *op;

	sr = nitrox_qp_get_softreq(qp);
	ret = nitrox_check_se_req(sr, op_ptr);
	if (ret < 0)
		return -EAGAIN;

	op = *op_ptr;
	nitrox_qp_dequeue(qp);
	rte_mempool_put(qp->sr_mp, sr);
	if (!ret) {
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		qp->stats.dequeued_count++;

		return 0;
	}

	if (ret == MC_MAC_MISMATCH_ERR_CODE)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	qp->stats.dequeue_err_count++;
	return 0;
}

static uint16_t
nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t filled_slots = nitrox_qp_used_count(qp);
	int cnt = 0;

	if (nb_ops > filled_slots)
		nb_ops = filled_slots;

	for (cnt = 0; cnt < nb_ops; cnt++)
		if (nitrox_deq_single_op(qp, &ops[cnt]))
			break;

	return cnt;
}

static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
	.dev_configure		= nitrox_sym_dev_config,
	.dev_start		= nitrox_sym_dev_start,
	.dev_stop		= nitrox_sym_dev_stop,
	.dev_close		= nitrox_sym_dev_close,
	.dev_infos_get		= nitrox_sym_dev_info_get,
	.stats_get		= nitrox_sym_dev_stats_get,
	.stats_reset		= nitrox_sym_dev_stats_reset,
	.queue_pair_setup	= nitrox_sym_dev_qp_setup,
	.queue_pair_release     = nitrox_sym_dev_qp_release,
	.sym_session_get_size   = nitrox_sym_dev_sess_get_size,
	.sym_session_configure  = nitrox_sym_dev_sess_configure,
	.sym_session_clear      = nitrox_sym_dev_sess_clear
};

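/* Create the symmetric cryptodev for a NITROX device: the device name is
 * the PCI address with an "_n5sym" suffix, and the advertised feature
 * flags cover HW-accelerated symmetric crypto with operation chaining
 * and the in-place/out-of-place SGL combinations handled by the request
 * manager.
 */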
int
nitrox_sym_pmd_create(struct nitrox_device *ndev)
{
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_cryptodev_pmd_init_params init_params = {
			.name = "",
			.socket_id = ndev->pdev->device.numa_node,
			.private_data_size = sizeof(struct nitrox_sym_device)
	};
	struct rte_cryptodev *cdev;

	rte_pci_device_name(&ndev->pdev->addr, name, sizeof(name));
	snprintf(name + strlen(name), RTE_CRYPTODEV_NAME_MAX_LEN - strlen(name),
		 "_n5sym");
	ndev->rte_sym_dev.driver = &nitrox_rte_sym_drv;
	ndev->rte_sym_dev.numa_node = ndev->pdev->device.numa_node;
	ndev->rte_sym_dev.devargs = NULL;
	cdev = rte_cryptodev_pmd_create(name, &ndev->rte_sym_dev,
					&init_params);
	if (!cdev) {
		NITROX_LOG_LINE(ERR, "Cryptodev '%s' creation failed", name);
		return -ENODEV;
	}

	ndev->rte_sym_dev.name = cdev->data->name;
	cdev->driver_id = nitrox_sym_drv_id;
	cdev->dev_ops = &nitrox_cryptodev_ops;
	cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
	cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
	cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_HW_ACCELERATED |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	ndev->sym_dev = cdev->data->dev_private;
	ndev->sym_dev->cdev = cdev;
	ndev->sym_dev->ndev = ndev;

	rte_cryptodev_pmd_probing_finish(cdev);

	NITROX_LOG_LINE(DEBUG, "Created cryptodev '%s', dev_id %d, drv_id %d",
		   cdev->data->name, cdev->data->dev_id, nitrox_sym_drv_id);
	return 0;
}

int
nitrox_sym_pmd_destroy(struct nitrox_device *ndev)
{
	return rte_cryptodev_pmd_destroy(ndev->sym_dev->cdev);
}

static struct cryptodev_driver nitrox_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(nitrox_crypto_drv,
		nitrox_rte_sym_drv,
		nitrox_sym_drv_id);