/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2021 Intel Corporation
 */

#include "pmd_snow3g_priv.h"
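
/*
 * Session setup below accepts only SNOW 3G UEA2 for cipher and UIA2 for
 * authentication, validates that the IV and digest lengths match the fixed
 * SNOW 3G sizes and that the keys fit the key-schedule buffers, and then
 * expands the keys with IMB_SNOW3G_INIT_KEY_SCHED() from the IPsec MB
 * library.
 */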

/** Parse crypto xform chain and set private session parameters. */
static int
snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
		const struct rte_crypto_sym_xform *xform)
{
	struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum ipsec_mb_operation mode;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
				&cipher_xform, NULL);
	if (ret)
		return ret;

	if (cipher_xform) {
		/* Only SNOW 3G UEA2 supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
			return -ENOTSUP;

		if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
			return -ENOMEM;
		}

		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;

		/* Initialize key */
		IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
					&sess->pKeySched_cipher);
	}

	if (auth_xform) {
		/* Only SNOW 3G UIA2 supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
			return -ENOTSUP;

		if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong digest length");
			return -EINVAL;
		}
		if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
			return -ENOMEM;
		}

		sess->auth_op = auth_xform->auth.op;

		if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		sess->auth_iv_offset = auth_xform->auth.iv.offset;

		/* Initialize key */
		IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
					&sess->pKeySched_hash);
	}

	sess->op = mode;

	return 0;
}
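
/*
 * SNOW 3G offsets and lengths in the crypto op are given in bits, hence the
 * ">> 3" conversions to bytes below. The digest is treated as appended in
 * the source buffer when the authenticated region ends before the ciphered
 * region does, i.e. the digest itself falls inside the area to be
 * encrypted/decrypted (the DIGEST_ENCRYPTED case).
 */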

/** Check if conditions are met for digest-appended operations */
static uint8_t *
snow3g_digest_appended_in_src(struct rte_crypto_op *op)
{
	unsigned int auth_size, cipher_size;

	auth_size = (op->sym->auth.data.offset >> 3) +
		(op->sym->auth.data.length >> 3);
	cipher_size = (op->sym->cipher.data.offset >> 3) +
		(op->sym->cipher.data.length >> 3);

	if (auth_size < cipher_size)
		return rte_pktmbuf_mtod_offset(op->sym->m_src,
				uint8_t *, auth_size);

	return NULL;
}
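
/*
 * Byte-aligned cipher path: per-op source, destination, IV and length
 * arrays are gathered and handed to a single IMB_SNOW3G_F8_N_BUFFER() call
 * for the whole burst. For out-of-place auth-cipher chains with an
 * encrypted digest, the bytes between the end of the ciphered region and
 * the end of the digest are copied to the destination unchanged, and any
 * clear-text digest left in the source buffer is zeroed afterwards.
 */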

/** Encrypt/decrypt mbufs with same cipher key. */
static uint8_t
process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	uint32_t i;
	uint8_t processed_ops = 0;
	const void *src[SNOW3G_MAX_BURST] = {NULL};
	void *dst[SNOW3G_MAX_BURST] = {NULL};
	uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
	const void *iv[SNOW3G_MAX_BURST] = {NULL};
	uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
	uint32_t cipher_off, cipher_len;
	int unencrypted_bytes = 0;

	for (i = 0; i < num_ops; i++) {

		cipher_off = ops[i]->sym->cipher.data.offset >> 3;
		cipher_len = ops[i]->sym->cipher.data.length >> 3;
		src[i] = rte_pktmbuf_mtod_offset(
			ops[i]->sym->m_src, uint8_t *, cipher_off);

		/* If out-of-place operation */
		if (ops[i]->sym->m_dst &&
			ops[i]->sym->m_src != ops[i]->sym->m_dst) {
			dst[i] = rte_pktmbuf_mtod_offset(
				ops[i]->sym->m_dst, uint8_t *, cipher_off);

			/* In case of out-of-place, auth-cipher operation
			 * with partial encryption of the digest, copy
			 * the remaining, unencrypted part.
			 */
			if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
			    || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
				unencrypted_bytes =
					(ops[i]->sym->auth.data.offset >> 3) +
					(ops[i]->sym->auth.data.length >> 3) +
					(SNOW3G_DIGEST_LENGTH) -
					cipher_off - cipher_len;
			if (unencrypted_bytes > 0)
				rte_memcpy(
					rte_pktmbuf_mtod_offset(
						ops[i]->sym->m_dst, uint8_t *,
						cipher_off + cipher_len),
					rte_pktmbuf_mtod_offset(
						ops[i]->sym->m_src, uint8_t *,
						cipher_off + cipher_len),
					unencrypted_bytes);
		} else
			dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
						uint8_t *, cipher_off);

		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->cipher_iv_offset);
		num_bytes[i] = cipher_len;
		processed_ops++;
	}

	IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
			src, dst, num_bytes, processed_ops);

	/* Take care of the raw digest data in src buffer */
	for (i = 0; i < num_ops; i++) {
		if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
			session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
				ops[i]->sym->m_dst != NULL) {
			digest_appended[i] =
				snow3g_digest_appended_in_src(ops[i]);
			/* Clear unencrypted digest from
			 * the src buffer
			 */
			if (digest_appended[i] != NULL)
				memset(digest_appended[i],
					0, SNOW3G_DIGEST_LENGTH);
		}
	}
	return processed_ops;
}
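
/*
 * Bit-level cipher path: the cipher offset and length are passed to
 * IMB_SNOW3G_F8_1_BUFFER_BIT() in bits. Only out-of-place operation is
 * supported here; in-place requests are rejected with INVALID_ARGS.
 */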

/** Encrypt/decrypt mbuf (bit level function). */
static uint8_t
process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op,
		struct snow3g_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv;
	uint32_t length_in_bits, offset_in_bits;
	int unencrypted_bytes = 0;

	offset_in_bits = op->sym->cipher.data.offset;
	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	if (op->sym->m_dst == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		IPSEC_MB_LOG(ERR, "bit-level in-place not supported");
		return 0;
	}
	length_in_bits = op->sym->cipher.data.length;
	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
	/* In case of out-of-place, auth-cipher operation
	 * with partial encryption of the digest, copy
	 * the remaining, unencrypted part.
	 */
	if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
		session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
		unencrypted_bytes =
			(op->sym->auth.data.offset >> 3) +
			(op->sym->auth.data.length >> 3) +
			(SNOW3G_DIGEST_LENGTH) -
			(offset_in_bits >> 3) -
			(length_in_bits >> 3);
	if (unencrypted_bytes > 0)
		rte_memcpy(
			rte_pktmbuf_mtod_offset(
				op->sym->m_dst, uint8_t *,
				(length_in_bits >> 3)),
			rte_pktmbuf_mtod_offset(
				op->sym->m_src, uint8_t *,
				(length_in_bits >> 3)),
				unencrypted_bytes);

	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->cipher_iv_offset);

	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
			src, dst, length_in_bits, offset_in_bits);

	return 1;
}
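
/*
 * Authentication path: each op is hashed separately with
 * IMB_SNOW3G_F9_1_BUFFER(). For verification the digest is computed into a
 * per-qp scratch buffer and compared against auth.digest.data; for
 * generation with an appended (to-be-encrypted) digest it is written into
 * the source buffer and also copied back to auth.digest.data.
 */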

/** Generate/verify hash from mbufs with same hash key. */
static int
process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	uint32_t i;
	uint8_t processed_ops = 0;
	uint8_t *src, *dst;
	uint32_t length_in_bits;
	uint8_t *iv;
	uint8_t digest_appended = 0;
	struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);

	for (i = 0; i < num_ops; i++) {
		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			IPSEC_MB_LOG(ERR, "Offset");
			break;
		}

		dst = NULL;

		length_in_bits = ops[i]->sym->auth.data.length;

		src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
					      (ops[i]->sym->auth.data.offset >> 3));
		iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->auth_iv_offset);

		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			dst = qp_data->temp_digest;
			/* Handle auth-cipher verify out-of-place case */
			if ((session->op ==
				IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
				session->op ==
				IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
				ops[i]->sym->m_dst != NULL)
				src = rte_pktmbuf_mtod_offset(
					ops[i]->sym->m_dst, uint8_t *,
					ops[i]->sym->auth.data.offset >> 3);

			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
					&session->pKeySched_hash,
					iv, src, length_in_bits, dst);
			/* Verify digest. */
			if (memcmp(dst, ops[i]->sym->auth.digest.data,
					SNOW3G_DIGEST_LENGTH) != 0)
				ops[i]->status =
					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			if (session->op ==
				IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
				session->op ==
				IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
				dst = snow3g_digest_appended_in_src(ops[i]);

			if (dst != NULL)
				digest_appended = 1;
			else
				dst = ops[i]->sym->auth.digest.data;

			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
					&session->pKeySched_hash,
					iv, src, length_in_bits, dst);

			/* Copy back digest from src to auth.digest.data */
			if (digest_appended)
				rte_memcpy(ops[i]->sym->auth.digest.data,
					dst, SNOW3G_DIGEST_LENGTH);
		}
		processed_ops++;
	}

	return processed_ops;
}
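
/*
 * Byte-aligned batch dispatcher: runs the cipher and/or hash stage in the
 * order dictated by the session operation type, then promotes ops still in
 * NOT_PROCESSED status to SUCCESS and releases the session of any
 * session-less op.
 */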

/** Process a batch of crypto ops which share the same session. */
static int
process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
		struct ipsec_mb_qp *qp, uint8_t num_ops)
{
	uint32_t i;
	uint32_t processed_ops;

#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
	for (i = 0; i < num_ops; i++) {
		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
				(ops[i]->sym->m_dst != NULL &&
				!rte_pktmbuf_is_contiguous(
						ops[i]->sym->m_dst))) {
			IPSEC_MB_LOG(ERR,
				"PMD supports only contiguous mbufs, "
				"op (%p) provides noncontiguous mbuf as "
				"source/destination buffer.", ops[i]);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	}
#endif

	switch (session->op) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:
		processed_ops = process_snow3g_cipher_op(qp, ops,
				session, num_ops);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		processed_ops = process_snow3g_cipher_op(qp, ops, session,
				num_ops);
		process_snow3g_hash_op(qp, ops, session, processed_ops);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		process_snow3g_cipher_op(qp, ops, session, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op. */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			memset(session, 0, sizeof(struct snow3g_session));
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}
	return processed_ops;
}
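
/*
 * Single-op dispatcher for ops whose cipher offset/length is not byte
 * aligned: runs the cipher and/or hash stage according to the session
 * operation type and performs the same status fix-up and session-less
 * cleanup as process_ops(), but for one op at a time.
 */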

/** Process a crypto op with length/offset in bits. */
static int
process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
		struct ipsec_mb_qp *qp)
{
	unsigned int processed_op;
	int ret;

	switch (session->op) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:

		processed_op = process_snow3g_cipher_op_bit(qp, op,
				session);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		processed_op = process_snow3g_cipher_op_bit(qp, op, session);
		if (processed_op == 1)
			process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		if (processed_op == 1)
			process_snow3g_cipher_op_bit(qp, op, session);
		break;
	default:
		/* Operation not supported. */
		processed_op = 0;
	}

	/*
	 * If there was no error/authentication failure,
	 * change status to successful.
	 */
	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Free session if a session-less crypto op. */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
			sizeof(struct snow3g_session));
		rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
		op->sym->session = NULL;
	}

	if (unlikely(processed_op != 1))
		return 0;

	ret = rte_ring_enqueue(qp->ingress_queue, op);
	if (ret != 0)
		return ret;

	return 1;
}
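
/*
 * Dequeue path: ops previously placed on the qp ingress ring by the common
 * ipsec_mb enqueue are pulled off and processed here. Byte-aligned ops
 * sharing a session are batched up to SNOW3G_MAX_BURST and passed to
 * process_ops(); an op with bit-level cipher offset/length flushes the
 * current batch and is handled on its own via process_op_bit().
 */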

static uint16_t
snow3g_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct ipsec_mb_qp *qp = queue_pair;
	struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
	struct rte_crypto_op *curr_c_op;

	struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
	uint32_t i;
	uint8_t burst_size = 0;
	uint8_t processed_ops;
	uint32_t nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {
		curr_c_op = ops[i];

		/* Set status as enqueued (not processed yet) by default. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
		if (unlikely(curr_sess == NULL ||
				curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/* If length/offset is at bit-level,
		 * process this buffer alone.
		 */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((curr_c_op->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {
			/* Process the ops of the previous session. */
			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op, curr_sess, qp);
			if (processed_ops != 1)
				break;

			continue;
		}

		/* Batch ops that share the same session. */
		if (prev_sess == NULL) {
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == SNOW3G_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Different session, process the ops
			 * of the previous session.
			 */
			processed_ops = process_ops(c_ops, prev_sess,
					qp, burst_size);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_sess = curr_sess;

			c_ops[burst_size++] = curr_c_op;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last session. */
		processed_ops = process_ops(c_ops, prev_sess,
				qp, burst_size);
	}

	qp->stats.dequeued_count += i;
	return i;
}
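
/*
 * Device, queue pair and session management callbacks are provided by the
 * shared ipsec_mb framework; this PMD only adds the SNOW 3G specific
 * session configuration and dequeue/processing routines above.
 */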

struct rte_cryptodev_ops snow3g_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = ipsec_mb_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};

struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;

static int
snow3g_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_SNOW3G);
}

static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
	.probe = snow3g_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver snow3g_crypto_drv;
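
/*
 * Register the virtual device driver under its canonical name and legacy
 * alias, declare the vdev arguments it accepts, and register the crypto
 * driver so that pmd_driver_id_snow3g is assigned for session handling.
 */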

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
			       "max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
				cryptodev_snow3g_pmd_drv.driver,
				pmd_driver_id_snow3g);

/* Constructor function to register snow3g PMD */
RTE_INIT(ipsec_mb_register_snow3g)
{
	struct ipsec_mb_internals *snow3g_data
		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];

	snow3g_data->caps = snow3g_capabilities;
	snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
	snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
	snow3g_data->internals_priv_size = 0;
	snow3g_data->ops = &snow3g_pmd_ops;
	snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
	snow3g_data->session_configure = snow3g_session_configure;
	snow3g_data->session_priv_size = sizeof(struct snow3g_session);
}
602