1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Marvell International Ltd.
3 * Copyright(c) 2017 Semihalf.
4 * All rights reserved.
5 */
6
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_cryptodev.h>
10 #include <cryptodev_pmd.h>
11 #include <rte_security_driver.h>
12 #include <bus_vdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_cpuflags.h>
15 #include <rte_kvargs.h>
16 #include <rte_mvep_common.h>
17
18 #include "mrvl_pmd_private.h"
19
/* Devargs key allowing the user to override the session-count limit. */
#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
/* Session-count limit used when no devargs override is given. */
#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048

/* Driver id assigned to this PMD at crypto-driver registration time. */
static uint8_t cryptodev_driver_id;

/** PMD initialization parameters: common vdev params plus session limit. */
struct mrvl_pmd_init_params {
	struct rte_cryptodev_pmd_init_params common;
	uint32_t max_nb_sessions;	/* Upper bound on concurrent sessions. */
};

/** Devargs keys accepted by this PMD (used to validate kvargs input). */
const char *mrvl_pmd_valid_params[] = {
	RTE_CRYPTODEV_PMD_NAME_ARG,
	RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
	RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
	MRVL_PMD_MAX_NB_SESS_ARG
};
36
/**
 * Flag if particular crypto algorithm is supported by PMD/MUSDK.
 *
 * The idea is to have Not Supported value as default (0).
 * This way we need only to define proper map sizes,
 * non-initialized entries will be by default not supported.
 */
enum algo_supported {
	ALGO_NOT_SUPPORTED = 0,	/* Default for zero-initialized map entries. */
	ALGO_SUPPORTED = 1,	/* Entry explicitly enabled in a map below. */
};
48
/** Map elements for cipher mapping.*/
/* We want to squeeze in multiple maps into the cache line. */
struct __rte_aligned(32) cipher_params_mapping {
	enum algo_supported supported;   /**< On/Off switch */
	enum sam_cipher_alg cipher_alg;  /**< Cipher algorithm */
	enum sam_cipher_mode cipher_mode; /**< Cipher mode */
	unsigned int max_key_len;        /**< Maximum key length (in bytes)*/
};
57
/** Map elements for auth mapping.*/
/* We want to squeeze in multiple maps into the cache line. */
struct __rte_aligned(32) auth_params_mapping {
	enum algo_supported supported; /**< On/off switch */
	enum sam_auth_alg auth_alg;    /**< Auth algorithm */
};
64
/**
 * Map of supported cipher algorithms.
 *
 * Indexed by enum rte_crypto_cipher_algorithm. Entries not listed here are
 * zero-initialized, i.e. ALGO_NOT_SUPPORTED. Key lengths are upper bounds
 * in bytes (BITS2BYTES converts from the bit counts given here).
 */
static const
struct cipher_params_mapping cipher_map[] = {
	[RTE_CRYPTO_CIPHER_NULL] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_NONE },
	[RTE_CRYPTO_CIPHER_3DES_CBC] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_CBC,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_3DES_CTR] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_CTR,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_3DES_ECB] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_ECB,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_AES_CBC] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_CBC,
		.max_key_len = BITS2BYTES(256) },
	[RTE_CRYPTO_CIPHER_AES_CTR] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_CTR,
		.max_key_len = BITS2BYTES(256) },
	[RTE_CRYPTO_CIPHER_AES_ECB] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_ECB,
		.max_key_len = BITS2BYTES(256) },
};
104
/**
 * Map of supported auth algorithms.
 *
 * Indexed by enum rte_crypto_auth_algorithm. Entries not listed here are
 * zero-initialized, i.e. ALGO_NOT_SUPPORTED.
 */
static const
struct auth_params_mapping auth_map[] = {
	[RTE_CRYPTO_AUTH_NULL] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_NONE },
	[RTE_CRYPTO_AUTH_MD5_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_MD5 },
	[RTE_CRYPTO_AUTH_MD5] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_MD5 },
	[RTE_CRYPTO_AUTH_SHA1_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA1 },
	[RTE_CRYPTO_AUTH_SHA1] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA1 },
	[RTE_CRYPTO_AUTH_SHA224_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_224 },
	[RTE_CRYPTO_AUTH_SHA224] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_224 },
	[RTE_CRYPTO_AUTH_SHA256_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_256 },
	[RTE_CRYPTO_AUTH_SHA256] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_256 },
	[RTE_CRYPTO_AUTH_SHA384_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_384 },
	[RTE_CRYPTO_AUTH_SHA384] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_384 },
	[RTE_CRYPTO_AUTH_SHA512_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_512 },
	[RTE_CRYPTO_AUTH_SHA512] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_512 },
	[RTE_CRYPTO_AUTH_AES_GMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_AES_GMAC },
};
153
/**
 * Map of supported aead algorithms.
 *
 * Indexed by enum rte_crypto_aead_algorithm; reuses the cipher mapping
 * structure since AEAD is configured through SAM cipher alg/mode fields.
 */
static const
struct cipher_params_mapping aead_map[] = {
	[RTE_CRYPTO_AEAD_AES_GCM] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_GCM,
		.max_key_len = BITS2BYTES(256) },
};
165
/*
 *-----------------------------------------------------------------------------
 * Forward declarations.
 *-----------------------------------------------------------------------------
 */
/* Declared ahead of its definition later in this file. */
static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
172
173 /*
174 *-----------------------------------------------------------------------------
175 * Session Preparation.
176 *-----------------------------------------------------------------------------
177 */
178
179 /**
180 * Get xform chain order.
181 *
182 * @param xform Pointer to configuration structure chain for crypto operations.
183 * @returns Order of crypto operations.
184 */
185 static enum mrvl_crypto_chain_order
mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform * xform)186 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
187 {
188 /* Currently, Marvell supports max 2 operations in chain */
189 if (xform->next != NULL && xform->next->next != NULL)
190 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
191
192 if (xform->next != NULL) {
193 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
194 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
195 return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
196
197 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
198 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
199 return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
200 } else {
201 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
202 return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
203
204 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
205 return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
206
207 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
208 return MRVL_CRYPTO_CHAIN_COMBINED;
209 }
210 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
211 }
212
213 /**
214 * Set session parameters for cipher part.
215 *
216 * @param sess Crypto session pointer.
217 * @param cipher_xform Pointer to configuration structure for cipher operations.
218 * @returns 0 in case of success, negative value otherwise.
219 */
220 static int
mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session * sess,const struct rte_crypto_sym_xform * cipher_xform)221 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
222 const struct rte_crypto_sym_xform *cipher_xform)
223 {
224 uint8_t *cipher_key;
225
226 /* Make sure we've got proper struct */
227 if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
228 MRVL_LOG(ERR, "Wrong xform struct provided!");
229 return -EINVAL;
230 }
231
232 /* See if map data is present and valid */
233 if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
234 (cipher_map[cipher_xform->cipher.algo].supported
235 != ALGO_SUPPORTED)) {
236 MRVL_LOG(ERR, "Cipher algorithm not supported!");
237 return -EINVAL;
238 }
239
240 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
241
242 sess->sam_sess_params.dir =
243 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
244 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
245 sess->sam_sess_params.cipher_alg =
246 cipher_map[cipher_xform->cipher.algo].cipher_alg;
247 sess->sam_sess_params.cipher_mode =
248 cipher_map[cipher_xform->cipher.algo].cipher_mode;
249
250 /* Assume IV will be passed together with data. */
251 sess->sam_sess_params.cipher_iv = NULL;
252
253 /* Get max key length. */
254 if (cipher_xform->cipher.key.length >
255 cipher_map[cipher_xform->cipher.algo].max_key_len) {
256 MRVL_LOG(ERR, "Wrong key length!");
257 return -EINVAL;
258 }
259
260 cipher_key = malloc(cipher_xform->cipher.key.length);
261 if (cipher_key == NULL) {
262 MRVL_LOG(ERR, "Insufficient memory!");
263 return -ENOMEM;
264 }
265
266 memcpy(cipher_key, cipher_xform->cipher.key.data,
267 cipher_xform->cipher.key.length);
268
269 sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
270 sess->sam_sess_params.cipher_key = cipher_key;
271
272 return 0;
273 }
274
275 /**
276 * Set session parameters for authentication part.
277 *
278 * @param sess Crypto session pointer.
279 * @param auth_xform Pointer to configuration structure for auth operations.
280 * @returns 0 in case of success, negative value otherwise.
281 */
282 static int
mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session * sess,const struct rte_crypto_sym_xform * auth_xform)283 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
284 const struct rte_crypto_sym_xform *auth_xform)
285 {
286 uint8_t *auth_key = NULL;
287
288 /* Make sure we've got proper struct */
289 if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
290 MRVL_LOG(ERR, "Wrong xform struct provided!");
291 return -EINVAL;
292 }
293
294 /* See if map data is present and valid */
295 if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
296 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
297 MRVL_LOG(ERR, "Auth algorithm not supported!");
298 return -EINVAL;
299 }
300
301 sess->sam_sess_params.dir =
302 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
303 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
304 sess->sam_sess_params.auth_alg =
305 auth_map[auth_xform->auth.algo].auth_alg;
306 sess->sam_sess_params.u.basic.auth_icv_len =
307 auth_xform->auth.digest_length;
308
309 if (auth_xform->auth.key.length > 0) {
310 auth_key = malloc(auth_xform->auth.key.length);
311 if (auth_key == NULL) {
312 MRVL_LOG(ERR, "Not enough memory!");
313 return -EINVAL;
314 }
315
316 memcpy(auth_key, auth_xform->auth.key.data,
317 auth_xform->auth.key.length);
318 }
319
320 /* auth_key must be NULL if auth algorithm does not use HMAC */
321 sess->sam_sess_params.auth_key = auth_key;
322 sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
323
324 return 0;
325 }
326
327 /**
328 * Set session parameters for aead part.
329 *
330 * @param sess Crypto session pointer.
331 * @param aead_xform Pointer to configuration structure for aead operations.
332 * @returns 0 in case of success, negative value otherwise.
333 */
334 static int
mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session * sess,const struct rte_crypto_sym_xform * aead_xform)335 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
336 const struct rte_crypto_sym_xform *aead_xform)
337 {
338 uint8_t *aead_key;
339
340 /* Make sure we've got proper struct */
341 if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
342 MRVL_LOG(ERR, "Wrong xform struct provided!");
343 return -EINVAL;
344 }
345
346 /* See if map data is present and valid */
347 if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
348 (aead_map[aead_xform->aead.algo].supported
349 != ALGO_SUPPORTED)) {
350 MRVL_LOG(ERR, "AEAD algorithm not supported!");
351 return -EINVAL;
352 }
353
354 sess->sam_sess_params.dir =
355 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
356 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
357 sess->sam_sess_params.cipher_alg =
358 aead_map[aead_xform->aead.algo].cipher_alg;
359 sess->sam_sess_params.cipher_mode =
360 aead_map[aead_xform->aead.algo].cipher_mode;
361
362 if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM) {
363 /* IV must include nonce for all counter modes */
364 sess->cipher_iv_offset = aead_xform->cipher.iv.offset;
365
366 /* Set order of authentication then encryption to 0 in GCM */
367 sess->sam_sess_params.u.basic.auth_then_encrypt = 0;
368 }
369
370 /* Assume IV will be passed together with data. */
371 sess->sam_sess_params.cipher_iv = NULL;
372
373 /* Get max key length. */
374 if (aead_xform->aead.key.length >
375 aead_map[aead_xform->aead.algo].max_key_len) {
376 MRVL_LOG(ERR, "Wrong key length!");
377 return -EINVAL;
378 }
379
380 aead_key = malloc(aead_xform->aead.key.length);
381 if (aead_key == NULL) {
382 MRVL_LOG(ERR, "Insufficient memory!");
383 return -ENOMEM;
384 }
385
386 memcpy(aead_key, aead_xform->aead.key.data,
387 aead_xform->aead.key.length);
388
389 sess->sam_sess_params.cipher_key = aead_key;
390 sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
391
392 if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
393 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
394
395 sess->sam_sess_params.u.basic.auth_icv_len =
396 aead_xform->aead.digest_length;
397
398 sess->sam_sess_params.u.basic.auth_aad_len =
399 aead_xform->aead.aad_length;
400
401 return 0;
402 }
403
404 /**
405 * Parse crypto transform chain and setup session parameters.
406 *
407 * @param dev Pointer to crypto device
408 * @param sess Pointer to crypto session
409 * @param xform Pointer to configuration structure chain for crypto operations.
410 * @returns 0 in case of success, negative value otherwise.
411 */
412 int
mrvl_crypto_set_session_parameters(struct mrvl_crypto_session * sess,const struct rte_crypto_sym_xform * xform)413 mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
414 const struct rte_crypto_sym_xform *xform)
415 {
416 const struct rte_crypto_sym_xform *cipher_xform = NULL;
417 const struct rte_crypto_sym_xform *auth_xform = NULL;
418 const struct rte_crypto_sym_xform *aead_xform = NULL;
419
420 /* Filter out spurious/broken requests */
421 if (xform == NULL)
422 return -EINVAL;
423
424 sess->chain_order = mrvl_crypto_get_chain_order(xform);
425 switch (sess->chain_order) {
426 case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
427 cipher_xform = xform;
428 auth_xform = xform->next;
429 break;
430 case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
431 auth_xform = xform;
432 cipher_xform = xform->next;
433 break;
434 case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
435 cipher_xform = xform;
436 break;
437 case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
438 auth_xform = xform;
439 break;
440 case MRVL_CRYPTO_CHAIN_COMBINED:
441 aead_xform = xform;
442 break;
443 default:
444 return -EINVAL;
445 }
446
447 if ((cipher_xform != NULL) &&
448 (mrvl_crypto_set_cipher_session_parameters(
449 sess, cipher_xform) < 0)) {
450 MRVL_LOG(ERR, "Invalid/unsupported cipher parameters!");
451 return -EINVAL;
452 }
453
454 if ((auth_xform != NULL) &&
455 (mrvl_crypto_set_auth_session_parameters(
456 sess, auth_xform) < 0)) {
457 MRVL_LOG(ERR, "Invalid/unsupported auth parameters!");
458 return -EINVAL;
459 }
460
461 if ((aead_xform != NULL) &&
462 (mrvl_crypto_set_aead_session_parameters(
463 sess, aead_xform) < 0)) {
464 MRVL_LOG(ERR, "Invalid/unsupported aead parameters!");
465 return -EINVAL;
466 }
467
468 return 0;
469 }
470
471 static int
replay_wsz_to_mask(uint32_t replay_win_sz)472 replay_wsz_to_mask(uint32_t replay_win_sz)
473 {
474 int mask = 0;
475
476 switch (replay_win_sz) {
477 case 0:
478 mask = SAM_ANTI_REPLY_MASK_NONE;
479 break;
480 case 32:
481 mask = SAM_ANTI_REPLY_MASK_32B;
482 break;
483 case 64:
484 mask = SAM_ANTI_REPLY_MASK_64B;
485 break;
486 case 128:
487 mask = SAM_ANTI_REPLY_MASK_128B;
488 break;
489 default:
490 MRVL_LOG(ERR, "Invalid antireplay window size");
491 return -EINVAL;
492 }
493
494 return mask;
495 }
496
497 /**
498 * Parse IPSEC session parameters.
499 *
500 * @param sess Pointer to security session
501 * @param ipsec_xform Pointer to configuration structure IPSEC operations.
502 * @param crypto_xform Pointer to chain for crypto operations.
503 * @returns 0 in case of success, negative value otherwise.
504 */
505 int
mrvl_ipsec_set_session_parameters(struct mrvl_crypto_session * sess,struct rte_security_ipsec_xform * ipsec_xform,struct rte_crypto_sym_xform * crypto_xform)506 mrvl_ipsec_set_session_parameters(struct mrvl_crypto_session *sess,
507 struct rte_security_ipsec_xform *ipsec_xform,
508 struct rte_crypto_sym_xform *crypto_xform)
509 {
510 int seq_mask_size;
511
512 /* Filter out spurious/broken requests */
513 if (ipsec_xform == NULL || crypto_xform == NULL)
514 return -EINVAL;
515
516 /* Crypto parameters handling */
517 if (mrvl_crypto_set_session_parameters(sess, crypto_xform))
518 return -EINVAL;
519
520 seq_mask_size = replay_wsz_to_mask(ipsec_xform->replay_win_sz);
521 if (seq_mask_size < 0)
522 return -EINVAL;
523
524 /* IPSEC protocol parameters handling */
525 sess->sam_sess_params.proto = SAM_PROTO_IPSEC;
526 sess->sam_sess_params.u.ipsec.is_esp =
527 (ipsec_xform->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
528 1 : 0;
529 sess->sam_sess_params.u.ipsec.is_ip6 = 0;
530 sess->sam_sess_params.u.ipsec.is_tunnel =
531 (ipsec_xform->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) ?
532 1 : 0;
533 sess->sam_sess_params.u.ipsec.is_esn = ipsec_xform->options.esn;
534 sess->sam_sess_params.u.ipsec.seq_mask_size = seq_mask_size;
535
536 sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.sip =
537 (uint8_t *)(&ipsec_xform->tunnel.ipv4.src_ip.s_addr);
538 sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.dip =
539 (uint8_t *)&(ipsec_xform->tunnel.ipv4.dst_ip.s_addr);
540
541 sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.dscp =
542 ipsec_xform->tunnel.ipv4.dscp;
543 sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.ttl =
544 ipsec_xform->tunnel.ipv4.ttl;
545 sess->sam_sess_params.u.ipsec.tunnel.u.ipv4.df =
546 ipsec_xform->tunnel.ipv4.df;
547 sess->sam_sess_params.u.ipsec.tunnel.copy_dscp =
548 ipsec_xform->options.copy_dscp;
549 sess->sam_sess_params.u.ipsec.tunnel.copy_flabel =
550 ipsec_xform->options.copy_flabel;
551 sess->sam_sess_params.u.ipsec.tunnel.copy_df =
552 ipsec_xform->options.copy_df;
553
554 sess->sam_sess_params.u.ipsec.is_natt = 0;
555 sess->sam_sess_params.u.ipsec.spi = ipsec_xform->spi;
556 sess->sam_sess_params.u.ipsec.seq = 0;
557
558 return 0;
559 }
560
/*
 *-----------------------------------------------------------------------------
 * Process Operations
 *-----------------------------------------------------------------------------
 */

/**
 * Prepare a single request.
 *
 * This function basically translates DPDK crypto request into one
 * understandable by MUDSK's SAM. If this is a first request in a session,
 * it starts the session.
 *
 * @param request Pointer to pre-allocated && reset request buffer [Out].
 * @param src_bd Pointer to pre-allocated source descriptor array [Out].
 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
 * @param op Pointer to DPDK crypto operation struct [In].
 * @returns 0 on success, negative value otherwise.
 */
static inline int
mrvl_request_prepare_crp(struct sam_cio_op_params *request,
		struct sam_buf_info *src_bd,
		struct sam_buf_info *dst_bd,
		struct rte_crypto_op *op)
{
	struct mrvl_crypto_session *sess;
	struct rte_mbuf *src_mbuf, *dst_mbuf;
	uint16_t segments_nb;
	uint8_t *digest;
	int i;

	/* Only session-based operations are supported. */
	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		MRVL_LOG(ERR, "MRVL CRYPTO PMD only supports session "
			"oriented requests, op (%p) is sessionless!",
			op);
		return -EINVAL;
	}

	sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);

	request->sa = sess->sam_sess;
	request->cookie = op;

	src_mbuf = op->sym->m_src;
	segments_nb = src_mbuf->nb_segs;
	/* The following conditions must be met:
	 * - Destination buffer is required when segmented source buffer
	 * - Segmented destination buffer is not supported
	 */
	if ((segments_nb > 1) && (!op->sym->m_dst)) {
		MRVL_LOG(ERR, "op->sym->m_dst = NULL!");
		return -1;
	}
	/* For non SG case:
	 * If application delivered us null dst buffer, it means it expects
	 * us to deliver the result in src buffer.
	 */
	dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

	if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
		MRVL_LOG(ERR, "Segmented destination buffer not supported!");
		return -1;
	}

	/* Build one SAM source descriptor per source mbuf segment. */
	request->num_bufs = segments_nb;
	for (i = 0; i < segments_nb; i++) {
		/* Empty source. */
		if (rte_pktmbuf_data_len(src_mbuf) == 0) {
			/* EIP does not support 0 length buffers. */
			MRVL_LOG(ERR, "Buffer length == 0 not supported!");
			return -1;
		}
		src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
		src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
		src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);

		src_mbuf = src_mbuf->next;
	}
	request->src = src_bd;

	/* Empty destination. */
	if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
		/* Make dst buffer fit at least source data. */
		if (rte_pktmbuf_append(dst_mbuf,
			rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
			MRVL_LOG(ERR, "Unable to set big enough dst buffer!");
			return -1;
		}
	}

	request->dst = dst_bd;
	dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
	dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);

	/*
	 * We can use all available space in dst_mbuf,
	 * not only what's used currently.
	 */
	dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);

	if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
		/* AEAD: the auth region mirrors the cipher region. */
		request->cipher_len = op->sym->aead.data.length;
		request->cipher_offset = op->sym->aead.data.offset;
		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->cipher_iv_offset);

		request->auth_aad = op->sym->aead.aad.data;
		request->auth_offset = request->cipher_offset;
		request->auth_len = request->cipher_len;
	} else {
		request->cipher_len = op->sym->cipher.data.length;
		request->cipher_offset = op->sym->cipher.data.offset;
		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->cipher_iv_offset);

		request->auth_offset = op->sym->auth.data.offset;
		request->auth_len = op->sym->auth.data.length;
	}

	digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
		op->sym->aead.digest.data : op->sym->auth.digest.data;
	if (digest == NULL) {
		/* No auth - no worry. */
		return 0;
	}

	request->auth_icv_offset = request->auth_offset + request->auth_len;

	/*
	 * EIP supports only scenarios where ICV(digest buffer) is placed at
	 * auth_icv_offset.
	 */
	if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
		/*
		 * This should be the most common case anyway,
		 * EIP will overwrite DST buffer at auth_icv_offset.
		 */
		if (rte_pktmbuf_mtod_offset(
				dst_mbuf, uint8_t *,
				request->auth_icv_offset) == digest)
			return 0;
	} else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
		/*
		 * EIP will look for digest at auth_icv_offset
		 * offset in SRC buffer. It must be placed in the last
		 * segment and the offset must be set to reach digest
		 * in the last segment
		 */
		struct rte_mbuf *last_seg = op->sym->m_src;
		uint32_t d_offset = request->auth_icv_offset;
		u32 d_size = sess->sam_sess_params.u.basic.auth_icv_len;
		unsigned char *d_ptr;

		/* Find the last segment and the offset for the last segment */
		while ((last_seg->next != NULL) &&
				(d_offset >= last_seg->data_len)) {
			d_offset -= last_seg->data_len;
			last_seg = last_seg->next;
		}

		/* Digest already where EIP expects it — nothing to do. */
		if (rte_pktmbuf_mtod_offset(last_seg, uint8_t *,
				d_offset) == digest)
			return 0;

		/* copy digest to last segment */
		if (last_seg->buf_len >= (d_size + d_offset)) {
			d_ptr = (unsigned char *)last_seg->buf_addr +
				d_offset;
			rte_memcpy(d_ptr, digest, d_size);
			return 0;
		}
	}

	/*
	 * If we landed here it means that digest pointer is
	 * at different than expected place.
	 */
	return -1;
}
739
/**
 * Prepare a single security protocol request.
 *
 * This function basically translates DPDK security request into one
 * understandable by MUDSK's SAM. If this is a first request in a session,
 * it starts the session.
 *
 * @param request Pointer to pre-allocated && reset request buffer [Out].
 * @param src_bd Pointer to pre-allocated source descriptor array [Out].
 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
 * @param op Pointer to DPDK crypto operation struct [In].
 * @returns 0 on success, negative value otherwise.
 */
static inline int
mrvl_request_prepare_sec(struct sam_cio_ipsec_params *request,
		struct sam_buf_info *src_bd,
		struct sam_buf_info *dst_bd,
		struct rte_crypto_op *op)
{
	struct mrvl_crypto_session *sess;
	struct rte_mbuf *src_mbuf, *dst_mbuf;
	uint16_t segments_nb;
	int i;

	/* Only security-session operations are handled here. */
	if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) {
		MRVL_LOG(ERR, "MRVL SECURITY: sess_type is not SECURITY_SESSION");
		return -EINVAL;
	}

	sess = SECURITY_GET_SESS_PRIV(op->sym->session);
	if (unlikely(sess == NULL)) {
		MRVL_LOG(ERR, "Session was not created for this device! %d",
			cryptodev_driver_id);
		return -EINVAL;
	}

	request->sa = sess->sam_sess;
	request->cookie = op;
	src_mbuf = op->sym->m_src;
	segments_nb = src_mbuf->nb_segs;
	/* The following conditions must be met:
	 * - Destination buffer is required when segmented source buffer
	 * - Segmented destination buffer is not supported
	 */
	if ((segments_nb > 1) && (!op->sym->m_dst)) {
		MRVL_LOG(ERR, "op->sym->m_dst = NULL!");
		return -1;
	}
	/* For non SG case:
	 * If application delivered us null dst buffer, it means it expects
	 * us to deliver the result in src buffer.
	 */
	dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

	if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
		MRVL_LOG(ERR, "Segmented destination buffer not supported!");
		return -1;
	}

	/* Build one SAM source descriptor per source mbuf segment. */
	request->num_bufs = segments_nb;
	for (i = 0; i < segments_nb; i++) {
		/* Empty source. */
		if (rte_pktmbuf_data_len(src_mbuf) == 0) {
			/* EIP does not support 0 length buffers. */
			MRVL_LOG(ERR, "Buffer length == 0 not supported!");
			return -1;
		}
		src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
		src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
		src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);

		src_mbuf = src_mbuf->next;
	}
	request->src = src_bd;

	/* Empty destination. */
	if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
		/* Make dst buffer fit at least source data. */
		if (rte_pktmbuf_append(dst_mbuf,
			rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
			MRVL_LOG(ERR, "Unable to set big enough dst buffer!");
			return -1;
		}
	}

	request->dst = dst_bd;
	dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
	dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);

	/*
	 * We can use all available space in dst_mbuf,
	 * not only what's used currently.
	 */
	dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);


	/* Whole packet, starting at the IP header (offset 0). */
	request->l3_offset = 0;
	request->pkt_size = rte_pktmbuf_pkt_len(op->sym->m_src);

	return 0;
}
840
841 /*
842 *-----------------------------------------------------------------------------
843 * PMD Framework handlers
844 *-----------------------------------------------------------------------------
845 */
846
847 /**
848 * Enqueue burst.
849 *
850 * @param queue_pair Pointer to queue pair.
851 * @param ops Pointer to ops requests array.
852 * @param nb_ops Number of elements in ops requests array.
853 * @returns Number of elements consumed from ops.
854 */
855 static uint16_t
mrvl_crypto_pmd_enqueue_burst(void * queue_pair,struct rte_crypto_op ** ops,uint16_t nb_ops)856 mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
857 uint16_t nb_ops)
858 {
859 uint16_t iter_ops = 0;
860 uint16_t to_enq_crp = 0;
861 uint16_t to_enq_sec = 0;
862 uint16_t consumed = 0;
863 int ret;
864 int iter;
865 struct sam_cio_op_params requests_crp[nb_ops];
866 struct sam_cio_ipsec_params requests_sec[nb_ops];
867 uint16_t indx_map_crp[nb_ops];
868 uint16_t indx_map_sec[nb_ops];
869
870 /*
871 * SAM does not store bd pointers, so on-stack scope will be enough.
872 */
873 struct mrvl_crypto_src_table src_bd[nb_ops];
874 struct sam_buf_info dst_bd[nb_ops];
875 struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
876
877 if (nb_ops == 0)
878 return 0;
879
880 /* Prepare the burst. */
881 memset(&requests_crp, 0, sizeof(requests_crp));
882 memset(&requests_sec, 0, sizeof(requests_sec));
883 memset(&src_bd, 0, sizeof(src_bd));
884
885 /* Iterate through */
886 for (; iter_ops < nb_ops; ++iter_ops) {
887 /* store the op id for debug */
888 if (ops[iter_ops]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
889 src_bd[iter_ops].iter_ops = to_enq_crp;
890 indx_map_crp[to_enq_crp] = iter_ops;
891
892 if (mrvl_request_prepare_crp(&requests_crp[to_enq_crp],
893 src_bd[iter_ops].src_bd,
894 &dst_bd[iter_ops],
895 ops[iter_ops]) < 0) {
896 MRVL_LOG(ERR,
897 "Error while preparing parameters!");
898 qp->stats.enqueue_err_count++;
899 ops[iter_ops]->status =
900 RTE_CRYPTO_OP_STATUS_ERROR;
901 /*
902 * Number of handled ops is increased
903 * (even if the result of handling is error).
904 */
905 ++consumed;
906
907 break;
908 }
909 /* Increase the number of ops to enqueue. */
910 ++to_enq_crp;
911 } else {
912 src_bd[iter_ops].iter_ops = to_enq_sec;
913 indx_map_sec[to_enq_sec] = iter_ops;
914 if (mrvl_request_prepare_sec(&requests_sec[to_enq_sec],
915 src_bd[iter_ops].src_bd,
916 &dst_bd[iter_ops],
917 ops[iter_ops]) < 0) {
918 MRVL_LOG(ERR,
919 "Error while preparing parameters!");
920 qp->stats.enqueue_err_count++;
921 ops[iter_ops]->status =
922 RTE_CRYPTO_OP_STATUS_ERROR;
923 /*
924 * Number of handled ops is increased
925 * (even if the result of handling is error).
926 */
927 ++consumed;
928
929 break;
930 }
931 /* Increase the number of ops to enqueue. */
932 ++to_enq_sec;
933 }
934
935 ops[iter_ops]->status =
936 RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
937
938 } /* for (; iter_ops < nb_ops;... */
939
940 if (to_enq_crp > 0) {
941 /* Send the burst */
942 ret = sam_cio_enq(qp->cio, requests_crp, &to_enq_crp);
943 consumed += to_enq_crp;
944 if (ret < 0) {
945 /*
946 * Trust SAM that in this case returned value will be at
947 * some point correct (now it is returned unmodified).
948 */
949 qp->stats.enqueue_err_count += to_enq_crp;
950 for (iter = 0; iter < to_enq_crp; ++iter)
951 ops[indx_map_crp[iter]]->status =
952 RTE_CRYPTO_OP_STATUS_ERROR;
953 }
954 }
955
956 if (to_enq_sec > 0) {
957 /* Send the burst */
958 ret = sam_cio_enq_ipsec(qp->cio, requests_sec, &to_enq_sec);
959 consumed += to_enq_sec;
960 if (ret < 0) {
961 /*
962 * Trust SAM that in this case returned value will be at
963 * some point correct (now it is returned unmodified).
964 */
965 qp->stats.enqueue_err_count += to_enq_sec;
966 for (iter = 0; iter < to_enq_crp; ++iter)
967 ops[indx_map_sec[iter]]->status =
968 RTE_CRYPTO_OP_STATUS_ERROR;
969 }
970 }
971
972 qp->stats.enqueued_count += to_enq_sec + to_enq_crp;
973 return consumed;
974 }
975
976 /**
977 * Dequeue burst.
978 *
979 * @param queue_pair Pointer to queue pair.
980 * @param ops Pointer to ops requests array.
981 * @param nb_ops Number of elements in ops requests array.
982 * @returns Number of elements dequeued.
983 */
984 static uint16_t
mrvl_crypto_pmd_dequeue_burst(void * queue_pair,struct rte_crypto_op ** ops,uint16_t nb_ops)985 mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
986 struct rte_crypto_op **ops,
987 uint16_t nb_ops)
988 {
989 int ret;
990 struct mrvl_crypto_qp *qp = queue_pair;
991 struct sam_cio *cio = qp->cio;
992 struct sam_cio_op_result results[nb_ops];
993 uint16_t i;
994 struct rte_mbuf *dst;
995
996 ret = sam_cio_deq(cio, results, &nb_ops);
997 if (ret < 0) {
998 /* Count all dequeued as error. */
999 qp->stats.dequeue_err_count += nb_ops;
1000
1001 /* But act as they were dequeued anyway*/
1002 qp->stats.dequeued_count += nb_ops;
1003
1004 return 0;
1005 }
1006
1007 /* Unpack and check results. */
1008 for (i = 0; i < nb_ops; ++i) {
1009 ops[i] = results[i].cookie;
1010
1011 switch (results[i].status) {
1012 case SAM_CIO_OK:
1013 ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1014 if (ops[i]->sess_type ==
1015 RTE_CRYPTO_OP_SECURITY_SESSION) {
1016
1017 if (ops[i]->sym->m_dst)
1018 dst = ops[i]->sym->m_dst;
1019 else
1020 dst = ops[i]->sym->m_src;
1021 dst->pkt_len = results[i].out_len;
1022 dst->data_len = results[i].out_len;
1023 }
1024 break;
1025 case SAM_CIO_ERR_ICV:
1026 MRVL_LOG(DEBUG, "CIO returned SAM_CIO_ERR_ICV.");
1027 ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1028 break;
1029 default:
1030 MRVL_LOG(DEBUG,
1031 "CIO returned Error: %d.", results[i].status);
1032 ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1033 break;
1034 }
1035 }
1036
1037 qp->stats.dequeued_count += nb_ops;
1038 return nb_ops;
1039 }
1040
1041 /**
1042 * Create a new crypto device.
1043 *
1044 * @param name Driver name.
1045 * @param vdev Pointer to device structure.
1046 * @param init_params Pointer to initialization parameters.
1047 * @returns 0 in case of success, negative value otherwise.
1048 */
1049 static int
cryptodev_mrvl_crypto_create(const char * name,struct rte_vdev_device * vdev,struct mrvl_pmd_init_params * init_params)1050 cryptodev_mrvl_crypto_create(const char *name,
1051 struct rte_vdev_device *vdev,
1052 struct mrvl_pmd_init_params *init_params)
1053 {
1054 struct rte_cryptodev *dev;
1055 struct mrvl_crypto_private *internals;
1056 struct sam_init_params sam_params;
1057 struct rte_security_ctx *security_instance;
1058 int ret = -EINVAL;
1059
1060 dev = rte_cryptodev_pmd_create(name, &vdev->device,
1061 &init_params->common);
1062 if (dev == NULL) {
1063 MRVL_LOG(ERR, "Failed to create cryptodev vdev!");
1064 goto init_error;
1065 }
1066
1067 dev->driver_id = cryptodev_driver_id;
1068 dev->dev_ops = rte_mrvl_crypto_pmd_ops;
1069
1070 /* Register rx/tx burst functions for data path. */
1071 dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
1072 dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
1073
1074 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
1075 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
1076 RTE_CRYPTODEV_FF_HW_ACCELERATED |
1077 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
1078 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
1079 RTE_CRYPTODEV_FF_SECURITY;
1080
1081 internals = dev->data->dev_private;
1082
1083 internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
1084 internals->max_nb_sessions = init_params->max_nb_sessions;
1085
1086 ret = rte_mvep_init(MVEP_MOD_T_SAM, NULL);
1087 if (ret)
1088 goto init_error;
1089
1090 sam_params.max_num_sessions = internals->max_nb_sessions;
1091
1092 /* Initialize security_ctx only for primary process*/
1093 security_instance = rte_malloc("rte_security_instances_ops",
1094 sizeof(struct rte_security_ctx), 0);
1095 if (security_instance == NULL)
1096 return -ENOMEM;
1097 security_instance->device = (void *)dev;
1098 security_instance->ops = rte_mrvl_security_pmd_ops;
1099 security_instance->sess_cnt = 0;
1100 dev->security_ctx = security_instance;
1101
1102 /*sam_set_debug_flags(3);*/
1103
1104 ret = sam_init(&sam_params);
1105 if (ret)
1106 goto init_error;
1107
1108 rte_cryptodev_pmd_probing_finish(dev);
1109
1110 return 0;
1111
1112 init_error:
1113 MRVL_LOG(ERR,
1114 "Driver %s: %s failed!", init_params->common.name, __func__);
1115
1116 cryptodev_mrvl_crypto_uninit(vdev);
1117 return ret;
1118 }
1119
1120 /** Parse integer from integer argument */
1121 static int
parse_integer_arg(const char * key __rte_unused,const char * value,void * extra_args)1122 parse_integer_arg(const char *key __rte_unused,
1123 const char *value, void *extra_args)
1124 {
1125 int *i = (int *) extra_args;
1126
1127 *i = atoi(value);
1128 if (*i < 0) {
1129 MRVL_LOG(ERR, "Argument has to be positive!");
1130 return -EINVAL;
1131 }
1132
1133 return 0;
1134 }
1135
1136 /** Parse name */
1137 static int
parse_name_arg(const char * key __rte_unused,const char * value,void * extra_args)1138 parse_name_arg(const char *key __rte_unused,
1139 const char *value, void *extra_args)
1140 {
1141 struct rte_cryptodev_pmd_init_params *params = extra_args;
1142
1143 if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
1144 MRVL_LOG(ERR, "Invalid name %s, should be less than %u bytes!",
1145 value, RTE_CRYPTODEV_NAME_MAX_LEN - 1);
1146 return -EINVAL;
1147 }
1148
1149 strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
1150
1151 return 0;
1152 }
1153
1154 static int
mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params * params,const char * input_args)1155 mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
1156 const char *input_args)
1157 {
1158 struct rte_kvargs *kvlist = NULL;
1159 int ret = 0;
1160
1161 if (params == NULL)
1162 return -EINVAL;
1163
1164 if (input_args) {
1165 kvlist = rte_kvargs_parse(input_args,
1166 mrvl_pmd_valid_params);
1167 if (kvlist == NULL)
1168 return -1;
1169
1170 /* Common VDEV parameters */
1171 ret = rte_kvargs_process(kvlist,
1172 RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
1173 &parse_integer_arg,
1174 ¶ms->common.max_nb_queue_pairs);
1175 if (ret < 0)
1176 goto free_kvlist;
1177
1178 ret = rte_kvargs_process(kvlist,
1179 RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
1180 &parse_integer_arg,
1181 ¶ms->common.socket_id);
1182 if (ret < 0)
1183 goto free_kvlist;
1184
1185 ret = rte_kvargs_process(kvlist,
1186 RTE_CRYPTODEV_PMD_NAME_ARG,
1187 &parse_name_arg,
1188 ¶ms->common.name);
1189 if (ret < 0)
1190 goto free_kvlist;
1191
1192 ret = rte_kvargs_process(kvlist,
1193 MRVL_PMD_MAX_NB_SESS_ARG,
1194 &parse_integer_arg,
1195 ¶ms->max_nb_sessions);
1196 if (ret < 0)
1197 goto free_kvlist;
1198
1199 }
1200
1201 free_kvlist:
1202 rte_kvargs_free(kvlist);
1203 return ret;
1204 }
1205
1206 /**
1207 * Initialize the crypto device.
1208 *
1209 * @param vdev Pointer to device structure.
1210 * @returns 0 in case of success, negative value otherwise.
1211 */
1212 static int
cryptodev_mrvl_crypto_init(struct rte_vdev_device * vdev)1213 cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
1214 {
1215 struct mrvl_pmd_init_params init_params = {
1216 .common = {
1217 .name = "",
1218 .private_data_size =
1219 sizeof(struct mrvl_crypto_private),
1220 .max_nb_queue_pairs =
1221 sam_get_num_inst() * sam_get_num_cios(0),
1222 .socket_id = rte_socket_id()
1223 },
1224 .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
1225 };
1226
1227 const char *name, *args;
1228 int ret;
1229
1230 name = rte_vdev_device_name(vdev);
1231 if (name == NULL)
1232 return -EINVAL;
1233 args = rte_vdev_device_args(vdev);
1234
1235 ret = mrvl_pmd_parse_input_args(&init_params, args);
1236 if (ret) {
1237 MRVL_LOG(ERR, "Failed to parse initialisation arguments[%s]!",
1238 args);
1239 return -EINVAL;
1240 }
1241
1242 return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
1243 }
1244
1245 /**
1246 * Uninitialize the crypto device
1247 *
1248 * @param vdev Pointer to device structure.
1249 * @returns 0 in case of success, negative value otherwise.
1250 */
1251 static int
cryptodev_mrvl_crypto_uninit(struct rte_vdev_device * vdev)1252 cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
1253 {
1254 struct rte_cryptodev *cryptodev;
1255 const char *name = rte_vdev_device_name(vdev);
1256
1257 if (name == NULL)
1258 return -EINVAL;
1259
1260 MRVL_LOG(INFO, "Closing Marvell crypto device %s on numa socket %u.",
1261 name, rte_socket_id());
1262
1263 sam_deinit();
1264 rte_mvep_deinit(MVEP_MOD_T_SAM);
1265
1266 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
1267 if (cryptodev == NULL)
1268 return -ENODEV;
1269
1270 return rte_cryptodev_pmd_destroy(cryptodev);
1271 }
1272
1273 /**
1274 * Basic driver handlers for use in the constructor.
1275 */
static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
	.probe = cryptodev_mrvl_crypto_init,
	.remove = cryptodev_mrvl_crypto_uninit
};

/* Driver handle used below to allocate this PMD's cryptodev driver id. */
static struct cryptodev_driver mrvl_crypto_drv;

/* Register the driver in constructor. */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
/* Advertise the accepted "key=value" vdev arguments (see parse handlers). */
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");
/* Bind the vdev driver to the id assigned to dev->driver_id in create(). */
RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
		cryptodev_driver_id);
/* Register the PMD's dynamic log type with NOTICE as the default level. */
RTE_LOG_REGISTER_DEFAULT(mrvl_logtype_driver, NOTICE);
1292