/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2021 Intel Corporation
 */

#include "pmd_aesni_mb_priv.h"

/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer
 *				to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		const uint8_t *hkey, uint16_t hkey_len,
		uint16_t blocksize)
{
	uint32_t i, length;

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	/*
	 * Clean up stack.
	 * NOTE(review): a plain memset of key material before return may be
	 * optimized away by the compiler - consider an explicit-scrub helper.
	 */
	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
}

/* True when the hash/cipher pair forms an AEAD combination
 * (CHACHA20-POLY1305, AES-CCM, or GMAC used as part of AES-GCM).
 */
static inline int
is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
{
	return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 ||
		hash_alg == IMB_AUTH_AES_CCM ||
		(hash_alg == IMB_AUTH_AES_GMAC &&
		cipher_mode == IMB_CIPHER_GCM));
}

/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn = NULL;
	unsigned int key_larger_block_size = 0;
	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
	uint32_t auth_precompute = 1;

	/* No auth xform: session performs no authentication */
	if (xform == NULL) {
		sess->auth.algo = IMB_AUTH_NULL;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		IPSEC_MB_LOG(ERR, "Crypto xform struct not of type auth");
		return -1;
	}

	/* Set IV parameters */
	sess->auth_iv.offset = xform->auth.iv.offset;
	sess->auth_iv.length = xform->auth.iv.length;

	/* Set the request digest size */
	sess->auth.req_digest_len = xform->auth.digest_length;

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = IMB_AUTH_AES_XCBC;

		uint16_t xcbc_mac_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC);
		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		return 0;
	}

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
		uint32_t dust[4*15];

		sess->auth.algo = IMB_AUTH_AES_CMAC;

		uint16_t cmac_digest_len =
			get_digest_byte_length(IMB_AUTH_AES_CMAC);

		if (sess->auth.req_digest_len > cmac_digest_len) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		/*
		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
		 * in version 0.50 and sizes of 12 and 16 bytes,
		 * in version 0.49.
		 * If size requested is different, generate the full digest
		 * (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len < 4)
			sess->auth.gen_digest_len = cmac_digest_len;
		else
			sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
				sess->auth.cmac.expkey, dust);
		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
		return 0;
	}

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		/* GMAC reuses the cipher direction/key fields of the session */
		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			sess->cipher.direction = IMB_DIR_ENCRYPT;
			sess->chain_order = IMB_ORDER_CIPHER_HASH;
		} else
			sess->cipher.direction = IMB_DIR_DECRYPT;

		sess->auth.algo = IMB_AUTH_AES_GMAC;
		if (sess->auth.req_digest_len >
			get_digest_byte_length(IMB_AUTH_AES_GMAC)) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;
		sess->iv.length = xform->auth.iv.length;
		sess->iv.offset = xform->auth.iv.offset;

		switch (xform->auth.key.length) {
		case IMB_KEY_128_BYTES:
			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
			break;
		case IMB_KEY_192_BYTES:
			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
			break;
		case IMB_KEY_256_BYTES:
			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
			return -EINVAL;
		}

		return 0;
	}

	if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if (xform->auth.key.length == 16) {
			sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
		} else if (xform->auth.key.length == 32) {
			sess->auth.algo = IMB_AUTH_ZUC256_EIA3_BITLEN;
		} else {
			IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
			return -EINVAL;
		}

		/*
		 * NOTE(review): the digest-length check below always uses the
		 * ZUC-128 (EIA3) truncated length, even when the 256-bit key
		 * variant was selected above - confirm whether ZUC-256 digest
		 * sizes other than the EIA3 one should be accepted.
		 */
		uint16_t zuc_eia3_digest_len =
			get_truncated_digest_byte_length(
						IMB_AUTH_ZUC_EIA3_BITLEN);
		if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		memcpy(sess->auth.zuc_auth_key, xform->auth.key.data,
			xform->auth.key.length);
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
		sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
		uint16_t snow3g_uia2_digest_len =
			get_truncated_digest_byte_length(
						IMB_AUTH_SNOW3G_UIA2_BITLEN);
		if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_snow3g_auth);
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
		sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
		uint16_t kasumi_f9_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
		if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_kasumi_auth);
		return 0;
	}

	/*
	 * Remaining algorithms: HMAC variants (ipad/opad precompute needed,
	 * keys longer than the block size are hashed first) and plain SHA
	 * (no key, no precompute).
	 */
	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = IMB_AUTH_MD5;
		hash_oneblock_fn = mb_mgr->md5_one_block;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = IMB_AUTH_HMAC_SHA_1;
		hash_oneblock_fn = mb_mgr->sha1_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_1)) {
			IMB_SHA1(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA1:
		sess->auth.algo = IMB_AUTH_SHA_1;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = IMB_AUTH_HMAC_SHA_224;
		hash_oneblock_fn = mb_mgr->sha224_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_224)) {
			IMB_SHA224(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		sess->auth.algo = IMB_AUTH_SHA_224;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = IMB_AUTH_HMAC_SHA_256;
		hash_oneblock_fn = mb_mgr->sha256_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_256)) {
			IMB_SHA256(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		sess->auth.algo = IMB_AUTH_SHA_256;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = IMB_AUTH_HMAC_SHA_384;
		hash_oneblock_fn = mb_mgr->sha384_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_384)) {
			IMB_SHA384(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		sess->auth.algo = IMB_AUTH_SHA_384;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = IMB_AUTH_HMAC_SHA_512;
		hash_oneblock_fn = mb_mgr->sha512_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_512)) {
			IMB_SHA512(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		sess->auth.algo = IMB_AUTH_SHA_512;
		auth_precompute = 0;
		break;
	default:
		IPSEC_MB_LOG(ERR,
			"Unsupported authentication algorithm selection");
		return -ENOTSUP;
	}
	uint16_t trunc_digest_size =
			get_truncated_digest_byte_length(sess->auth.algo);
	uint16_t full_digest_size =
			get_digest_byte_length(sess->auth.algo);

	if (sess->auth.req_digest_len > full_digest_size ||
			sess->auth.req_digest_len == 0) {
		IPSEC_MB_LOG(ERR, "Invalid digest size\n");
		return -EINVAL;
	}

	/* Generate the full digest when a non-standard size was requested */
	if (sess->auth.req_digest_len != trunc_digest_size &&
			sess->auth.req_digest_len != full_digest_size)
		sess->auth.gen_digest_len = full_digest_size;
	else
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

	/* Plain SHA does not require precompute key */
	if (auth_precompute == 0)
		return 0;

	/* Calculate Authentication precomputes */
	if (key_larger_block_size) {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			hashed_key,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	} else {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	}

	return 0;
}

/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	uint8_t is_aes = 0;
	uint8_t is_3DES = 0;
	uint8_t is_docsis = 0;
	uint8_t is_zuc = 0;
	uint8_t is_snow3g = 0;
	uint8_t is_kasumi = 0;

	/* No cipher xform: session performs no ciphering */
	if (xform == NULL) {
		sess->cipher.mode = IMB_CIPHER_NULL;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		IPSEC_MB_LOG(ERR, "Crypto xform struct not of type cipher");
		return -EINVAL;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = IMB_DIR_ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = IMB_DIR_DECRYPT;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter");
		return -EINVAL;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = IMB_CIPHER_CBC;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = IMB_CIPHER_CNTR;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI;
		is_docsis = 1;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->cipher.mode = IMB_CIPHER_DES;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->cipher.mode = IMB_CIPHER_DOCSIS_DES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.mode = IMB_CIPHER_DES3;
		is_3DES = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.mode = IMB_CIPHER_ECB;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
		is_zuc = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
		is_snow3g = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
		is_kasumi = 1;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter");
		return -ENOTSUP;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;

	/* Check key length and choose key expansion function for AES */
	if (is_aes) {
		switch (xform->cipher.key.length) {
		case IMB_KEY_128_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_192_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_256_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_docsis) {
		/* DOCSIS BPI supports 128 and 256-bit keys only (no 192) */
		switch (xform->cipher.key.length) {
		case IMB_KEY_128_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_256_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_3DES) {
		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
				sess->cipher.exp_3des_keys.key[1],
				sess->cipher.exp_3des_keys.key[2]};

		switch (xform->cipher.key.length) {
		case 24:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			IMB_DES_KEYSCHED(mb_mgr, keys[2],
					xform->cipher.key.data + 16);

			/* Initialize keys - 24 bytes: [K1-K2-K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
			break;
		case 16:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		case 8:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);

			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->cipher.key_length_in_bytes = 24;
	} else if (is_zuc) {
		if (xform->cipher.key.length != 16 &&
				xform->cipher.key.length != 32) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = xform->cipher.key.length;
		memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
			xform->cipher.key.length);
	} else if (is_snow3g) {
		if (xform->cipher.key.length != 16) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_snow3g_cipher);
	} else if (is_kasumi) {
		if (xform->cipher.key.length != 16) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_kasumi_cipher);
	} else {
		/* Single DES: same 8-byte key schedule reused for both
		 * directions (stored in the AES key-slot of the union).
		 */
		if (xform->cipher.key.length != 8) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 8;

		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
				xform->cipher.key.data);
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
				xform->cipher.key.data);
	}

	return 0;
}

/** Set session AEAD parameters (CCM, GCM, CHACHA20-POLY1305) */
static int
aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	switch (xform->aead.op) {
	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
		sess->cipher.direction = IMB_DIR_ENCRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	case RTE_CRYPTO_AEAD_OP_DECRYPT:
		sess->cipher.direction = IMB_DIR_DECRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Invalid aead operation parameter");
		return -EINVAL;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->aead.iv.offset;
	sess->iv.length = xform->aead.iv.length;

	/* Set digest sizes */
	sess->auth.req_digest_len = xform->aead.digest_length;
	sess->auth.gen_digest_len = sess->auth.req_digest_len;

	switch (xform->aead.algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		sess->cipher.mode = IMB_CIPHER_CCM;
		sess->auth.algo = IMB_AUTH_AES_CCM;

		/* Check key length and choose key expansion function for AES */
		switch (xform->aead.key.length) {
		case IMB_KEY_128_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_256_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		/* CCM digests must be between 4 and 16 and an even number */
		if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
			(sess->auth.req_digest_len & 1) == 1) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

	case RTE_CRYPTO_AEAD_AES_GCM:
		sess->cipher.mode = IMB_CIPHER_GCM;
		sess->auth.algo = IMB_AUTH_AES_GMAC;

		switch (xform->aead.key.length) {
		case IMB_KEY_128_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case IMB_KEY_192_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case IMB_KEY_256_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		/* GCM digest size must be between 1 and 16 */
		if (sess->auth.req_digest_len == 0 ||
				sess->auth.req_digest_len > 16) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305;
		sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305;

		if (xform->aead.key.length != 32) {
			IPSEC_MB_LOG(ERR, "Invalid key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 32;
		/* Raw key stored in the AES key-slot of the union */
		memcpy(sess->cipher.expanded_aes_keys.encode,
			xform->aead.key.data, 32);
		if (sess->auth.req_digest_len != 16) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;
	default:
		IPSEC_MB_LOG(ERR, "Unsupported aead mode parameter");
		return -ENOTSUP;
	}

	return 0;
}

/** Configure a aesni multi-buffer session from a crypto xform chain */
static int
aesni_mb_session_configure(IMB_MGR *mb_mgr,
		void *priv_sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	enum ipsec_mb_operation mode;
	struct aesni_mb_session *sess = (struct aesni_mb_session *) priv_sess;
	int ret;

	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
				&cipher_xform, &aead_xform);
	if (ret)
		return ret;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (mode) {
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
		sess->chain_order = IMB_ORDER_HASH_CIPHER;
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		sess->chain_order = IMB_ORDER_CIPHER_HASH;
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		sess->chain_order = IMB_ORDER_HASH_CIPHER;
		break;
	/*
	 * Multi buffer library operates only at two modes,
	 * IMB_ORDER_CIPHER_HASH and IMB_ORDER_HASH_CIPHER.
	 * When doing ciphering only, chain order depends
	 * on cipher operation: encryption is always
	 * the first operation and decryption the last one.
	 */
	case IPSEC_MB_OP_ENCRYPT_ONLY:
		sess->chain_order = IMB_ORDER_CIPHER_HASH;
		break;
	case IPSEC_MB_OP_DECRYPT_ONLY:
		sess->chain_order = IMB_ORDER_HASH_CIPHER;
		break;
	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
		sess->chain_order = IMB_ORDER_CIPHER_HASH;
		sess->aead.aad_len = xform->aead.aad_length;
		break;
	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
		sess->chain_order = IMB_ORDER_HASH_CIPHER;
		sess->aead.aad_len = xform->aead.aad_length;
		break;
	case IPSEC_MB_OP_NOT_SUPPORTED:
	default:
		IPSEC_MB_LOG(ERR,
			"Unsupported operation chain order parameter");
		return -ENOTSUP;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	sess->auth_iv.length = 0;

	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
	if (ret != 0) {
		IPSEC_MB_LOG(ERR,
			"Invalid/unsupported authentication parameters");
		return ret;
	}

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
			cipher_xform);
	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		return ret;
	}

	if (aead_xform) {
		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
				aead_xform);
		if (ret != 0) {
			IPSEC_MB_LOG(ERR,
				"Invalid/unsupported aead parameters");
			return ret;
		}
	}

	return 0;
}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/** Check DOCSIS security session configuration is valid */
static int
check_docsis_sec_session(struct rte_security_session_conf *conf)
{
	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
	struct rte_security_docsis_xform *docsis = &conf->docsis;

	/* Downlink: CRC generate -> Cipher encrypt */
	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {

		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
		    crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	/* Uplink: Cipher decrypt -> CRC verify */
	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {

		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
		    crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	}

	return -EINVAL;
}

/** Set DOCSIS security session auth (CRC) parameters */
static int
aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
		struct rte_security_docsis_xform *xform)
{
	if (xform == NULL) {
		IPSEC_MB_LOG(ERR, "Invalid DOCSIS xform");
		return -EINVAL;
	}

	/* Select CRC generate/verify */
	if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
	} else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
	} else {
		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction");
		return -ENOTSUP;
	}

	sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
	sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;

	return 0;
}

/**
 * Parse DOCSIS security session configuration and set private session
 * parameters
 */
static int
aesni_mb_set_docsis_sec_session_parameters(
		__rte_unused struct rte_cryptodev *dev,
		struct rte_security_session_conf *conf,
		void *sess)
{
	/* Temporary manager, used only for key expansion during setup */
	IMB_MGR *mb_mgr = alloc_init_mb_mgr();
	struct rte_security_docsis_xform *docsis_xform;
	struct rte_crypto_sym_xform *cipher_xform;
	struct aesni_mb_session *ipsec_sess = sess;
	int ret = 0;

	if (!mb_mgr)
		return -ENOMEM;

	ret = check_docsis_sec_session(conf);
	if (ret) {
		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
		goto error_exit;
	}

	switch (conf->docsis.direction) {
	case RTE_SECURITY_DOCSIS_UPLINK:
		ipsec_sess->chain_order = IMB_ORDER_CIPHER_HASH;
		docsis_xform = &conf->docsis;
		cipher_xform = conf->crypto_xform;
		break;
	case RTE_SECURITY_DOCSIS_DOWNLINK:
		ipsec_sess->chain_order = IMB_ORDER_HASH_CIPHER;
		cipher_xform = conf->crypto_xform;
		docsis_xform = &conf->docsis;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
		ret = -EINVAL;
		goto error_exit;
	}

	/* Default IV length = 0 */
	ipsec_sess->iv.length = 0;

	ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess,
			docsis_xform);
	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
		goto error_exit;
	}

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr,
			ipsec_sess, cipher_xform);

	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		goto error_exit;
	}

error_exit:
	free_mb_mgr(mb_mgr);
	return ret;
}
#endif

/*
 * Compute the hash start offset to hand to intel-ipsec-mb.
 * For out-of-place cipher-then-hash the destination buffer may not yet
 * contain the bytes outside the ciphered region, so those spans are copied
 * from source to destination first; the returned value is the (modular)
 * distance from the source base to the auth start in the destination.
 */
static inline uint64_t
auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
		uint32_t oop)
{
	struct rte_mbuf *m_src, *m_dst;
	uint8_t *p_src, *p_dst;
	uintptr_t u_src, u_dst;
	uint32_t cipher_end, auth_end;

	/* Only cipher then hash needs special calculation. */
	if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH)
		return op->sym->auth.data.offset;

	m_src = op->sym->m_src;
	m_dst = op->sym->m_dst;

	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
	u_src = (uintptr_t)p_src;
	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;

	/**
	 * Copy the content between cipher offset and auth offset for generating
	 * correct digest.
	 */
	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
		memcpy(p_dst + op->sym->auth.data.offset,
				p_src + op->sym->auth.data.offset,
				op->sym->cipher.data.offset -
				op->sym->auth.data.offset);

	/**
	 * Copy the content between (cipher offset + length) and (auth offset +
	 * length) for generating correct digest
	 */
	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
	if (cipher_end < auth_end)
		memcpy(p_dst + cipher_end, p_src + cipher_end,
				auth_end - cipher_end);

	/**
	 * Since intel-ipsec-mb only supports positive values,
	 * we need to deduct the correct offset between src and dst.
	 */

	/* Wrap-around (mod 2^64) encodes a "negative" src->dst distance */
	return u_src < u_dst ? (u_dst - u_src) :
			(UINT64_MAX - u_src + u_dst + 1);
}

/* Fill an IMB_JOB for the raw (CPU crypto) datapath from session state. */
static inline void
set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
		union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
{
	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	job->iv = iv->va;

	switch (job->hash_alg) {
	case IMB_AUTH_AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_CCM:
		/*
		 * NOTE(review): +18 presumably skips the CCM B0 block /
		 * AAD-length prefix expected by intel-ipsec-mb, and iv++
		 * skips the leading nonce byte - confirm against the
		 * intel-ipsec-mb CCM API documentation.
		 */
		job->u.CCM.aad = (uint8_t *)aad->va + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		job->iv++;
		break;

	case IMB_AUTH_AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_GMAC:
		if (session->cipher.mode == IMB_CIPHER_GCM) {
			job->u.GCM.aad = aad->va;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			/* For GMAC */
			job->u.GCM.aad = buf;
			job->u.GCM.aad_len_in_bytes = len;
			job->cipher_mode = IMB_CIPHER_GCM;
		}
		job->enc_keys = &session->cipher.gcm_key;
		job->dec_keys = &session->cipher.gcm_key;
		break;

	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = aad->va;
		job->u.CHACHA20_POLY1305.aad_len_in_bytes =
			session->aead.aad_len;
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.encode;
		break;
	default:
		/* HMAC family: use precomputed ipad/opad */
		job->u.HMAC._hashed_auth_key_xor_ipad =
			session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad =
			session->auth.pads.outer;

		if (job->cipher_mode == IMB_CIPHER_DES3) {
			job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
			job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->enc_keys = session->cipher.expanded_aes_keys.encode;
			job->dec_keys = session->cipher.expanded_aes_keys.decode;
		}
	}

	/*
	 * Multi-buffer library current only support returning a truncated
	 * digest length as specified in the relevant IPsec RFCs
	 */

	/* Set digest location and length */
	job->auth_tag_output = digest;
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = buf;
	job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
	job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
	job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
	if (job->hash_alg == IMB_AUTH_AES_GMAC &&
			session->cipher.mode != IMB_CIPHER_GCM) {
		/* Standalone GMAC: data passed as AAD, nothing to hash/cipher */
		job->msg_len_to_hash_in_bytes = 0;
		job->msg_len_to_cipher_in_bytes = 0;
	} else {
		job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
			sofs.ofs.auth.tail;
		job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
			sofs.ofs.cipher.tail;
	}

	job->user_data = udata;
}

/**
 Process a crypto operation and complete a IMB_JOB job structure for
 * submission to the multi buffer library for processing.
 *
 * @param qp		queue pair
 * @param job		IMB_JOB structure to fill
 * @param op		crypto op to process
 * @param digest_idx	ID for digest to use
 *
 * @return
 * - 0 on success, the IMB_JOB will be filled
 * - -1 if invalid session, IMB_JOB will not be filled
 */
static inline int
set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
	struct aesni_mb_session *session;
	uint32_t m_offset, oop;

	session = ipsec_mb_get_session_private(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	const int aead = is_aead_algo(job->hash_alg, job->cipher_mode);

	/* Default key pointers; algorithm-specific cases below override them */
	if (job->cipher_mode == IMB_CIPHER_DES3) {
		job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
		job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
	} else {
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
	}

	switch (job->hash_alg) {
	case IMB_AUTH_AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_CCM:
		/* NOTE(review): AAD appears to start 18 bytes into the op's
		 * AAD buffer (DPDK CCM layout) — confirm vs rte_crypto docs */
		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_GMAC:
		if (session->cipher.mode == IMB_CIPHER_GCM) {
			job->u.GCM.aad = op->sym->aead.aad.data;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			/* For GMAC: auth region of the mbuf is the AAD */
			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
					uint8_t *, op->sym->auth.data.offset);
			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
			job->cipher_mode = IMB_CIPHER_GCM;
		}
		job->enc_keys = &session->cipher.gcm_key;
		job->dec_keys = &session->cipher.gcm_key;
		break;
	case IMB_AUTH_ZUC_EIA3_BITLEN:
	case IMB_AUTH_ZUC256_EIA3_BITLEN:
		job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
		job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
		job->u.SNOW3G_UIA2._key = (void *)
			&session->auth.pKeySched_snow3g_auth;
		job->u.SNOW3G_UIA2._iv =
			rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_KASUMI_UIA1:
		job->u.KASUMI_UIA1._key = (void *)
			&session->auth.pKeySched_kasumi_auth;
		break;
	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
		job->u.CHACHA20_POLY1305.aad_len_in_bytes =
			session->aead.aad_len;
		/* same raw key both ways; stored in .encode */
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.encode;
		break;
	default:
		/* HMAC family: precomputed ipad/opad partial hashes;
		 * key pointers were already set before the switch */
		job->u.HMAC._hashed_auth_key_xor_ipad =
			session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad =
			session->auth.pads.outer;

	}

	if (aead)
		m_offset = op->sym->aead.data.offset;
	else
		m_offset = op->sym->cipher.data.offset;

	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
		job->enc_keys = session->cipher.zuc_cipher_key;
		job->dec_keys = session->cipher.zuc_cipher_key;
	} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
		m_offset = 0;
	} else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
		m_offset = 0;
	}

	if (!op->sym->m_dst) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else if (op->sym->m_dst == op->sym->m_src) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else {
		/* out-of-place operation */
		m_dst = op->sym->m_dst;
		oop = 1;
	}

	/* Set digest output location: on verify, generate into a scratch
	 * buffer so the computed tag can be compared afterwards */
	if (job->hash_alg != IMB_AUTH_NULL &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp_data->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
	} else {
		if (aead)
			job->auth_tag_output = op->sym->aead.digest.data;
		else
			job->auth_tag_output = op->sym->auth.digest.data;

		/* library generates gen_digest_len; truncate later if the
		 * requested length differs */
		if (session->auth.req_digest_len !=
				session->auth.gen_digest_len) {
			job->auth_tag_output =
				qp_data->temp_digests[*digest_idx];
			*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
		}
	}
	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	switch (job->hash_alg) {
	case IMB_AUTH_AES_CCM:
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;

		/* skip first IV byte — NOTE(review): presumably the CCM
		 * flags byte of the counter block; confirm */
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset + 1);
		break;

	case IMB_AUTH_AES_GMAC:
		if (session->cipher.mode == IMB_CIPHER_GCM) {
			job->cipher_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->msg_len_to_cipher_in_bytes =
					op->sym->aead.data.length;
			job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;
		} else {
			/* plain GMAC: data passed as AAD, so lengths are 0 */
			job->cipher_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->msg_len_to_cipher_in_bytes = 0;
			job->msg_len_to_hash_in_bytes = 0;
		}

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;

	case IMB_AUTH_CHACHA20_POLY1305:
		job->cipher_start_src_offset_in_bytes =
			op->sym->aead.data.offset;
		job->hash_start_src_offset_in_bytes =
			op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes =
				op->sym->aead.data.length;
		job->msg_len_to_hash_in_bytes =
				op->sym->aead.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;
	default:
		/*
		 * For SNOW3G, length and offsets are already in bits */
		job->cipher_start_src_offset_in_bytes =
				op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop);
		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	}

	/* ZUC/KASUMI lengths arrive in bits; convert to bytes for the lib */
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
		job->msg_len_to_cipher_in_bytes >>= 3;
	else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
		job->msg_len_to_hash_in_bytes >>= 3;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	return 0;
}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/**
 * Process a crypto operation containing a security op and complete a
 * IMB_JOB job structure for submission to the multi buffer library for
 * processing.
 *
 * Only in-place DOCSIS (BPI cipher + CRC32) operations are supported.
 *
 * @return 0 on success, negative value on invalid session/op
 */
static inline int
set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
	struct rte_mbuf *m_src, *m_dst;
	struct rte_crypto_sym_op *sym;
	struct aesni_mb_session *session = NULL;

	if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}
	session = (struct aesni_mb_session *)
		get_sec_session_private_data(op->sym->sec_session);

	if (unlikely(session == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}
	/* Only DOCSIS protocol operations supported now */
	if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
			session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -1;
	}

	sym = op->sym;
	m_src = sym->m_src;

	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
		/* in-place operation */
		m_dst = m_src;
	} else {
		/* out-of-place operation not supported */
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->enc_keys = session->cipher.expanded_aes_keys.encode;
	job->dec_keys = session->cipher.expanded_aes_keys.decode;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;
	job->iv = (uint8_t *)op + session->iv.offset;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	/* Set digest output location: CRC is always generated into a
	 * scratch slot and compared/copied in post-processing */
	job->auth_tag_output = qp_data->temp_digests[*digest_idx];
	*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
						sym->cipher.data.offset);

	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = sym->auth.data.length;

	job->user_data = op;

	return 0;
}

/**
 * Compare the CRC32 computed by the library against the CRC stored at the
 * end of the DOCSIS frame; flag the op as AUTH_FAILED on mismatch.
 */
static inline void
verify_docsis_sec_crc(IMB_JOB *job, uint8_t *status)
{
	uint16_t crc_offset;
	uint8_t *crc;

	if (!job->msg_len_to_hash_in_bytes)
		return;

	/* CRC location relative to job->dst (which already points at the
	 * cipher start offset) */
	crc_offset = job->hash_start_src_offset_in_bytes +
			job->msg_len_to_hash_in_bytes -
job->cipher_start_src_offset_in_bytes; 1436 crc = job->dst + crc_offset; 1437 1438 /* Verify CRC (at the end of the message) */ 1439 if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0) 1440 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; 1441 } 1442 #endif 1443 1444 static inline void 1445 verify_digest(IMB_JOB *job, void *digest, uint16_t len, uint8_t *status) 1446 { 1447 /* Verify digest if required */ 1448 if (memcmp(job->auth_tag_output, digest, len) != 0) 1449 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; 1450 } 1451 1452 static inline void 1453 generate_digest(IMB_JOB *job, struct rte_crypto_op *op, 1454 struct aesni_mb_session *sess) 1455 { 1456 /* No extra copy needed */ 1457 if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len)) 1458 return; 1459 1460 /* 1461 * This can only happen for HMAC, so only digest 1462 * for authentication algos is required 1463 */ 1464 memcpy(op->sym->auth.digest.data, job->auth_tag_output, 1465 sess->auth.req_digest_len); 1466 } 1467 1468 /** 1469 * Process a completed job and return rte_mbuf which job processed 1470 * 1471 * @param qp Queue Pair to process 1472 * @param job IMB_JOB job to process 1473 * 1474 * @return 1475 * - Returns processed crypto operation. 
1476 * - Returns NULL on invalid job 1477 */ 1478 static inline struct rte_crypto_op * 1479 post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) 1480 { 1481 struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data; 1482 struct aesni_mb_session *sess = NULL; 1483 uint32_t driver_id = ipsec_mb_get_driver_id( 1484 IPSEC_MB_PMD_TYPE_AESNI_MB); 1485 1486 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED 1487 uint8_t is_docsis_sec = 0; 1488 1489 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1490 /* 1491 * Assuming at this point that if it's a security type op, that 1492 * this is for DOCSIS 1493 */ 1494 is_docsis_sec = 1; 1495 sess = get_sec_session_private_data(op->sym->sec_session); 1496 } else 1497 #endif 1498 { 1499 sess = get_sym_session_private_data(op->sym->session, 1500 driver_id); 1501 } 1502 1503 if (unlikely(sess == NULL)) { 1504 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1505 return op; 1506 } 1507 1508 if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) { 1509 switch (job->status) { 1510 case IMB_STATUS_COMPLETED: 1511 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1512 1513 if (job->hash_alg == IMB_AUTH_NULL) 1514 break; 1515 1516 if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 1517 if (is_aead_algo(job->hash_alg, 1518 sess->cipher.mode)) 1519 verify_digest(job, 1520 op->sym->aead.digest.data, 1521 sess->auth.req_digest_len, 1522 &op->status); 1523 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED 1524 else if (is_docsis_sec) 1525 verify_docsis_sec_crc(job, 1526 &op->status); 1527 #endif 1528 else 1529 verify_digest(job, 1530 op->sym->auth.digest.data, 1531 sess->auth.req_digest_len, 1532 &op->status); 1533 } else 1534 generate_digest(job, op, sess); 1535 break; 1536 default: 1537 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 1538 } 1539 } 1540 1541 /* Free session if a session-less crypto op */ 1542 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1543 memset(sess, 0, sizeof(struct aesni_mb_session)); 1544 memset(op->sym->session, 0, 1545 
rte_cryptodev_sym_get_existing_header_session_size( 1546 op->sym->session)); 1547 rte_mempool_put(qp->sess_mp_priv, sess); 1548 rte_mempool_put(qp->sess_mp, op->sym->session); 1549 op->sym->session = NULL; 1550 } 1551 1552 return op; 1553 } 1554 1555 static inline void 1556 post_process_mb_sync_job(IMB_JOB *job) 1557 { 1558 uint32_t *st; 1559 1560 st = job->user_data; 1561 st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG; 1562 } 1563 1564 /** 1565 * Process a completed IMB_JOB job and keep processing jobs until 1566 * get_completed_job return NULL 1567 * 1568 * @param qp Queue Pair to process 1569 * @param mb_mgr IMB_MGR to use 1570 * @param job IMB_JOB job 1571 * @param ops crypto ops to fill 1572 * @param nb_ops number of crypto ops 1573 * 1574 * @return 1575 * - Number of processed jobs 1576 */ 1577 static unsigned 1578 handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, 1579 IMB_JOB *job, struct rte_crypto_op **ops, 1580 uint16_t nb_ops) 1581 { 1582 struct rte_crypto_op *op = NULL; 1583 uint16_t processed_jobs = 0; 1584 1585 while (job != NULL) { 1586 op = post_process_mb_job(qp, job); 1587 1588 if (op) { 1589 ops[processed_jobs++] = op; 1590 qp->stats.dequeued_count++; 1591 } else { 1592 qp->stats.dequeue_err_count++; 1593 break; 1594 } 1595 if (processed_jobs == nb_ops) 1596 break; 1597 1598 job = IMB_GET_COMPLETED_JOB(mb_mgr); 1599 } 1600 1601 return processed_jobs; 1602 } 1603 1604 static inline uint32_t 1605 handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr) 1606 { 1607 uint32_t i; 1608 1609 for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr)) 1610 post_process_mb_sync_job(job); 1611 1612 return i; 1613 } 1614 1615 static inline uint32_t 1616 flush_mb_sync_mgr(IMB_MGR *mb_mgr) 1617 { 1618 IMB_JOB *job; 1619 1620 job = IMB_FLUSH_JOB(mb_mgr); 1621 return handle_completed_sync_jobs(job, mb_mgr); 1622 } 1623 1624 static inline uint16_t 1625 flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, 1626 struct 
rte_crypto_op **ops, uint16_t nb_ops) 1627 { 1628 int processed_ops = 0; 1629 1630 /* Flush the remaining jobs */ 1631 IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr); 1632 1633 if (job) 1634 processed_ops += handle_completed_jobs(qp, mb_mgr, job, 1635 &ops[processed_ops], nb_ops - processed_ops); 1636 1637 return processed_ops; 1638 } 1639 1640 static inline IMB_JOB * 1641 set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op) 1642 { 1643 job->chain_order = IMB_ORDER_HASH_CIPHER; 1644 job->cipher_mode = IMB_CIPHER_NULL; 1645 job->hash_alg = IMB_AUTH_NULL; 1646 job->cipher_direction = IMB_DIR_DECRYPT; 1647 1648 /* Set user data to be crypto operation data struct */ 1649 job->user_data = op; 1650 1651 return job; 1652 } 1653 1654 static uint16_t 1655 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, 1656 uint16_t nb_ops) 1657 { 1658 struct ipsec_mb_qp *qp = queue_pair; 1659 IMB_MGR *mb_mgr = qp->mb_mgr; 1660 struct rte_crypto_op *op; 1661 IMB_JOB *job; 1662 int retval, processed_jobs = 0; 1663 1664 if (unlikely(nb_ops == 0 || mb_mgr == NULL)) 1665 return 0; 1666 1667 uint8_t digest_idx = qp->digest_idx; 1668 1669 do { 1670 /* Get next free mb job struct from mb manager */ 1671 job = IMB_GET_NEXT_JOB(mb_mgr); 1672 if (unlikely(job == NULL)) { 1673 /* if no free mb job structs we need to flush mb_mgr */ 1674 processed_jobs += flush_mb_mgr(qp, mb_mgr, 1675 &ops[processed_jobs], 1676 nb_ops - processed_jobs); 1677 1678 if (nb_ops == processed_jobs) 1679 break; 1680 1681 job = IMB_GET_NEXT_JOB(mb_mgr); 1682 } 1683 1684 /* 1685 * Get next operation to process from ingress queue. 1686 * There is no need to return the job to the IMB_MGR 1687 * if there are no more operations to process, since the IMB_MGR 1688 * can use that pointer again in next get_next calls. 
1689 */ 1690 retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op); 1691 if (retval < 0) 1692 break; 1693 1694 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED 1695 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1696 retval = set_sec_mb_job_params(job, qp, op, 1697 &digest_idx); 1698 else 1699 #endif 1700 retval = set_mb_job_params(job, qp, op, 1701 &digest_idx); 1702 1703 if (unlikely(retval != 0)) { 1704 qp->stats.dequeue_err_count++; 1705 set_job_null_op(job, op); 1706 } 1707 1708 /* Submit job to multi-buffer for processing */ 1709 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 1710 job = IMB_SUBMIT_JOB(mb_mgr); 1711 #else 1712 job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr); 1713 #endif 1714 /* 1715 * If submit returns a processed job then handle it, 1716 * before submitting subsequent jobs 1717 */ 1718 if (job) 1719 processed_jobs += handle_completed_jobs(qp, mb_mgr, 1720 job, &ops[processed_jobs], 1721 nb_ops - processed_jobs); 1722 1723 } while (processed_jobs < nb_ops); 1724 1725 qp->digest_idx = digest_idx; 1726 1727 if (processed_jobs < 1) 1728 processed_jobs += flush_mb_mgr(qp, mb_mgr, 1729 &ops[processed_jobs], 1730 nb_ops - processed_jobs); 1731 1732 return processed_jobs; 1733 } 1734 1735 1736 static inline void 1737 ipsec_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err) 1738 { 1739 uint32_t i; 1740 1741 for (i = 0; i != vec->num; ++i) 1742 vec->status[i] = err; 1743 } 1744 1745 static inline int 1746 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl) 1747 { 1748 /* no multi-seg support with current AESNI-MB PMD */ 1749 if (sgl->num != 1) 1750 return -ENOTSUP; 1751 else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len) 1752 return -EINVAL; 1753 return 0; 1754 } 1755 1756 static inline IMB_JOB * 1757 submit_sync_job(IMB_MGR *mb_mgr) 1758 { 1759 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 1760 return IMB_SUBMIT_JOB(mb_mgr); 1761 #else 1762 return IMB_SUBMIT_JOB_NOCHECK(mb_mgr); 1763 #endif 1764 } 1765 1766 static inline 
uint32_t 1767 generate_sync_dgst(struct rte_crypto_sym_vec *vec, 1768 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len) 1769 { 1770 uint32_t i, k; 1771 1772 for (i = 0, k = 0; i != vec->num; i++) { 1773 if (vec->status[i] == 0) { 1774 memcpy(vec->digest[i].va, dgst[i], len); 1775 k++; 1776 } 1777 } 1778 1779 return k; 1780 } 1781 1782 static inline uint32_t 1783 verify_sync_dgst(struct rte_crypto_sym_vec *vec, 1784 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len) 1785 { 1786 uint32_t i, k; 1787 1788 for (i = 0, k = 0; i != vec->num; i++) { 1789 if (vec->status[i] == 0) { 1790 if (memcmp(vec->digest[i].va, dgst[i], len) != 0) 1791 vec->status[i] = EBADMSG; 1792 else 1793 k++; 1794 } 1795 } 1796 1797 return k; 1798 } 1799 1800 static uint32_t 1801 aesni_mb_process_bulk(struct rte_cryptodev *dev, 1802 struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs, 1803 struct rte_crypto_sym_vec *vec) 1804 { 1805 int32_t ret; 1806 uint32_t i, j, k, len; 1807 void *buf; 1808 IMB_JOB *job; 1809 IMB_MGR *mb_mgr; 1810 struct aesni_mb_session *s; 1811 uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX]; 1812 1813 s = get_sym_session_private_data(sess, dev->driver_id); 1814 if (s == NULL) { 1815 ipsec_mb_fill_error_code(vec, EINVAL); 1816 return 0; 1817 } 1818 1819 /* get per-thread MB MGR, create one if needed */ 1820 mb_mgr = get_per_thread_mb_mgr(); 1821 if (unlikely(mb_mgr == NULL)) 1822 return 0; 1823 1824 for (i = 0, j = 0, k = 0; i != vec->num; i++) { 1825 ret = check_crypto_sgl(sofs, vec->src_sgl + i); 1826 if (ret != 0) { 1827 vec->status[i] = ret; 1828 continue; 1829 } 1830 1831 buf = vec->src_sgl[i].vec[0].base; 1832 len = vec->src_sgl[i].vec[0].len; 1833 1834 job = IMB_GET_NEXT_JOB(mb_mgr); 1835 if (job == NULL) { 1836 k += flush_mb_sync_mgr(mb_mgr); 1837 job = IMB_GET_NEXT_JOB(mb_mgr); 1838 RTE_ASSERT(job != NULL); 1839 } 1840 1841 /* Submit job for processing */ 1842 set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i], 1843 &vec->aad[i], 
tmp_dgst[i], &vec->status[i]); 1844 job = submit_sync_job(mb_mgr); 1845 j++; 1846 1847 /* handle completed jobs */ 1848 k += handle_completed_sync_jobs(job, mb_mgr); 1849 } 1850 1851 /* flush remaining jobs */ 1852 while (k != j) 1853 k += flush_mb_sync_mgr(mb_mgr); 1854 1855 /* finish processing for successful jobs: check/update digest */ 1856 if (k != 0) { 1857 if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) 1858 k = verify_sync_dgst(vec, 1859 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst, 1860 s->auth.req_digest_len); 1861 else 1862 k = generate_sync_dgst(vec, 1863 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst, 1864 s->auth.req_digest_len); 1865 } 1866 1867 return k; 1868 } 1869 1870 struct rte_cryptodev_ops aesni_mb_pmd_ops = { 1871 .dev_configure = ipsec_mb_config, 1872 .dev_start = ipsec_mb_start, 1873 .dev_stop = ipsec_mb_stop, 1874 .dev_close = ipsec_mb_close, 1875 1876 .stats_get = ipsec_mb_stats_get, 1877 .stats_reset = ipsec_mb_stats_reset, 1878 1879 .dev_infos_get = ipsec_mb_info_get, 1880 1881 .queue_pair_setup = ipsec_mb_qp_setup, 1882 .queue_pair_release = ipsec_mb_qp_release, 1883 1884 .sym_cpu_process = aesni_mb_process_bulk, 1885 1886 .sym_session_get_size = ipsec_mb_sym_session_get_size, 1887 .sym_session_configure = ipsec_mb_sym_session_configure, 1888 .sym_session_clear = ipsec_mb_sym_session_clear 1889 }; 1890 1891 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED 1892 /** 1893 * Configure a aesni multi-buffer session from a security session 1894 * configuration 1895 */ 1896 static int 1897 aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf, 1898 struct rte_security_session *sess, 1899 struct rte_mempool *mempool) 1900 { 1901 void *sess_private_data; 1902 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 1903 int ret; 1904 1905 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL || 1906 conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) { 1907 IPSEC_MB_LOG(ERR, "Invalid security protocol"); 1908 return 
			-EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		IPSEC_MB_LOG(ERR, "Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf,
			sess_private_data);

	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "Failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it does not leave key material behind */
static int
aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	void *sess_priv = get_sec_session_private_data(sess);

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		/* zero first so keys do not linger in the mempool */
		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}

/** Get security capabilities for aesni multi-buffer */
static const struct rte_security_capability *
aesni_mb_pmd_sec_capa_get(void *device __rte_unused)
{
	return aesni_mb_pmd_security_cap;
}

/* rte_security ops; only create/destroy/capabilities are implemented */
static struct rte_security_ops aesni_mb_pmd_sec_ops = {
	.session_create = aesni_mb_pmd_sec_sess_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = aesni_mb_pmd_sec_sess_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = aesni_mb_pmd_sec_capa_get
};

struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops;

/** Allocate and attach a security context to the cryptodev. */
static int
aesni_mb_configure_dev(struct rte_cryptodev *dev)
{
	struct rte_security_ctx *security_instance;

	security_instance = rte_malloc("aesni_mb_sec",
				sizeof(struct rte_security_ctx),
				RTE_CACHE_LINE_SIZE);
	if (security_instance != NULL) {
		security_instance->device = (void *)dev;
		security_instance->ops = rte_aesni_mb_pmd_sec_ops;
		security_instance->sess_cnt = 0;
		dev->security_ctx = security_instance;

		return 0;
	}

	return -ENOMEM;
}

#endif

/** vdev probe: delegate device creation to the common ipsec_mb layer. */
static int
aesni_mb_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB);
}

static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = aesni_mb_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver aesni_mb_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD,
	cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(
	aesni_mb_crypto_drv,
	cryptodev_aesni_mb_pmd_drv.driver,
	pmd_driver_id_aesni_mb);

/* Constructor function to register aesni-mb PMD */
RTE_INIT(ipsec_mb_register_aesni_mb)
{
	struct ipsec_mb_internals *aesni_mb_data =
		&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];

	aesni_mb_data->caps = aesni_mb_capabilities;
	aesni_mb_data->dequeue_burst = aesni_mb_dequeue_burst;
	aesni_mb_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

	aesni_mb_data->internals_priv_size = 0;
	aesni_mb_data->ops = &aesni_mb_pmd_ops;
	aesni_mb_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
	aesni_mb_data->queue_pair_configure = NULL;
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	aesni_mb_data->security_ops =
&aesni_mb_pmd_sec_ops; 2033 aesni_mb_data->dev_config = aesni_mb_configure_dev; 2034 aesni_mb_data->feature_flags |= RTE_CRYPTODEV_FF_SECURITY; 2035 #endif 2036 aesni_mb_data->session_configure = aesni_mb_session_configure; 2037 aesni_mb_data->session_priv_size = sizeof(struct aesni_mb_session); 2038 } 2039