1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2015-2021 Intel Corporation 3 */ 4 5 #include <unistd.h> 6 7 #include "pmd_aesni_mb_priv.h" 8 9 RTE_DEFINE_PER_LCORE(pid_t, pid); 10 11 uint8_t pmd_driver_id_aesni_mb; 12 13 struct aesni_mb_op_buf_data { 14 struct rte_mbuf *m; 15 uint32_t offset; 16 }; 17 18 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 19 /** 20 * Calculate the authentication pre-computes 21 * 22 * @param one_block_hash Function pointer 23 * to calculate digest on ipad/opad 24 * @param ipad Inner pad output byte array 25 * @param opad Outer pad output byte array 26 * @param hkey Authentication key 27 * @param hkey_len Authentication key length 28 * @param blocksize Block size of selected hash algo 29 */ 30 static void 31 calculate_auth_precomputes(hash_one_block_t one_block_hash, 32 uint8_t *ipad, uint8_t *opad, 33 const uint8_t *hkey, uint16_t hkey_len, 34 uint16_t blocksize) 35 { 36 uint32_t i, length; 37 38 alignas(16) uint8_t ipad_buf[blocksize]; 39 alignas(16) uint8_t opad_buf[blocksize]; 40 41 /* Setup inner and outer pads */ 42 memset(ipad_buf, HMAC_IPAD_VALUE, blocksize); 43 memset(opad_buf, HMAC_OPAD_VALUE, blocksize); 44 45 /* XOR hash key with inner and outer pads */ 46 length = hkey_len > blocksize ? blocksize : hkey_len; 47 48 for (i = 0; i < length; i++) { 49 ipad_buf[i] ^= hkey[i]; 50 opad_buf[i] ^= hkey[i]; 51 } 52 53 /* Compute partial hashes */ 54 (*one_block_hash)(ipad_buf, ipad); 55 (*one_block_hash)(opad_buf, opad); 56 57 /* Clean up stack */ 58 memset(ipad_buf, 0, blocksize); 59 memset(opad_buf, 0, blocksize); 60 } 61 #endif 62 63 static inline int 64 is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode) 65 { 66 return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 || 67 hash_alg == IMB_AUTH_AES_CCM || 68 cipher_mode == IMB_CIPHER_GCM); 69 } 70 71 /** Set session authentication parameters */ 72 static int 73 aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, 74 struct aesni_mb_session *sess, 75 const struct rte_crypto_sym_xform *xform) 76 { 77 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 78 hash_one_block_t hash_oneblock_fn = NULL; 79 unsigned int key_larger_block_size = 0; 80 #endif 81 uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 }; 82 uint32_t auth_precompute = 1; 83 84 if (xform == NULL) { 85 sess->template_job.hash_alg = IMB_AUTH_NULL; 86 return 0; 87 } 88 89 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) { 90 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type auth"); 91 return -1; 92 } 93 94 /* Set IV parameters */ 95 sess->auth_iv.offset = xform->auth.iv.offset; 96 97 /* Set the request digest size */ 98 sess->auth.req_digest_len = xform->auth.digest_length; 99 100 /* Select auth generate/verify */ 101 sess->auth.operation = xform->auth.op; 102 103 /* Set Authentication Parameters */ 104 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) { 105 sess->template_job.hash_alg = IMB_AUTH_NULL; 106 sess->template_job.auth_tag_output_len_in_bytes = 0; 107 return 0; 108 } 109 110 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) { 111 sess->template_job.hash_alg = IMB_AUTH_AES_XCBC; 112 113 uint16_t xcbc_mac_digest_len = 114 get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC); 115 if (sess->auth.req_digest_len != xcbc_mac_digest_len) { 116 IPSEC_MB_LOG(ERR, "Invalid digest size"); 117 return -EINVAL; 118 } 119 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 120 121 IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data, 122 sess->auth.xcbc.k1_expanded, 123 sess->auth.xcbc.k2, sess->auth.xcbc.k3); 124 sess->template_job.u.XCBC._k1_expanded = sess->auth.xcbc.k1_expanded; 125 sess->template_job.u.XCBC._k2 = sess->auth.xcbc.k2; 126 sess->template_job.u.XCBC._k3 = sess->auth.xcbc.k3; 127 return 0; 128 } 129 130 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) { 131 uint32_t dust[4*15]; 132 133 sess->template_job.hash_alg = IMB_AUTH_AES_CMAC; 134 135 uint16_t cmac_digest_len = 136 get_digest_byte_length(IMB_AUTH_AES_CMAC); 137 138 if (sess->auth.req_digest_len > cmac_digest_len) { 139 IPSEC_MB_LOG(ERR, "Invalid digest size"); 140 return -EINVAL; 141 } 142 /* 143 * Multi-buffer lib supports digest sizes from 4 to 16 bytes 144 * in version 0.50 and sizes of 12 and 16 bytes, 145 * in version 0.49. 146 * If size requested is different, generate the full digest 147 * (16 bytes) in a temporary location and then memcpy 148 * the requested number of bytes. 149 */ 150 if (sess->auth.req_digest_len < 4) 151 sess->template_job.auth_tag_output_len_in_bytes = cmac_digest_len; 152 else 153 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 154 155 IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data, 156 sess->auth.cmac.expkey, dust); 157 IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey, 158 sess->auth.cmac.skey1, sess->auth.cmac.skey2); 159 sess->template_job.u.CMAC._key_expanded = sess->auth.cmac.expkey; 160 sess->template_job.u.CMAC._skey1 = sess->auth.cmac.skey1; 161 sess->template_job.u.CMAC._skey2 = sess->auth.cmac.skey2; 162 return 0; 163 } 164 165 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { 166 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) { 167 sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; 168 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 169 } else 170 sess->template_job.cipher_direction = IMB_DIR_DECRYPT; 171 172 if (sess->auth.req_digest_len > 173 get_digest_byte_length(IMB_AUTH_AES_GMAC)) { 174 IPSEC_MB_LOG(ERR, "Invalid digest size"); 175 return -EINVAL; 176 } 177 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 178 sess->template_job.u.GMAC.iv_len_in_bytes = xform->auth.iv.length; 179 sess->iv.offset = xform->auth.iv.offset; 180 181 switch (xform->auth.key.length) { 182 case IMB_KEY_128_BYTES: 183 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_128; 184 IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data, 185 &sess->cipher.gcm_key); 186 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 187 break; 188 case IMB_KEY_192_BYTES: 189 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_192; 190 IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data, 191 &sess->cipher.gcm_key); 192 sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; 193 break; 194 case IMB_KEY_256_BYTES: 195 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_256; 196 IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data, 197 &sess->cipher.gcm_key); 198 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 199 break; 200 default: 201 IPSEC_MB_LOG(ERR, "Invalid authentication key length"); 202 return -EINVAL; 203 } 204 sess->template_job.u.GMAC._key = &sess->cipher.gcm_key; 205 206 return 0; 207 } 208 209 if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) { 210 if (xform->auth.key.length == 16) { 211 sess->template_job.hash_alg = IMB_AUTH_ZUC_EIA3_BITLEN; 212 213 if (sess->auth.req_digest_len != 4) { 214 IPSEC_MB_LOG(ERR, "Invalid digest size"); 215 return -EINVAL; 216 } 217 } else if (xform->auth.key.length == 32) { 218 sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN; 219 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 220 if (sess->auth.req_digest_len != 4 && 221 sess->auth.req_digest_len != 8 && 222 sess->auth.req_digest_len != 16) { 223 #else 224 if (sess->auth.req_digest_len != 4) { 225 #endif 226 IPSEC_MB_LOG(ERR, "Invalid digest size"); 227 return -EINVAL; 228 } 229 } else { 230 IPSEC_MB_LOG(ERR, "Invalid authentication key length"); 231 return -EINVAL; 232 } 233 234 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 235 236 memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 237 xform->auth.key.length); 238 sess->template_job.u.ZUC_EIA3._key = sess->auth.zuc_auth_key; 239 return 0; 240 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 241 sess->template_job.hash_alg = IMB_AUTH_SNOW3G_UIA2_BITLEN; 242 uint16_t snow3g_uia2_digest_len = 243 get_truncated_digest_byte_length( 244 IMB_AUTH_SNOW3G_UIA2_BITLEN); 245 if (sess->auth.req_digest_len != snow3g_uia2_digest_len) { 246 IPSEC_MB_LOG(ERR, "Invalid digest size"); 247 return -EINVAL; 248 } 249 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 250 251 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data, 252 &sess->auth.pKeySched_snow3g_auth); 253 sess->template_job.u.SNOW3G_UIA2._key = (void *) 254 &sess->auth.pKeySched_snow3g_auth; 255 return 0; 256 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) { 257 sess->template_job.hash_alg = IMB_AUTH_KASUMI_UIA1; 258 uint16_t kasumi_f9_digest_len = 259 get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1); 260 if (sess->auth.req_digest_len != kasumi_f9_digest_len) { 261 IPSEC_MB_LOG(ERR, "Invalid digest size"); 262 return -EINVAL; 263 } 264 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 265 266 IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data, 267 &sess->auth.pKeySched_kasumi_auth); 268 sess->template_job.u.KASUMI_UIA1._key = (void *) 269 &sess->auth.pKeySched_kasumi_auth; 270 return 0; 271 } 272 273 switch (xform->auth.algo) { 274 case RTE_CRYPTO_AUTH_MD5_HMAC: 275 sess->template_job.hash_alg = IMB_AUTH_MD5; 276 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 277 hash_oneblock_fn = mb_mgr->md5_one_block; 278 #endif 279 break; 280 case RTE_CRYPTO_AUTH_SHA1_HMAC: 281 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_1; 282 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 283 hash_oneblock_fn = mb_mgr->sha1_one_block; 284 #endif 285 if (xform->auth.key.length > get_auth_algo_blocksize( 286 IMB_AUTH_HMAC_SHA_1)) { 287 IMB_SHA1(mb_mgr, 288 xform->auth.key.data, 289 xform->auth.key.length, 290 hashed_key); 291 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 292 key_larger_block_size = 1; 293 #endif 294 } 295 break; 296 case RTE_CRYPTO_AUTH_SHA1: 297 sess->template_job.hash_alg = IMB_AUTH_SHA_1; 298 auth_precompute = 0; 299 break; 300 case RTE_CRYPTO_AUTH_SHA224_HMAC: 301 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_224; 302 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 303 hash_oneblock_fn = mb_mgr->sha224_one_block; 304 #endif 305 if (xform->auth.key.length > get_auth_algo_blocksize( 306 IMB_AUTH_HMAC_SHA_224)) { 307 IMB_SHA224(mb_mgr, 308 xform->auth.key.data, 309 xform->auth.key.length, 310 hashed_key); 311 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 312 key_larger_block_size = 1; 313 #endif 314 } 315 break; 316 case RTE_CRYPTO_AUTH_SHA224: 317 sess->template_job.hash_alg = IMB_AUTH_SHA_224; 318 auth_precompute = 0; 319 break; 320 case RTE_CRYPTO_AUTH_SHA256_HMAC: 321 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_256; 322 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 323 hash_oneblock_fn = mb_mgr->sha256_one_block; 324 #endif 325 if (xform->auth.key.length > get_auth_algo_blocksize( 326 IMB_AUTH_HMAC_SHA_256)) { 327 IMB_SHA256(mb_mgr, 328 xform->auth.key.data, 329 xform->auth.key.length, 330 hashed_key); 331 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 332 key_larger_block_size = 1; 333 #endif 334 } 335 break; 336 case RTE_CRYPTO_AUTH_SHA256: 337 sess->template_job.hash_alg = IMB_AUTH_SHA_256; 338 auth_precompute = 0; 339 break; 340 case RTE_CRYPTO_AUTH_SHA384_HMAC: 341 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_384; 342 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 343 hash_oneblock_fn = mb_mgr->sha384_one_block; 344 #endif 345 if (xform->auth.key.length > get_auth_algo_blocksize( 346 IMB_AUTH_HMAC_SHA_384)) { 347 IMB_SHA384(mb_mgr, 348 xform->auth.key.data, 349 xform->auth.key.length, 350 hashed_key); 351 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 352 key_larger_block_size = 1; 353 #endif 354 } 355 break; 356 case RTE_CRYPTO_AUTH_SHA384: 357 sess->template_job.hash_alg = IMB_AUTH_SHA_384; 358 auth_precompute = 0; 359 break; 360 case RTE_CRYPTO_AUTH_SHA512_HMAC: 361 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_512; 362 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 363 hash_oneblock_fn = mb_mgr->sha512_one_block; 364 #endif 365 if (xform->auth.key.length > get_auth_algo_blocksize( 366 IMB_AUTH_HMAC_SHA_512)) { 367 IMB_SHA512(mb_mgr, 368 xform->auth.key.data, 369 xform->auth.key.length, 370 hashed_key); 371 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 372 key_larger_block_size = 1; 373 #endif 374 } 375 break; 376 case RTE_CRYPTO_AUTH_SHA512: 377 sess->template_job.hash_alg = IMB_AUTH_SHA_512; 378 auth_precompute = 0; 379 break; 380 #if IMB_VERSION(1, 5, 0) <= IMB_VERSION_NUM 381 case RTE_CRYPTO_AUTH_SM3: 382 sess->template_job.hash_alg = IMB_AUTH_SM3; 383 break; 384 case RTE_CRYPTO_AUTH_SM3_HMAC: 385 sess->template_job.hash_alg = IMB_AUTH_HMAC_SM3; 386 break; 387 #endif 388 default: 389 IPSEC_MB_LOG(ERR, 390 "Unsupported authentication algorithm selection"); 391 return -ENOTSUP; 392 } 393 uint16_t trunc_digest_size = 394 get_truncated_digest_byte_length(sess->template_job.hash_alg); 395 uint16_t full_digest_size = 396 get_digest_byte_length(sess->template_job.hash_alg); 397 398 if (sess->auth.req_digest_len > full_digest_size || 399 sess->auth.req_digest_len == 0) { 400 IPSEC_MB_LOG(ERR, "Invalid digest size"); 401 return -EINVAL; 402 } 403 404 if (sess->auth.req_digest_len != trunc_digest_size && 405 sess->auth.req_digest_len != full_digest_size) 406 sess->template_job.auth_tag_output_len_in_bytes = full_digest_size; 407 else 408 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 409 410 /* Plain SHA does not require precompute key */ 411 if (auth_precompute == 0) 412 return 0; 413 414 /* Calculate Authentication precomputes */ 415 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 416 imb_hmac_ipad_opad(mb_mgr, sess->template_job.hash_alg, 417 xform->auth.key.data, xform->auth.key.length, 418 sess->auth.pads.inner, sess->auth.pads.outer); 419 #else 420 if (key_larger_block_size) { 421 calculate_auth_precomputes(hash_oneblock_fn, 422 sess->auth.pads.inner, sess->auth.pads.outer, 423 hashed_key, 424 xform->auth.key.length, 425 get_auth_algo_blocksize(sess->template_job.hash_alg)); 426 } else { 427 calculate_auth_precomputes(hash_oneblock_fn, 428 sess->auth.pads.inner, sess->auth.pads.outer, 429 xform->auth.key.data, 430 xform->auth.key.length, 431 get_auth_algo_blocksize(sess->template_job.hash_alg)); 432 } 433 #endif 434 sess->template_job.u.HMAC._hashed_auth_key_xor_ipad = 435 sess->auth.pads.inner; 436 sess->template_job.u.HMAC._hashed_auth_key_xor_opad = 437 sess->auth.pads.outer; 438 439 return 0; 440 } 441 442 /** Set session cipher parameters */ 443 static int 444 aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, 445 struct aesni_mb_session *sess, 446 const struct rte_crypto_sym_xform *xform) 447 { 448 uint8_t is_aes = 0; 449 uint8_t is_3DES = 0; 450 uint8_t is_docsis = 0; 451 uint8_t is_zuc = 0; 452 uint8_t is_snow3g = 0; 453 uint8_t is_kasumi = 0; 454 #if IMB_VERSION(1, 5, 0) <= IMB_VERSION_NUM 455 uint8_t is_sm4 = 0; 456 #endif 457 458 if (xform == NULL) { 459 sess->template_job.cipher_mode = IMB_CIPHER_NULL; 460 return 0; 461 } 462 463 if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { 464 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type cipher"); 465 return -EINVAL; 466 } 467 468 /* Select cipher direction */ 469 switch (xform->cipher.op) { 470 case RTE_CRYPTO_CIPHER_OP_ENCRYPT: 471 sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; 472 break; 473 case RTE_CRYPTO_CIPHER_OP_DECRYPT: 474 sess->template_job.cipher_direction = IMB_DIR_DECRYPT; 475 break; 476 default: 477 IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter"); 478 return -EINVAL; 479 } 480 481 /* Select cipher mode */ 482 switch (xform->cipher.algo) { 483 case RTE_CRYPTO_CIPHER_AES_CBC: 484 sess->template_job.cipher_mode = IMB_CIPHER_CBC; 485 is_aes = 1; 486 break; 487 case RTE_CRYPTO_CIPHER_AES_CTR: 488 sess->template_job.cipher_mode = IMB_CIPHER_CNTR; 489 is_aes = 1; 490 break; 491 case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: 492 sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_SEC_BPI; 493 is_docsis = 1; 494 break; 495 case RTE_CRYPTO_CIPHER_DES_CBC: 496 sess->template_job.cipher_mode = IMB_CIPHER_DES; 497 break; 498 case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: 499 sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_DES; 500 break; 501 case RTE_CRYPTO_CIPHER_3DES_CBC: 502 sess->template_job.cipher_mode = IMB_CIPHER_DES3; 503 is_3DES = 1; 504 break; 505 case RTE_CRYPTO_CIPHER_AES_ECB: 506 sess->template_job.cipher_mode = IMB_CIPHER_ECB; 507 is_aes = 1; 508 break; 509 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 510 sess->template_job.cipher_mode = IMB_CIPHER_ZUC_EEA3; 511 is_zuc = 1; 512 break; 513 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 514 sess->template_job.cipher_mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN; 515 is_snow3g = 1; 516 break; 517 case RTE_CRYPTO_CIPHER_KASUMI_F8: 518 sess->template_job.cipher_mode = IMB_CIPHER_KASUMI_UEA1_BITLEN; 519 is_kasumi = 1; 520 break; 521 case RTE_CRYPTO_CIPHER_NULL: 522 sess->template_job.cipher_mode = IMB_CIPHER_NULL; 523 sess->template_job.key_len_in_bytes = 0; 524 sess->iv.offset = xform->cipher.iv.offset; 525 sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; 526 return 0; 527 #if IMB_VERSION(1, 5, 0) <= IMB_VERSION_NUM 528 case RTE_CRYPTO_CIPHER_SM4_CBC: 529 sess->template_job.cipher_mode = IMB_CIPHER_SM4_CBC; 530 is_sm4 = 1; 531 break; 532 case RTE_CRYPTO_CIPHER_SM4_ECB: 533 sess->template_job.cipher_mode = IMB_CIPHER_SM4_ECB; 534 is_sm4 = 1; 535 break; 536 #endif 537 #if IMB_VERSION(1, 5, 0) < IMB_VERSION_NUM 538 case RTE_CRYPTO_CIPHER_SM4_CTR: 539 sess->template_job.cipher_mode = IMB_CIPHER_SM4_CNTR; 540 is_sm4 = 1; 541 break; 542 #endif 543 default: 544 IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter"); 545 return -ENOTSUP; 546 } 547 548 /* Set IV parameters */ 549 sess->iv.offset = xform->cipher.iv.offset; 550 sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; 551 552 /* Check key length and choose key expansion function for AES */ 553 if (is_aes) { 554 switch (xform->cipher.key.length) { 555 case IMB_KEY_128_BYTES: 556 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 557 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, 558 sess->cipher.expanded_aes_keys.encode, 559 sess->cipher.expanded_aes_keys.decode); 560 break; 561 case IMB_KEY_192_BYTES: 562 sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; 563 IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data, 564 sess->cipher.expanded_aes_keys.encode, 565 sess->cipher.expanded_aes_keys.decode); 566 break; 567 case IMB_KEY_256_BYTES: 568 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 569 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, 570 sess->cipher.expanded_aes_keys.encode, 571 sess->cipher.expanded_aes_keys.decode); 572 break; 573 default: 574 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 575 return -EINVAL; 576 } 577 578 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 579 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 580 } else if (is_docsis) { 581 switch (xform->cipher.key.length) { 582 case IMB_KEY_128_BYTES: 583 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 584 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, 585 sess->cipher.expanded_aes_keys.encode, 586 sess->cipher.expanded_aes_keys.decode); 587 break; 588 case IMB_KEY_256_BYTES: 589 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 590 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, 591 sess->cipher.expanded_aes_keys.encode, 592 sess->cipher.expanded_aes_keys.decode); 593 break; 594 default: 595 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 596 return -EINVAL; 597 } 598 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 599 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 600 } else if (is_3DES) { 601 uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0], 602 sess->cipher.exp_3des_keys.key[1], 603 sess->cipher.exp_3des_keys.key[2]}; 604 605 switch (xform->cipher.key.length) { 606 case 24: 607 IMB_DES_KEYSCHED(mb_mgr, keys[0], 608 xform->cipher.key.data); 609 IMB_DES_KEYSCHED(mb_mgr, keys[1], 610 xform->cipher.key.data + 8); 611 IMB_DES_KEYSCHED(mb_mgr, keys[2], 612 xform->cipher.key.data + 16); 613 614 /* Initialize keys - 24 bytes: [K1-K2-K3] */ 615 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; 616 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; 617 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2]; 618 break; 619 case 16: 620 IMB_DES_KEYSCHED(mb_mgr, keys[0], 621 xform->cipher.key.data); 622 IMB_DES_KEYSCHED(mb_mgr, keys[1], 623 xform->cipher.key.data + 8); 624 /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */ 625 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; 626 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; 627 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; 628 break; 629 case 8: 630 IMB_DES_KEYSCHED(mb_mgr, keys[0], 631 xform->cipher.key.data); 632 633 /* Initialize keys - 8 bytes: [K1 = K2 = K3] */ 634 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; 635 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0]; 636 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; 637 break; 638 default: 639 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 640 return -EINVAL; 641 } 642 643 sess->template_job.enc_keys = sess->cipher.exp_3des_keys.ks_ptr; 644 sess->template_job.dec_keys = sess->cipher.exp_3des_keys.ks_ptr; 645 sess->template_job.key_len_in_bytes = 24; 646 } else if (is_zuc) { 647 if (xform->cipher.key.length != 16 && 648 xform->cipher.key.length != 32) { 649 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 650 return -EINVAL; 651 } 652 sess->template_job.key_len_in_bytes = xform->cipher.key.length; 653 memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data, 654 xform->cipher.key.length); 655 sess->template_job.enc_keys = sess->cipher.zuc_cipher_key; 656 sess->template_job.dec_keys = sess->cipher.zuc_cipher_key; 657 } else if (is_snow3g) { 658 if (xform->cipher.key.length != 16) { 659 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 660 return -EINVAL; 661 } 662 sess->template_job.key_len_in_bytes = 16; 663 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data, 664 &sess->cipher.pKeySched_snow3g_cipher); 665 sess->template_job.enc_keys = &sess->cipher.pKeySched_snow3g_cipher; 666 sess->template_job.dec_keys = &sess->cipher.pKeySched_snow3g_cipher; 667 } else if (is_kasumi) { 668 if (xform->cipher.key.length != 16) { 669 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 670 return -EINVAL; 671 } 672 sess->template_job.key_len_in_bytes = 16; 673 IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data, 674 &sess->cipher.pKeySched_kasumi_cipher); 675 sess->template_job.enc_keys = &sess->cipher.pKeySched_kasumi_cipher; 676 sess->template_job.dec_keys = &sess->cipher.pKeySched_kasumi_cipher; 677 #if IMB_VERSION(1, 5, 0) <= IMB_VERSION_NUM 678 } else if (is_sm4) { 679 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 680 IMB_SM4_KEYEXP(mb_mgr, xform->cipher.key.data, 681 sess->cipher.expanded_sm4_keys.encode, 682 sess->cipher.expanded_sm4_keys.decode); 683 sess->template_job.enc_keys = sess->cipher.expanded_sm4_keys.encode; 684 sess->template_job.dec_keys = sess->cipher.expanded_sm4_keys.decode; 685 #endif 686 } else { 687 if (xform->cipher.key.length != 8) { 688 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 689 return -EINVAL; 690 } 691 sess->template_job.key_len_in_bytes = 8; 692 693 IMB_DES_KEYSCHED(mb_mgr, 694 (uint64_t *)sess->cipher.expanded_aes_keys.encode, 695 xform->cipher.key.data); 696 IMB_DES_KEYSCHED(mb_mgr, 697 (uint64_t *)sess->cipher.expanded_aes_keys.decode, 698 xform->cipher.key.data); 699 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 700 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 701 } 702 703 return 0; 704 } 705 706 static int 707 aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, 708 struct aesni_mb_session *sess, 709 const struct rte_crypto_sym_xform *xform) 710 { 711 switch (xform->aead.op) { 712 case RTE_CRYPTO_AEAD_OP_ENCRYPT: 713 sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; 714 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; 715 break; 716 case RTE_CRYPTO_AEAD_OP_DECRYPT: 717 sess->template_job.cipher_direction = IMB_DIR_DECRYPT; 718 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; 719 break; 720 default: 721 IPSEC_MB_LOG(ERR, "Invalid aead operation parameter"); 722 return -EINVAL; 723 } 724 725 /* Set IV parameters */ 726 sess->iv.offset = xform->aead.iv.offset; 727 sess->template_job.iv_len_in_bytes = xform->aead.iv.length; 728 729 /* Set digest sizes */ 730 sess->auth.req_digest_len = xform->aead.digest_length; 731 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 732 733 switch (xform->aead.algo) { 734 case RTE_CRYPTO_AEAD_AES_CCM: 735 sess->template_job.cipher_mode = IMB_CIPHER_CCM; 736 sess->template_job.hash_alg = IMB_AUTH_AES_CCM; 737 sess->template_job.u.CCM.aad_len_in_bytes = xform->aead.aad_length; 738 739 /* Check key length and choose key expansion function for AES */ 740 switch (xform->aead.key.length) { 741 case IMB_KEY_128_BYTES: 742 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 743 IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data, 744 sess->cipher.expanded_aes_keys.encode, 745 sess->cipher.expanded_aes_keys.decode); 746 break; 747 case IMB_KEY_256_BYTES: 748 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 749 IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data, 750 sess->cipher.expanded_aes_keys.encode, 751 sess->cipher.expanded_aes_keys.decode); 752 break; 753 default: 754 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 755 return -EINVAL; 756 } 757 758 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 759 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 760 /* CCM digests must be between 4 and 16 and an even number */ 761 if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN || 762 sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN || 763 (sess->auth.req_digest_len & 1) == 1) { 764 IPSEC_MB_LOG(ERR, "Invalid digest size"); 765 return -EINVAL; 766 } 767 break; 768 769 case RTE_CRYPTO_AEAD_AES_GCM: 770 sess->template_job.cipher_mode = IMB_CIPHER_GCM; 771 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC; 772 sess->template_job.u.GCM.aad_len_in_bytes = xform->aead.aad_length; 773 774 switch (xform->aead.key.length) { 775 case IMB_KEY_128_BYTES: 776 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 777 IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data, 778 &sess->cipher.gcm_key); 779 break; 780 case IMB_KEY_192_BYTES: 781 sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; 782 IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data, 783 &sess->cipher.gcm_key); 784 break; 785 case IMB_KEY_256_BYTES: 786 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 787 IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data, 788 &sess->cipher.gcm_key); 789 break; 790 default: 791 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 792 return -EINVAL; 793 } 794 795 sess->template_job.enc_keys = &sess->cipher.gcm_key; 796 sess->template_job.dec_keys = &sess->cipher.gcm_key; 797 /* GCM digest size must be between 1 and 16 */ 798 if (sess->auth.req_digest_len == 0 || 799 sess->auth.req_digest_len > 16) { 800 IPSEC_MB_LOG(ERR, "Invalid digest size"); 801 return -EINVAL; 802 } 803 break; 804 805 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305: 806 sess->template_job.cipher_mode = IMB_CIPHER_CHACHA20_POLY1305; 807 sess->template_job.hash_alg = IMB_AUTH_CHACHA20_POLY1305; 808 sess->template_job.u.CHACHA20_POLY1305.aad_len_in_bytes = 809 xform->aead.aad_length; 810 811 if (xform->aead.key.length != 32) { 812 IPSEC_MB_LOG(ERR, "Invalid key length"); 813 return -EINVAL; 814 } 815 sess->template_job.key_len_in_bytes = 32; 816 memcpy(sess->cipher.expanded_aes_keys.encode, 817 xform->aead.key.data, 32); 818 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 819 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 820 if (sess->auth.req_digest_len != 16) { 821 IPSEC_MB_LOG(ERR, "Invalid digest size"); 822 return -EINVAL; 823 } 824 break; 825 default: 826 IPSEC_MB_LOG(ERR, "Unsupported aead mode parameter"); 827 return -ENOTSUP; 828 } 829 830 return 0; 831 } 832 833 /** Configure a aesni multi-buffer session from a crypto xform chain */ 834 int 835 aesni_mb_session_configure(IMB_MGR *mb_mgr, 836 void *priv_sess, 837 const struct rte_crypto_sym_xform *xform) 838 { 839 const struct rte_crypto_sym_xform *auth_xform = NULL; 840 const struct rte_crypto_sym_xform *cipher_xform = NULL; 841 const struct rte_crypto_sym_xform *aead_xform = NULL; 842 enum ipsec_mb_operation mode; 843 struct aesni_mb_session *sess = (struct aesni_mb_session *) priv_sess; 844 int ret; 845 846 ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform, 847 &cipher_xform, &aead_xform); 848 if (ret) 849 return ret; 850 851 /* Select Crypto operation - hash then cipher / cipher then hash */ 852 switch (mode) { 853 case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT: 854 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 855 break; 856 case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN: 857 case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY: 858 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 859 break; 860 case IPSEC_MB_OP_HASH_GEN_ONLY: 861 case IPSEC_MB_OP_HASH_VERIFY_ONLY: 862 case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT: 863 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 864 break; 865 /* 866 * Multi buffer library operates only at two modes, 867 * IMB_ORDER_CIPHER_HASH and IMB_ORDER_HASH_CIPHER. 868 * When doing ciphering only, chain order depends 869 * on cipher operation: encryption is always 870 * the first operation and decryption the last one. 871 */ 872 case IPSEC_MB_OP_ENCRYPT_ONLY: 873 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 874 break; 875 case IPSEC_MB_OP_DECRYPT_ONLY: 876 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 877 break; 878 case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT: 879 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 880 break; 881 case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT: 882 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 883 break; 884 case IPSEC_MB_OP_NOT_SUPPORTED: 885 default: 886 IPSEC_MB_LOG(ERR, 887 "Unsupported operation chain order parameter"); 888 return -ENOTSUP; 889 } 890 891 /* Default IV length = 0 */ 892 sess->template_job.iv_len_in_bytes = 0; 893 894 ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform); 895 if (ret != 0) { 896 IPSEC_MB_LOG(ERR, 897 "Invalid/unsupported authentication parameters"); 898 return ret; 899 } 900 901 ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess, 902 cipher_xform); 903 if (ret != 0) { 904 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters"); 905 return ret; 906 } 907 908 if (aead_xform) { 909 ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess, 910 aead_xform); 911 if (ret != 0) { 912 IPSEC_MB_LOG(ERR, 913 "Invalid/unsupported aead parameters"); 914 return ret; 915 } 916 } 917 918 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 919 sess->session_id = imb_set_session(mb_mgr, &sess->template_job); 920 sess->pid = getpid(); 921 RTE_PER_LCORE(pid) = sess->pid; 922 #endif 923 924 return 0; 925 } 926 927 /** Check DOCSIS security session configuration is valid */ 928 static int 929 check_docsis_sec_session(struct rte_security_session_conf *conf) 930 { 931 struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform; 932 struct rte_security_docsis_xform *docsis = &conf->docsis; 933 934 /* Downlink: CRC generate -> Cipher encrypt */ 935 if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) { 936 937 if (crypto_sym != NULL && 938 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 939 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT && 940 crypto_sym->cipher.algo == 941 RTE_CRYPTO_CIPHER_AES_DOCSISBPI && 942 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES || 943 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) && 944 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE && 945 crypto_sym->next == NULL) { 946 return 0; 947 } 948 /* Uplink: Cipher decrypt -> CRC verify */ 949 } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) { 950 951 if (crypto_sym != NULL && 952 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 953 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT && 954 crypto_sym->cipher.algo == 955 RTE_CRYPTO_CIPHER_AES_DOCSISBPI && 956 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES || 957 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) && 958 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE && 959 crypto_sym->next == NULL) { 960 return 0; 961 } 962 } 963 964 return -EINVAL; 965 } 966 967 /** Set DOCSIS security session auth (CRC) parameters */ 968 static int 969 aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess, 970 struct rte_security_docsis_xform *xform) 971 { 972 if (xform == NULL) { 973 IPSEC_MB_LOG(ERR, "Invalid DOCSIS xform"); 974 return -EINVAL; 975 } 976 977 /* Select CRC generate/verify */ 978 if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) { 979 sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; 980 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; 981 } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) { 982 sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; 983 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; 984 } else { 985 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction"); 986 return -ENOTSUP; 987 } 988 989 sess->auth.req_digest_len = RTE_ETHER_CRC_LEN; 990 sess->template_job.auth_tag_output_len_in_bytes = RTE_ETHER_CRC_LEN; 991 992 return 0; 993 } 994 995 /** 996 * Parse DOCSIS security session configuration and set private session 997 * parameters 998 */ 999 static int 1000 aesni_mb_set_docsis_sec_session_parameters( 1001 __rte_unused struct rte_cryptodev *dev, 1002 struct rte_security_session_conf *conf, 1003 void *sess) 1004 { 1005 IMB_MGR *mb_mgr = alloc_init_mb_mgr(); 1006 struct rte_security_docsis_xform *docsis_xform; 1007 struct rte_crypto_sym_xform *cipher_xform; 1008 struct aesni_mb_session *ipsec_sess = sess; 1009 int ret = 0; 1010 1011 if (!mb_mgr) 1012 return -ENOMEM; 1013 1014 ret = check_docsis_sec_session(conf); 1015 if (ret) { 1016 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration"); 1017 goto error_exit; 1018 } 1019 1020 switch (conf->docsis.direction) { 1021 case RTE_SECURITY_DOCSIS_UPLINK: 1022 ipsec_sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 1023 docsis_xform = &conf->docsis; 1024 cipher_xform = conf->crypto_xform; 1025 break; 1026 case RTE_SECURITY_DOCSIS_DOWNLINK: 1027 ipsec_sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 1028 cipher_xform = conf->crypto_xform; 1029 docsis_xform = &conf->docsis; 1030 break; 1031 default: 1032 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration"); 1033 ret = -EINVAL; 1034 goto error_exit; 1035 } 1036 1037 /* Default IV length = 0 */ 1038 ipsec_sess->template_job.iv_len_in_bytes = 0; 1039 1040 ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess, 1041 docsis_xform); 1042 if (ret != 0) { 1043 IPSEC_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters"); 1044 goto error_exit; 1045 } 1046 1047 ret = aesni_mb_set_session_cipher_parameters(mb_mgr, 1048 ipsec_sess, cipher_xform); 1049 1050 if (ret != 0) { 1051 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters"); 1052 goto error_exit; 1053 } 1054 1055 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1056 ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job); 1057 #endif 1058 1059 error_exit: 1060 free_mb_mgr(mb_mgr); 1061 return ret; 1062 } 1063 1064 static inline uint64_t 1065 auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session, 1066 uint32_t oop, const uint32_t auth_offset, 1067 const uint32_t cipher_offset, const uint32_t auth_length, 1068 const uint32_t cipher_length, uint8_t lb_sgl) 1069 { 1070 struct rte_mbuf *m_src, *m_dst; 1071 uint8_t *p_src, *p_dst; 1072 uintptr_t u_src, u_dst; 1073 uint32_t cipher_end, auth_end; 1074 1075 /* Only cipher then hash needs special calculation. */ 1076 if (!oop || session->template_job.chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl) 1077 return auth_offset; 1078 1079 m_src = op->sym->m_src; 1080 m_dst = op->sym->m_dst; 1081 1082 p_src = rte_pktmbuf_mtod(m_src, uint8_t *); 1083 p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *); 1084 u_src = (uintptr_t)p_src; 1085 u_dst = (uintptr_t)p_dst + auth_offset; 1086 1087 /** 1088 * Copy the content between cipher offset and auth offset for generating 1089 * correct digest. 1090 */ 1091 if (cipher_offset > auth_offset) 1092 memcpy(p_dst + auth_offset, 1093 p_src + auth_offset, 1094 cipher_offset - 1095 auth_offset); 1096 1097 /** 1098 * Copy the content between (cipher offset + length) and (auth offset + 1099 * length) for generating correct digest 1100 */ 1101 cipher_end = cipher_offset + cipher_length; 1102 auth_end = auth_offset + auth_length; 1103 if (cipher_end < auth_end) 1104 memcpy(p_dst + cipher_end, p_src + cipher_end, 1105 auth_end - cipher_end); 1106 1107 /** 1108 * Since intel-ipsec-mb only supports positive values, 1109 * we need to deduct the correct offset between src and dst. 1110 */ 1111 1112 return u_src < u_dst ? (u_dst - u_src) : 1113 (UINT64_MAX - u_src + u_dst + 1); 1114 } 1115 1116 static inline void 1117 set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, 1118 union rte_crypto_sym_ofs sofs, void *buf, uint32_t len, 1119 struct rte_crypto_va_iova_ptr *iv, 1120 struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata) 1121 { 1122 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1123 1124 /* Set authentication parameters */ 1125 job->iv = iv->va; 1126 1127 switch (job->hash_alg) { 1128 case IMB_AUTH_AES_CCM: 1129 job->u.CCM.aad = (uint8_t *)aad->va + 18; 1130 job->iv++; 1131 break; 1132 1133 case IMB_AUTH_AES_GMAC: 1134 job->u.GCM.aad = aad->va; 1135 break; 1136 1137 case IMB_AUTH_AES_GMAC_128: 1138 case IMB_AUTH_AES_GMAC_192: 1139 case IMB_AUTH_AES_GMAC_256: 1140 job->u.GMAC._iv = iv->va; 1141 break; 1142 1143 case IMB_AUTH_CHACHA20_POLY1305: 1144 job->u.CHACHA20_POLY1305.aad = aad->va; 1145 break; 1146 default: 1147 job->u.HMAC._hashed_auth_key_xor_ipad = 1148 session->auth.pads.inner; 1149 job->u.HMAC._hashed_auth_key_xor_opad = 1150 session->auth.pads.outer; 1151 } 1152 1153 /* 1154 * Multi-buffer library current only support returning a truncated 1155 * digest length as specified in the relevant IPsec RFCs 1156 */ 1157 1158 /* Set digest location and length */ 1159 job->auth_tag_output = digest; 1160 1161 /* Data Parameters */ 1162 job->src = buf; 1163 job->dst = (uint8_t *)buf + sofs.ofs.cipher.head; 1164 job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head; 1165 job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head; 1166 job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head - 1167 sofs.ofs.auth.tail; 1168 job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head - 1169 sofs.ofs.cipher.tail; 1170 1171 job->user_data = udata; 1172 } 1173 1174 static int 1175 handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr, 1176 uint32_t *total_len, 1177 struct aesni_mb_op_buf_data *src_data, 1178 struct aesni_mb_op_buf_data *dst_data) 1179 { 1180 uint32_t data_len, part_len; 1181 1182 if (*total_len == 0) { 1183 job->sgl_state = IMB_SGL_COMPLETE; 1184 return 0; 1185 } 1186 1187 if (src_data->m == NULL) { 1188 IPSEC_MB_LOG(ERR, "Invalid source buffer"); 1189 return -EINVAL; 1190 } 1191 1192 job->sgl_state = IMB_SGL_UPDATE; 1193 1194 data_len = src_data->m->data_len - src_data->offset; 1195 1196 job->src = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *, 1197 src_data->offset); 1198 1199 if (dst_data->m != NULL) { 1200 if (dst_data->m->data_len - dst_data->offset == 0) { 1201 dst_data->m = dst_data->m->next; 1202 if (dst_data->m == NULL) { 1203 IPSEC_MB_LOG(ERR, "Invalid destination buffer"); 1204 return -EINVAL; 1205 } 1206 dst_data->offset = 0; 1207 } 1208 part_len = RTE_MIN(data_len, (dst_data->m->data_len - 1209 dst_data->offset)); 1210 job->dst = rte_pktmbuf_mtod_offset(dst_data->m, 1211 uint8_t *, dst_data->offset); 1212 dst_data->offset += part_len; 1213 } else { 1214 part_len = RTE_MIN(data_len, *total_len); 1215 job->dst = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *, 1216 src_data->offset); 1217 } 1218 1219 job->msg_len_to_cipher_in_bytes = part_len; 1220 job->msg_len_to_hash_in_bytes = part_len; 1221 1222 job = IMB_SUBMIT_JOB(mb_mgr); 1223 1224 *total_len -= part_len; 1225 1226 if (part_len != data_len) { 1227 src_data->offset += part_len; 1228 } else { 1229 src_data->m = src_data->m->next; 1230 src_data->offset = 0; 1231 } 1232 1233 return 0; 1234 } 1235 1236 static uint64_t 1237 sgl_linear_cipher_auth_len(IMB_JOB *job, uint64_t *auth_len) 1238 { 1239 uint64_t cipher_len; 1240 1241 if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || 1242 job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) 1243 cipher_len = (job->msg_len_to_cipher_in_bits >> 3) + 1244 (job->cipher_start_src_offset_in_bits >> 3); 1245 else 1246 cipher_len = job->msg_len_to_cipher_in_bytes + 1247 job->cipher_start_src_offset_in_bytes; 1248 1249 if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || 1250 job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN) 1251 *auth_len = (job->msg_len_to_hash_in_bits >> 3) + 1252 job->hash_start_src_offset_in_bytes; 1253 else 1254 *auth_len = job->msg_len_to_hash_in_bytes + 1255 job->hash_start_src_offset_in_bytes; 1256 1257 return RTE_MAX(*auth_len, cipher_len); 1258 } 1259 1260 static int 1261 handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset, 1262 struct aesni_mb_session *session) 1263 { 1264 uint64_t auth_len, total_len; 1265 uint8_t *src, *linear_buf = NULL; 1266 int lb_offset = 0; 1267 struct rte_mbuf *src_seg; 1268 uint16_t src_len; 1269 1270 total_len = sgl_linear_cipher_auth_len(job, &auth_len); 1271 linear_buf = rte_zmalloc(NULL, total_len + job->auth_tag_output_len_in_bytes, 0); 1272 if (linear_buf == NULL) { 1273 IPSEC_MB_LOG(ERR, "Error allocating memory for SGL Linear Buffer"); 1274 return -1; 1275 } 1276 1277 for (src_seg = op->sym->m_src; (src_seg != NULL) && 1278 (total_len - lb_offset > 0); 1279 src_seg = src_seg->next) { 1280 src = rte_pktmbuf_mtod(src_seg, uint8_t *); 1281 src_len = RTE_MIN(src_seg->data_len, total_len - lb_offset); 1282 rte_memcpy(linear_buf + lb_offset, src, src_len); 1283 lb_offset += src_len; 1284 } 1285 1286 job->src = linear_buf; 1287 job->dst = linear_buf + dst_offset; 1288 job->user_data2 = linear_buf; 1289 1290 if (job->hash_alg == IMB_AUTH_AES_GMAC) 1291 job->u.GCM.aad = linear_buf; 1292 1293 if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) 1294 job->auth_tag_output = linear_buf + lb_offset; 1295 else 1296 job->auth_tag_output = linear_buf + auth_len; 1297 1298 return 0; 1299 } 1300 1301 static inline int 1302 imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg) 1303 { 1304 if (alg == IMB_CIPHER_CHACHA20_POLY1305 || 1305 alg == IMB_CIPHER_CHACHA20_POLY1305_SGL || 1306 alg == IMB_CIPHER_GCM_SGL || 1307 alg == IMB_CIPHER_GCM) 1308 return 1; 1309 return 0; 1310 } 1311 1312 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 1313 static inline int 1314 single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, 1315 int oop, uint32_t offset, struct rte_mbuf *m_src, 1316 struct rte_mbuf *m_dst, struct IMB_SGL_IOV *sgl_segs) 1317 { 1318 uint32_t num_segs = 0; 1319 struct aesni_mb_op_buf_data src_sgl = {0}; 1320 struct aesni_mb_op_buf_data dst_sgl = {0}; 1321 uint32_t total_len; 1322 1323 job->sgl_state = IMB_SGL_ALL; 1324 1325 src_sgl.m = m_src; 1326 src_sgl.offset = offset; 1327 1328 while (src_sgl.offset >= src_sgl.m->data_len) { 1329 src_sgl.offset -= src_sgl.m->data_len; 1330 src_sgl.m = src_sgl.m->next; 1331 1332 RTE_ASSERT(src_sgl.m != NULL); 1333 } 1334 1335 if (oop) { 1336 dst_sgl.m = m_dst; 1337 dst_sgl.offset = offset; 1338 1339 while (dst_sgl.offset >= dst_sgl.m->data_len) { 1340 dst_sgl.offset -= dst_sgl.m->data_len; 1341 dst_sgl.m = dst_sgl.m->next; 1342 1343 RTE_ASSERT(dst_sgl.m != NULL); 1344 } 1345 } 1346 total_len = op->sym->aead.data.length; 1347 1348 while (total_len != 0) { 1349 uint32_t data_len, part_len; 1350 1351 if (src_sgl.m == NULL) { 1352 IPSEC_MB_LOG(ERR, "Invalid source buffer"); 1353 return -EINVAL; 1354 } 1355 1356 data_len = src_sgl.m->data_len - src_sgl.offset; 1357 1358 sgl_segs[num_segs].in = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, 1359 src_sgl.offset); 1360 1361 if (dst_sgl.m != NULL) { 1362 if (dst_sgl.m->data_len - dst_sgl.offset == 0) { 1363 dst_sgl.m = dst_sgl.m->next; 1364 if (dst_sgl.m == NULL) { 1365 IPSEC_MB_LOG(ERR, "Invalid destination buffer"); 1366 return -EINVAL; 1367 } 1368 dst_sgl.offset = 0; 1369 } 1370 part_len = RTE_MIN(data_len, (dst_sgl.m->data_len - 1371 dst_sgl.offset)); 1372 sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(dst_sgl.m, 1373 uint8_t *, dst_sgl.offset); 1374 dst_sgl.offset += part_len; 1375 } else { 1376 part_len = RTE_MIN(data_len, total_len); 1377 sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, 1378 src_sgl.offset); 1379 } 1380 1381 sgl_segs[num_segs].len = part_len; 1382 1383 total_len -= part_len; 1384 1385 if (part_len != data_len) { 1386 src_sgl.offset += part_len; 1387 } else { 1388 src_sgl.m = src_sgl.m->next; 1389 src_sgl.offset = 0; 1390 } 1391 num_segs++; 1392 } 1393 job->num_sgl_io_segs = num_segs; 1394 job->sgl_io_segs = sgl_segs; 1395 return 0; 1396 } 1397 #endif 1398 1399 static inline int 1400 multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, 1401 int oop, uint32_t offset, struct rte_mbuf *m_src, 1402 struct rte_mbuf *m_dst, IMB_MGR *mb_mgr) 1403 { 1404 int ret; 1405 IMB_JOB base_job; 1406 struct aesni_mb_op_buf_data src_sgl = {0}; 1407 struct aesni_mb_op_buf_data dst_sgl = {0}; 1408 uint32_t total_len; 1409 1410 base_job = *job; 1411 job->sgl_state = IMB_SGL_INIT; 1412 job = IMB_SUBMIT_JOB(mb_mgr); 1413 total_len = op->sym->aead.data.length; 1414 1415 src_sgl.m = m_src; 1416 src_sgl.offset = offset; 1417 1418 while (src_sgl.offset >= src_sgl.m->data_len) { 1419 src_sgl.offset -= src_sgl.m->data_len; 1420 src_sgl.m = src_sgl.m->next; 1421 1422 RTE_ASSERT(src_sgl.m != NULL); 1423 } 1424 1425 if (oop) { 1426 dst_sgl.m = m_dst; 1427 dst_sgl.offset = offset; 1428 1429 while (dst_sgl.offset >= dst_sgl.m->data_len) { 1430 dst_sgl.offset -= dst_sgl.m->data_len; 1431 dst_sgl.m = dst_sgl.m->next; 1432 1433 RTE_ASSERT(dst_sgl.m != NULL); 1434 } 1435 } 1436 1437 while (job->sgl_state != IMB_SGL_COMPLETE) { 1438 job = IMB_GET_NEXT_JOB(mb_mgr); 1439 *job = base_job; 1440 ret = handle_aead_sgl_job(job, mb_mgr, &total_len, 1441 &src_sgl, &dst_sgl); 1442 if (ret < 0) 1443 return ret; 1444 } 1445 return 0; 1446 } 1447 1448 static inline int 1449 set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, 1450 struct aesni_mb_qp_data *qp_data, 1451 struct rte_crypto_op *op, uint8_t *digest_idx, 1452 const struct aesni_mb_session *session, 1453 struct rte_mbuf *m_src, struct rte_mbuf *m_dst, 1454 const int oop) 1455 { 1456 const uint32_t m_offset = op->sym->aead.data.offset; 1457 1458 job->u.GCM.aad = op->sym->aead.aad.data; 1459 if (sgl) { 1460 job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; 1461 job->cipher_mode = IMB_CIPHER_GCM_SGL; 1462 job->hash_alg = IMB_AUTH_GCM_SGL; 1463 job->hash_start_src_offset_in_bytes = 0; 1464 job->msg_len_to_hash_in_bytes = 0; 1465 job->msg_len_to_cipher_in_bytes = 0; 1466 job->cipher_start_src_offset_in_bytes = 0; 1467 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1468 imb_set_session(mb_mgr, job); 1469 #endif 1470 } else { 1471 job->hash_start_src_offset_in_bytes = 1472 op->sym->aead.data.offset; 1473 job->msg_len_to_hash_in_bytes = 1474 op->sym->aead.data.length; 1475 job->cipher_start_src_offset_in_bytes = 1476 op->sym->aead.data.offset; 1477 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; 1478 } 1479 1480 if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 1481 job->auth_tag_output = qp_data->temp_digests[*digest_idx]; 1482 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1483 } else { 1484 job->auth_tag_output = op->sym->aead.digest.data; 1485 } 1486 1487 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1488 session->iv.offset); 1489 1490 /* Set user data to be crypto operation data struct */ 1491 job->user_data = op; 1492 1493 if (sgl) { 1494 job->src = NULL; 1495 job->dst = NULL; 1496 1497 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 1498 if (m_src->nb_segs <= MAX_NUM_SEGS) 1499 return single_sgl_job(job, op, oop, 1500 m_offset, m_src, m_dst, 1501 qp_data->sgl_segs); 1502 else 1503 #endif 1504 return multi_sgl_job(job, op, oop, 1505 m_offset, m_src, m_dst, mb_mgr); 1506 } else { 1507 job->src = rte_pktmbuf_mtod(m_src, uint8_t *); 1508 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset); 1509 } 1510 1511 return 0; 1512 } 1513 1514 /** Check if conditions are met for digest-appended operations */ 1515 static uint8_t * 1516 aesni_mb_digest_appended_in_src(struct rte_crypto_op *op, IMB_JOB *job, 1517 uint32_t oop) 1518 { 1519 unsigned int auth_size, cipher_size; 1520 uint8_t *end_cipher; 1521 uint8_t *start_cipher; 1522 1523 if (job->cipher_mode == IMB_CIPHER_NULL) 1524 return NULL; 1525 1526 if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3 || 1527 job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || 1528 job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) { 1529 cipher_size = (op->sym->cipher.data.offset >> 3) + 1530 (op->sym->cipher.data.length >> 3); 1531 } else { 1532 cipher_size = (op->sym->cipher.data.offset) + 1533 (op->sym->cipher.data.length); 1534 } 1535 if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN || 1536 job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || 1537 job->hash_alg == IMB_AUTH_KASUMI_UIA1 || 1538 job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) { 1539 auth_size = (op->sym->auth.data.offset >> 3) + 1540 (op->sym->auth.data.length >> 3); 1541 } else { 1542 auth_size = (op->sym->auth.data.offset) + 1543 (op->sym->auth.data.length); 1544 } 1545 1546 if (!oop) { 1547 end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, cipher_size); 1548 start_cipher = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); 1549 } else { 1550 end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, cipher_size); 1551 start_cipher = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); 1552 } 1553 1554 if (start_cipher < op->sym->auth.digest.data && 1555 op->sym->auth.digest.data < end_cipher) { 1556 return rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, auth_size); 1557 } else { 1558 return NULL; 1559 } 1560 } 1561 1562 /** 1563 * Process a crypto operation and complete a IMB_JOB job structure for 1564 * submission to the multi buffer library for processing. 1565 * 1566 * @param qp queue pair 1567 * @param job IMB_JOB structure to fill 1568 * @param op crypto op to process 1569 * @param digest_idx ID for digest to use 1570 * 1571 * @return 1572 * - 0 on success, the IMB_JOB will be filled 1573 * - -1 if invalid session or errors allocating SGL linear buffer, 1574 * IMB_JOB will not be filled 1575 */ 1576 static inline int 1577 set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, 1578 struct rte_crypto_op *op, uint8_t *digest_idx, 1579 IMB_MGR *mb_mgr, pid_t pid) 1580 { 1581 struct rte_mbuf *m_src = op->sym->m_src, *m_dst; 1582 struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp); 1583 struct aesni_mb_session *session; 1584 uint32_t m_offset; 1585 int oop; 1586 uint32_t auth_off_in_bytes; 1587 uint32_t ciph_off_in_bytes; 1588 uint32_t auth_len_in_bytes; 1589 uint32_t ciph_len_in_bytes; 1590 uint8_t sgl = 0; 1591 uint8_t lb_sgl = 0; 1592 1593 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 1594 (void) pid; 1595 #endif 1596 1597 session = ipsec_mb_get_session_private(qp, op); 1598 if (session == NULL) { 1599 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1600 return -1; 1601 } 1602 1603 const IMB_CIPHER_MODE cipher_mode = 1604 session->template_job.cipher_mode; 1605 1606 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1607 if (session->pid != pid) { 1608 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1609 imb_set_session(mb_mgr, job); 1610 } else if (job->session_id != session->session_id) 1611 #endif 1612 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1613 1614 if (!op->sym->m_dst) { 1615 /* in-place operation */ 1616 m_dst = m_src; 1617 oop = 0; 1618 } else if (op->sym->m_dst == op->sym->m_src) { 1619 /* in-place operation */ 1620 m_dst = m_src; 1621 oop = 0; 1622 } else { 1623 /* out-of-place operation */ 1624 m_dst = op->sym->m_dst; 1625 oop = 1; 1626 } 1627 1628 if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) { 1629 sgl = 1; 1630 if (!imb_lib_support_sgl_algo(cipher_mode)) 1631 lb_sgl = 1; 1632 } 1633 1634 if (cipher_mode == IMB_CIPHER_GCM) 1635 return set_gcm_job(mb_mgr, job, sgl, qp_data, 1636 op, digest_idx, session, m_src, m_dst, oop); 1637 1638 /* Set authentication parameters */ 1639 const int aead = is_aead_algo(job->hash_alg, cipher_mode); 1640 1641 switch (job->hash_alg) { 1642 case IMB_AUTH_AES_CCM: 1643 job->u.CCM.aad = op->sym->aead.aad.data + 18; 1644 break; 1645 1646 case IMB_AUTH_AES_GMAC: 1647 job->u.GCM.aad = op->sym->aead.aad.data; 1648 if (sgl) { 1649 job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; 1650 job->cipher_mode = IMB_CIPHER_GCM_SGL; 1651 job->hash_alg = IMB_AUTH_GCM_SGL; 1652 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1653 imb_set_session(mb_mgr, job); 1654 #endif 1655 } 1656 break; 1657 case IMB_AUTH_AES_GMAC_128: 1658 case IMB_AUTH_AES_GMAC_192: 1659 case IMB_AUTH_AES_GMAC_256: 1660 job->u.GMAC._iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1661 session->auth_iv.offset); 1662 break; 1663 case IMB_AUTH_ZUC_EIA3_BITLEN: 1664 case IMB_AUTH_ZUC256_EIA3_BITLEN: 1665 job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1666 session->auth_iv.offset); 1667 break; 1668 case IMB_AUTH_SNOW3G_UIA2_BITLEN: 1669 job->u.SNOW3G_UIA2._iv = 1670 rte_crypto_op_ctod_offset(op, uint8_t *, 1671 session->auth_iv.offset); 1672 break; 1673 case IMB_AUTH_CHACHA20_POLY1305: 1674 job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data; 1675 if (sgl) { 1676 job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx; 1677 job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; 1678 job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; 1679 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1680 imb_set_session(mb_mgr, job); 1681 #endif 1682 } 1683 break; 1684 default: 1685 break; 1686 } 1687 1688 if (aead) 1689 m_offset = op->sym->aead.data.offset; 1690 else 1691 m_offset = op->sym->cipher.data.offset; 1692 1693 if (cipher_mode == IMB_CIPHER_ZUC_EEA3) 1694 m_offset >>= 3; 1695 else if (cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) 1696 m_offset = 0; 1697 else if (cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) 1698 m_offset = 0; 1699 1700 /* Set digest output location */ 1701 if (job->hash_alg != IMB_AUTH_NULL && 1702 session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 1703 job->auth_tag_output = qp_data->temp_digests[*digest_idx]; 1704 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1705 } else { 1706 if (aead) 1707 job->auth_tag_output = op->sym->aead.digest.data; 1708 else { 1709 job->auth_tag_output = aesni_mb_digest_appended_in_src(op, job, oop); 1710 if (job->auth_tag_output == NULL) { 1711 job->auth_tag_output = op->sym->auth.digest.data; 1712 } 1713 } 1714 if (session->auth.req_digest_len != 1715 job->auth_tag_output_len_in_bytes) { 1716 job->auth_tag_output = 1717 qp_data->temp_digests[*digest_idx]; 1718 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1719 } 1720 } 1721 /* 1722 * Multi-buffer library current only support returning a truncated 1723 * digest length as specified in the relevant IPsec RFCs 1724 */ 1725 1726 /* Data Parameters */ 1727 if (sgl) { 1728 job->src = NULL; 1729 job->dst = NULL; 1730 } else { 1731 job->src = rte_pktmbuf_mtod(m_src, uint8_t *); 1732 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset); 1733 } 1734 1735 switch (job->hash_alg) { 1736 case IMB_AUTH_AES_CCM: 1737 job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset; 1738 job->msg_len_to_hash_in_bytes = op->sym->aead.data.length; 1739 1740 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1741 session->iv.offset + 1); 1742 break; 1743 1744 case IMB_AUTH_AES_GMAC: 1745 job->hash_start_src_offset_in_bytes = 1746 op->sym->aead.data.offset; 1747 job->msg_len_to_hash_in_bytes = 1748 op->sym->aead.data.length; 1749 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1750 session->iv.offset); 1751 break; 1752 case IMB_AUTH_AES_GMAC_128: 1753 case IMB_AUTH_AES_GMAC_192: 1754 case IMB_AUTH_AES_GMAC_256: 1755 job->hash_start_src_offset_in_bytes = 1756 op->sym->auth.data.offset; 1757 job->msg_len_to_hash_in_bytes = 1758 op->sym->auth.data.length; 1759 break; 1760 1761 case IMB_AUTH_GCM_SGL: 1762 case IMB_AUTH_CHACHA20_POLY1305_SGL: 1763 job->hash_start_src_offset_in_bytes = 0; 1764 job->msg_len_to_hash_in_bytes = 0; 1765 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1766 session->iv.offset); 1767 break; 1768 1769 case IMB_AUTH_CHACHA20_POLY1305: 1770 job->hash_start_src_offset_in_bytes = 1771 op->sym->aead.data.offset; 1772 job->msg_len_to_hash_in_bytes = 1773 op->sym->aead.data.length; 1774 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1775 session->iv.offset); 1776 break; 1777 /* ZUC and SNOW3G require length in bits and offset in bytes */ 1778 case IMB_AUTH_ZUC_EIA3_BITLEN: 1779 case IMB_AUTH_ZUC256_EIA3_BITLEN: 1780 case IMB_AUTH_SNOW3G_UIA2_BITLEN: 1781 auth_off_in_bytes = op->sym->auth.data.offset >> 3; 1782 ciph_off_in_bytes = op->sym->cipher.data.offset >> 3; 1783 auth_len_in_bytes = op->sym->auth.data.length >> 3; 1784 ciph_len_in_bytes = op->sym->cipher.data.length >> 3; 1785 1786 job->hash_start_src_offset_in_bytes = auth_start_offset(op, 1787 session, oop, auth_off_in_bytes, 1788 ciph_off_in_bytes, auth_len_in_bytes, 1789 ciph_len_in_bytes, lb_sgl); 1790 job->msg_len_to_hash_in_bits = op->sym->auth.data.length; 1791 1792 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1793 session->iv.offset); 1794 break; 1795 1796 /* KASUMI requires lengths and offset in bytes */ 1797 case IMB_AUTH_KASUMI_UIA1: 1798 auth_off_in_bytes = op->sym->auth.data.offset >> 3; 1799 ciph_off_in_bytes = op->sym->cipher.data.offset >> 3; 1800 auth_len_in_bytes = op->sym->auth.data.length >> 3; 1801 ciph_len_in_bytes = op->sym->cipher.data.length >> 3; 1802 1803 job->hash_start_src_offset_in_bytes = auth_start_offset(op, 1804 session, oop, auth_off_in_bytes, 1805 ciph_off_in_bytes, auth_len_in_bytes, 1806 ciph_len_in_bytes, lb_sgl); 1807 job->msg_len_to_hash_in_bytes = auth_len_in_bytes; 1808 1809 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1810 session->iv.offset); 1811 break; 1812 1813 default: 1814 job->hash_start_src_offset_in_bytes = auth_start_offset(op, 1815 session, oop, op->sym->auth.data.offset, 1816 op->sym->cipher.data.offset, 1817 op->sym->auth.data.length, 1818 op->sym->cipher.data.length, lb_sgl); 1819 job->msg_len_to_hash_in_bytes = op->sym->auth.data.length; 1820 1821 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1822 session->iv.offset); 1823 } 1824 1825 switch (job->cipher_mode) { 1826 /* ZUC requires length and offset in bytes */ 1827 case IMB_CIPHER_ZUC_EEA3: 1828 job->cipher_start_src_offset_in_bytes = 1829 op->sym->cipher.data.offset >> 3; 1830 job->msg_len_to_cipher_in_bytes = 1831 op->sym->cipher.data.length >> 3; 1832 break; 1833 /* ZUC and SNOW3G require length and offset in bits */ 1834 case IMB_CIPHER_SNOW3G_UEA2_BITLEN: 1835 case IMB_CIPHER_KASUMI_UEA1_BITLEN: 1836 job->cipher_start_src_offset_in_bits = 1837 op->sym->cipher.data.offset; 1838 job->msg_len_to_cipher_in_bits = 1839 op->sym->cipher.data.length; 1840 break; 1841 case IMB_CIPHER_GCM: 1842 job->cipher_start_src_offset_in_bytes = 1843 op->sym->aead.data.offset; 1844 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; 1845 break; 1846 case IMB_CIPHER_CCM: 1847 case IMB_CIPHER_CHACHA20_POLY1305: 1848 job->cipher_start_src_offset_in_bytes = 1849 op->sym->aead.data.offset; 1850 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; 1851 break; 1852 case IMB_CIPHER_GCM_SGL: 1853 case IMB_CIPHER_CHACHA20_POLY1305_SGL: 1854 job->msg_len_to_cipher_in_bytes = 0; 1855 job->cipher_start_src_offset_in_bytes = 0; 1856 break; 1857 default: 1858 job->cipher_start_src_offset_in_bytes = 1859 op->sym->cipher.data.offset; 1860 job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length; 1861 } 1862 1863 if (cipher_mode == IMB_CIPHER_NULL && oop) { 1864 memcpy(job->dst + job->cipher_start_src_offset_in_bytes, 1865 job->src + job->cipher_start_src_offset_in_bytes, 1866 job->msg_len_to_cipher_in_bytes); 1867 } 1868 1869 /* Set user data to be crypto operation data struct */ 1870 job->user_data = op; 1871 1872 if (sgl) { 1873 1874 if (lb_sgl) 1875 return handle_sgl_linear(job, op, m_offset, session); 1876 1877 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 1878 if (m_src->nb_segs <= MAX_NUM_SEGS) 1879 return single_sgl_job(job, op, oop, 1880 m_offset, m_src, m_dst, 1881 qp_data->sgl_segs); 1882 else 1883 #endif 1884 return multi_sgl_job(job, op, oop, 1885 m_offset, m_src, m_dst, mb_mgr); 1886 } 1887 1888 return 0; 1889 } 1890 1891 /** 1892 * Process a crypto operation containing a security op and complete a 1893 * IMB_JOB job structure for submission to the multi buffer library for 1894 * processing. 1895 */ 1896 static inline int 1897 set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, 1898 struct rte_crypto_op *op, uint8_t *digest_idx) 1899 { 1900 struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp); 1901 struct rte_mbuf *m_src, *m_dst; 1902 struct rte_crypto_sym_op *sym; 1903 struct aesni_mb_session *session = NULL; 1904 1905 if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) { 1906 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1907 return -1; 1908 } 1909 session = SECURITY_GET_SESS_PRIV(op->sym->session); 1910 1911 if (unlikely(session == NULL)) { 1912 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1913 return -1; 1914 } 1915 /* Only DOCSIS protocol operations supported now */ 1916 if (session->template_job.cipher_mode != IMB_CIPHER_DOCSIS_SEC_BPI || 1917 session->template_job.hash_alg != IMB_AUTH_DOCSIS_CRC32) { 1918 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 1919 return -1; 1920 } 1921 1922 sym = op->sym; 1923 m_src = sym->m_src; 1924 1925 if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) { 1926 /* in-place operation */ 1927 m_dst = m_src; 1928 } else { 1929 /* out-of-place operation not supported */ 1930 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 1931 return -ENOTSUP; 1932 } 1933 1934 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1935 1936 /* Set cipher parameters */ 1937 job->enc_keys = session->cipher.expanded_aes_keys.encode; 1938 job->dec_keys = session->cipher.expanded_aes_keys.decode; 1939 1940 /* Set IV parameters */ 1941 job->iv = (uint8_t *)op + session->iv.offset; 1942 1943 /* Set digest output location */ 1944 job->auth_tag_output = qp_data->temp_digests[*digest_idx]; 1945 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1946 1947 /* Set data parameters */ 1948 job->src = rte_pktmbuf_mtod(m_src, uint8_t *); 1949 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, 1950 sym->cipher.data.offset); 1951 1952 job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset; 1953 job->msg_len_to_cipher_in_bytes = sym->cipher.data.length; 1954 1955 job->hash_start_src_offset_in_bytes = sym->auth.data.offset; 1956 job->msg_len_to_hash_in_bytes = sym->auth.data.length; 1957 1958 job->user_data = op; 1959 1960 return 0; 1961 } 1962 1963 static inline void 1964 verify_docsis_sec_crc(IMB_JOB *job, uint8_t *status) 1965 { 1966 uint16_t crc_offset; 1967 uint8_t *crc; 1968 1969 if (!job->msg_len_to_hash_in_bytes) 1970 return; 1971 1972 crc_offset = job->hash_start_src_offset_in_bytes + 1973 job->msg_len_to_hash_in_bytes - 1974 job->cipher_start_src_offset_in_bytes; 1975 crc = job->dst + crc_offset; 1976 1977 /* Verify CRC (at the end of the message) */ 1978 if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0) 1979 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; 1980 } 1981 1982 static inline void 1983 verify_digest(IMB_JOB *job, void *digest, uint16_t len, uint8_t *status) 1984 { 1985 /* Verify digest if required */ 1986 if (memcmp(job->auth_tag_output, digest, len) != 0) 1987 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; 1988 } 1989 1990 static inline void 1991 generate_digest(IMB_JOB *job, struct rte_crypto_op *op, 1992 struct aesni_mb_session *sess) 1993 { 1994 /* No extra copy needed */ 1995 if (likely(sess->auth.req_digest_len == job->auth_tag_output_len_in_bytes)) 1996 return; 1997 1998 /* 1999 * This can only happen for HMAC, so only digest 2000 * for authentication algos is required 2001 */ 2002 memcpy(op->sym->auth.digest.data, job->auth_tag_output, 2003 sess->auth.req_digest_len); 2004 } 2005 2006 static void 2007 post_process_sgl_linear(struct rte_crypto_op *op, IMB_JOB *job, 2008 struct aesni_mb_session *sess, uint8_t *linear_buf) 2009 { 2010 2011 int lb_offset = 0; 2012 struct rte_mbuf *m_dst = op->sym->m_dst == NULL ? 2013 op->sym->m_src : op->sym->m_dst; 2014 uint16_t total_len, dst_len; 2015 uint64_t auth_len; 2016 uint8_t *dst; 2017 2018 total_len = sgl_linear_cipher_auth_len(job, &auth_len); 2019 2020 if (sess->auth.operation != RTE_CRYPTO_AUTH_OP_VERIFY) 2021 total_len += job->auth_tag_output_len_in_bytes; 2022 2023 for (; (m_dst != NULL) && (total_len - lb_offset > 0); m_dst = m_dst->next) { 2024 dst = rte_pktmbuf_mtod(m_dst, uint8_t *); 2025 dst_len = RTE_MIN(m_dst->data_len, total_len - lb_offset); 2026 rte_memcpy(dst, linear_buf + lb_offset, dst_len); 2027 lb_offset += dst_len; 2028 } 2029 } 2030 2031 /** 2032 * Process a completed job and return rte_mbuf which job processed 2033 * 2034 * @param qp Queue Pair to process 2035 * @param job IMB_JOB job to process 2036 * 2037 * @return 2038 * - Returns processed crypto operation. 2039 * - Returns NULL on invalid job 2040 */ 2041 static inline struct rte_crypto_op * 2042 post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) 2043 { 2044 struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data; 2045 struct aesni_mb_session *sess = NULL; 2046 uint8_t *linear_buf = NULL; 2047 int sgl = 0; 2048 uint8_t oop = 0; 2049 uint8_t is_docsis_sec = 0; 2050 2051 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 2052 /* 2053 * Assuming at this point that if it's a security type op, that 2054 * this is for DOCSIS 2055 */ 2056 is_docsis_sec = 1; 2057 sess = SECURITY_GET_SESS_PRIV(op->sym->session); 2058 } else 2059 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 2060 2061 if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) { 2062 switch (job->status) { 2063 case IMB_STATUS_COMPLETED: 2064 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 2065 2066 if ((op->sym->m_src->nb_segs > 1 || 2067 (op->sym->m_dst != NULL && 2068 op->sym->m_dst->nb_segs > 1)) && 2069 !imb_lib_support_sgl_algo(job->cipher_mode)) { 2070 linear_buf = (uint8_t *) job->user_data2; 2071 sgl = 1; 2072 2073 post_process_sgl_linear(op, job, sess, linear_buf); 2074 } 2075 2076 if (job->hash_alg == IMB_AUTH_NULL) 2077 break; 2078 2079 if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 2080 if (is_aead_algo(job->hash_alg, 2081 job->cipher_mode)) 2082 verify_digest(job, 2083 op->sym->aead.digest.data, 2084 sess->auth.req_digest_len, 2085 &op->status); 2086 else if (is_docsis_sec) 2087 verify_docsis_sec_crc(job, 2088 &op->status); 2089 else 2090 verify_digest(job, 2091 op->sym->auth.digest.data, 2092 sess->auth.req_digest_len, 2093 &op->status); 2094 } else { 2095 if (!op->sym->m_dst || op->sym->m_dst == op->sym->m_src) { 2096 /* in-place operation */ 2097 oop = 0; 2098 } else { /* out-of-place operation */ 2099 oop = 1; 2100 } 2101 2102 /* Enable digest check */ 2103 if (op->sym->m_src->nb_segs == 1 && op->sym->m_dst != NULL 2104 && !is_aead_algo(job->hash_alg, sess->template_job.cipher_mode) && 2105 aesni_mb_digest_appended_in_src(op, job, oop) != NULL) { 2106 unsigned int auth_size, cipher_size; 2107 int unencrypted_bytes = 0; 2108 if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || 2109 job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN || 2110 job->cipher_mode == IMB_CIPHER_ZUC_EEA3) { 2111 cipher_size = (op->sym->cipher.data.offset >> 3) + 2112 (op->sym->cipher.data.length >> 3); 2113 } else { 2114 cipher_size = (op->sym->cipher.data.offset) + 2115 (op->sym->cipher.data.length); 2116 } 2117 if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN || 2118 job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || 2119 job->hash_alg == IMB_AUTH_KASUMI_UIA1 || 2120 job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) { 2121 auth_size = (op->sym->auth.data.offset >> 3) + 2122 (op->sym->auth.data.length >> 3); 2123 } else { 2124 auth_size = (op->sym->auth.data.offset) + 2125 (op->sym->auth.data.length); 2126 } 2127 /* Check for unencrypted bytes in partial digest cases */ 2128 if (job->cipher_mode != IMB_CIPHER_NULL) { 2129 unencrypted_bytes = auth_size + 2130 job->auth_tag_output_len_in_bytes - cipher_size; 2131 } 2132 if (unencrypted_bytes > 0) 2133 rte_memcpy( 2134 rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, 2135 cipher_size), 2136 rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, 2137 cipher_size), 2138 unencrypted_bytes); 2139 } 2140 generate_digest(job, op, sess); 2141 } 2142 break; 2143 default: 2144 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 2145 } 2146 if (sgl) 2147 rte_free(linear_buf); 2148 } 2149 2150 /* Free session if a session-less crypto op */ 2151 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 2152 memset(sess, 0, sizeof(struct aesni_mb_session)); 2153 rte_mempool_put(qp->sess_mp, op->sym->session); 2154 op->sym->session = NULL; 2155 } 2156 2157 return op; 2158 } 2159 2160 static inline void 2161 post_process_mb_sync_job(IMB_JOB *job) 2162 { 2163 uint32_t *st; 2164 2165 st = job->user_data; 2166 st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG; 2167 } 2168 2169 static inline uint32_t 2170 handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr) 2171 { 2172 uint32_t i; 2173 2174 for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr)) 2175 post_process_mb_sync_job(job); 2176 2177 return i; 2178 } 2179 2180 static inline uint32_t 2181 flush_mb_sync_mgr(IMB_MGR *mb_mgr) 2182 { 2183 IMB_JOB *job; 2184 2185 job = IMB_FLUSH_JOB(mb_mgr); 2186 return handle_completed_sync_jobs(job, mb_mgr); 2187 } 2188 2189 static inline IMB_JOB * 2190 set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op) 2191 { 2192 job->chain_order = IMB_ORDER_HASH_CIPHER; 2193 job->cipher_mode = IMB_CIPHER_NULL; 2194 job->hash_alg = IMB_AUTH_NULL; 2195 job->cipher_direction = IMB_DIR_DECRYPT; 2196 2197 /* Set user data to be crypto operation data struct */ 2198 job->user_data = op; 2199 2200 return job; 2201 } 2202 2203 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 2204 uint16_t 2205 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, 2206 uint16_t nb_ops) 2207 { 2208 struct ipsec_mb_qp *qp = queue_pair; 2209 IMB_MGR *mb_mgr = qp->mb_mgr; 2210 struct rte_crypto_op *op; 2211 struct rte_crypto_op *deqd_ops[IMB_MAX_BURST_SIZE]; 2212 IMB_JOB *job; 2213 int retval, processed_jobs = 0; 2214 uint16_t i, nb_jobs; 2215 IMB_JOB *jobs[IMB_MAX_BURST_SIZE] = {NULL}; 2216 pid_t pid; 2217 2218 if (unlikely(nb_ops == 0 || mb_mgr == NULL)) 2219 return 0; 2220 2221 uint8_t digest_idx = qp->digest_idx; 2222 uint16_t burst_sz = (nb_ops > IMB_MAX_BURST_SIZE) ? 2223 IMB_MAX_BURST_SIZE : nb_ops; 2224 2225 /* 2226 * If nb_ops is greater than the max supported 2227 * ipsec_mb burst size, then process in bursts of 2228 * IMB_MAX_BURST_SIZE until all operations are submitted 2229 */ 2230 while (nb_ops) { 2231 uint16_t nb_submit_ops; 2232 uint16_t n = (nb_ops / burst_sz) ? 2233 burst_sz : nb_ops; 2234 2235 if (unlikely((IMB_GET_NEXT_BURST(mb_mgr, n, jobs)) < n)) { 2236 /* 2237 * Not enough free jobs in the queue 2238 * Flush n jobs until enough jobs available 2239 */ 2240 nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); 2241 for (i = 0; i < nb_jobs; i++) { 2242 job = jobs[i]; 2243 2244 op = post_process_mb_job(qp, job); 2245 if (op) { 2246 ops[processed_jobs++] = op; 2247 qp->stats.dequeued_count++; 2248 } else { 2249 qp->stats.dequeue_err_count++; 2250 break; 2251 } 2252 } 2253 nb_ops -= i; 2254 continue; 2255 } 2256 2257 if (!RTE_PER_LCORE(pid)) 2258 RTE_PER_LCORE(pid) = getpid(); 2259 2260 pid = RTE_PER_LCORE(pid); 2261 2262 /* 2263 * Get the next operations to process from ingress queue. 2264 * There is no need to return the job to the IMB_MGR 2265 * if there are no more operations to process, since 2266 * the IMB_MGR can use that pointer again in next 2267 * get_next calls. 2268 */ 2269 nb_submit_ops = rte_ring_dequeue_burst(qp->ingress_queue, 2270 (void **)deqd_ops, n, NULL); 2271 for (i = 0; i < nb_submit_ops; i++) { 2272 job = jobs[i]; 2273 op = deqd_ops[i]; 2274 2275 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 2276 retval = set_sec_mb_job_params(job, qp, op, 2277 &digest_idx); 2278 else 2279 retval = set_mb_job_params(job, qp, op, 2280 &digest_idx, mb_mgr, pid); 2281 2282 if (unlikely(retval != 0)) { 2283 qp->stats.dequeue_err_count++; 2284 set_job_null_op(job, op); 2285 } 2286 } 2287 2288 /* Submit jobs to multi-buffer for processing */ 2289 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 2290 int err = 0; 2291 2292 nb_jobs = IMB_SUBMIT_BURST(mb_mgr, nb_submit_ops, jobs); 2293 err = imb_get_errno(mb_mgr); 2294 if (err) 2295 IPSEC_MB_LOG(ERR, "%s", imb_get_strerror(err)); 2296 #else 2297 nb_jobs = IMB_SUBMIT_BURST_NOCHECK(mb_mgr, 2298 nb_submit_ops, jobs); 2299 #endif 2300 for (i = 0; i < nb_jobs; i++) { 2301 job = jobs[i]; 2302 2303 op = post_process_mb_job(qp, job); 2304 if (op) { 2305 ops[processed_jobs++] = op; 2306 qp->stats.dequeued_count++; 2307 } else { 2308 qp->stats.dequeue_err_count++; 2309 break; 2310 } 2311 } 2312 2313 qp->digest_idx = digest_idx; 2314 2315 if (processed_jobs < 1) { 2316 nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); 2317 2318 for (i = 0; i < nb_jobs; i++) { 2319 job = jobs[i]; 2320 2321 op = post_process_mb_job(qp, job); 2322 if (op) { 2323 ops[processed_jobs++] = op; 2324 qp->stats.dequeued_count++; 2325 } else { 2326 qp->stats.dequeue_err_count++; 2327 break; 2328 } 2329 } 2330 } 2331 nb_ops -= n; 2332 } 2333 2334 return processed_jobs; 2335 } 2336 #else 2337 2338 /** 2339 * Process a completed IMB_JOB job and keep processing jobs until 2340 * get_completed_job return NULL 2341 * 2342 * @param qp Queue Pair to process 2343 * @param mb_mgr IMB_MGR to use 2344 * @param job IMB_JOB job 2345 * @param ops crypto ops to fill 2346 * @param nb_ops number of crypto ops 2347 * 2348 * @return 2349 * - Number of processed jobs 2350 */ 2351 static unsigned 2352 handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, 2353 IMB_JOB *job, struct rte_crypto_op **ops, 2354 uint16_t nb_ops) 2355 { 2356 struct rte_crypto_op *op = NULL; 2357 uint16_t processed_jobs = 0; 2358 2359 while (job != NULL) { 2360 op = post_process_mb_job(qp, job); 2361 2362 if (op) { 2363 ops[processed_jobs++] = op; 2364 qp->stats.dequeued_count++; 2365 } else { 2366 qp->stats.dequeue_err_count++; 2367 break; 2368 } 2369 if (processed_jobs == nb_ops) 2370 break; 2371 2372 job = IMB_GET_COMPLETED_JOB(mb_mgr); 2373 } 2374 2375 return processed_jobs; 2376 } 2377 2378 static inline uint16_t 2379 flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, 2380 struct rte_crypto_op **ops, uint16_t nb_ops) 2381 { 2382 int processed_ops = 0; 2383 2384 /* Flush the remaining jobs */ 2385 IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr); 2386 2387 if (job) 2388 processed_ops += handle_completed_jobs(qp, mb_mgr, job, 2389 &ops[processed_ops], nb_ops - processed_ops); 2390 2391 return processed_ops; 2392 } 2393 2394 uint16_t 2395 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, 2396 uint16_t nb_ops) 2397 { 2398 struct ipsec_mb_qp *qp = queue_pair; 2399 IMB_MGR *mb_mgr = qp->mb_mgr; 2400 struct rte_crypto_op *op; 2401 IMB_JOB *job; 2402 int retval, processed_jobs = 0; 2403 pid_t pid = 0; 2404 2405 if (unlikely(nb_ops == 0 || mb_mgr == NULL)) 2406 return 0; 2407 2408 uint8_t digest_idx = qp->digest_idx; 2409 2410 do { 2411 /* Get next free mb job struct from mb manager */ 2412 job = IMB_GET_NEXT_JOB(mb_mgr); 2413 if (unlikely(job == NULL)) { 2414 /* if no free mb job structs we need to flush mb_mgr */ 2415 processed_jobs += flush_mb_mgr(qp, mb_mgr, 2416 &ops[processed_jobs], 2417 nb_ops - processed_jobs); 2418 2419 if (nb_ops == processed_jobs) 2420 break; 2421 2422 job = IMB_GET_NEXT_JOB(mb_mgr); 2423 } 2424 2425 /* 2426 * Get next operation to process from ingress queue. 2427 * There is no need to return the job to the IMB_MGR 2428 * if there are no more operations to process, since the IMB_MGR 2429 * can use that pointer again in next get_next calls. 2430 */ 2431 retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op); 2432 if (retval < 0) 2433 break; 2434 2435 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 2436 retval = set_sec_mb_job_params(job, qp, op, 2437 &digest_idx); 2438 else 2439 retval = set_mb_job_params(job, qp, op, 2440 &digest_idx, mb_mgr, pid); 2441 2442 if (unlikely(retval != 0)) { 2443 qp->stats.dequeue_err_count++; 2444 set_job_null_op(job, op); 2445 } 2446 2447 /* Submit job to multi-buffer for processing */ 2448 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 2449 job = IMB_SUBMIT_JOB(mb_mgr); 2450 #else 2451 job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr); 2452 #endif 2453 /* 2454 * If submit returns a processed job then handle it, 2455 * before submitting subsequent jobs 2456 */ 2457 if (job) 2458 processed_jobs += handle_completed_jobs(qp, mb_mgr, 2459 job, &ops[processed_jobs], 2460 nb_ops - processed_jobs); 2461 2462 } while (processed_jobs < nb_ops); 2463 2464 qp->digest_idx = digest_idx; 2465 2466 if (processed_jobs < 1) 2467 processed_jobs += flush_mb_mgr(qp, mb_mgr, 2468 &ops[processed_jobs], 2469 nb_ops - processed_jobs); 2470 2471 return processed_jobs; 2472 } 2473 #endif 2474 static inline int 2475 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl) 2476 { 2477 /* no multi-seg support with current AESNI-MB PMD */ 2478 if (sgl->num != 1) 2479 return -ENOTSUP; 2480 else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len) 2481 return -EINVAL; 2482 return 0; 2483 } 2484 2485 static inline IMB_JOB * 2486 submit_sync_job(IMB_MGR *mb_mgr) 2487 { 2488 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 2489 return IMB_SUBMIT_JOB(mb_mgr); 2490 #else 2491 return IMB_SUBMIT_JOB_NOCHECK(mb_mgr); 2492 #endif 2493 } 2494 2495 static inline uint32_t 2496 generate_sync_dgst(struct rte_crypto_sym_vec *vec, 2497 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len) 2498 { 2499 uint32_t i, k; 2500 2501 for (i = 0, k = 0; i != vec->num; i++) { 2502 if (vec->status[i] == 0) { 2503 memcpy(vec->digest[i].va, dgst[i], len); 2504 k++; 2505 } 2506 } 2507 2508 return k; 2509 } 2510 2511 static inline uint32_t 2512 verify_sync_dgst(struct rte_crypto_sym_vec *vec, 2513 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len) 2514 { 2515 uint32_t i, k; 2516 2517 for (i = 0, k = 0; i != vec->num; i++) { 2518 if (vec->status[i] == 0) { 2519 if (memcmp(vec->digest[i].va, dgst[i], len) != 0) 2520 vec->status[i] = EBADMSG; 2521 else 2522 k++; 2523 } 2524 } 2525 2526 return k; 2527 } 2528 2529 uint32_t 2530 aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused, 2531 struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs, 2532 struct rte_crypto_sym_vec *vec) 2533 { 2534 int32_t ret; 2535 uint32_t i, j, k, len; 2536 void *buf; 2537 IMB_JOB *job; 2538 IMB_MGR *mb_mgr; 2539 struct aesni_mb_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 2540 uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX]; 2541 2542 /* get per-thread MB MGR, create one if needed */ 2543 mb_mgr = get_per_thread_mb_mgr(); 2544 if (unlikely(mb_mgr == NULL)) 2545 return 0; 2546 2547 for (i = 0, j = 0, k = 0; i != vec->num; i++) { 2548 ret = check_crypto_sgl(sofs, vec->src_sgl + i); 2549 if (ret != 0) { 2550 vec->status[i] = ret; 2551 continue; 2552 } 2553 2554 buf = vec->src_sgl[i].vec[0].base; 2555 len = vec->src_sgl[i].vec[0].len; 2556 2557 job = IMB_GET_NEXT_JOB(mb_mgr); 2558 if (job == NULL) { 2559 k += flush_mb_sync_mgr(mb_mgr); 2560 job = IMB_GET_NEXT_JOB(mb_mgr); 2561 RTE_ASSERT(job != NULL); 2562 } 2563 2564 /* Submit job for processing */ 2565 set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i], 2566 &vec->aad[i], tmp_dgst[i], &vec->status[i]); 2567 job = submit_sync_job(mb_mgr); 2568 j++; 2569 2570 /* handle completed jobs */ 2571 k += handle_completed_sync_jobs(job, mb_mgr); 2572 } 2573 2574 /* flush remaining jobs */ 2575 while (k != j) 2576 k += flush_mb_sync_mgr(mb_mgr); 2577 2578 /* finish processing for successful jobs: check/update digest */ 2579 if (k != 0) { 2580 if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) 2581 k = verify_sync_dgst(vec, 2582 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst, 2583 s->auth.req_digest_len); 2584 else 2585 k = generate_sync_dgst(vec, 2586 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst, 2587 s->auth.req_digest_len); 2588 } 2589 2590 return k; 2591 } 2592 2593 struct rte_cryptodev_ops aesni_mb_pmd_ops = { 2594 .dev_configure = ipsec_mb_config, 2595 .dev_start = ipsec_mb_start, 2596 .dev_stop = ipsec_mb_stop, 2597 .dev_close = ipsec_mb_close, 2598 2599 .stats_get = ipsec_mb_stats_get, 2600 .stats_reset = ipsec_mb_stats_reset, 2601 2602 .dev_infos_get = ipsec_mb_info_get, 2603 2604 .queue_pair_setup = ipsec_mb_qp_setup, 2605 .queue_pair_release = ipsec_mb_qp_release, 2606 2607 .sym_cpu_process = aesni_mb_process_bulk, 2608 2609 .sym_session_get_size = ipsec_mb_sym_session_get_size, 2610 .sym_session_configure = ipsec_mb_sym_session_configure, 2611 .sym_session_clear = ipsec_mb_sym_session_clear 2612 }; 2613 2614 /** 2615 * Configure a aesni multi-buffer session from a security session 2616 * configuration 2617 */ 2618 static int 2619 aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf, 2620 struct rte_security_session *sess) 2621 { 2622 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 2623 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 2624 int ret; 2625 2626 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL || 2627 conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) { 2628 IPSEC_MB_LOG(ERR, "Invalid security protocol"); 2629 return -EINVAL; 2630 } 2631 2632 ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf, 2633 sess_private_data); 2634 2635 if (ret != 0) { 2636 IPSEC_MB_LOG(ERR, "Failed to configure session parameters"); 2637 return ret; 2638 } 2639 2640 return ret; 2641 } 2642 2643 /** Clear the memory of session so it does not leave key material behind */ 2644 static int 2645 aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused, 2646 struct rte_security_session *sess) 2647 { 2648 void *sess_priv = SECURITY_GET_SESS_PRIV(sess); 2649 2650 if (sess_priv) { 2651 memset(sess_priv, 0, sizeof(struct aesni_mb_session)); 2652 } 2653 return 0; 2654 } 2655 2656 static unsigned int 2657 aesni_mb_pmd_sec_sess_get_size(void *device __rte_unused) 2658 { 2659 return sizeof(struct aesni_mb_session); 2660 } 2661 2662 /** Get security capabilities for aesni multi-buffer */ 2663 static const struct rte_security_capability * 2664 aesni_mb_pmd_sec_capa_get(void *device __rte_unused) 2665 { 2666 return aesni_mb_pmd_security_cap; 2667 } 2668 2669 static struct rte_security_ops aesni_mb_pmd_sec_ops = { 2670 .session_create = aesni_mb_pmd_sec_sess_create, 2671 .session_update = NULL, 2672 .session_get_size = aesni_mb_pmd_sec_sess_get_size, 2673 .session_stats_get = NULL, 2674 .session_destroy = aesni_mb_pmd_sec_sess_destroy, 2675 .set_pkt_metadata = NULL, 2676 .capabilities_get = aesni_mb_pmd_sec_capa_get 2677 }; 2678 2679 struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops; 2680 2681 static int 2682 aesni_mb_configure_dev(struct rte_cryptodev *dev) 2683 { 2684 struct rte_security_ctx *security_instance; 2685 2686 security_instance = rte_malloc("aesni_mb_sec", 2687 sizeof(struct rte_security_ctx), 2688 RTE_CACHE_LINE_SIZE); 2689 if (security_instance != NULL) { 2690 security_instance->device = (void *)dev; 2691 security_instance->ops = rte_aesni_mb_pmd_sec_ops; 2692 security_instance->sess_cnt = 0; 2693 dev->security_ctx = security_instance; 2694 2695 return 0; 2696 } 2697 2698 return -ENOMEM; 2699 } 2700 2701 static int 2702 aesni_mb_probe(struct rte_vdev_device *vdev) 2703 { 2704 return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB); 2705 } 2706 2707 static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = { 2708 .probe = aesni_mb_probe, 2709 .remove = ipsec_mb_remove 2710 }; 2711 2712 static struct cryptodev_driver aesni_mb_crypto_drv; 2713 2714 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, 2715 cryptodev_aesni_mb_pmd_drv); 2716 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd); 2717 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD, 2718 "max_nb_queue_pairs=<int> socket_id=<int>"); 2719 RTE_PMD_REGISTER_CRYPTO_DRIVER( 2720 aesni_mb_crypto_drv, 2721 cryptodev_aesni_mb_pmd_drv.driver, 2722 pmd_driver_id_aesni_mb); 2723 2724 /* Constructor function to register aesni-mb PMD */ 2725 RTE_INIT(ipsec_mb_register_aesni_mb) 2726 { 2727 struct ipsec_mb_internals *aesni_mb_data = 2728 &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB]; 2729 2730 aesni_mb_data->caps = aesni_mb_capabilities; 2731 aesni_mb_data->dequeue_burst = aesni_mb_dequeue_burst; 2732 aesni_mb_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 2733 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 2734 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | 2735 RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO | 2736 RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA | 2737 RTE_CRYPTODEV_FF_SYM_SESSIONLESS | 2738 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 2739 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 2740 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 2741 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 2742 RTE_CRYPTODEV_FF_SECURITY | 2743 RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED; 2744 2745 aesni_mb_data->internals_priv_size = 0; 2746 aesni_mb_data->ops = &aesni_mb_pmd_ops; 2747 aesni_mb_data->qp_priv_size = sizeof(struct aesni_mb_qp_data); 2748 aesni_mb_data->queue_pair_configure = NULL; 2749 aesni_mb_data->security_ops = &aesni_mb_pmd_sec_ops; 2750 aesni_mb_data->dev_config = aesni_mb_configure_dev; 2751 aesni_mb_data->session_configure = aesni_mb_session_configure; 2752 aesni_mb_data->session_priv_size = sizeof(struct aesni_mb_session); 2753 } 2754