1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2015-2021 Intel Corporation 3 */ 4 5 #include <unistd.h> 6 7 #include "pmd_aesni_mb_priv.h" 8 9 RTE_DEFINE_PER_LCORE(pid_t, pid); 10 11 uint8_t pmd_driver_id_aesni_mb; 12 13 struct aesni_mb_op_buf_data { 14 struct rte_mbuf *m; 15 uint32_t offset; 16 }; 17 18 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 19 /** 20 * Calculate the authentication pre-computes 21 * 22 * @param one_block_hash Function pointer 23 * to calculate digest on ipad/opad 24 * @param ipad Inner pad output byte array 25 * @param opad Outer pad output byte array 26 * @param hkey Authentication key 27 * @param hkey_len Authentication key length 28 * @param blocksize Block size of selected hash algo 29 */ 30 static void 31 calculate_auth_precomputes(hash_one_block_t one_block_hash, 32 uint8_t *ipad, uint8_t *opad, 33 const uint8_t *hkey, uint16_t hkey_len, 34 uint16_t blocksize) 35 { 36 uint32_t i, length; 37 38 alignas(16) uint8_t ipad_buf[blocksize]; 39 alignas(16) uint8_t opad_buf[blocksize]; 40 41 /* Setup inner and outer pads */ 42 memset(ipad_buf, HMAC_IPAD_VALUE, blocksize); 43 memset(opad_buf, HMAC_OPAD_VALUE, blocksize); 44 45 /* XOR hash key with inner and outer pads */ 46 length = hkey_len > blocksize ? blocksize : hkey_len; 47 48 for (i = 0; i < length; i++) { 49 ipad_buf[i] ^= hkey[i]; 50 opad_buf[i] ^= hkey[i]; 51 } 52 53 /* Compute partial hashes */ 54 (*one_block_hash)(ipad_buf, ipad); 55 (*one_block_hash)(opad_buf, opad); 56 57 /* Clean up stack */ 58 memset(ipad_buf, 0, blocksize); 59 memset(opad_buf, 0, blocksize); 60 } 61 #endif 62 63 static inline int 64 is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode) 65 { 66 return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 || 67 hash_alg == IMB_AUTH_AES_CCM || 68 cipher_mode == IMB_CIPHER_GCM); 69 } 70 71 /** Set session authentication parameters */ 72 static int 73 aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, 74 struct aesni_mb_session *sess, 75 const struct rte_crypto_sym_xform *xform) 76 { 77 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 78 hash_one_block_t hash_oneblock_fn = NULL; 79 unsigned int key_larger_block_size = 0; 80 #endif 81 uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 }; 82 uint32_t auth_precompute = 1; 83 84 if (xform == NULL) { 85 sess->template_job.hash_alg = IMB_AUTH_NULL; 86 return 0; 87 } 88 89 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) { 90 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type auth"); 91 return -1; 92 } 93 94 /* Set IV parameters */ 95 sess->auth_iv.offset = xform->auth.iv.offset; 96 97 /* Set the request digest size */ 98 sess->auth.req_digest_len = xform->auth.digest_length; 99 100 /* Select auth generate/verify */ 101 sess->auth.operation = xform->auth.op; 102 103 /* Set Authentication Parameters */ 104 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) { 105 sess->template_job.hash_alg = IMB_AUTH_NULL; 106 sess->template_job.auth_tag_output_len_in_bytes = 0; 107 return 0; 108 } 109 110 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) { 111 sess->template_job.hash_alg = IMB_AUTH_AES_XCBC; 112 113 uint16_t xcbc_mac_digest_len = 114 get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC); 115 if (sess->auth.req_digest_len != xcbc_mac_digest_len) { 116 IPSEC_MB_LOG(ERR, "Invalid digest size"); 117 return -EINVAL; 118 } 119 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 120 121 IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data, 122 sess->auth.xcbc.k1_expanded, 123 sess->auth.xcbc.k2, sess->auth.xcbc.k3); 124 sess->template_job.u.XCBC._k1_expanded = sess->auth.xcbc.k1_expanded; 125 sess->template_job.u.XCBC._k2 = sess->auth.xcbc.k2; 126 sess->template_job.u.XCBC._k3 = sess->auth.xcbc.k3; 127 return 0; 128 } 129 130 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) { 131 uint32_t dust[4*15]; 132 133 sess->template_job.hash_alg = IMB_AUTH_AES_CMAC; 134 135 uint16_t cmac_digest_len = 136 get_digest_byte_length(IMB_AUTH_AES_CMAC); 137 138 if (sess->auth.req_digest_len > cmac_digest_len) { 139 IPSEC_MB_LOG(ERR, "Invalid digest size"); 140 return -EINVAL; 141 } 142 /* 143 * Multi-buffer lib supports digest sizes from 4 to 16 bytes 144 * in version 0.50 and sizes of 12 and 16 bytes, 145 * in version 0.49. 146 * If size requested is different, generate the full digest 147 * (16 bytes) in a temporary location and then memcpy 148 * the requested number of bytes. 149 */ 150 if (sess->auth.req_digest_len < 4) 151 sess->template_job.auth_tag_output_len_in_bytes = cmac_digest_len; 152 else 153 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 154 155 IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data, 156 sess->auth.cmac.expkey, dust); 157 IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey, 158 sess->auth.cmac.skey1, sess->auth.cmac.skey2); 159 sess->template_job.u.CMAC._key_expanded = sess->auth.cmac.expkey; 160 sess->template_job.u.CMAC._skey1 = sess->auth.cmac.skey1; 161 sess->template_job.u.CMAC._skey2 = sess->auth.cmac.skey2; 162 return 0; 163 } 164 165 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { 166 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) { 167 sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; 168 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 169 } else 170 sess->template_job.cipher_direction = IMB_DIR_DECRYPT; 171 172 if (sess->auth.req_digest_len > 173 get_digest_byte_length(IMB_AUTH_AES_GMAC)) { 174 IPSEC_MB_LOG(ERR, "Invalid digest size"); 175 return -EINVAL; 176 } 177 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 178 sess->template_job.u.GMAC.iv_len_in_bytes = xform->auth.iv.length; 179 sess->iv.offset = xform->auth.iv.offset; 180 181 switch (xform->auth.key.length) { 182 case IMB_KEY_128_BYTES: 183 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_128; 184 IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data, 185 &sess->cipher.gcm_key); 186 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 187 break; 188 case IMB_KEY_192_BYTES: 189 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_192; 190 IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data, 191 &sess->cipher.gcm_key); 192 sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; 193 break; 194 case IMB_KEY_256_BYTES: 195 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_256; 196 IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data, 197 &sess->cipher.gcm_key); 198 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 199 break; 200 default: 201 IPSEC_MB_LOG(ERR, "Invalid authentication key length"); 202 return -EINVAL; 203 } 204 sess->template_job.u.GMAC._key = &sess->cipher.gcm_key; 205 206 return 0; 207 } 208 209 if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) { 210 if (xform->auth.key.length == 16) { 211 sess->template_job.hash_alg = IMB_AUTH_ZUC_EIA3_BITLEN; 212 213 if (sess->auth.req_digest_len != 4) { 214 IPSEC_MB_LOG(ERR, "Invalid digest size"); 215 return -EINVAL; 216 } 217 } else if (xform->auth.key.length == 32) { 218 sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN; 219 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 220 if (sess->auth.req_digest_len != 4 && 221 sess->auth.req_digest_len != 8 && 222 sess->auth.req_digest_len != 16) { 223 #else 224 if (sess->auth.req_digest_len != 4) { 225 #endif 226 IPSEC_MB_LOG(ERR, "Invalid digest size"); 227 return -EINVAL; 228 } 229 } else { 230 IPSEC_MB_LOG(ERR, "Invalid authentication key length"); 231 return -EINVAL; 232 } 233 234 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 235 236 memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 237 xform->auth.key.length); 238 sess->template_job.u.ZUC_EIA3._key = sess->auth.zuc_auth_key; 239 return 0; 240 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 241 sess->template_job.hash_alg = IMB_AUTH_SNOW3G_UIA2_BITLEN; 242 uint16_t snow3g_uia2_digest_len = 243 get_truncated_digest_byte_length( 244 IMB_AUTH_SNOW3G_UIA2_BITLEN); 245 if (sess->auth.req_digest_len != snow3g_uia2_digest_len) { 246 IPSEC_MB_LOG(ERR, "Invalid digest size"); 247 return -EINVAL; 248 } 249 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 250 251 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data, 252 &sess->auth.pKeySched_snow3g_auth); 253 sess->template_job.u.SNOW3G_UIA2._key = (void *) 254 &sess->auth.pKeySched_snow3g_auth; 255 return 0; 256 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) { 257 sess->template_job.hash_alg = IMB_AUTH_KASUMI_UIA1; 258 uint16_t kasumi_f9_digest_len = 259 get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1); 260 if (sess->auth.req_digest_len != kasumi_f9_digest_len) { 261 IPSEC_MB_LOG(ERR, "Invalid digest size"); 262 return -EINVAL; 263 } 264 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 265 266 IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data, 267 &sess->auth.pKeySched_kasumi_auth); 268 sess->template_job.u.KASUMI_UIA1._key = (void *) 269 &sess->auth.pKeySched_kasumi_auth; 270 return 0; 271 } 272 273 switch (xform->auth.algo) { 274 case RTE_CRYPTO_AUTH_MD5_HMAC: 275 sess->template_job.hash_alg = IMB_AUTH_MD5; 276 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 277 hash_oneblock_fn = mb_mgr->md5_one_block; 278 #endif 279 break; 280 case RTE_CRYPTO_AUTH_SHA1_HMAC: 281 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_1; 282 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 283 hash_oneblock_fn = mb_mgr->sha1_one_block; 284 #endif 285 if (xform->auth.key.length > get_auth_algo_blocksize( 286 IMB_AUTH_HMAC_SHA_1)) { 287 IMB_SHA1(mb_mgr, 288 xform->auth.key.data, 289 xform->auth.key.length, 290 hashed_key); 291 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 292 key_larger_block_size = 1; 293 #endif 294 } 295 break; 296 case RTE_CRYPTO_AUTH_SHA1: 297 sess->template_job.hash_alg = IMB_AUTH_SHA_1; 298 auth_precompute = 0; 299 break; 300 case RTE_CRYPTO_AUTH_SHA224_HMAC: 301 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_224; 302 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 303 hash_oneblock_fn = mb_mgr->sha224_one_block; 304 #endif 305 if (xform->auth.key.length > get_auth_algo_blocksize( 306 IMB_AUTH_HMAC_SHA_224)) { 307 IMB_SHA224(mb_mgr, 308 xform->auth.key.data, 309 xform->auth.key.length, 310 hashed_key); 311 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 312 key_larger_block_size = 1; 313 #endif 314 } 315 break; 316 case RTE_CRYPTO_AUTH_SHA224: 317 sess->template_job.hash_alg = IMB_AUTH_SHA_224; 318 auth_precompute = 0; 319 break; 320 case RTE_CRYPTO_AUTH_SHA256_HMAC: 321 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_256; 322 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 323 hash_oneblock_fn = mb_mgr->sha256_one_block; 324 #endif 325 if (xform->auth.key.length > get_auth_algo_blocksize( 326 IMB_AUTH_HMAC_SHA_256)) { 327 IMB_SHA256(mb_mgr, 328 xform->auth.key.data, 329 xform->auth.key.length, 330 hashed_key); 331 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 332 key_larger_block_size = 1; 333 #endif 334 } 335 break; 336 case RTE_CRYPTO_AUTH_SHA256: 337 sess->template_job.hash_alg = IMB_AUTH_SHA_256; 338 auth_precompute = 0; 339 break; 340 case RTE_CRYPTO_AUTH_SHA384_HMAC: 341 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_384; 342 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 343 hash_oneblock_fn = mb_mgr->sha384_one_block; 344 #endif 345 if (xform->auth.key.length > get_auth_algo_blocksize( 346 IMB_AUTH_HMAC_SHA_384)) { 347 IMB_SHA384(mb_mgr, 348 xform->auth.key.data, 349 xform->auth.key.length, 350 hashed_key); 351 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 352 key_larger_block_size = 1; 353 #endif 354 } 355 break; 356 case RTE_CRYPTO_AUTH_SHA384: 357 sess->template_job.hash_alg = IMB_AUTH_SHA_384; 358 auth_precompute = 0; 359 break; 360 case RTE_CRYPTO_AUTH_SHA512_HMAC: 361 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_512; 362 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 363 hash_oneblock_fn = mb_mgr->sha512_one_block; 364 #endif 365 if (xform->auth.key.length > get_auth_algo_blocksize( 366 IMB_AUTH_HMAC_SHA_512)) { 367 IMB_SHA512(mb_mgr, 368 xform->auth.key.data, 369 xform->auth.key.length, 370 hashed_key); 371 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 372 key_larger_block_size = 1; 373 #endif 374 } 375 break; 376 case RTE_CRYPTO_AUTH_SHA512: 377 sess->template_job.hash_alg = IMB_AUTH_SHA_512; 378 auth_precompute = 0; 379 break; 380 default: 381 IPSEC_MB_LOG(ERR, 382 "Unsupported authentication algorithm selection"); 383 return -ENOTSUP; 384 } 385 uint16_t trunc_digest_size = 386 get_truncated_digest_byte_length(sess->template_job.hash_alg); 387 uint16_t full_digest_size = 388 get_digest_byte_length(sess->template_job.hash_alg); 389 390 if (sess->auth.req_digest_len > full_digest_size || 391 sess->auth.req_digest_len == 0) { 392 IPSEC_MB_LOG(ERR, "Invalid digest size"); 393 return -EINVAL; 394 } 395 396 if (sess->auth.req_digest_len != trunc_digest_size && 397 sess->auth.req_digest_len != full_digest_size) 398 sess->template_job.auth_tag_output_len_in_bytes = full_digest_size; 399 else 400 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 401 402 /* Plain SHA does not require precompute key */ 403 if (auth_precompute == 0) 404 return 0; 405 406 /* Calculate Authentication precomputes */ 407 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 408 imb_hmac_ipad_opad(mb_mgr, sess->template_job.hash_alg, 409 xform->auth.key.data, xform->auth.key.length, 410 sess->auth.pads.inner, sess->auth.pads.outer); 411 #else 412 if (key_larger_block_size) { 413 calculate_auth_precomputes(hash_oneblock_fn, 414 sess->auth.pads.inner, sess->auth.pads.outer, 415 hashed_key, 416 xform->auth.key.length, 417 get_auth_algo_blocksize(sess->template_job.hash_alg)); 418 } else { 419 calculate_auth_precomputes(hash_oneblock_fn, 420 sess->auth.pads.inner, sess->auth.pads.outer, 421 xform->auth.key.data, 422 xform->auth.key.length, 423 get_auth_algo_blocksize(sess->template_job.hash_alg)); 424 } 425 #endif 426 sess->template_job.u.HMAC._hashed_auth_key_xor_ipad = 427 sess->auth.pads.inner; 428 sess->template_job.u.HMAC._hashed_auth_key_xor_opad = 429 sess->auth.pads.outer; 430 431 return 0; 432 } 433 434 /** Set session cipher parameters */ 435 static int 436 aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, 437 struct aesni_mb_session *sess, 438 const struct rte_crypto_sym_xform *xform) 439 { 440 uint8_t is_aes = 0; 441 uint8_t is_3DES = 0; 442 uint8_t is_docsis = 0; 443 uint8_t is_zuc = 0; 444 uint8_t is_snow3g = 0; 445 uint8_t is_kasumi = 0; 446 447 if (xform == NULL) { 448 sess->template_job.cipher_mode = IMB_CIPHER_NULL; 449 return 0; 450 } 451 452 if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { 453 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type cipher"); 454 return -EINVAL; 455 } 456 457 /* Select cipher direction */ 458 switch (xform->cipher.op) { 459 case RTE_CRYPTO_CIPHER_OP_ENCRYPT: 460 sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; 461 break; 462 case RTE_CRYPTO_CIPHER_OP_DECRYPT: 463 sess->template_job.cipher_direction = IMB_DIR_DECRYPT; 464 break; 465 default: 466 IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter"); 467 return -EINVAL; 468 } 469 470 /* Select cipher mode */ 471 switch (xform->cipher.algo) { 472 case RTE_CRYPTO_CIPHER_AES_CBC: 473 sess->template_job.cipher_mode = IMB_CIPHER_CBC; 474 is_aes = 1; 475 break; 476 case RTE_CRYPTO_CIPHER_AES_CTR: 477 sess->template_job.cipher_mode = IMB_CIPHER_CNTR; 478 is_aes = 1; 479 break; 480 case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: 481 sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_SEC_BPI; 482 is_docsis = 1; 483 break; 484 case RTE_CRYPTO_CIPHER_DES_CBC: 485 sess->template_job.cipher_mode = IMB_CIPHER_DES; 486 break; 487 case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: 488 sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_DES; 489 break; 490 case RTE_CRYPTO_CIPHER_3DES_CBC: 491 sess->template_job.cipher_mode = IMB_CIPHER_DES3; 492 is_3DES = 1; 493 break; 494 case RTE_CRYPTO_CIPHER_AES_ECB: 495 sess->template_job.cipher_mode = IMB_CIPHER_ECB; 496 is_aes = 1; 497 break; 498 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 499 sess->template_job.cipher_mode = IMB_CIPHER_ZUC_EEA3; 500 is_zuc = 1; 501 break; 502 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 503 sess->template_job.cipher_mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN; 504 is_snow3g = 1; 505 break; 506 case RTE_CRYPTO_CIPHER_KASUMI_F8: 507 sess->template_job.cipher_mode = IMB_CIPHER_KASUMI_UEA1_BITLEN; 508 is_kasumi = 1; 509 break; 510 case RTE_CRYPTO_CIPHER_NULL: 511 sess->template_job.cipher_mode = IMB_CIPHER_NULL; 512 sess->template_job.key_len_in_bytes = 0; 513 sess->iv.offset = xform->cipher.iv.offset; 514 sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; 515 return 0; 516 default: 517 IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter"); 518 return -ENOTSUP; 519 } 520 521 /* Set IV parameters */ 522 sess->iv.offset = xform->cipher.iv.offset; 523 sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; 524 525 /* Check key length and choose key expansion function for AES */ 526 if (is_aes) { 527 switch (xform->cipher.key.length) { 528 case IMB_KEY_128_BYTES: 529 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 530 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, 531 sess->cipher.expanded_aes_keys.encode, 532 sess->cipher.expanded_aes_keys.decode); 533 break; 534 case IMB_KEY_192_BYTES: 535 sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; 536 IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data, 537 sess->cipher.expanded_aes_keys.encode, 538 sess->cipher.expanded_aes_keys.decode); 539 break; 540 case IMB_KEY_256_BYTES: 541 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 542 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, 543 sess->cipher.expanded_aes_keys.encode, 544 sess->cipher.expanded_aes_keys.decode); 545 break; 546 default: 547 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 548 return -EINVAL; 549 } 550 551 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 552 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 553 } else if (is_docsis) { 554 switch (xform->cipher.key.length) { 555 case IMB_KEY_128_BYTES: 556 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 557 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, 558 sess->cipher.expanded_aes_keys.encode, 559 sess->cipher.expanded_aes_keys.decode); 560 break; 561 case IMB_KEY_256_BYTES: 562 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 563 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, 564 sess->cipher.expanded_aes_keys.encode, 565 sess->cipher.expanded_aes_keys.decode); 566 break; 567 default: 568 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 569 return -EINVAL; 570 } 571 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 572 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 573 } else if (is_3DES) { 574 uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0], 575 sess->cipher.exp_3des_keys.key[1], 576 sess->cipher.exp_3des_keys.key[2]}; 577 578 switch (xform->cipher.key.length) { 579 case 24: 580 IMB_DES_KEYSCHED(mb_mgr, keys[0], 581 xform->cipher.key.data); 582 IMB_DES_KEYSCHED(mb_mgr, keys[1], 583 xform->cipher.key.data + 8); 584 IMB_DES_KEYSCHED(mb_mgr, keys[2], 585 xform->cipher.key.data + 16); 586 587 /* Initialize keys - 24 bytes: [K1-K2-K3] */ 588 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; 589 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; 590 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2]; 591 break; 592 case 16: 593 IMB_DES_KEYSCHED(mb_mgr, keys[0], 594 xform->cipher.key.data); 595 IMB_DES_KEYSCHED(mb_mgr, keys[1], 596 xform->cipher.key.data + 8); 597 /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */ 598 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; 599 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; 600 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; 601 break; 602 case 8: 603 IMB_DES_KEYSCHED(mb_mgr, keys[0], 604 xform->cipher.key.data); 605 606 /* Initialize keys - 8 bytes: [K1 = K2 = K3] */ 607 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; 608 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0]; 609 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; 610 break; 611 default: 612 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 613 return -EINVAL; 614 } 615 616 sess->template_job.enc_keys = sess->cipher.exp_3des_keys.ks_ptr; 617 sess->template_job.dec_keys = sess->cipher.exp_3des_keys.ks_ptr; 618 sess->template_job.key_len_in_bytes = 24; 619 } else if (is_zuc) { 620 if (xform->cipher.key.length != 16 && 621 xform->cipher.key.length != 32) { 622 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 623 return -EINVAL; 624 } 625 sess->template_job.key_len_in_bytes = xform->cipher.key.length; 626 memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data, 627 xform->cipher.key.length); 628 sess->template_job.enc_keys = sess->cipher.zuc_cipher_key; 629 sess->template_job.dec_keys = sess->cipher.zuc_cipher_key; 630 } else if (is_snow3g) { 631 if (xform->cipher.key.length != 16) { 632 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 633 return -EINVAL; 634 } 635 sess->template_job.key_len_in_bytes = 16; 636 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data, 637 &sess->cipher.pKeySched_snow3g_cipher); 638 sess->template_job.enc_keys = &sess->cipher.pKeySched_snow3g_cipher; 639 sess->template_job.dec_keys = &sess->cipher.pKeySched_snow3g_cipher; 640 } else if (is_kasumi) { 641 if (xform->cipher.key.length != 16) { 642 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 643 return -EINVAL; 644 } 645 sess->template_job.key_len_in_bytes = 16; 646 IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data, 647 &sess->cipher.pKeySched_kasumi_cipher); 648 sess->template_job.enc_keys = &sess->cipher.pKeySched_kasumi_cipher; 649 sess->template_job.dec_keys = &sess->cipher.pKeySched_kasumi_cipher; 650 } else { 651 if (xform->cipher.key.length != 8) { 652 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 653 return -EINVAL; 654 } 655 sess->template_job.key_len_in_bytes = 8; 656 657 IMB_DES_KEYSCHED(mb_mgr, 658 (uint64_t *)sess->cipher.expanded_aes_keys.encode, 659 xform->cipher.key.data); 660 IMB_DES_KEYSCHED(mb_mgr, 661 (uint64_t *)sess->cipher.expanded_aes_keys.decode, 662 xform->cipher.key.data); 663 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 664 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 665 } 666 667 return 0; 668 } 669 670 static int 671 aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, 672 struct aesni_mb_session *sess, 673 const struct rte_crypto_sym_xform *xform) 674 { 675 switch (xform->aead.op) { 676 case RTE_CRYPTO_AEAD_OP_ENCRYPT: 677 sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; 678 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; 679 break; 680 case RTE_CRYPTO_AEAD_OP_DECRYPT: 681 sess->template_job.cipher_direction = IMB_DIR_DECRYPT; 682 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; 683 break; 684 default: 685 IPSEC_MB_LOG(ERR, "Invalid aead operation parameter"); 686 return -EINVAL; 687 } 688 689 /* Set IV parameters */ 690 sess->iv.offset = xform->aead.iv.offset; 691 sess->template_job.iv_len_in_bytes = xform->aead.iv.length; 692 693 /* Set digest sizes */ 694 sess->auth.req_digest_len = xform->aead.digest_length; 695 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 696 697 switch (xform->aead.algo) { 698 case RTE_CRYPTO_AEAD_AES_CCM: 699 sess->template_job.cipher_mode = IMB_CIPHER_CCM; 700 sess->template_job.hash_alg = IMB_AUTH_AES_CCM; 701 sess->template_job.u.CCM.aad_len_in_bytes = xform->aead.aad_length; 702 703 /* Check key length and choose key expansion function for AES */ 704 switch (xform->aead.key.length) { 705 case IMB_KEY_128_BYTES: 706 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 707 IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data, 708 sess->cipher.expanded_aes_keys.encode, 709 sess->cipher.expanded_aes_keys.decode); 710 break; 711 case IMB_KEY_256_BYTES: 712 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 713 IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data, 714 sess->cipher.expanded_aes_keys.encode, 715 sess->cipher.expanded_aes_keys.decode); 716 break; 717 default: 718 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 719 return -EINVAL; 720 } 721 722 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 723 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 724 /* CCM digests must be between 4 and 16 and an even number */ 725 if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN || 726 sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN || 727 (sess->auth.req_digest_len & 1) == 1) { 728 IPSEC_MB_LOG(ERR, "Invalid digest size"); 729 return -EINVAL; 730 } 731 break; 732 733 case RTE_CRYPTO_AEAD_AES_GCM: 734 sess->template_job.cipher_mode = IMB_CIPHER_GCM; 735 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC; 736 sess->template_job.u.GCM.aad_len_in_bytes = xform->aead.aad_length; 737 738 switch (xform->aead.key.length) { 739 case IMB_KEY_128_BYTES: 740 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 741 IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data, 742 &sess->cipher.gcm_key); 743 break; 744 case IMB_KEY_192_BYTES: 745 sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; 746 IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data, 747 &sess->cipher.gcm_key); 748 break; 749 case IMB_KEY_256_BYTES: 750 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 751 IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data, 752 &sess->cipher.gcm_key); 753 break; 754 default: 755 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 756 return -EINVAL; 757 } 758 759 sess->template_job.enc_keys = &sess->cipher.gcm_key; 760 sess->template_job.dec_keys = &sess->cipher.gcm_key; 761 /* GCM digest size must be between 1 and 16 */ 762 if (sess->auth.req_digest_len == 0 || 763 sess->auth.req_digest_len > 16) { 764 IPSEC_MB_LOG(ERR, "Invalid digest size"); 765 return -EINVAL; 766 } 767 break; 768 769 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305: 770 sess->template_job.cipher_mode = IMB_CIPHER_CHACHA20_POLY1305; 771 sess->template_job.hash_alg = IMB_AUTH_CHACHA20_POLY1305; 772 sess->template_job.u.CHACHA20_POLY1305.aad_len_in_bytes = 773 xform->aead.aad_length; 774 775 if (xform->aead.key.length != 32) { 776 IPSEC_MB_LOG(ERR, "Invalid key length"); 777 return -EINVAL; 778 } 779 sess->template_job.key_len_in_bytes = 32; 780 memcpy(sess->cipher.expanded_aes_keys.encode, 781 xform->aead.key.data, 32); 782 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 783 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 784 if (sess->auth.req_digest_len != 16) { 785 IPSEC_MB_LOG(ERR, "Invalid digest size"); 786 return -EINVAL; 787 } 788 break; 789 default: 790 IPSEC_MB_LOG(ERR, "Unsupported aead mode parameter"); 791 return -ENOTSUP; 792 } 793 794 return 0; 795 } 796 797 /** Configure a aesni multi-buffer session from a crypto xform chain */ 798 int 799 aesni_mb_session_configure(IMB_MGR *mb_mgr, 800 void *priv_sess, 801 const struct rte_crypto_sym_xform *xform) 802 { 803 const struct rte_crypto_sym_xform *auth_xform = NULL; 804 const struct rte_crypto_sym_xform *cipher_xform = NULL; 805 const struct rte_crypto_sym_xform *aead_xform = NULL; 806 enum ipsec_mb_operation mode; 807 struct aesni_mb_session *sess = (struct aesni_mb_session *) priv_sess; 808 int ret; 809 810 ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform, 811 &cipher_xform, &aead_xform); 812 if (ret) 813 return ret; 814 815 /* Select Crypto operation - hash then cipher / cipher then hash */ 816 switch (mode) { 817 case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT: 818 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 819 break; 820 case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN: 821 case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY: 822 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 823 break; 824 case IPSEC_MB_OP_HASH_GEN_ONLY: 825 case IPSEC_MB_OP_HASH_VERIFY_ONLY: 826 case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT: 827 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 828 break; 829 /* 830 * Multi buffer library operates only at two modes, 831 * IMB_ORDER_CIPHER_HASH and IMB_ORDER_HASH_CIPHER. 832 * When doing ciphering only, chain order depends 833 * on cipher operation: encryption is always 834 * the first operation and decryption the last one. 835 */ 836 case IPSEC_MB_OP_ENCRYPT_ONLY: 837 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 838 break; 839 case IPSEC_MB_OP_DECRYPT_ONLY: 840 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 841 break; 842 case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT: 843 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 844 break; 845 case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT: 846 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 847 break; 848 case IPSEC_MB_OP_NOT_SUPPORTED: 849 default: 850 IPSEC_MB_LOG(ERR, 851 "Unsupported operation chain order parameter"); 852 return -ENOTSUP; 853 } 854 855 /* Default IV length = 0 */ 856 sess->template_job.iv_len_in_bytes = 0; 857 858 ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform); 859 if (ret != 0) { 860 IPSEC_MB_LOG(ERR, 861 "Invalid/unsupported authentication parameters"); 862 return ret; 863 } 864 865 ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess, 866 cipher_xform); 867 if (ret != 0) { 868 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters"); 869 return ret; 870 } 871 872 if (aead_xform) { 873 ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess, 874 aead_xform); 875 if (ret != 0) { 876 IPSEC_MB_LOG(ERR, 877 "Invalid/unsupported aead parameters"); 878 return ret; 879 } 880 } 881 882 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 883 sess->session_id = imb_set_session(mb_mgr, &sess->template_job); 884 sess->pid = getpid(); 885 RTE_PER_LCORE(pid) = sess->pid; 886 #endif 887 888 return 0; 889 } 890 891 /** Check DOCSIS security session configuration is valid */ 892 static int 893 check_docsis_sec_session(struct rte_security_session_conf *conf) 894 { 895 struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform; 896 struct rte_security_docsis_xform *docsis = &conf->docsis; 897 898 /* Downlink: CRC generate -> Cipher encrypt */ 899 if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) { 900 901 if (crypto_sym != NULL && 902 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 903 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT && 904 crypto_sym->cipher.algo == 905 RTE_CRYPTO_CIPHER_AES_DOCSISBPI && 906 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES || 907 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) && 908 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE && 909 crypto_sym->next == NULL) { 910 return 0; 911 } 912 /* Uplink: Cipher decrypt -> CRC verify */ 913 } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) { 914 915 if (crypto_sym != NULL && 916 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 917 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT && 918 crypto_sym->cipher.algo == 919 RTE_CRYPTO_CIPHER_AES_DOCSISBPI && 920 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES || 921 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) && 922 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE && 923 crypto_sym->next == NULL) { 924 return 0; 925 } 926 } 927 928 return -EINVAL; 929 } 930 931 /** Set DOCSIS security session auth (CRC) parameters */ 932 static int 933 aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess, 934 struct rte_security_docsis_xform *xform) 935 { 936 if (xform == NULL) { 937 IPSEC_MB_LOG(ERR, "Invalid DOCSIS xform"); 938 return -EINVAL; 939 } 940 941 /* Select CRC generate/verify */ 942 if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) { 943 sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; 944 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; 945 } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) { 946 sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; 947 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; 948 } else { 949 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction"); 950 return -ENOTSUP; 951 } 952 953 sess->auth.req_digest_len = RTE_ETHER_CRC_LEN; 954 sess->template_job.auth_tag_output_len_in_bytes = RTE_ETHER_CRC_LEN; 955 956 return 0; 957 } 958 959 /** 960 * Parse DOCSIS security session configuration and set private session 961 * parameters 962 */ 963 static int 964 aesni_mb_set_docsis_sec_session_parameters( 965 __rte_unused struct rte_cryptodev *dev, 966 struct rte_security_session_conf *conf, 967 void *sess) 968 { 969 IMB_MGR *mb_mgr = alloc_init_mb_mgr(); 970 struct rte_security_docsis_xform *docsis_xform; 971 struct rte_crypto_sym_xform *cipher_xform; 972 struct aesni_mb_session *ipsec_sess = sess; 973 int ret = 0; 974 975 if (!mb_mgr) 976 return -ENOMEM; 977 978 ret = check_docsis_sec_session(conf); 979 if (ret) { 980 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration"); 981 goto error_exit; 982 } 983 984 switch (conf->docsis.direction) { 985 case RTE_SECURITY_DOCSIS_UPLINK: 986 ipsec_sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 987 docsis_xform = &conf->docsis; 988 cipher_xform = conf->crypto_xform; 989 break; 990 case RTE_SECURITY_DOCSIS_DOWNLINK: 991 ipsec_sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 992 cipher_xform = conf->crypto_xform; 993 docsis_xform = &conf->docsis; 994 break; 995 default: 996 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration"); 997 ret = -EINVAL; 998 goto error_exit; 999 } 1000 1001 /* Default IV length = 0 */ 1002 ipsec_sess->template_job.iv_len_in_bytes = 0; 1003 1004 ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess, 1005 docsis_xform); 1006 if (ret != 0) { 1007 IPSEC_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters"); 1008 goto error_exit; 1009 } 1010 1011 ret = aesni_mb_set_session_cipher_parameters(mb_mgr, 1012 ipsec_sess, cipher_xform); 1013 1014 if (ret != 0) { 1015 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters"); 1016 goto error_exit; 1017 } 1018 1019 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1020 ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job); 1021 #endif 1022 1023 error_exit: 1024 free_mb_mgr(mb_mgr); 1025 return ret; 1026 } 1027 1028 static inline uint64_t 1029 auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session, 1030 uint32_t oop, const uint32_t auth_offset, 1031 const uint32_t cipher_offset, const uint32_t auth_length, 1032 const uint32_t cipher_length, uint8_t lb_sgl) 1033 { 1034 struct rte_mbuf *m_src, *m_dst; 1035 uint8_t *p_src, *p_dst; 1036 uintptr_t u_src, u_dst; 1037 uint32_t cipher_end, auth_end; 1038 1039 /* Only cipher then hash needs special calculation. */ 1040 if (!oop || session->template_job.chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl) 1041 return auth_offset; 1042 1043 m_src = op->sym->m_src; 1044 m_dst = op->sym->m_dst; 1045 1046 p_src = rte_pktmbuf_mtod(m_src, uint8_t *); 1047 p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *); 1048 u_src = (uintptr_t)p_src; 1049 u_dst = (uintptr_t)p_dst + auth_offset; 1050 1051 /** 1052 * Copy the content between cipher offset and auth offset for generating 1053 * correct digest. 1054 */ 1055 if (cipher_offset > auth_offset) 1056 memcpy(p_dst + auth_offset, 1057 p_src + auth_offset, 1058 cipher_offset - 1059 auth_offset); 1060 1061 /** 1062 * Copy the content between (cipher offset + length) and (auth offset + 1063 * length) for generating correct digest 1064 */ 1065 cipher_end = cipher_offset + cipher_length; 1066 auth_end = auth_offset + auth_length; 1067 if (cipher_end < auth_end) 1068 memcpy(p_dst + cipher_end, p_src + cipher_end, 1069 auth_end - cipher_end); 1070 1071 /** 1072 * Since intel-ipsec-mb only supports positive values, 1073 * we need to deduct the correct offset between src and dst. 1074 */ 1075 1076 return u_src < u_dst ? (u_dst - u_src) : 1077 (UINT64_MAX - u_src + u_dst + 1); 1078 } 1079 1080 static inline void 1081 set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, 1082 union rte_crypto_sym_ofs sofs, void *buf, uint32_t len, 1083 struct rte_crypto_va_iova_ptr *iv, 1084 struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata) 1085 { 1086 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1087 1088 /* Set authentication parameters */ 1089 job->iv = iv->va; 1090 1091 switch (job->hash_alg) { 1092 case IMB_AUTH_AES_CCM: 1093 job->u.CCM.aad = (uint8_t *)aad->va + 18; 1094 job->iv++; 1095 break; 1096 1097 case IMB_AUTH_AES_GMAC: 1098 job->u.GCM.aad = aad->va; 1099 break; 1100 1101 case IMB_AUTH_AES_GMAC_128: 1102 case IMB_AUTH_AES_GMAC_192: 1103 case IMB_AUTH_AES_GMAC_256: 1104 job->u.GMAC._iv = iv->va; 1105 break; 1106 1107 case IMB_AUTH_CHACHA20_POLY1305: 1108 job->u.CHACHA20_POLY1305.aad = aad->va; 1109 break; 1110 default: 1111 job->u.HMAC._hashed_auth_key_xor_ipad = 1112 session->auth.pads.inner; 1113 job->u.HMAC._hashed_auth_key_xor_opad = 1114 session->auth.pads.outer; 1115 } 1116 1117 /* 1118 * Multi-buffer library current only support returning a truncated 1119 * digest length as specified in the relevant IPsec RFCs 1120 */ 1121 1122 /* Set digest location and length */ 1123 job->auth_tag_output = digest; 1124 1125 /* Data Parameters */ 1126 job->src = buf; 1127 job->dst = (uint8_t *)buf + sofs.ofs.cipher.head; 1128 job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head; 1129 job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head; 1130 job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head - 1131 sofs.ofs.auth.tail; 1132 job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head - 1133 sofs.ofs.cipher.tail; 1134 1135 job->user_data = udata; 1136 } 1137 1138 static int 1139 handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr, 1140 uint32_t *total_len, 1141 struct aesni_mb_op_buf_data *src_data, 1142 struct aesni_mb_op_buf_data *dst_data) 1143 { 1144 uint32_t data_len, part_len; 1145 1146 if (*total_len == 0) { 1147 job->sgl_state = IMB_SGL_COMPLETE; 1148 return 0; 1149 } 1150 1151 if (src_data->m == NULL) { 1152 IPSEC_MB_LOG(ERR, "Invalid source buffer"); 1153 return -EINVAL; 1154 } 1155 1156 job->sgl_state = IMB_SGL_UPDATE; 1157 1158 data_len = src_data->m->data_len - src_data->offset; 1159 1160 job->src = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *, 1161 src_data->offset); 1162 1163 if (dst_data->m != NULL) { 1164 if (dst_data->m->data_len - dst_data->offset == 0) { 1165 dst_data->m = dst_data->m->next; 1166 if (dst_data->m == NULL) { 1167 IPSEC_MB_LOG(ERR, "Invalid destination buffer"); 1168 return -EINVAL; 1169 } 1170 dst_data->offset = 0; 1171 } 1172 part_len = RTE_MIN(data_len, (dst_data->m->data_len - 1173 dst_data->offset)); 1174 job->dst = rte_pktmbuf_mtod_offset(dst_data->m, 1175 uint8_t *, dst_data->offset); 1176 dst_data->offset += part_len; 1177 } else { 1178 part_len = RTE_MIN(data_len, *total_len); 1179 job->dst = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *, 1180 src_data->offset); 1181 } 1182 1183 job->msg_len_to_cipher_in_bytes = part_len; 1184 job->msg_len_to_hash_in_bytes = part_len; 1185 1186 job = IMB_SUBMIT_JOB(mb_mgr); 1187 1188 *total_len -= part_len; 1189 1190 if (part_len != data_len) { 1191 src_data->offset += part_len; 1192 } else { 1193 src_data->m = src_data->m->next; 1194 src_data->offset = 0; 1195 } 1196 1197 return 0; 1198 } 1199 1200 static uint64_t 1201 sgl_linear_cipher_auth_len(IMB_JOB *job, uint64_t *auth_len) 1202 { 1203 uint64_t cipher_len; 1204 1205 if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || 1206 job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) 1207 cipher_len = (job->msg_len_to_cipher_in_bits >> 3) + 1208 (job->cipher_start_src_offset_in_bits >> 3); 1209 else 1210 cipher_len = job->msg_len_to_cipher_in_bytes + 1211 job->cipher_start_src_offset_in_bytes; 1212 1213 if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || 1214 job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN) 1215 *auth_len = (job->msg_len_to_hash_in_bits >> 3) + 1216 job->hash_start_src_offset_in_bytes; 1217 else 1218 *auth_len = job->msg_len_to_hash_in_bytes + 1219 job->hash_start_src_offset_in_bytes; 1220 1221 return RTE_MAX(*auth_len, cipher_len); 1222 } 1223 1224 static int 1225 handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset, 1226 struct aesni_mb_session *session) 1227 { 1228 uint64_t auth_len, total_len; 1229 uint8_t *src, *linear_buf = NULL; 1230 int lb_offset = 0; 1231 struct rte_mbuf *src_seg; 1232 uint16_t src_len; 1233 1234 total_len = sgl_linear_cipher_auth_len(job, &auth_len); 1235 linear_buf = rte_zmalloc(NULL, total_len + job->auth_tag_output_len_in_bytes, 0); 1236 if (linear_buf == NULL) { 1237 IPSEC_MB_LOG(ERR, "Error allocating memory for SGL Linear Buffer"); 1238 return -1; 1239 } 1240 1241 for (src_seg = op->sym->m_src; (src_seg != NULL) && 1242 (total_len - lb_offset > 0); 1243 src_seg = src_seg->next) { 1244 src = rte_pktmbuf_mtod(src_seg, uint8_t *); 1245 src_len = RTE_MIN(src_seg->data_len, total_len - lb_offset); 1246 rte_memcpy(linear_buf + lb_offset, src, src_len); 1247 lb_offset += src_len; 1248 } 1249 1250 job->src = linear_buf; 1251 job->dst = linear_buf + dst_offset; 1252 job->user_data2 = linear_buf; 1253 1254 if (job->hash_alg == IMB_AUTH_AES_GMAC) 1255 job->u.GCM.aad = linear_buf; 1256 1257 if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) 1258 job->auth_tag_output = linear_buf + lb_offset; 1259 else 1260 job->auth_tag_output = linear_buf + auth_len; 1261 1262 return 0; 1263 } 1264 1265 static inline int 1266 imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg) 1267 { 1268 if (alg == IMB_CIPHER_CHACHA20_POLY1305 || 1269 alg == IMB_CIPHER_CHACHA20_POLY1305_SGL || 1270 alg == IMB_CIPHER_GCM_SGL || 1271 alg == IMB_CIPHER_GCM) 1272 return 1; 1273 return 0; 1274 } 1275 1276 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 1277 static inline int 1278 single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, 1279 int oop, uint32_t offset, struct rte_mbuf *m_src, 1280 struct rte_mbuf *m_dst, struct IMB_SGL_IOV *sgl_segs) 1281 { 1282 uint32_t num_segs = 0; 1283 struct aesni_mb_op_buf_data src_sgl = {0}; 1284 struct aesni_mb_op_buf_data dst_sgl = {0}; 1285 uint32_t total_len; 1286 1287 job->sgl_state = IMB_SGL_ALL; 1288 1289 src_sgl.m = m_src; 1290 src_sgl.offset = offset; 1291 1292 while (src_sgl.offset >= src_sgl.m->data_len) { 1293 src_sgl.offset -= src_sgl.m->data_len; 1294 src_sgl.m = src_sgl.m->next; 1295 1296 RTE_ASSERT(src_sgl.m != NULL); 1297 } 1298 1299 if (oop) { 1300 dst_sgl.m = m_dst; 1301 dst_sgl.offset = offset; 1302 1303 while (dst_sgl.offset >= dst_sgl.m->data_len) { 1304 dst_sgl.offset -= dst_sgl.m->data_len; 1305 dst_sgl.m = dst_sgl.m->next; 1306 1307 RTE_ASSERT(dst_sgl.m != NULL); 1308 } 1309 } 1310 total_len = op->sym->aead.data.length; 1311 1312 while (total_len != 0) { 1313 uint32_t data_len, part_len; 1314 1315 if (src_sgl.m == NULL) { 1316 IPSEC_MB_LOG(ERR, "Invalid source buffer"); 1317 return -EINVAL; 1318 } 1319 1320 data_len = src_sgl.m->data_len - src_sgl.offset; 1321 1322 sgl_segs[num_segs].in = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, 1323 src_sgl.offset); 1324 1325 if (dst_sgl.m != NULL) { 1326 if (dst_sgl.m->data_len - dst_sgl.offset == 0) { 1327 dst_sgl.m = dst_sgl.m->next; 1328 if (dst_sgl.m == NULL) { 1329 IPSEC_MB_LOG(ERR, "Invalid destination buffer"); 1330 return -EINVAL; 1331 } 1332 dst_sgl.offset = 0; 1333 } 1334 part_len = RTE_MIN(data_len, (dst_sgl.m->data_len - 1335 dst_sgl.offset)); 1336 sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(dst_sgl.m, 1337 uint8_t *, dst_sgl.offset); 1338 dst_sgl.offset += part_len; 1339 } else { 1340 part_len = RTE_MIN(data_len, total_len); 1341 sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, 1342 src_sgl.offset); 1343 } 1344 1345 sgl_segs[num_segs].len = part_len; 1346 1347 total_len -= part_len; 1348 1349 if (part_len != data_len) { 1350 src_sgl.offset += part_len; 1351 } else { 1352 src_sgl.m = src_sgl.m->next; 1353 src_sgl.offset = 0; 1354 } 1355 num_segs++; 1356 } 1357 job->num_sgl_io_segs = num_segs; 1358 job->sgl_io_segs = sgl_segs; 1359 return 0; 1360 } 1361 #endif 1362 1363 static inline int 1364 multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, 1365 int oop, uint32_t offset, struct rte_mbuf *m_src, 1366 struct rte_mbuf *m_dst, IMB_MGR *mb_mgr) 1367 { 1368 int ret; 1369 IMB_JOB base_job; 1370 struct aesni_mb_op_buf_data src_sgl = {0}; 1371 struct aesni_mb_op_buf_data dst_sgl = {0}; 1372 uint32_t total_len; 1373 1374 base_job = *job; 1375 job->sgl_state = IMB_SGL_INIT; 1376 job = IMB_SUBMIT_JOB(mb_mgr); 1377 total_len = op->sym->aead.data.length; 1378 1379 src_sgl.m = m_src; 1380 src_sgl.offset = offset; 1381 1382 while (src_sgl.offset >= src_sgl.m->data_len) { 1383 src_sgl.offset -= src_sgl.m->data_len; 1384 src_sgl.m = src_sgl.m->next; 1385 1386 RTE_ASSERT(src_sgl.m != NULL); 1387 } 1388 1389 if (oop) { 1390 dst_sgl.m = m_dst; 1391 dst_sgl.offset = offset; 1392 1393 while (dst_sgl.offset >= dst_sgl.m->data_len) { 1394 dst_sgl.offset -= dst_sgl.m->data_len; 1395 dst_sgl.m = dst_sgl.m->next; 1396 1397 RTE_ASSERT(dst_sgl.m != NULL); 1398 } 1399 } 1400 1401 while (job->sgl_state != IMB_SGL_COMPLETE) { 1402 job = IMB_GET_NEXT_JOB(mb_mgr); 1403 *job = base_job; 1404 ret = handle_aead_sgl_job(job, mb_mgr, &total_len, 1405 &src_sgl, &dst_sgl); 1406 if (ret < 0) 1407 return ret; 1408 } 1409 return 0; 1410 } 1411 1412 static inline int 1413 set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, 1414 struct aesni_mb_qp_data *qp_data, 1415 struct rte_crypto_op *op, uint8_t *digest_idx, 1416 const struct aesni_mb_session *session, 1417 struct rte_mbuf *m_src, struct rte_mbuf *m_dst, 1418 const int oop) 1419 { 1420 const uint32_t m_offset = op->sym->aead.data.offset; 1421 1422 job->u.GCM.aad = op->sym->aead.aad.data; 1423 if (sgl) { 1424 job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; 1425 job->cipher_mode = IMB_CIPHER_GCM_SGL; 1426 job->hash_alg = IMB_AUTH_GCM_SGL; 1427 job->hash_start_src_offset_in_bytes = 0; 1428 job->msg_len_to_hash_in_bytes = 0; 1429 job->msg_len_to_cipher_in_bytes = 0; 1430 job->cipher_start_src_offset_in_bytes = 0; 1431 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1432 imb_set_session(mb_mgr, job); 1433 #endif 1434 } else { 1435 job->hash_start_src_offset_in_bytes = 1436 op->sym->aead.data.offset; 1437 job->msg_len_to_hash_in_bytes = 1438 op->sym->aead.data.length; 1439 job->cipher_start_src_offset_in_bytes = 1440 op->sym->aead.data.offset; 1441 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; 1442 } 1443 1444 if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 1445 job->auth_tag_output = qp_data->temp_digests[*digest_idx]; 1446 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1447 } else { 1448 job->auth_tag_output = op->sym->aead.digest.data; 1449 } 1450 1451 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1452 session->iv.offset); 1453 1454 /* Set user data to be crypto operation data struct */ 1455 job->user_data = op; 1456 1457 if (sgl) { 1458 job->src = NULL; 1459 job->dst = NULL; 1460 1461 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 1462 if (m_src->nb_segs <= MAX_NUM_SEGS) 1463 return single_sgl_job(job, op, oop, 1464 m_offset, m_src, m_dst, 1465 qp_data->sgl_segs); 1466 else 1467 #endif 1468 return multi_sgl_job(job, op, oop, 1469 m_offset, m_src, m_dst, mb_mgr); 1470 } else { 1471 job->src = rte_pktmbuf_mtod(m_src, uint8_t *); 1472 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset); 1473 } 1474 1475 return 0; 1476 } 1477 1478 /** Check if conditions are met for digest-appended operations */ 1479 static uint8_t * 1480 aesni_mb_digest_appended_in_src(struct rte_crypto_op *op, IMB_JOB *job, 1481 uint32_t oop) 1482 { 1483 unsigned int auth_size, cipher_size; 1484 uint8_t *end_cipher; 1485 uint8_t *start_cipher; 1486 1487 if (job->cipher_mode == IMB_CIPHER_NULL) 1488 return NULL; 1489 1490 if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3 || 1491 job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || 1492 job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) { 1493 cipher_size = (op->sym->cipher.data.offset >> 3) + 1494 (op->sym->cipher.data.length >> 3); 1495 } else { 1496 cipher_size = (op->sym->cipher.data.offset) + 1497 (op->sym->cipher.data.length); 1498 } 1499 if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN || 1500 job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || 1501 job->hash_alg == IMB_AUTH_KASUMI_UIA1 || 1502 job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) { 1503 auth_size = (op->sym->auth.data.offset >> 3) + 1504 (op->sym->auth.data.length >> 3); 1505 } else { 1506 auth_size = (op->sym->auth.data.offset) + 1507 (op->sym->auth.data.length); 1508 } 1509 1510 if (!oop) { 1511 end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, cipher_size); 1512 start_cipher = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); 1513 } else { 1514 end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, cipher_size); 1515 start_cipher = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); 1516 } 1517 1518 if (start_cipher < op->sym->auth.digest.data && 1519 op->sym->auth.digest.data < end_cipher) { 1520 return rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, auth_size); 1521 } else { 1522 return NULL; 1523 } 1524 } 1525 1526 /** 1527 * Process a crypto operation and complete a IMB_JOB job structure for 1528 * submission to the multi buffer library for processing. 1529 * 1530 * @param qp queue pair 1531 * @param job IMB_JOB structure to fill 1532 * @param op crypto op to process 1533 * @param digest_idx ID for digest to use 1534 * 1535 * @return 1536 * - 0 on success, the IMB_JOB will be filled 1537 * - -1 if invalid session or errors allocating SGL linear buffer, 1538 * IMB_JOB will not be filled 1539 */ 1540 static inline int 1541 set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, 1542 struct rte_crypto_op *op, uint8_t *digest_idx, 1543 IMB_MGR *mb_mgr, pid_t pid) 1544 { 1545 struct rte_mbuf *m_src = op->sym->m_src, *m_dst; 1546 struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp); 1547 struct aesni_mb_session *session; 1548 uint32_t m_offset; 1549 int oop; 1550 uint32_t auth_off_in_bytes; 1551 uint32_t ciph_off_in_bytes; 1552 uint32_t auth_len_in_bytes; 1553 uint32_t ciph_len_in_bytes; 1554 uint8_t sgl = 0; 1555 uint8_t lb_sgl = 0; 1556 1557 #if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM 1558 (void) pid; 1559 #endif 1560 1561 session = ipsec_mb_get_session_private(qp, op); 1562 if (session == NULL) { 1563 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1564 return -1; 1565 } 1566 1567 const IMB_CIPHER_MODE cipher_mode = 1568 session->template_job.cipher_mode; 1569 1570 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1571 if (session->pid != pid) { 1572 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1573 imb_set_session(mb_mgr, job); 1574 } else if (job->session_id != session->session_id) 1575 #endif 1576 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1577 1578 if (!op->sym->m_dst) { 1579 /* in-place operation */ 1580 m_dst = m_src; 1581 oop = 0; 1582 } else if (op->sym->m_dst == op->sym->m_src) { 1583 /* in-place operation */ 1584 m_dst = m_src; 1585 oop = 0; 1586 } else { 1587 /* out-of-place operation */ 1588 m_dst = op->sym->m_dst; 1589 oop = 1; 1590 } 1591 1592 if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) { 1593 sgl = 1; 1594 if (!imb_lib_support_sgl_algo(cipher_mode)) 1595 lb_sgl = 1; 1596 } 1597 1598 if (cipher_mode == IMB_CIPHER_GCM) 1599 return set_gcm_job(mb_mgr, job, sgl, qp_data, 1600 op, digest_idx, session, m_src, m_dst, oop); 1601 1602 /* Set authentication parameters */ 1603 const int aead = is_aead_algo(job->hash_alg, cipher_mode); 1604 1605 switch (job->hash_alg) { 1606 case IMB_AUTH_AES_CCM: 1607 job->u.CCM.aad = op->sym->aead.aad.data + 18; 1608 break; 1609 1610 case IMB_AUTH_AES_GMAC: 1611 job->u.GCM.aad = op->sym->aead.aad.data; 1612 if (sgl) { 1613 job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; 1614 job->cipher_mode = IMB_CIPHER_GCM_SGL; 1615 job->hash_alg = IMB_AUTH_GCM_SGL; 1616 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1617 imb_set_session(mb_mgr, job); 1618 #endif 1619 } 1620 break; 1621 case IMB_AUTH_AES_GMAC_128: 1622 case IMB_AUTH_AES_GMAC_192: 1623 case IMB_AUTH_AES_GMAC_256: 1624 job->u.GMAC._iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1625 session->auth_iv.offset); 1626 break; 1627 case IMB_AUTH_ZUC_EIA3_BITLEN: 1628 case IMB_AUTH_ZUC256_EIA3_BITLEN: 1629 job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1630 session->auth_iv.offset); 1631 break; 1632 case IMB_AUTH_SNOW3G_UIA2_BITLEN: 1633 job->u.SNOW3G_UIA2._iv = 1634 rte_crypto_op_ctod_offset(op, uint8_t *, 1635 session->auth_iv.offset); 1636 break; 1637 case IMB_AUTH_CHACHA20_POLY1305: 1638 job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data; 1639 if (sgl) { 1640 job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx; 1641 job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; 1642 job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; 1643 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM 1644 imb_set_session(mb_mgr, job); 1645 #endif 1646 } 1647 break; 1648 default: 1649 break; 1650 } 1651 1652 if (aead) 1653 m_offset = op->sym->aead.data.offset; 1654 else 1655 m_offset = op->sym->cipher.data.offset; 1656 1657 if (cipher_mode == IMB_CIPHER_ZUC_EEA3) 1658 m_offset >>= 3; 1659 else if (cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) 1660 m_offset = 0; 1661 else if (cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) 1662 m_offset = 0; 1663 1664 /* Set digest output location */ 1665 if (job->hash_alg != IMB_AUTH_NULL && 1666 session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 1667 job->auth_tag_output = qp_data->temp_digests[*digest_idx]; 1668 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1669 } else { 1670 if (aead) 1671 job->auth_tag_output = op->sym->aead.digest.data; 1672 else { 1673 job->auth_tag_output = aesni_mb_digest_appended_in_src(op, job, oop); 1674 if (job->auth_tag_output == NULL) { 1675 job->auth_tag_output = op->sym->auth.digest.data; 1676 } 1677 } 1678 if (session->auth.req_digest_len != 1679 job->auth_tag_output_len_in_bytes) { 1680 job->auth_tag_output = 1681 qp_data->temp_digests[*digest_idx]; 1682 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1683 } 1684 } 1685 /* 1686 * Multi-buffer library current only support returning a truncated 1687 * digest length as specified in the relevant IPsec RFCs 1688 */ 1689 1690 /* Data Parameters */ 1691 if (sgl) { 1692 job->src = NULL; 1693 job->dst = NULL; 1694 } else { 1695 job->src = rte_pktmbuf_mtod(m_src, uint8_t *); 1696 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset); 1697 } 1698 1699 switch (job->hash_alg) { 1700 case IMB_AUTH_AES_CCM: 1701 job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset; 1702 job->msg_len_to_hash_in_bytes = op->sym->aead.data.length; 1703 1704 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1705 session->iv.offset + 1); 1706 break; 1707 1708 case IMB_AUTH_AES_GMAC: 1709 job->hash_start_src_offset_in_bytes = 1710 op->sym->aead.data.offset; 1711 job->msg_len_to_hash_in_bytes = 1712 op->sym->aead.data.length; 1713 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1714 session->iv.offset); 1715 break; 1716 case IMB_AUTH_AES_GMAC_128: 1717 case IMB_AUTH_AES_GMAC_192: 1718 case IMB_AUTH_AES_GMAC_256: 1719 job->hash_start_src_offset_in_bytes = 1720 op->sym->auth.data.offset; 1721 job->msg_len_to_hash_in_bytes = 1722 op->sym->auth.data.length; 1723 break; 1724 1725 case IMB_AUTH_GCM_SGL: 1726 case IMB_AUTH_CHACHA20_POLY1305_SGL: 1727 job->hash_start_src_offset_in_bytes = 0; 1728 job->msg_len_to_hash_in_bytes = 0; 1729 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1730 session->iv.offset); 1731 break; 1732 1733 case IMB_AUTH_CHACHA20_POLY1305: 1734 job->hash_start_src_offset_in_bytes = 1735 op->sym->aead.data.offset; 1736 job->msg_len_to_hash_in_bytes = 1737 op->sym->aead.data.length; 1738 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1739 session->iv.offset); 1740 break; 1741 /* ZUC and SNOW3G require length in bits and offset in bytes */ 1742 case IMB_AUTH_ZUC_EIA3_BITLEN: 1743 case IMB_AUTH_ZUC256_EIA3_BITLEN: 1744 case IMB_AUTH_SNOW3G_UIA2_BITLEN: 1745 auth_off_in_bytes = op->sym->auth.data.offset >> 3; 1746 ciph_off_in_bytes = op->sym->cipher.data.offset >> 3; 1747 auth_len_in_bytes = op->sym->auth.data.length >> 3; 1748 ciph_len_in_bytes = op->sym->cipher.data.length >> 3; 1749 1750 job->hash_start_src_offset_in_bytes = auth_start_offset(op, 1751 session, oop, auth_off_in_bytes, 1752 ciph_off_in_bytes, auth_len_in_bytes, 1753 ciph_len_in_bytes, lb_sgl); 1754 job->msg_len_to_hash_in_bits = op->sym->auth.data.length; 1755 1756 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1757 session->iv.offset); 1758 break; 1759 1760 /* KASUMI requires lengths and offset in bytes */ 1761 case IMB_AUTH_KASUMI_UIA1: 1762 auth_off_in_bytes = op->sym->auth.data.offset >> 3; 1763 ciph_off_in_bytes = op->sym->cipher.data.offset >> 3; 1764 auth_len_in_bytes = op->sym->auth.data.length >> 3; 1765 ciph_len_in_bytes = op->sym->cipher.data.length >> 3; 1766 1767 job->hash_start_src_offset_in_bytes = auth_start_offset(op, 1768 session, oop, auth_off_in_bytes, 1769 ciph_off_in_bytes, auth_len_in_bytes, 1770 ciph_len_in_bytes, lb_sgl); 1771 job->msg_len_to_hash_in_bytes = auth_len_in_bytes; 1772 1773 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1774 session->iv.offset); 1775 break; 1776 1777 default: 1778 job->hash_start_src_offset_in_bytes = auth_start_offset(op, 1779 session, oop, op->sym->auth.data.offset, 1780 op->sym->cipher.data.offset, 1781 op->sym->auth.data.length, 1782 op->sym->cipher.data.length, lb_sgl); 1783 job->msg_len_to_hash_in_bytes = op->sym->auth.data.length; 1784 1785 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1786 session->iv.offset); 1787 } 1788 1789 switch (job->cipher_mode) { 1790 /* ZUC requires length and offset in bytes */ 1791 case IMB_CIPHER_ZUC_EEA3: 1792 job->cipher_start_src_offset_in_bytes = 1793 op->sym->cipher.data.offset >> 3; 1794 job->msg_len_to_cipher_in_bytes = 1795 op->sym->cipher.data.length >> 3; 1796 break; 1797 /* ZUC and SNOW3G require length and offset in bits */ 1798 case IMB_CIPHER_SNOW3G_UEA2_BITLEN: 1799 case IMB_CIPHER_KASUMI_UEA1_BITLEN: 1800 job->cipher_start_src_offset_in_bits = 1801 op->sym->cipher.data.offset; 1802 job->msg_len_to_cipher_in_bits = 1803 op->sym->cipher.data.length; 1804 break; 1805 case IMB_CIPHER_GCM: 1806 job->cipher_start_src_offset_in_bytes = 1807 op->sym->aead.data.offset; 1808 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; 1809 break; 1810 case IMB_CIPHER_CCM: 1811 case IMB_CIPHER_CHACHA20_POLY1305: 1812 job->cipher_start_src_offset_in_bytes = 1813 op->sym->aead.data.offset; 1814 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; 1815 break; 1816 case IMB_CIPHER_GCM_SGL: 1817 case IMB_CIPHER_CHACHA20_POLY1305_SGL: 1818 job->msg_len_to_cipher_in_bytes = 0; 1819 job->cipher_start_src_offset_in_bytes = 0; 1820 break; 1821 default: 1822 job->cipher_start_src_offset_in_bytes = 1823 op->sym->cipher.data.offset; 1824 job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length; 1825 } 1826 1827 if (cipher_mode == IMB_CIPHER_NULL && oop) { 1828 memcpy(job->dst + job->cipher_start_src_offset_in_bytes, 1829 job->src + job->cipher_start_src_offset_in_bytes, 1830 job->msg_len_to_cipher_in_bytes); 1831 } 1832 1833 /* Set user data to be crypto operation data struct */ 1834 job->user_data = op; 1835 1836 if (sgl) { 1837 1838 if (lb_sgl) 1839 return handle_sgl_linear(job, op, m_offset, session); 1840 1841 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 1842 if (m_src->nb_segs <= MAX_NUM_SEGS) 1843 return single_sgl_job(job, op, oop, 1844 m_offset, m_src, m_dst, 1845 qp_data->sgl_segs); 1846 else 1847 #endif 1848 return multi_sgl_job(job, op, oop, 1849 m_offset, m_src, m_dst, mb_mgr); 1850 } 1851 1852 return 0; 1853 } 1854 1855 /** 1856 * Process a crypto operation containing a security op and complete a 1857 * IMB_JOB job structure for submission to the multi buffer library for 1858 * processing. 1859 */ 1860 static inline int 1861 set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, 1862 struct rte_crypto_op *op, uint8_t *digest_idx) 1863 { 1864 struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp); 1865 struct rte_mbuf *m_src, *m_dst; 1866 struct rte_crypto_sym_op *sym; 1867 struct aesni_mb_session *session = NULL; 1868 1869 if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) { 1870 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1871 return -1; 1872 } 1873 session = SECURITY_GET_SESS_PRIV(op->sym->session); 1874 1875 if (unlikely(session == NULL)) { 1876 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1877 return -1; 1878 } 1879 /* Only DOCSIS protocol operations supported now */ 1880 if (session->template_job.cipher_mode != IMB_CIPHER_DOCSIS_SEC_BPI || 1881 session->template_job.hash_alg != IMB_AUTH_DOCSIS_CRC32) { 1882 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 1883 return -1; 1884 } 1885 1886 sym = op->sym; 1887 m_src = sym->m_src; 1888 1889 if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) { 1890 /* in-place operation */ 1891 m_dst = m_src; 1892 } else { 1893 /* out-of-place operation not supported */ 1894 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 1895 return -ENOTSUP; 1896 } 1897 1898 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1899 1900 /* Set cipher parameters */ 1901 job->enc_keys = session->cipher.expanded_aes_keys.encode; 1902 job->dec_keys = session->cipher.expanded_aes_keys.decode; 1903 1904 /* Set IV parameters */ 1905 job->iv = (uint8_t *)op + session->iv.offset; 1906 1907 /* Set digest output location */ 1908 job->auth_tag_output = qp_data->temp_digests[*digest_idx]; 1909 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1910 1911 /* Set data parameters */ 1912 job->src = rte_pktmbuf_mtod(m_src, uint8_t *); 1913 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, 1914 sym->cipher.data.offset); 1915 1916 job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset; 1917 job->msg_len_to_cipher_in_bytes = sym->cipher.data.length; 1918 1919 job->hash_start_src_offset_in_bytes = sym->auth.data.offset; 1920 job->msg_len_to_hash_in_bytes = sym->auth.data.length; 1921 1922 job->user_data = op; 1923 1924 return 0; 1925 } 1926 1927 static inline void 1928 verify_docsis_sec_crc(IMB_JOB *job, uint8_t *status) 1929 { 1930 uint16_t crc_offset; 1931 uint8_t *crc; 1932 1933 if (!job->msg_len_to_hash_in_bytes) 1934 return; 1935 1936 crc_offset = job->hash_start_src_offset_in_bytes + 1937 job->msg_len_to_hash_in_bytes - 1938 job->cipher_start_src_offset_in_bytes; 1939 crc = job->dst + crc_offset; 1940 1941 /* Verify CRC (at the end of the message) */ 1942 if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0) 1943 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; 1944 } 1945 1946 static inline void 1947 verify_digest(IMB_JOB *job, void *digest, uint16_t len, uint8_t *status) 1948 { 1949 /* Verify digest if required */ 1950 if (memcmp(job->auth_tag_output, digest, len) != 0) 1951 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; 1952 } 1953 1954 static inline void 1955 generate_digest(IMB_JOB *job, struct rte_crypto_op *op, 1956 struct aesni_mb_session *sess) 1957 { 1958 /* No extra copy needed */ 1959 if (likely(sess->auth.req_digest_len == job->auth_tag_output_len_in_bytes)) 1960 return; 1961 1962 /* 1963 * This can only happen for HMAC, so only digest 1964 * for authentication algos is required 1965 */ 1966 memcpy(op->sym->auth.digest.data, job->auth_tag_output, 1967 sess->auth.req_digest_len); 1968 } 1969 1970 static void 1971 post_process_sgl_linear(struct rte_crypto_op *op, IMB_JOB *job, 1972 struct aesni_mb_session *sess, uint8_t *linear_buf) 1973 { 1974 1975 int lb_offset = 0; 1976 struct rte_mbuf *m_dst = op->sym->m_dst == NULL ? 1977 op->sym->m_src : op->sym->m_dst; 1978 uint16_t total_len, dst_len; 1979 uint64_t auth_len; 1980 uint8_t *dst; 1981 1982 total_len = sgl_linear_cipher_auth_len(job, &auth_len); 1983 1984 if (sess->auth.operation != RTE_CRYPTO_AUTH_OP_VERIFY) 1985 total_len += job->auth_tag_output_len_in_bytes; 1986 1987 for (; (m_dst != NULL) && (total_len - lb_offset > 0); m_dst = m_dst->next) { 1988 dst = rte_pktmbuf_mtod(m_dst, uint8_t *); 1989 dst_len = RTE_MIN(m_dst->data_len, total_len - lb_offset); 1990 rte_memcpy(dst, linear_buf + lb_offset, dst_len); 1991 lb_offset += dst_len; 1992 } 1993 } 1994 1995 /** 1996 * Process a completed job and return rte_mbuf which job processed 1997 * 1998 * @param qp Queue Pair to process 1999 * @param job IMB_JOB job to process 2000 * 2001 * @return 2002 * - Returns processed crypto operation. 2003 * - Returns NULL on invalid job 2004 */ 2005 static inline struct rte_crypto_op * 2006 post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) 2007 { 2008 struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data; 2009 struct aesni_mb_session *sess = NULL; 2010 uint8_t *linear_buf = NULL; 2011 int sgl = 0; 2012 uint8_t oop = 0; 2013 uint8_t is_docsis_sec = 0; 2014 2015 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 2016 /* 2017 * Assuming at this point that if it's a security type op, that 2018 * this is for DOCSIS 2019 */ 2020 is_docsis_sec = 1; 2021 sess = SECURITY_GET_SESS_PRIV(op->sym->session); 2022 } else 2023 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 2024 2025 if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) { 2026 switch (job->status) { 2027 case IMB_STATUS_COMPLETED: 2028 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 2029 2030 if ((op->sym->m_src->nb_segs > 1 || 2031 (op->sym->m_dst != NULL && 2032 op->sym->m_dst->nb_segs > 1)) && 2033 !imb_lib_support_sgl_algo(job->cipher_mode)) { 2034 linear_buf = (uint8_t *) job->user_data2; 2035 sgl = 1; 2036 2037 post_process_sgl_linear(op, job, sess, linear_buf); 2038 } 2039 2040 if (job->hash_alg == IMB_AUTH_NULL) 2041 break; 2042 2043 if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 2044 if (is_aead_algo(job->hash_alg, 2045 job->cipher_mode)) 2046 verify_digest(job, 2047 op->sym->aead.digest.data, 2048 sess->auth.req_digest_len, 2049 &op->status); 2050 else if (is_docsis_sec) 2051 verify_docsis_sec_crc(job, 2052 &op->status); 2053 else 2054 verify_digest(job, 2055 op->sym->auth.digest.data, 2056 sess->auth.req_digest_len, 2057 &op->status); 2058 } else { 2059 if (!op->sym->m_dst || op->sym->m_dst == op->sym->m_src) { 2060 /* in-place operation */ 2061 oop = 0; 2062 } else { /* out-of-place operation */ 2063 oop = 1; 2064 } 2065 2066 /* Enable digest check */ 2067 if (op->sym->m_src->nb_segs == 1 && op->sym->m_dst != NULL 2068 && !is_aead_algo(job->hash_alg, sess->template_job.cipher_mode) && 2069 aesni_mb_digest_appended_in_src(op, job, oop) != NULL) { 2070 unsigned int auth_size, cipher_size; 2071 int unencrypted_bytes = 0; 2072 if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || 2073 job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN || 2074 job->cipher_mode == IMB_CIPHER_ZUC_EEA3) { 2075 cipher_size = (op->sym->cipher.data.offset >> 3) + 2076 (op->sym->cipher.data.length >> 3); 2077 } else { 2078 cipher_size = (op->sym->cipher.data.offset) + 2079 (op->sym->cipher.data.length); 2080 } 2081 if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN || 2082 job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || 2083 job->hash_alg == IMB_AUTH_KASUMI_UIA1 || 2084 job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) { 2085 auth_size = (op->sym->auth.data.offset >> 3) + 2086 (op->sym->auth.data.length >> 3); 2087 } else { 2088 auth_size = (op->sym->auth.data.offset) + 2089 (op->sym->auth.data.length); 2090 } 2091 /* Check for unencrypted bytes in partial digest cases */ 2092 if (job->cipher_mode != IMB_CIPHER_NULL) { 2093 unencrypted_bytes = auth_size + 2094 job->auth_tag_output_len_in_bytes - cipher_size; 2095 } 2096 if (unencrypted_bytes > 0) 2097 rte_memcpy( 2098 rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, 2099 cipher_size), 2100 rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, 2101 cipher_size), 2102 unencrypted_bytes); 2103 } 2104 generate_digest(job, op, sess); 2105 } 2106 break; 2107 default: 2108 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 2109 } 2110 if (sgl) 2111 rte_free(linear_buf); 2112 } 2113 2114 /* Free session if a session-less crypto op */ 2115 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 2116 memset(sess, 0, sizeof(struct aesni_mb_session)); 2117 rte_mempool_put(qp->sess_mp, op->sym->session); 2118 op->sym->session = NULL; 2119 } 2120 2121 return op; 2122 } 2123 2124 static inline void 2125 post_process_mb_sync_job(IMB_JOB *job) 2126 { 2127 uint32_t *st; 2128 2129 st = job->user_data; 2130 st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG; 2131 } 2132 2133 static inline uint32_t 2134 handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr) 2135 { 2136 uint32_t i; 2137 2138 for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr)) 2139 post_process_mb_sync_job(job); 2140 2141 return i; 2142 } 2143 2144 static inline uint32_t 2145 flush_mb_sync_mgr(IMB_MGR *mb_mgr) 2146 { 2147 IMB_JOB *job; 2148 2149 job = IMB_FLUSH_JOB(mb_mgr); 2150 return handle_completed_sync_jobs(job, mb_mgr); 2151 } 2152 2153 static inline IMB_JOB * 2154 set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op) 2155 { 2156 job->chain_order = IMB_ORDER_HASH_CIPHER; 2157 job->cipher_mode = IMB_CIPHER_NULL; 2158 job->hash_alg = IMB_AUTH_NULL; 2159 job->cipher_direction = IMB_DIR_DECRYPT; 2160 2161 /* Set user data to be crypto operation data struct */ 2162 job->user_data = op; 2163 2164 return job; 2165 } 2166 2167 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM 2168 uint16_t 2169 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, 2170 uint16_t nb_ops) 2171 { 2172 struct ipsec_mb_qp *qp = queue_pair; 2173 IMB_MGR *mb_mgr = qp->mb_mgr; 2174 struct rte_crypto_op *op; 2175 struct rte_crypto_op *deqd_ops[IMB_MAX_BURST_SIZE]; 2176 IMB_JOB *job; 2177 int retval, processed_jobs = 0; 2178 uint16_t i, nb_jobs; 2179 IMB_JOB *jobs[IMB_MAX_BURST_SIZE] = {NULL}; 2180 pid_t pid; 2181 2182 if (unlikely(nb_ops == 0 || mb_mgr == NULL)) 2183 return 0; 2184 2185 uint8_t digest_idx = qp->digest_idx; 2186 uint16_t burst_sz = (nb_ops > IMB_MAX_BURST_SIZE) ? 2187 IMB_MAX_BURST_SIZE : nb_ops; 2188 2189 /* 2190 * If nb_ops is greater than the max supported 2191 * ipsec_mb burst size, then process in bursts of 2192 * IMB_MAX_BURST_SIZE until all operations are submitted 2193 */ 2194 while (nb_ops) { 2195 uint16_t nb_submit_ops; 2196 uint16_t n = (nb_ops / burst_sz) ? 2197 burst_sz : nb_ops; 2198 2199 if (unlikely((IMB_GET_NEXT_BURST(mb_mgr, n, jobs)) < n)) { 2200 /* 2201 * Not enough free jobs in the queue 2202 * Flush n jobs until enough jobs available 2203 */ 2204 nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); 2205 for (i = 0; i < nb_jobs; i++) { 2206 job = jobs[i]; 2207 2208 op = post_process_mb_job(qp, job); 2209 if (op) { 2210 ops[processed_jobs++] = op; 2211 qp->stats.dequeued_count++; 2212 } else { 2213 qp->stats.dequeue_err_count++; 2214 break; 2215 } 2216 } 2217 nb_ops -= i; 2218 continue; 2219 } 2220 2221 if (!RTE_PER_LCORE(pid)) 2222 RTE_PER_LCORE(pid) = getpid(); 2223 2224 pid = RTE_PER_LCORE(pid); 2225 2226 /* 2227 * Get the next operations to process from ingress queue. 2228 * There is no need to return the job to the IMB_MGR 2229 * if there are no more operations to process, since 2230 * the IMB_MGR can use that pointer again in next 2231 * get_next calls. 2232 */ 2233 nb_submit_ops = rte_ring_dequeue_burst(qp->ingress_queue, 2234 (void **)deqd_ops, n, NULL); 2235 for (i = 0; i < nb_submit_ops; i++) { 2236 job = jobs[i]; 2237 op = deqd_ops[i]; 2238 2239 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 2240 retval = set_sec_mb_job_params(job, qp, op, 2241 &digest_idx); 2242 else 2243 retval = set_mb_job_params(job, qp, op, 2244 &digest_idx, mb_mgr, pid); 2245 2246 if (unlikely(retval != 0)) { 2247 qp->stats.dequeue_err_count++; 2248 set_job_null_op(job, op); 2249 } 2250 } 2251 2252 /* Submit jobs to multi-buffer for processing */ 2253 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 2254 int err = 0; 2255 2256 nb_jobs = IMB_SUBMIT_BURST(mb_mgr, nb_submit_ops, jobs); 2257 err = imb_get_errno(mb_mgr); 2258 if (err) 2259 IPSEC_MB_LOG(ERR, "%s", imb_get_strerror(err)); 2260 #else 2261 nb_jobs = IMB_SUBMIT_BURST_NOCHECK(mb_mgr, 2262 nb_submit_ops, jobs); 2263 #endif 2264 for (i = 0; i < nb_jobs; i++) { 2265 job = jobs[i]; 2266 2267 op = post_process_mb_job(qp, job); 2268 if (op) { 2269 ops[processed_jobs++] = op; 2270 qp->stats.dequeued_count++; 2271 } else { 2272 qp->stats.dequeue_err_count++; 2273 break; 2274 } 2275 } 2276 2277 qp->digest_idx = digest_idx; 2278 2279 if (processed_jobs < 1) { 2280 nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); 2281 2282 for (i = 0; i < nb_jobs; i++) { 2283 job = jobs[i]; 2284 2285 op = post_process_mb_job(qp, job); 2286 if (op) { 2287 ops[processed_jobs++] = op; 2288 qp->stats.dequeued_count++; 2289 } else { 2290 qp->stats.dequeue_err_count++; 2291 break; 2292 } 2293 } 2294 } 2295 nb_ops -= n; 2296 } 2297 2298 return processed_jobs; 2299 } 2300 #else 2301 2302 /** 2303 * Process a completed IMB_JOB job and keep processing jobs until 2304 * get_completed_job return NULL 2305 * 2306 * @param qp Queue Pair to process 2307 * @param mb_mgr IMB_MGR to use 2308 * @param job IMB_JOB job 2309 * @param ops crypto ops to fill 2310 * @param nb_ops number of crypto ops 2311 * 2312 * @return 2313 * - Number of processed jobs 2314 */ 2315 static unsigned 2316 handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, 2317 IMB_JOB *job, struct rte_crypto_op **ops, 2318 uint16_t nb_ops) 2319 { 2320 struct rte_crypto_op *op = NULL; 2321 uint16_t processed_jobs = 0; 2322 2323 while (job != NULL) { 2324 op = post_process_mb_job(qp, job); 2325 2326 if (op) { 2327 ops[processed_jobs++] = op; 2328 qp->stats.dequeued_count++; 2329 } else { 2330 qp->stats.dequeue_err_count++; 2331 break; 2332 } 2333 if (processed_jobs == nb_ops) 2334 break; 2335 2336 job = IMB_GET_COMPLETED_JOB(mb_mgr); 2337 } 2338 2339 return processed_jobs; 2340 } 2341 2342 static inline uint16_t 2343 flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, 2344 struct rte_crypto_op **ops, uint16_t nb_ops) 2345 { 2346 int processed_ops = 0; 2347 2348 /* Flush the remaining jobs */ 2349 IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr); 2350 2351 if (job) 2352 processed_ops += handle_completed_jobs(qp, mb_mgr, job, 2353 &ops[processed_ops], nb_ops - processed_ops); 2354 2355 return processed_ops; 2356 } 2357 2358 uint16_t 2359 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, 2360 uint16_t nb_ops) 2361 { 2362 struct ipsec_mb_qp *qp = queue_pair; 2363 IMB_MGR *mb_mgr = qp->mb_mgr; 2364 struct rte_crypto_op *op; 2365 IMB_JOB *job; 2366 int retval, processed_jobs = 0; 2367 pid_t pid = 0; 2368 2369 if (unlikely(nb_ops == 0 || mb_mgr == NULL)) 2370 return 0; 2371 2372 uint8_t digest_idx = qp->digest_idx; 2373 2374 do { 2375 /* Get next free mb job struct from mb manager */ 2376 job = IMB_GET_NEXT_JOB(mb_mgr); 2377 if (unlikely(job == NULL)) { 2378 /* if no free mb job structs we need to flush mb_mgr */ 2379 processed_jobs += flush_mb_mgr(qp, mb_mgr, 2380 &ops[processed_jobs], 2381 nb_ops - processed_jobs); 2382 2383 if (nb_ops == processed_jobs) 2384 break; 2385 2386 job = IMB_GET_NEXT_JOB(mb_mgr); 2387 } 2388 2389 /* 2390 * Get next operation to process from ingress queue. 2391 * There is no need to return the job to the IMB_MGR 2392 * if there are no more operations to process, since the IMB_MGR 2393 * can use that pointer again in next get_next calls. 2394 */ 2395 retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op); 2396 if (retval < 0) 2397 break; 2398 2399 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 2400 retval = set_sec_mb_job_params(job, qp, op, 2401 &digest_idx); 2402 else 2403 retval = set_mb_job_params(job, qp, op, 2404 &digest_idx, mb_mgr, pid); 2405 2406 if (unlikely(retval != 0)) { 2407 qp->stats.dequeue_err_count++; 2408 set_job_null_op(job, op); 2409 } 2410 2411 /* Submit job to multi-buffer for processing */ 2412 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 2413 job = IMB_SUBMIT_JOB(mb_mgr); 2414 #else 2415 job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr); 2416 #endif 2417 /* 2418 * If submit returns a processed job then handle it, 2419 * before submitting subsequent jobs 2420 */ 2421 if (job) 2422 processed_jobs += handle_completed_jobs(qp, mb_mgr, 2423 job, &ops[processed_jobs], 2424 nb_ops - processed_jobs); 2425 2426 } while (processed_jobs < nb_ops); 2427 2428 qp->digest_idx = digest_idx; 2429 2430 if (processed_jobs < 1) 2431 processed_jobs += flush_mb_mgr(qp, mb_mgr, 2432 &ops[processed_jobs], 2433 nb_ops - processed_jobs); 2434 2435 return processed_jobs; 2436 } 2437 #endif 2438 static inline int 2439 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl) 2440 { 2441 /* no multi-seg support with current AESNI-MB PMD */ 2442 if (sgl->num != 1) 2443 return -ENOTSUP; 2444 else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len) 2445 return -EINVAL; 2446 return 0; 2447 } 2448 2449 static inline IMB_JOB * 2450 submit_sync_job(IMB_MGR *mb_mgr) 2451 { 2452 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 2453 return IMB_SUBMIT_JOB(mb_mgr); 2454 #else 2455 return IMB_SUBMIT_JOB_NOCHECK(mb_mgr); 2456 #endif 2457 } 2458 2459 static inline uint32_t 2460 generate_sync_dgst(struct rte_crypto_sym_vec *vec, 2461 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len) 2462 { 2463 uint32_t i, k; 2464 2465 for (i = 0, k = 0; i != vec->num; i++) { 2466 if (vec->status[i] == 0) { 2467 memcpy(vec->digest[i].va, dgst[i], len); 2468 k++; 2469 } 2470 } 2471 2472 return k; 2473 } 2474 2475 static inline uint32_t 2476 verify_sync_dgst(struct rte_crypto_sym_vec *vec, 2477 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len) 2478 { 2479 uint32_t i, k; 2480 2481 for (i = 0, k = 0; i != vec->num; i++) { 2482 if (vec->status[i] == 0) { 2483 if (memcmp(vec->digest[i].va, dgst[i], len) != 0) 2484 vec->status[i] = EBADMSG; 2485 else 2486 k++; 2487 } 2488 } 2489 2490 return k; 2491 } 2492 2493 uint32_t 2494 aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused, 2495 struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs, 2496 struct rte_crypto_sym_vec *vec) 2497 { 2498 int32_t ret; 2499 uint32_t i, j, k, len; 2500 void *buf; 2501 IMB_JOB *job; 2502 IMB_MGR *mb_mgr; 2503 struct aesni_mb_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 2504 uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX]; 2505 2506 /* get per-thread MB MGR, create one if needed */ 2507 mb_mgr = get_per_thread_mb_mgr(); 2508 if (unlikely(mb_mgr == NULL)) 2509 return 0; 2510 2511 for (i = 0, j = 0, k = 0; i != vec->num; i++) { 2512 ret = check_crypto_sgl(sofs, vec->src_sgl + i); 2513 if (ret != 0) { 2514 vec->status[i] = ret; 2515 continue; 2516 } 2517 2518 buf = vec->src_sgl[i].vec[0].base; 2519 len = vec->src_sgl[i].vec[0].len; 2520 2521 job = IMB_GET_NEXT_JOB(mb_mgr); 2522 if (job == NULL) { 2523 k += flush_mb_sync_mgr(mb_mgr); 2524 job = IMB_GET_NEXT_JOB(mb_mgr); 2525 RTE_ASSERT(job != NULL); 2526 } 2527 2528 /* Submit job for processing */ 2529 set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i], 2530 &vec->aad[i], tmp_dgst[i], &vec->status[i]); 2531 job = submit_sync_job(mb_mgr); 2532 j++; 2533 2534 /* handle completed jobs */ 2535 k += handle_completed_sync_jobs(job, mb_mgr); 2536 } 2537 2538 /* flush remaining jobs */ 2539 while (k != j) 2540 k += flush_mb_sync_mgr(mb_mgr); 2541 2542 /* finish processing for successful jobs: check/update digest */ 2543 if (k != 0) { 2544 if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) 2545 k = verify_sync_dgst(vec, 2546 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst, 2547 s->auth.req_digest_len); 2548 else 2549 k = generate_sync_dgst(vec, 2550 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst, 2551 s->auth.req_digest_len); 2552 } 2553 2554 return k; 2555 } 2556 2557 struct rte_cryptodev_ops aesni_mb_pmd_ops = { 2558 .dev_configure = ipsec_mb_config, 2559 .dev_start = ipsec_mb_start, 2560 .dev_stop = ipsec_mb_stop, 2561 .dev_close = ipsec_mb_close, 2562 2563 .stats_get = ipsec_mb_stats_get, 2564 .stats_reset = ipsec_mb_stats_reset, 2565 2566 .dev_infos_get = ipsec_mb_info_get, 2567 2568 .queue_pair_setup = ipsec_mb_qp_setup, 2569 .queue_pair_release = ipsec_mb_qp_release, 2570 2571 .sym_cpu_process = aesni_mb_process_bulk, 2572 2573 .sym_session_get_size = ipsec_mb_sym_session_get_size, 2574 .sym_session_configure = ipsec_mb_sym_session_configure, 2575 .sym_session_clear = ipsec_mb_sym_session_clear 2576 }; 2577 2578 /** 2579 * Configure a aesni multi-buffer session from a security session 2580 * configuration 2581 */ 2582 static int 2583 aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf, 2584 struct rte_security_session *sess) 2585 { 2586 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 2587 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 2588 int ret; 2589 2590 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL || 2591 conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) { 2592 IPSEC_MB_LOG(ERR, "Invalid security protocol"); 2593 return -EINVAL; 2594 } 2595 2596 ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf, 2597 sess_private_data); 2598 2599 if (ret != 0) { 2600 IPSEC_MB_LOG(ERR, "Failed to configure session parameters"); 2601 return ret; 2602 } 2603 2604 return ret; 2605 } 2606 2607 /** Clear the memory of session so it does not leave key material behind */ 2608 static int 2609 aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused, 2610 struct rte_security_session *sess) 2611 { 2612 void *sess_priv = SECURITY_GET_SESS_PRIV(sess); 2613 2614 if (sess_priv) { 2615 memset(sess_priv, 0, sizeof(struct aesni_mb_session)); 2616 } 2617 return 0; 2618 } 2619 2620 static unsigned int 2621 aesni_mb_pmd_sec_sess_get_size(void *device __rte_unused) 2622 { 2623 return sizeof(struct aesni_mb_session); 2624 } 2625 2626 /** Get security capabilities for aesni multi-buffer */ 2627 static const struct rte_security_capability * 2628 aesni_mb_pmd_sec_capa_get(void *device __rte_unused) 2629 { 2630 return aesni_mb_pmd_security_cap; 2631 } 2632 2633 static struct rte_security_ops aesni_mb_pmd_sec_ops = { 2634 .session_create = aesni_mb_pmd_sec_sess_create, 2635 .session_update = NULL, 2636 .session_get_size = aesni_mb_pmd_sec_sess_get_size, 2637 .session_stats_get = NULL, 2638 .session_destroy = aesni_mb_pmd_sec_sess_destroy, 2639 .set_pkt_metadata = NULL, 2640 .capabilities_get = aesni_mb_pmd_sec_capa_get 2641 }; 2642 2643 struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops; 2644 2645 static int 2646 aesni_mb_configure_dev(struct rte_cryptodev *dev) 2647 { 2648 struct rte_security_ctx *security_instance; 2649 2650 security_instance = rte_malloc("aesni_mb_sec", 2651 sizeof(struct rte_security_ctx), 2652 RTE_CACHE_LINE_SIZE); 2653 if (security_instance != NULL) { 2654 security_instance->device = (void *)dev; 2655 security_instance->ops = rte_aesni_mb_pmd_sec_ops; 2656 security_instance->sess_cnt = 0; 2657 dev->security_ctx = security_instance; 2658 2659 return 0; 2660 } 2661 2662 return -ENOMEM; 2663 } 2664 2665 static int 2666 aesni_mb_probe(struct rte_vdev_device *vdev) 2667 { 2668 return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB); 2669 } 2670 2671 static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = { 2672 .probe = aesni_mb_probe, 2673 .remove = ipsec_mb_remove 2674 }; 2675 2676 static struct cryptodev_driver aesni_mb_crypto_drv; 2677 2678 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, 2679 cryptodev_aesni_mb_pmd_drv); 2680 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd); 2681 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD, 2682 "max_nb_queue_pairs=<int> socket_id=<int>"); 2683 RTE_PMD_REGISTER_CRYPTO_DRIVER( 2684 aesni_mb_crypto_drv, 2685 cryptodev_aesni_mb_pmd_drv.driver, 2686 pmd_driver_id_aesni_mb); 2687 2688 /* Constructor function to register aesni-mb PMD */ 2689 RTE_INIT(ipsec_mb_register_aesni_mb) 2690 { 2691 struct ipsec_mb_internals *aesni_mb_data = 2692 &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB]; 2693 2694 aesni_mb_data->caps = aesni_mb_capabilities; 2695 aesni_mb_data->dequeue_burst = aesni_mb_dequeue_burst; 2696 aesni_mb_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 2697 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 2698 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | 2699 RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO | 2700 RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA | 2701 RTE_CRYPTODEV_FF_SYM_SESSIONLESS | 2702 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 2703 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 2704 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 2705 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 2706 RTE_CRYPTODEV_FF_SECURITY | 2707 RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED; 2708 2709 aesni_mb_data->internals_priv_size = 0; 2710 aesni_mb_data->ops = &aesni_mb_pmd_ops; 2711 aesni_mb_data->qp_priv_size = sizeof(struct aesni_mb_qp_data); 2712 aesni_mb_data->queue_pair_configure = NULL; 2713 aesni_mb_data->security_ops = &aesni_mb_pmd_sec_ops; 2714 aesni_mb_data->dev_config = aesni_mb_configure_dev; 2715 aesni_mb_data->session_configure = aesni_mb_session_configure; 2716 aesni_mb_data->session_priv_size = sizeof(struct aesni_mb_session); 2717 } 2718