1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2015-2021 Intel Corporation 3 */ 4 5 #include <unistd.h> 6 7 #include "pmd_aesni_mb_priv.h" 8 9 RTE_DEFINE_PER_LCORE(pid_t, pid); 10 11 uint8_t pmd_driver_id_aesni_mb; 12 13 struct aesni_mb_op_buf_data { 14 struct rte_mbuf *m; 15 uint32_t offset; 16 }; 17 18 static inline int 19 is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode) 20 { 21 return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 || 22 hash_alg == IMB_AUTH_AES_CCM || 23 cipher_mode == IMB_CIPHER_GCM); 24 } 25 26 /** Set session authentication parameters */ 27 static int 28 aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, 29 struct aesni_mb_session *sess, 30 const struct rte_crypto_sym_xform *xform) 31 { 32 uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 }; 33 uint32_t auth_precompute = 1; 34 35 if (xform == NULL) { 36 sess->template_job.hash_alg = IMB_AUTH_NULL; 37 return 0; 38 } 39 40 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) { 41 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type auth"); 42 return -1; 43 } 44 45 /* Set IV parameters */ 46 sess->auth_iv.offset = xform->auth.iv.offset; 47 48 /* Set the request digest size */ 49 sess->auth.req_digest_len = xform->auth.digest_length; 50 51 /* Select auth generate/verify */ 52 sess->auth.operation = xform->auth.op; 53 54 /* Set Authentication Parameters */ 55 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) { 56 sess->template_job.hash_alg = IMB_AUTH_NULL; 57 sess->template_job.auth_tag_output_len_in_bytes = 0; 58 return 0; 59 } 60 61 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) { 62 sess->template_job.hash_alg = IMB_AUTH_AES_XCBC; 63 64 uint16_t xcbc_mac_digest_len = 65 get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC); 66 if (sess->auth.req_digest_len != xcbc_mac_digest_len) { 67 IPSEC_MB_LOG(ERR, "Invalid digest size"); 68 return -EINVAL; 69 } 70 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 71 72 IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data, 73 sess->auth.xcbc.k1_expanded, 74 sess->auth.xcbc.k2, sess->auth.xcbc.k3); 75 sess->template_job.u.XCBC._k1_expanded = sess->auth.xcbc.k1_expanded; 76 sess->template_job.u.XCBC._k2 = sess->auth.xcbc.k2; 77 sess->template_job.u.XCBC._k3 = sess->auth.xcbc.k3; 78 return 0; 79 } 80 81 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) { 82 uint32_t dust[4*15]; 83 84 sess->template_job.hash_alg = IMB_AUTH_AES_CMAC; 85 86 uint16_t cmac_digest_len = 87 get_digest_byte_length(IMB_AUTH_AES_CMAC); 88 89 if (sess->auth.req_digest_len > cmac_digest_len) { 90 IPSEC_MB_LOG(ERR, "Invalid digest size"); 91 return -EINVAL; 92 } 93 /* 94 * Multi-buffer lib supports digest sizes from 4 to 16 bytes 95 * in version 0.50 and sizes of 12 and 16 bytes, 96 * in version 0.49. 97 * If size requested is different, generate the full digest 98 * (16 bytes) in a temporary location and then memcpy 99 * the requested number of bytes. 100 */ 101 if (sess->auth.req_digest_len < 4) 102 sess->template_job.auth_tag_output_len_in_bytes = cmac_digest_len; 103 else 104 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 105 106 IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data, 107 sess->auth.cmac.expkey, dust); 108 IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey, 109 sess->auth.cmac.skey1, sess->auth.cmac.skey2); 110 sess->template_job.u.CMAC._key_expanded = sess->auth.cmac.expkey; 111 sess->template_job.u.CMAC._skey1 = sess->auth.cmac.skey1; 112 sess->template_job.u.CMAC._skey2 = sess->auth.cmac.skey2; 113 return 0; 114 } 115 116 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { 117 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) { 118 sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; 119 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 120 } else 121 sess->template_job.cipher_direction = IMB_DIR_DECRYPT; 122 123 if (sess->auth.req_digest_len > 124 get_digest_byte_length(IMB_AUTH_AES_GMAC)) { 125 IPSEC_MB_LOG(ERR, "Invalid digest size"); 126 return -EINVAL; 127 } 128 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 129 sess->template_job.u.GMAC.iv_len_in_bytes = xform->auth.iv.length; 130 sess->iv.offset = xform->auth.iv.offset; 131 132 switch (xform->auth.key.length) { 133 case IMB_KEY_128_BYTES: 134 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_128; 135 IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data, 136 &sess->cipher.gcm_key); 137 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 138 break; 139 case IMB_KEY_192_BYTES: 140 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_192; 141 IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data, 142 &sess->cipher.gcm_key); 143 sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; 144 break; 145 case IMB_KEY_256_BYTES: 146 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_256; 147 IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data, 148 &sess->cipher.gcm_key); 149 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 150 break; 151 default: 152 IPSEC_MB_LOG(ERR, "Invalid authentication key length"); 153 return -EINVAL; 154 } 155 sess->template_job.u.GMAC._key = &sess->cipher.gcm_key; 156 157 return 0; 158 } 159 160 if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) { 161 if (xform->auth.key.length == 16) { 162 sess->template_job.hash_alg = IMB_AUTH_ZUC_EIA3_BITLEN; 163 164 if (sess->auth.req_digest_len != 4) { 165 IPSEC_MB_LOG(ERR, "Invalid digest size"); 166 return -EINVAL; 167 } 168 } else if (xform->auth.key.length == 32) { 169 sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN; 170 if (sess->auth.req_digest_len != 4 && 171 sess->auth.req_digest_len != 8 && 172 sess->auth.req_digest_len != 16) { 173 IPSEC_MB_LOG(ERR, "Invalid digest size"); 174 return -EINVAL; 175 } 176 } else { 177 IPSEC_MB_LOG(ERR, "Invalid authentication key length"); 178 return -EINVAL; 179 } 180 181 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 182 183 memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 184 xform->auth.key.length); 185 sess->template_job.u.ZUC_EIA3._key = sess->auth.zuc_auth_key; 186 return 0; 187 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 188 sess->template_job.hash_alg = IMB_AUTH_SNOW3G_UIA2_BITLEN; 189 uint16_t snow3g_uia2_digest_len = 190 get_truncated_digest_byte_length( 191 IMB_AUTH_SNOW3G_UIA2_BITLEN); 192 if (sess->auth.req_digest_len != snow3g_uia2_digest_len) { 193 IPSEC_MB_LOG(ERR, "Invalid digest size"); 194 return -EINVAL; 195 } 196 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 197 198 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data, 199 &sess->auth.pKeySched_snow3g_auth); 200 sess->template_job.u.SNOW3G_UIA2._key = (void *) 201 &sess->auth.pKeySched_snow3g_auth; 202 return 0; 203 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) { 204 sess->template_job.hash_alg = IMB_AUTH_KASUMI_UIA1; 205 uint16_t kasumi_f9_digest_len = 206 get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1); 207 if (sess->auth.req_digest_len != kasumi_f9_digest_len) { 208 IPSEC_MB_LOG(ERR, "Invalid digest size"); 209 return -EINVAL; 210 } 211 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 212 213 IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data, 214 &sess->auth.pKeySched_kasumi_auth); 215 sess->template_job.u.KASUMI_UIA1._key = (void *) 216 &sess->auth.pKeySched_kasumi_auth; 217 return 0; 218 } 219 220 switch (xform->auth.algo) { 221 case RTE_CRYPTO_AUTH_MD5_HMAC: 222 sess->template_job.hash_alg = IMB_AUTH_MD5; 223 break; 224 case RTE_CRYPTO_AUTH_SHA1_HMAC: 225 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_1; 226 if (xform->auth.key.length > get_auth_algo_blocksize( 227 IMB_AUTH_HMAC_SHA_1)) { 228 IMB_SHA1(mb_mgr, 229 xform->auth.key.data, 230 xform->auth.key.length, 231 hashed_key); 232 } 233 break; 234 case RTE_CRYPTO_AUTH_SHA1: 235 sess->template_job.hash_alg = IMB_AUTH_SHA_1; 236 auth_precompute = 0; 237 break; 238 case RTE_CRYPTO_AUTH_SHA224_HMAC: 239 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_224; 240 if (xform->auth.key.length > get_auth_algo_blocksize( 241 IMB_AUTH_HMAC_SHA_224)) { 242 IMB_SHA224(mb_mgr, 243 xform->auth.key.data, 244 xform->auth.key.length, 245 hashed_key); 246 } 247 break; 248 case RTE_CRYPTO_AUTH_SHA224: 249 sess->template_job.hash_alg = IMB_AUTH_SHA_224; 250 auth_precompute = 0; 251 break; 252 case RTE_CRYPTO_AUTH_SHA256_HMAC: 253 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_256; 254 if (xform->auth.key.length > get_auth_algo_blocksize( 255 IMB_AUTH_HMAC_SHA_256)) { 256 IMB_SHA256(mb_mgr, 257 xform->auth.key.data, 258 xform->auth.key.length, 259 hashed_key); 260 } 261 break; 262 case RTE_CRYPTO_AUTH_SHA256: 263 sess->template_job.hash_alg = IMB_AUTH_SHA_256; 264 auth_precompute = 0; 265 break; 266 case RTE_CRYPTO_AUTH_SHA384_HMAC: 267 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_384; 268 if (xform->auth.key.length > get_auth_algo_blocksize( 269 IMB_AUTH_HMAC_SHA_384)) { 270 IMB_SHA384(mb_mgr, 271 xform->auth.key.data, 272 xform->auth.key.length, 273 hashed_key); 274 } 275 break; 276 case RTE_CRYPTO_AUTH_SHA384: 277 sess->template_job.hash_alg = IMB_AUTH_SHA_384; 278 auth_precompute = 0; 279 break; 280 case RTE_CRYPTO_AUTH_SHA512_HMAC: 281 sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_512; 282 if (xform->auth.key.length > get_auth_algo_blocksize( 283 IMB_AUTH_HMAC_SHA_512)) { 284 IMB_SHA512(mb_mgr, 285 xform->auth.key.data, 286 xform->auth.key.length, 287 hashed_key); 288 } 289 break; 290 case RTE_CRYPTO_AUTH_SHA512: 291 sess->template_job.hash_alg = IMB_AUTH_SHA_512; 292 auth_precompute = 0; 293 break; 294 #if IMB_VERSION(1, 5, 0) <= IMB_VERSION_NUM 295 case RTE_CRYPTO_AUTH_SM3: 296 sess->template_job.hash_alg = IMB_AUTH_SM3; 297 break; 298 case RTE_CRYPTO_AUTH_SM3_HMAC: 299 sess->template_job.hash_alg = IMB_AUTH_HMAC_SM3; 300 break; 301 #endif 302 default: 303 IPSEC_MB_LOG(ERR, 304 "Unsupported authentication algorithm selection"); 305 return -ENOTSUP; 306 } 307 uint16_t trunc_digest_size = 308 get_truncated_digest_byte_length(sess->template_job.hash_alg); 309 uint16_t full_digest_size = 310 get_digest_byte_length(sess->template_job.hash_alg); 311 312 if (sess->auth.req_digest_len > full_digest_size || 313 sess->auth.req_digest_len == 0) { 314 IPSEC_MB_LOG(ERR, "Invalid digest size"); 315 return -EINVAL; 316 } 317 318 if (sess->auth.req_digest_len != trunc_digest_size && 319 sess->auth.req_digest_len != full_digest_size) 320 sess->template_job.auth_tag_output_len_in_bytes = full_digest_size; 321 else 322 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 323 324 /* Plain SHA does not require precompute key */ 325 if (auth_precompute == 0) 326 return 0; 327 328 /* Calculate Authentication precomputes */ 329 imb_hmac_ipad_opad(mb_mgr, sess->template_job.hash_alg, 330 xform->auth.key.data, xform->auth.key.length, 331 sess->auth.pads.inner, sess->auth.pads.outer); 332 sess->template_job.u.HMAC._hashed_auth_key_xor_ipad = 333 sess->auth.pads.inner; 334 sess->template_job.u.HMAC._hashed_auth_key_xor_opad = 335 sess->auth.pads.outer; 336 337 return 0; 338 } 339 340 /** Set session cipher parameters */ 341 static int 342 aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, 343 struct aesni_mb_session *sess, 344 const struct rte_crypto_sym_xform *xform) 345 { 346 uint8_t is_aes = 0; 347 uint8_t is_3DES = 0; 348 uint8_t is_docsis = 0; 349 uint8_t is_zuc = 0; 350 uint8_t is_snow3g = 0; 351 uint8_t is_kasumi = 0; 352 #if IMB_VERSION(1, 5, 0) <= IMB_VERSION_NUM 353 uint8_t is_sm4 = 0; 354 #endif 355 356 if (xform == NULL) { 357 sess->template_job.cipher_mode = IMB_CIPHER_NULL; 358 return 0; 359 } 360 361 if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { 362 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type cipher"); 363 return -EINVAL; 364 } 365 366 /* Select cipher direction */ 367 switch (xform->cipher.op) { 368 case RTE_CRYPTO_CIPHER_OP_ENCRYPT: 369 sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; 370 break; 371 case RTE_CRYPTO_CIPHER_OP_DECRYPT: 372 sess->template_job.cipher_direction = IMB_DIR_DECRYPT; 373 break; 374 default: 375 IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter"); 376 return -EINVAL; 377 } 378 379 /* Select cipher mode */ 380 switch (xform->cipher.algo) { 381 case RTE_CRYPTO_CIPHER_AES_CBC: 382 sess->template_job.cipher_mode = IMB_CIPHER_CBC; 383 is_aes = 1; 384 break; 385 case RTE_CRYPTO_CIPHER_AES_CTR: 386 sess->template_job.cipher_mode = IMB_CIPHER_CNTR; 387 is_aes = 1; 388 break; 389 case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: 390 sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_SEC_BPI; 391 is_docsis = 1; 392 break; 393 case RTE_CRYPTO_CIPHER_DES_CBC: 394 sess->template_job.cipher_mode = IMB_CIPHER_DES; 395 break; 396 case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: 397 sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_DES; 398 break; 399 case RTE_CRYPTO_CIPHER_3DES_CBC: 400 sess->template_job.cipher_mode = IMB_CIPHER_DES3; 401 is_3DES = 1; 402 break; 403 case RTE_CRYPTO_CIPHER_AES_ECB: 404 sess->template_job.cipher_mode = IMB_CIPHER_ECB; 405 is_aes = 1; 406 break; 407 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 408 sess->template_job.cipher_mode = IMB_CIPHER_ZUC_EEA3; 409 is_zuc = 1; 410 break; 411 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 412 sess->template_job.cipher_mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN; 413 is_snow3g = 1; 414 break; 415 case RTE_CRYPTO_CIPHER_KASUMI_F8: 416 sess->template_job.cipher_mode = IMB_CIPHER_KASUMI_UEA1_BITLEN; 417 is_kasumi = 1; 418 break; 419 case RTE_CRYPTO_CIPHER_NULL: 420 sess->template_job.cipher_mode = IMB_CIPHER_NULL; 421 sess->template_job.key_len_in_bytes = 0; 422 sess->iv.offset = xform->cipher.iv.offset; 423 sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; 424 return 0; 425 #if IMB_VERSION(1, 5, 0) <= IMB_VERSION_NUM 426 case RTE_CRYPTO_CIPHER_SM4_CBC: 427 sess->template_job.cipher_mode = IMB_CIPHER_SM4_CBC; 428 is_sm4 = 1; 429 break; 430 case RTE_CRYPTO_CIPHER_SM4_ECB: 431 sess->template_job.cipher_mode = IMB_CIPHER_SM4_ECB; 432 is_sm4 = 1; 433 break; 434 #endif 435 #if IMB_VERSION(1, 5, 0) < IMB_VERSION_NUM 436 case RTE_CRYPTO_CIPHER_SM4_CTR: 437 sess->template_job.cipher_mode = IMB_CIPHER_SM4_CNTR; 438 is_sm4 = 1; 439 break; 440 #endif 441 default: 442 IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter"); 443 return -ENOTSUP; 444 } 445 446 /* Set IV parameters */ 447 sess->iv.offset = xform->cipher.iv.offset; 448 sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; 449 450 /* Check key length and choose key expansion function for AES */ 451 if (is_aes) { 452 switch (xform->cipher.key.length) { 453 case IMB_KEY_128_BYTES: 454 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 455 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, 456 sess->cipher.expanded_aes_keys.encode, 457 sess->cipher.expanded_aes_keys.decode); 458 break; 459 case IMB_KEY_192_BYTES: 460 sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; 461 IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data, 462 sess->cipher.expanded_aes_keys.encode, 463 sess->cipher.expanded_aes_keys.decode); 464 break; 465 case IMB_KEY_256_BYTES: 466 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 467 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, 468 sess->cipher.expanded_aes_keys.encode, 469 sess->cipher.expanded_aes_keys.decode); 470 break; 471 default: 472 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 473 return -EINVAL; 474 } 475 476 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 477 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 478 } else if (is_docsis) { 479 switch (xform->cipher.key.length) { 480 case IMB_KEY_128_BYTES: 481 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 482 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, 483 sess->cipher.expanded_aes_keys.encode, 484 sess->cipher.expanded_aes_keys.decode); 485 break; 486 case IMB_KEY_256_BYTES: 487 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 488 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, 489 sess->cipher.expanded_aes_keys.encode, 490 sess->cipher.expanded_aes_keys.decode); 491 break; 492 default: 493 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 494 return -EINVAL; 495 } 496 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 497 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 498 } else if (is_3DES) { 499 uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0], 500 sess->cipher.exp_3des_keys.key[1], 501 sess->cipher.exp_3des_keys.key[2]}; 502 503 switch (xform->cipher.key.length) { 504 case 24: 505 IMB_DES_KEYSCHED(mb_mgr, keys[0], 506 xform->cipher.key.data); 507 IMB_DES_KEYSCHED(mb_mgr, keys[1], 508 xform->cipher.key.data + 8); 509 IMB_DES_KEYSCHED(mb_mgr, keys[2], 510 xform->cipher.key.data + 16); 511 512 /* Initialize keys - 24 bytes: [K1-K2-K3] */ 513 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; 514 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; 515 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2]; 516 break; 517 case 16: 518 IMB_DES_KEYSCHED(mb_mgr, keys[0], 519 xform->cipher.key.data); 520 IMB_DES_KEYSCHED(mb_mgr, keys[1], 521 xform->cipher.key.data + 8); 522 /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */ 523 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; 524 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; 525 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; 526 break; 527 case 8: 528 IMB_DES_KEYSCHED(mb_mgr, keys[0], 529 xform->cipher.key.data); 530 531 /* Initialize keys - 8 bytes: [K1 = K2 = K3] */ 532 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; 533 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0]; 534 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; 535 break; 536 default: 537 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 538 return -EINVAL; 539 } 540 541 sess->template_job.enc_keys = sess->cipher.exp_3des_keys.ks_ptr; 542 sess->template_job.dec_keys = sess->cipher.exp_3des_keys.ks_ptr; 543 sess->template_job.key_len_in_bytes = 24; 544 } else if (is_zuc) { 545 if (xform->cipher.key.length != 16 && 546 xform->cipher.key.length != 32) { 547 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 548 return -EINVAL; 549 } 550 sess->template_job.key_len_in_bytes = xform->cipher.key.length; 551 memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data, 552 xform->cipher.key.length); 553 sess->template_job.enc_keys = sess->cipher.zuc_cipher_key; 554 sess->template_job.dec_keys = sess->cipher.zuc_cipher_key; 555 } else if (is_snow3g) { 556 if (xform->cipher.key.length != 16) { 557 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 558 return -EINVAL; 559 } 560 sess->template_job.key_len_in_bytes = 16; 561 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data, 562 &sess->cipher.pKeySched_snow3g_cipher); 563 sess->template_job.enc_keys = &sess->cipher.pKeySched_snow3g_cipher; 564 sess->template_job.dec_keys = &sess->cipher.pKeySched_snow3g_cipher; 565 } else if (is_kasumi) { 566 if (xform->cipher.key.length != 16) { 567 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 568 return -EINVAL; 569 } 570 sess->template_job.key_len_in_bytes = 16; 571 IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data, 572 &sess->cipher.pKeySched_kasumi_cipher); 573 sess->template_job.enc_keys = &sess->cipher.pKeySched_kasumi_cipher; 574 sess->template_job.dec_keys = &sess->cipher.pKeySched_kasumi_cipher; 575 #if IMB_VERSION(1, 5, 0) <= IMB_VERSION_NUM 576 } else if (is_sm4) { 577 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 578 IMB_SM4_KEYEXP(mb_mgr, xform->cipher.key.data, 579 sess->cipher.expanded_sm4_keys.encode, 580 sess->cipher.expanded_sm4_keys.decode); 581 sess->template_job.enc_keys = sess->cipher.expanded_sm4_keys.encode; 582 sess->template_job.dec_keys = sess->cipher.expanded_sm4_keys.decode; 583 #endif 584 } else { 585 if (xform->cipher.key.length != 8) { 586 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 587 return -EINVAL; 588 } 589 sess->template_job.key_len_in_bytes = 8; 590 591 IMB_DES_KEYSCHED(mb_mgr, 592 (uint64_t *)sess->cipher.expanded_aes_keys.encode, 593 xform->cipher.key.data); 594 IMB_DES_KEYSCHED(mb_mgr, 595 (uint64_t *)sess->cipher.expanded_aes_keys.decode, 596 xform->cipher.key.data); 597 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 598 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 599 } 600 601 return 0; 602 } 603 604 static int 605 aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, 606 struct aesni_mb_session *sess, 607 const struct rte_crypto_sym_xform *xform) 608 { 609 switch (xform->aead.op) { 610 case RTE_CRYPTO_AEAD_OP_ENCRYPT: 611 sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; 612 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; 613 break; 614 case RTE_CRYPTO_AEAD_OP_DECRYPT: 615 sess->template_job.cipher_direction = IMB_DIR_DECRYPT; 616 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; 617 break; 618 default: 619 IPSEC_MB_LOG(ERR, "Invalid aead operation parameter"); 620 return -EINVAL; 621 } 622 623 /* Set IV parameters */ 624 sess->iv.offset = xform->aead.iv.offset; 625 sess->template_job.iv_len_in_bytes = xform->aead.iv.length; 626 627 /* Set digest sizes */ 628 sess->auth.req_digest_len = xform->aead.digest_length; 629 sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; 630 631 switch (xform->aead.algo) { 632 case RTE_CRYPTO_AEAD_AES_CCM: 633 sess->template_job.cipher_mode = IMB_CIPHER_CCM; 634 sess->template_job.hash_alg = IMB_AUTH_AES_CCM; 635 sess->template_job.u.CCM.aad_len_in_bytes = xform->aead.aad_length; 636 637 /* Check key length and choose key expansion function for AES */ 638 switch (xform->aead.key.length) { 639 case IMB_KEY_128_BYTES: 640 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 641 IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data, 642 sess->cipher.expanded_aes_keys.encode, 643 sess->cipher.expanded_aes_keys.decode); 644 break; 645 case IMB_KEY_256_BYTES: 646 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 647 IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data, 648 sess->cipher.expanded_aes_keys.encode, 649 sess->cipher.expanded_aes_keys.decode); 650 break; 651 default: 652 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 653 return -EINVAL; 654 } 655 656 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 657 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 658 /* CCM digests must be between 4 and 16 and an even number */ 659 if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN || 660 sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN || 661 (sess->auth.req_digest_len & 1) == 1) { 662 IPSEC_MB_LOG(ERR, "Invalid digest size"); 663 return -EINVAL; 664 } 665 break; 666 667 case RTE_CRYPTO_AEAD_AES_GCM: 668 sess->template_job.cipher_mode = IMB_CIPHER_GCM; 669 sess->template_job.hash_alg = IMB_AUTH_AES_GMAC; 670 sess->template_job.u.GCM.aad_len_in_bytes = xform->aead.aad_length; 671 672 switch (xform->aead.key.length) { 673 case IMB_KEY_128_BYTES: 674 sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; 675 IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data, 676 &sess->cipher.gcm_key); 677 break; 678 case IMB_KEY_192_BYTES: 679 sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; 680 IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data, 681 &sess->cipher.gcm_key); 682 break; 683 case IMB_KEY_256_BYTES: 684 sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; 685 IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data, 686 &sess->cipher.gcm_key); 687 break; 688 default: 689 IPSEC_MB_LOG(ERR, "Invalid cipher key length"); 690 return -EINVAL; 691 } 692 693 sess->template_job.enc_keys = &sess->cipher.gcm_key; 694 sess->template_job.dec_keys = &sess->cipher.gcm_key; 695 /* GCM digest size must be between 1 and 16 */ 696 if (sess->auth.req_digest_len == 0 || 697 sess->auth.req_digest_len > 16) { 698 IPSEC_MB_LOG(ERR, "Invalid digest size"); 699 return -EINVAL; 700 } 701 break; 702 703 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305: 704 sess->template_job.cipher_mode = IMB_CIPHER_CHACHA20_POLY1305; 705 sess->template_job.hash_alg = IMB_AUTH_CHACHA20_POLY1305; 706 sess->template_job.u.CHACHA20_POLY1305.aad_len_in_bytes = 707 xform->aead.aad_length; 708 709 if (xform->aead.key.length != 32) { 710 IPSEC_MB_LOG(ERR, "Invalid key length"); 711 return -EINVAL; 712 } 713 sess->template_job.key_len_in_bytes = 32; 714 memcpy(sess->cipher.expanded_aes_keys.encode, 715 xform->aead.key.data, 32); 716 sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; 717 sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; 718 if (sess->auth.req_digest_len != 16) { 719 IPSEC_MB_LOG(ERR, "Invalid digest size"); 720 return -EINVAL; 721 } 722 break; 723 default: 724 IPSEC_MB_LOG(ERR, "Unsupported aead mode parameter"); 725 return -ENOTSUP; 726 } 727 728 return 0; 729 } 730 731 /** Configure a aesni multi-buffer session from a crypto xform chain */ 732 int 733 aesni_mb_session_configure(IMB_MGR *mb_mgr, 734 void *priv_sess, 735 const struct rte_crypto_sym_xform *xform) 736 { 737 const struct rte_crypto_sym_xform *auth_xform = NULL; 738 const struct rte_crypto_sym_xform *cipher_xform = NULL; 739 const struct rte_crypto_sym_xform *aead_xform = NULL; 740 enum ipsec_mb_operation mode; 741 struct aesni_mb_session *sess = (struct aesni_mb_session *) priv_sess; 742 int ret; 743 744 ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform, 745 &cipher_xform, &aead_xform); 746 if (ret) 747 return ret; 748 749 /* Select Crypto operation - hash then cipher / cipher then hash */ 750 switch (mode) { 751 case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT: 752 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 753 break; 754 case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN: 755 case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY: 756 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 757 break; 758 case IPSEC_MB_OP_HASH_GEN_ONLY: 759 case IPSEC_MB_OP_HASH_VERIFY_ONLY: 760 case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT: 761 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 762 break; 763 /* 764 * Multi buffer library operates only at two modes, 765 * IMB_ORDER_CIPHER_HASH and IMB_ORDER_HASH_CIPHER. 766 * When doing ciphering only, chain order depends 767 * on cipher operation: encryption is always 768 * the first operation and decryption the last one. 769 */ 770 case IPSEC_MB_OP_ENCRYPT_ONLY: 771 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 772 break; 773 case IPSEC_MB_OP_DECRYPT_ONLY: 774 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 775 break; 776 case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT: 777 sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 778 break; 779 case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT: 780 sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 781 break; 782 case IPSEC_MB_OP_NOT_SUPPORTED: 783 default: 784 IPSEC_MB_LOG(ERR, 785 "Unsupported operation chain order parameter"); 786 return -ENOTSUP; 787 } 788 789 /* Default IV length = 0 */ 790 sess->template_job.iv_len_in_bytes = 0; 791 792 ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform); 793 if (ret != 0) { 794 IPSEC_MB_LOG(ERR, 795 "Invalid/unsupported authentication parameters"); 796 return ret; 797 } 798 799 ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess, 800 cipher_xform); 801 if (ret != 0) { 802 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters"); 803 return ret; 804 } 805 806 if (aead_xform) { 807 ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess, 808 aead_xform); 809 if (ret != 0) { 810 IPSEC_MB_LOG(ERR, 811 "Invalid/unsupported aead parameters"); 812 return ret; 813 } 814 } 815 816 sess->session_id = imb_set_session(mb_mgr, &sess->template_job); 817 sess->pid = getpid(); 818 RTE_PER_LCORE(pid) = sess->pid; 819 820 return 0; 821 } 822 823 /** Check DOCSIS security session configuration is valid */ 824 static int 825 check_docsis_sec_session(struct rte_security_session_conf *conf) 826 { 827 struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform; 828 struct rte_security_docsis_xform *docsis = &conf->docsis; 829 830 /* Downlink: CRC generate -> Cipher encrypt */ 831 if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) { 832 833 if (crypto_sym != NULL && 834 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 835 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT && 836 crypto_sym->cipher.algo == 837 RTE_CRYPTO_CIPHER_AES_DOCSISBPI && 838 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES || 839 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) && 840 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE && 841 crypto_sym->next == NULL) { 842 return 0; 843 } 844 /* Uplink: Cipher decrypt -> CRC verify */ 845 } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) { 846 847 if (crypto_sym != NULL && 848 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 849 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT && 850 crypto_sym->cipher.algo == 851 RTE_CRYPTO_CIPHER_AES_DOCSISBPI && 852 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES || 853 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) && 854 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE && 855 crypto_sym->next == NULL) { 856 return 0; 857 } 858 } 859 860 return -EINVAL; 861 } 862 863 /** Set DOCSIS security session auth (CRC) parameters */ 864 static int 865 aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess, 866 struct rte_security_docsis_xform *xform) 867 { 868 if (xform == NULL) { 869 IPSEC_MB_LOG(ERR, "Invalid DOCSIS xform"); 870 return -EINVAL; 871 } 872 873 /* Select CRC generate/verify */ 874 if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) { 875 sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; 876 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; 877 } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) { 878 sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; 879 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; 880 } else { 881 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction"); 882 return -ENOTSUP; 883 } 884 885 sess->auth.req_digest_len = RTE_ETHER_CRC_LEN; 886 sess->template_job.auth_tag_output_len_in_bytes = RTE_ETHER_CRC_LEN; 887 888 return 0; 889 } 890 891 /** 892 * Parse DOCSIS security session configuration and set private session 893 * parameters 894 */ 895 static int 896 aesni_mb_set_docsis_sec_session_parameters( 897 __rte_unused struct rte_cryptodev *dev, 898 struct rte_security_session_conf *conf, 899 void *sess) 900 { 901 IMB_MGR *mb_mgr = alloc_init_mb_mgr(); 902 struct rte_security_docsis_xform *docsis_xform; 903 struct rte_crypto_sym_xform *cipher_xform; 904 struct aesni_mb_session *ipsec_sess = sess; 905 int ret = 0; 906 907 if (!mb_mgr) 908 return -ENOMEM; 909 910 ret = check_docsis_sec_session(conf); 911 if (ret) { 912 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration"); 913 goto error_exit; 914 } 915 916 switch (conf->docsis.direction) { 917 case RTE_SECURITY_DOCSIS_UPLINK: 918 ipsec_sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; 919 docsis_xform = &conf->docsis; 920 cipher_xform = conf->crypto_xform; 921 break; 922 case RTE_SECURITY_DOCSIS_DOWNLINK: 923 ipsec_sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; 924 cipher_xform = conf->crypto_xform; 925 docsis_xform = &conf->docsis; 926 break; 927 default: 928 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration"); 929 ret = -EINVAL; 930 goto error_exit; 931 } 932 933 /* Default IV length = 0 */ 934 ipsec_sess->template_job.iv_len_in_bytes = 0; 935 936 ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess, 937 docsis_xform); 938 if (ret != 0) { 939 IPSEC_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters"); 940 goto error_exit; 941 } 942 943 ret = aesni_mb_set_session_cipher_parameters(mb_mgr, 944 ipsec_sess, cipher_xform); 945 946 if (ret != 0) { 947 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters"); 948 goto error_exit; 949 } 950 951 ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job); 952 953 error_exit: 954 free_mb_mgr(mb_mgr); 955 return ret; 956 } 957 958 static inline uint64_t 959 auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session, 960 uint32_t oop, const uint32_t auth_offset, 961 const uint32_t cipher_offset, const uint32_t auth_length, 962 const uint32_t cipher_length, uint8_t lb_sgl) 963 { 964 struct rte_mbuf *m_src, *m_dst; 965 uint8_t *p_src, *p_dst; 966 uintptr_t u_src, u_dst; 967 uint32_t cipher_end, auth_end; 968 969 /* Only cipher then hash needs special calculation. */ 970 if (!oop || session->template_job.chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl) 971 return auth_offset; 972 973 m_src = op->sym->m_src; 974 m_dst = op->sym->m_dst; 975 976 p_src = rte_pktmbuf_mtod(m_src, uint8_t *); 977 p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *); 978 u_src = (uintptr_t)p_src; 979 u_dst = (uintptr_t)p_dst + auth_offset; 980 981 /** 982 * Copy the content between cipher offset and auth offset for generating 983 * correct digest. 984 */ 985 if (cipher_offset > auth_offset) 986 memcpy(p_dst + auth_offset, 987 p_src + auth_offset, 988 cipher_offset - 989 auth_offset); 990 991 /** 992 * Copy the content between (cipher offset + length) and (auth offset + 993 * length) for generating correct digest 994 */ 995 cipher_end = cipher_offset + cipher_length; 996 auth_end = auth_offset + auth_length; 997 if (cipher_end < auth_end) 998 memcpy(p_dst + cipher_end, p_src + cipher_end, 999 auth_end - cipher_end); 1000 1001 /** 1002 * Since intel-ipsec-mb only supports positive values, 1003 * we need to deduct the correct offset between src and dst. 1004 */ 1005 1006 return u_src < u_dst ? (u_dst - u_src) : 1007 (UINT64_MAX - u_src + u_dst + 1); 1008 } 1009 1010 static inline void 1011 set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, 1012 union rte_crypto_sym_ofs sofs, void *buf, uint32_t len, 1013 struct rte_crypto_va_iova_ptr *iv, 1014 struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata) 1015 { 1016 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1017 1018 /* Set authentication parameters */ 1019 job->iv = iv->va; 1020 1021 switch (job->hash_alg) { 1022 case IMB_AUTH_AES_CCM: 1023 job->u.CCM.aad = (uint8_t *)aad->va + 18; 1024 job->iv++; 1025 break; 1026 1027 case IMB_AUTH_AES_GMAC: 1028 job->u.GCM.aad = aad->va; 1029 break; 1030 1031 case IMB_AUTH_AES_GMAC_128: 1032 case IMB_AUTH_AES_GMAC_192: 1033 case IMB_AUTH_AES_GMAC_256: 1034 job->u.GMAC._iv = iv->va; 1035 break; 1036 1037 case IMB_AUTH_CHACHA20_POLY1305: 1038 job->u.CHACHA20_POLY1305.aad = aad->va; 1039 break; 1040 default: 1041 job->u.HMAC._hashed_auth_key_xor_ipad = 1042 session->auth.pads.inner; 1043 job->u.HMAC._hashed_auth_key_xor_opad = 1044 session->auth.pads.outer; 1045 } 1046 1047 /* 1048 * Multi-buffer library current only support returning a truncated 1049 * digest length as specified in the relevant IPsec RFCs 1050 */ 1051 1052 /* Set digest location and length */ 1053 job->auth_tag_output = digest; 1054 1055 /* Data Parameters */ 1056 job->src = buf; 1057 job->dst = (uint8_t *)buf + sofs.ofs.cipher.head; 1058 job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head; 1059 job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head; 1060 job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head - 1061 sofs.ofs.auth.tail; 1062 job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head - 1063 sofs.ofs.cipher.tail; 1064 1065 job->user_data = udata; 1066 } 1067 1068 static int 1069 handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr, 1070 uint32_t *total_len, 1071 struct aesni_mb_op_buf_data *src_data, 1072 struct aesni_mb_op_buf_data *dst_data) 1073 { 1074 uint32_t data_len, part_len; 1075 1076 if (*total_len == 0) { 1077 job->sgl_state = IMB_SGL_COMPLETE; 1078 return 0; 1079 } 1080 1081 if (src_data->m == NULL) { 1082 IPSEC_MB_LOG(ERR, "Invalid source buffer"); 1083 return -EINVAL; 1084 } 1085 1086 job->sgl_state = IMB_SGL_UPDATE; 1087 1088 data_len = src_data->m->data_len - src_data->offset; 1089 1090 job->src = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *, 1091 src_data->offset); 1092 1093 if (dst_data->m != NULL) { 1094 if (dst_data->m->data_len - dst_data->offset == 0) { 1095 dst_data->m = dst_data->m->next; 1096 if (dst_data->m == NULL) { 1097 IPSEC_MB_LOG(ERR, "Invalid destination buffer"); 1098 return -EINVAL; 1099 } 1100 dst_data->offset = 0; 1101 } 1102 part_len = RTE_MIN(data_len, (dst_data->m->data_len - 1103 dst_data->offset)); 1104 job->dst = rte_pktmbuf_mtod_offset(dst_data->m, 1105 uint8_t *, dst_data->offset); 1106 dst_data->offset += part_len; 1107 } else { 1108 part_len = RTE_MIN(data_len, *total_len); 1109 job->dst = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *, 1110 src_data->offset); 1111 } 1112 1113 job->msg_len_to_cipher_in_bytes = part_len; 1114 job->msg_len_to_hash_in_bytes = part_len; 1115 1116 job = IMB_SUBMIT_JOB(mb_mgr); 1117 1118 *total_len -= part_len; 1119 1120 if (part_len != data_len) { 1121 src_data->offset += part_len; 1122 } else { 1123 src_data->m = src_data->m->next; 1124 src_data->offset = 0; 1125 } 1126 1127 return 0; 1128 } 1129 1130 static uint64_t 1131 sgl_linear_cipher_auth_len(IMB_JOB *job, uint64_t *auth_len) 1132 { 1133 uint64_t cipher_len; 1134 1135 if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || 1136 job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) 1137 cipher_len = (job->msg_len_to_cipher_in_bits >> 3) + 1138 (job->cipher_start_src_offset_in_bits >> 3); 1139 else 1140 cipher_len = job->msg_len_to_cipher_in_bytes + 1141 job->cipher_start_src_offset_in_bytes; 1142 1143 if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || 1144 job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN) 1145 *auth_len = (job->msg_len_to_hash_in_bits >> 3) + 1146 job->hash_start_src_offset_in_bytes; 1147 else 1148 *auth_len = job->msg_len_to_hash_in_bytes + 1149 job->hash_start_src_offset_in_bytes; 1150 1151 return RTE_MAX(*auth_len, cipher_len); 1152 } 1153 1154 static int 1155 handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset, 1156 struct aesni_mb_session *session) 1157 { 1158 uint64_t auth_len, total_len; 1159 uint8_t *src, *linear_buf = NULL; 1160 int lb_offset = 0; 1161 struct rte_mbuf *src_seg; 1162 uint16_t src_len; 1163 1164 total_len = sgl_linear_cipher_auth_len(job, &auth_len); 1165 linear_buf = rte_zmalloc(NULL, total_len + job->auth_tag_output_len_in_bytes, 0); 1166 if (linear_buf == NULL) { 1167 IPSEC_MB_LOG(ERR, "Error allocating memory for SGL Linear Buffer"); 1168 return -1; 1169 } 1170 1171 for (src_seg = op->sym->m_src; (src_seg != NULL) && 1172 (total_len - lb_offset > 0); 1173 src_seg = src_seg->next) { 1174 src = rte_pktmbuf_mtod(src_seg, uint8_t *); 1175 src_len = RTE_MIN(src_seg->data_len, total_len - lb_offset); 1176 rte_memcpy(linear_buf + lb_offset, src, src_len); 1177 lb_offset += src_len; 1178 } 1179 1180 job->src = linear_buf; 1181 job->dst = linear_buf + dst_offset; 1182 job->user_data2 = linear_buf; 1183 1184 if (job->hash_alg == IMB_AUTH_AES_GMAC) 1185 job->u.GCM.aad = linear_buf; 1186 1187 if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) 1188 job->auth_tag_output = linear_buf + lb_offset; 1189 else 1190 job->auth_tag_output = linear_buf + auth_len; 1191 1192 return 0; 1193 } 1194 1195 static inline int 1196 imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg) 1197 { 1198 if (alg == IMB_CIPHER_CHACHA20_POLY1305 || 1199 alg == IMB_CIPHER_CHACHA20_POLY1305_SGL || 1200 alg == IMB_CIPHER_GCM_SGL || 1201 alg == IMB_CIPHER_GCM) 1202 return 1; 1203 return 0; 1204 } 1205 1206 static inline int 1207 single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, 1208 int oop, uint32_t offset, struct rte_mbuf *m_src, 1209 struct rte_mbuf *m_dst, struct IMB_SGL_IOV *sgl_segs) 1210 { 1211 uint32_t num_segs = 0; 1212 struct aesni_mb_op_buf_data src_sgl = {0}; 1213 struct aesni_mb_op_buf_data dst_sgl = {0}; 1214 uint32_t total_len; 1215 1216 job->sgl_state = IMB_SGL_ALL; 1217 1218 src_sgl.m = m_src; 1219 src_sgl.offset = offset; 1220 1221 while (src_sgl.offset >= src_sgl.m->data_len) { 1222 src_sgl.offset -= src_sgl.m->data_len; 1223 src_sgl.m = src_sgl.m->next; 1224 1225 RTE_ASSERT(src_sgl.m != NULL); 1226 } 1227 1228 if (oop) { 1229 dst_sgl.m = m_dst; 1230 dst_sgl.offset = offset; 1231 1232 while (dst_sgl.offset >= dst_sgl.m->data_len) { 1233 dst_sgl.offset -= dst_sgl.m->data_len; 1234 dst_sgl.m = dst_sgl.m->next; 1235 1236 RTE_ASSERT(dst_sgl.m != NULL); 1237 } 1238 } 1239 total_len = op->sym->aead.data.length; 1240 1241 while (total_len != 0) { 1242 uint32_t data_len, part_len; 1243 1244 if (src_sgl.m == NULL) { 1245 IPSEC_MB_LOG(ERR, "Invalid source buffer"); 1246 return -EINVAL; 1247 } 1248 1249 data_len = src_sgl.m->data_len - src_sgl.offset; 1250 1251 sgl_segs[num_segs].in = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, 1252 src_sgl.offset); 1253 1254 if (dst_sgl.m != NULL) { 1255 if (dst_sgl.m->data_len - dst_sgl.offset == 0) { 1256 dst_sgl.m = dst_sgl.m->next; 1257 if (dst_sgl.m == NULL) { 1258 IPSEC_MB_LOG(ERR, "Invalid destination buffer"); 1259 return -EINVAL; 1260 } 1261 dst_sgl.offset = 0; 1262 } 1263 part_len = RTE_MIN(data_len, (dst_sgl.m->data_len - 1264 dst_sgl.offset)); 1265 sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(dst_sgl.m, 1266 uint8_t *, dst_sgl.offset); 1267 dst_sgl.offset += part_len; 1268 } else { 1269 part_len = RTE_MIN(data_len, total_len); 1270 sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, 1271 src_sgl.offset); 1272 } 1273 1274 sgl_segs[num_segs].len = part_len; 1275 1276 total_len -= part_len; 1277 1278 if (part_len != data_len) { 1279 src_sgl.offset += part_len; 1280 } else { 1281 src_sgl.m = src_sgl.m->next; 1282 src_sgl.offset = 0; 1283 } 1284 num_segs++; 1285 } 1286 job->num_sgl_io_segs = num_segs; 1287 job->sgl_io_segs = sgl_segs; 1288 return 0; 1289 } 1290 1291 static inline int 1292 multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, 1293 int oop, uint32_t offset, struct rte_mbuf *m_src, 1294 struct rte_mbuf *m_dst, IMB_MGR *mb_mgr) 1295 { 1296 int ret; 1297 IMB_JOB base_job; 1298 struct aesni_mb_op_buf_data src_sgl = {0}; 1299 struct aesni_mb_op_buf_data dst_sgl = {0}; 1300 uint32_t total_len; 1301 1302 base_job = *job; 1303 job->sgl_state = IMB_SGL_INIT; 1304 job = IMB_SUBMIT_JOB(mb_mgr); 1305 total_len = op->sym->aead.data.length; 1306 1307 src_sgl.m = m_src; 1308 src_sgl.offset = offset; 1309 1310 while (src_sgl.offset >= src_sgl.m->data_len) { 1311 src_sgl.offset -= src_sgl.m->data_len; 1312 src_sgl.m = src_sgl.m->next; 1313 1314 RTE_ASSERT(src_sgl.m != NULL); 1315 } 1316 1317 if (oop) { 1318 dst_sgl.m = m_dst; 1319 dst_sgl.offset = offset; 1320 1321 while (dst_sgl.offset >= dst_sgl.m->data_len) { 1322 dst_sgl.offset -= dst_sgl.m->data_len; 1323 dst_sgl.m = dst_sgl.m->next; 1324 1325 RTE_ASSERT(dst_sgl.m != NULL); 1326 } 1327 } 1328 1329 while (job->sgl_state != IMB_SGL_COMPLETE) { 1330 job = IMB_GET_NEXT_JOB(mb_mgr); 1331 *job = base_job; 1332 ret = handle_aead_sgl_job(job, mb_mgr, &total_len, 1333 &src_sgl, &dst_sgl); 1334 if (ret < 0) 1335 return ret; 1336 } 1337 return 0; 1338 } 1339 1340 static inline int 1341 set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, 1342 struct aesni_mb_qp_data *qp_data, 1343 struct rte_crypto_op *op, uint8_t *digest_idx, 1344 const struct aesni_mb_session *session, 1345 struct rte_mbuf *m_src, struct rte_mbuf *m_dst, 1346 const int oop) 1347 { 1348 const uint32_t m_offset = op->sym->aead.data.offset; 1349 1350 job->u.GCM.aad = op->sym->aead.aad.data; 1351 if (sgl) { 1352 job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; 1353 job->cipher_mode = IMB_CIPHER_GCM_SGL; 1354 job->hash_alg = IMB_AUTH_GCM_SGL; 1355 job->hash_start_src_offset_in_bytes = 0; 1356 job->msg_len_to_hash_in_bytes = 0; 1357 job->msg_len_to_cipher_in_bytes = 0; 1358 job->cipher_start_src_offset_in_bytes = 0; 1359 imb_set_session(mb_mgr, job); 1360 } else { 1361 job->hash_start_src_offset_in_bytes = 1362 op->sym->aead.data.offset; 1363 job->msg_len_to_hash_in_bytes = 1364 op->sym->aead.data.length; 1365 job->cipher_start_src_offset_in_bytes = 1366 op->sym->aead.data.offset; 1367 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; 1368 } 1369 1370 if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 1371 job->auth_tag_output = qp_data->temp_digests[*digest_idx]; 1372 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1373 } else { 1374 job->auth_tag_output = op->sym->aead.digest.data; 1375 } 1376 1377 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1378 session->iv.offset); 1379 1380 /* Set user data to be crypto operation data struct */ 1381 job->user_data = op; 1382 1383 if (sgl) { 1384 job->src = NULL; 1385 job->dst = NULL; 1386 1387 if (m_src->nb_segs <= MAX_NUM_SEGS) 1388 return single_sgl_job(job, op, oop, 1389 m_offset, m_src, m_dst, 1390 qp_data->sgl_segs); 1391 else 1392 return multi_sgl_job(job, op, oop, 1393 m_offset, m_src, m_dst, mb_mgr); 1394 } else { 1395 job->src = rte_pktmbuf_mtod(m_src, uint8_t *); 1396 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset); 1397 } 1398 1399 return 0; 1400 } 1401 1402 /** Check if conditions are met for digest-appended operations */ 1403 static uint8_t * 1404 aesni_mb_digest_appended_in_src(struct rte_crypto_op *op, IMB_JOB *job, 1405 uint32_t oop) 1406 { 1407 unsigned int auth_size, cipher_size; 1408 uint8_t *end_cipher; 1409 uint8_t *start_cipher; 1410 1411 if (job->cipher_mode == IMB_CIPHER_NULL) 1412 return NULL; 1413 1414 if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3 || 1415 job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || 1416 job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) { 1417 cipher_size = (op->sym->cipher.data.offset >> 3) + 1418 (op->sym->cipher.data.length >> 3); 1419 } else { 1420 cipher_size = (op->sym->cipher.data.offset) + 1421 (op->sym->cipher.data.length); 1422 } 1423 if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN || 1424 job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || 1425 job->hash_alg == IMB_AUTH_KASUMI_UIA1 || 1426 job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) { 1427 auth_size = (op->sym->auth.data.offset >> 3) + 1428 (op->sym->auth.data.length >> 3); 1429 } else { 1430 auth_size = (op->sym->auth.data.offset) + 1431 (op->sym->auth.data.length); 1432 } 1433 1434 if (!oop) { 1435 end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, cipher_size); 1436 start_cipher = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); 1437 } else { 1438 end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, cipher_size); 1439 start_cipher = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); 1440 } 1441 1442 if (start_cipher < op->sym->auth.digest.data && 1443 op->sym->auth.digest.data < end_cipher) { 1444 return rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, auth_size); 1445 } else { 1446 return NULL; 1447 } 1448 } 1449 1450 /** 1451 * Process a crypto operation and complete a IMB_JOB job structure for 1452 * submission to the multi buffer library for processing. 1453 * 1454 * @param qp queue pair 1455 * @param job IMB_JOB structure to fill 1456 * @param op crypto op to process 1457 * @param digest_idx ID for digest to use 1458 * 1459 * @return 1460 * - 0 on success, the IMB_JOB will be filled 1461 * - -1 if invalid session or errors allocating SGL linear buffer, 1462 * IMB_JOB will not be filled 1463 */ 1464 static inline int 1465 set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, 1466 struct rte_crypto_op *op, uint8_t *digest_idx, 1467 IMB_MGR *mb_mgr, pid_t pid) 1468 { 1469 struct rte_mbuf *m_src = op->sym->m_src, *m_dst; 1470 struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp); 1471 struct aesni_mb_session *session; 1472 uint32_t m_offset; 1473 int oop; 1474 uint32_t auth_off_in_bytes; 1475 uint32_t ciph_off_in_bytes; 1476 uint32_t auth_len_in_bytes; 1477 uint32_t ciph_len_in_bytes; 1478 uint8_t sgl = 0; 1479 uint8_t lb_sgl = 0; 1480 1481 session = ipsec_mb_get_session_private(qp, op); 1482 if (session == NULL) { 1483 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1484 return -1; 1485 } 1486 1487 const IMB_CIPHER_MODE cipher_mode = 1488 session->template_job.cipher_mode; 1489 1490 if (session->pid != pid) { 1491 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1492 imb_set_session(mb_mgr, job); 1493 } else if (job->session_id != session->session_id) 1494 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1495 1496 if (!op->sym->m_dst) { 1497 /* in-place operation */ 1498 m_dst = m_src; 1499 oop = 0; 1500 } else if (op->sym->m_dst == op->sym->m_src) { 1501 /* in-place operation */ 1502 m_dst = m_src; 1503 oop = 0; 1504 } else { 1505 /* out-of-place operation */ 1506 m_dst = op->sym->m_dst; 1507 oop = 1; 1508 } 1509 1510 if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) { 1511 sgl = 1; 1512 if (!imb_lib_support_sgl_algo(cipher_mode)) 1513 lb_sgl = 1; 1514 } 1515 1516 if (cipher_mode == IMB_CIPHER_GCM) 1517 return set_gcm_job(mb_mgr, job, sgl, qp_data, 1518 op, digest_idx, session, m_src, m_dst, oop); 1519 1520 /* Set authentication parameters */ 1521 const int aead = is_aead_algo(job->hash_alg, cipher_mode); 1522 1523 switch (job->hash_alg) { 1524 case IMB_AUTH_AES_CCM: 1525 job->u.CCM.aad = op->sym->aead.aad.data + 18; 1526 break; 1527 1528 case IMB_AUTH_AES_GMAC: 1529 job->u.GCM.aad = op->sym->aead.aad.data; 1530 if (sgl) { 1531 job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; 1532 job->cipher_mode = IMB_CIPHER_GCM_SGL; 1533 job->hash_alg = IMB_AUTH_GCM_SGL; 1534 imb_set_session(mb_mgr, job); 1535 } 1536 break; 1537 case IMB_AUTH_AES_GMAC_128: 1538 case IMB_AUTH_AES_GMAC_192: 1539 case IMB_AUTH_AES_GMAC_256: 1540 job->u.GMAC._iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1541 session->auth_iv.offset); 1542 break; 1543 case IMB_AUTH_ZUC_EIA3_BITLEN: 1544 case IMB_AUTH_ZUC256_EIA3_BITLEN: 1545 job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1546 session->auth_iv.offset); 1547 break; 1548 case IMB_AUTH_SNOW3G_UIA2_BITLEN: 1549 job->u.SNOW3G_UIA2._iv = 1550 rte_crypto_op_ctod_offset(op, uint8_t *, 1551 session->auth_iv.offset); 1552 break; 1553 case IMB_AUTH_CHACHA20_POLY1305: 1554 job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data; 1555 if (sgl) { 1556 job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx; 1557 job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; 1558 job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; 1559 imb_set_session(mb_mgr, job); 1560 } 1561 break; 1562 default: 1563 break; 1564 } 1565 1566 if (aead) 1567 m_offset = op->sym->aead.data.offset; 1568 else 1569 m_offset = op->sym->cipher.data.offset; 1570 1571 if (cipher_mode == IMB_CIPHER_ZUC_EEA3) 1572 m_offset >>= 3; 1573 else if (cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) 1574 m_offset = 0; 1575 else if (cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) 1576 m_offset = 0; 1577 1578 /* Set digest output location */ 1579 if (job->hash_alg != IMB_AUTH_NULL && 1580 session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 1581 job->auth_tag_output = qp_data->temp_digests[*digest_idx]; 1582 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1583 } else { 1584 if (aead) 1585 job->auth_tag_output = op->sym->aead.digest.data; 1586 else { 1587 job->auth_tag_output = aesni_mb_digest_appended_in_src(op, job, oop); 1588 if (job->auth_tag_output == NULL) { 1589 job->auth_tag_output = op->sym->auth.digest.data; 1590 } 1591 } 1592 if (session->auth.req_digest_len != 1593 job->auth_tag_output_len_in_bytes) { 1594 job->auth_tag_output = 1595 qp_data->temp_digests[*digest_idx]; 1596 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1597 } 1598 } 1599 /* 1600 * Multi-buffer library current only support returning a truncated 1601 * digest length as specified in the relevant IPsec RFCs 1602 */ 1603 1604 /* Data Parameters */ 1605 if (sgl) { 1606 job->src = NULL; 1607 job->dst = NULL; 1608 } else { 1609 job->src = rte_pktmbuf_mtod(m_src, uint8_t *); 1610 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset); 1611 } 1612 1613 switch (job->hash_alg) { 1614 case IMB_AUTH_AES_CCM: 1615 job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset; 1616 job->msg_len_to_hash_in_bytes = op->sym->aead.data.length; 1617 1618 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1619 session->iv.offset + 1); 1620 break; 1621 1622 case IMB_AUTH_AES_GMAC: 1623 job->hash_start_src_offset_in_bytes = 1624 op->sym->aead.data.offset; 1625 job->msg_len_to_hash_in_bytes = 1626 op->sym->aead.data.length; 1627 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1628 session->iv.offset); 1629 break; 1630 case IMB_AUTH_AES_GMAC_128: 1631 case IMB_AUTH_AES_GMAC_192: 1632 case IMB_AUTH_AES_GMAC_256: 1633 job->hash_start_src_offset_in_bytes = 1634 op->sym->auth.data.offset; 1635 job->msg_len_to_hash_in_bytes = 1636 op->sym->auth.data.length; 1637 break; 1638 1639 case IMB_AUTH_GCM_SGL: 1640 case IMB_AUTH_CHACHA20_POLY1305_SGL: 1641 job->hash_start_src_offset_in_bytes = 0; 1642 job->msg_len_to_hash_in_bytes = 0; 1643 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1644 session->iv.offset); 1645 break; 1646 1647 case IMB_AUTH_CHACHA20_POLY1305: 1648 job->hash_start_src_offset_in_bytes = 1649 op->sym->aead.data.offset; 1650 job->msg_len_to_hash_in_bytes = 1651 op->sym->aead.data.length; 1652 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1653 session->iv.offset); 1654 break; 1655 /* ZUC and SNOW3G require length in bits and offset in bytes */ 1656 case IMB_AUTH_ZUC_EIA3_BITLEN: 1657 case IMB_AUTH_ZUC256_EIA3_BITLEN: 1658 case IMB_AUTH_SNOW3G_UIA2_BITLEN: 1659 auth_off_in_bytes = op->sym->auth.data.offset >> 3; 1660 ciph_off_in_bytes = op->sym->cipher.data.offset >> 3; 1661 auth_len_in_bytes = op->sym->auth.data.length >> 3; 1662 ciph_len_in_bytes = op->sym->cipher.data.length >> 3; 1663 1664 job->hash_start_src_offset_in_bytes = auth_start_offset(op, 1665 session, oop, auth_off_in_bytes, 1666 ciph_off_in_bytes, auth_len_in_bytes, 1667 ciph_len_in_bytes, lb_sgl); 1668 job->msg_len_to_hash_in_bits = op->sym->auth.data.length; 1669 1670 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1671 session->iv.offset); 1672 break; 1673 1674 /* KASUMI requires lengths and offset in bytes */ 1675 case IMB_AUTH_KASUMI_UIA1: 1676 auth_off_in_bytes = op->sym->auth.data.offset >> 3; 1677 ciph_off_in_bytes = op->sym->cipher.data.offset >> 3; 1678 auth_len_in_bytes = op->sym->auth.data.length >> 3; 1679 ciph_len_in_bytes = op->sym->cipher.data.length >> 3; 1680 1681 job->hash_start_src_offset_in_bytes = auth_start_offset(op, 1682 session, oop, auth_off_in_bytes, 1683 ciph_off_in_bytes, auth_len_in_bytes, 1684 ciph_len_in_bytes, lb_sgl); 1685 job->msg_len_to_hash_in_bytes = auth_len_in_bytes; 1686 1687 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1688 session->iv.offset); 1689 break; 1690 1691 default: 1692 job->hash_start_src_offset_in_bytes = auth_start_offset(op, 1693 session, oop, op->sym->auth.data.offset, 1694 op->sym->cipher.data.offset, 1695 op->sym->auth.data.length, 1696 op->sym->cipher.data.length, lb_sgl); 1697 job->msg_len_to_hash_in_bytes = op->sym->auth.data.length; 1698 1699 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, 1700 session->iv.offset); 1701 } 1702 1703 switch (job->cipher_mode) { 1704 /* ZUC requires length and offset in bytes */ 1705 case IMB_CIPHER_ZUC_EEA3: 1706 job->cipher_start_src_offset_in_bytes = 1707 op->sym->cipher.data.offset >> 3; 1708 job->msg_len_to_cipher_in_bytes = 1709 op->sym->cipher.data.length >> 3; 1710 break; 1711 /* ZUC and SNOW3G require length and offset in bits */ 1712 case IMB_CIPHER_SNOW3G_UEA2_BITLEN: 1713 case IMB_CIPHER_KASUMI_UEA1_BITLEN: 1714 job->cipher_start_src_offset_in_bits = 1715 op->sym->cipher.data.offset; 1716 job->msg_len_to_cipher_in_bits = 1717 op->sym->cipher.data.length; 1718 break; 1719 case IMB_CIPHER_GCM: 1720 job->cipher_start_src_offset_in_bytes = 1721 op->sym->aead.data.offset; 1722 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; 1723 break; 1724 case IMB_CIPHER_CCM: 1725 case IMB_CIPHER_CHACHA20_POLY1305: 1726 job->cipher_start_src_offset_in_bytes = 1727 op->sym->aead.data.offset; 1728 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; 1729 break; 1730 case IMB_CIPHER_GCM_SGL: 1731 case IMB_CIPHER_CHACHA20_POLY1305_SGL: 1732 job->msg_len_to_cipher_in_bytes = 0; 1733 job->cipher_start_src_offset_in_bytes = 0; 1734 break; 1735 default: 1736 job->cipher_start_src_offset_in_bytes = 1737 op->sym->cipher.data.offset; 1738 job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length; 1739 } 1740 1741 if (cipher_mode == IMB_CIPHER_NULL && oop) { 1742 memcpy(job->dst + job->cipher_start_src_offset_in_bytes, 1743 job->src + job->cipher_start_src_offset_in_bytes, 1744 job->msg_len_to_cipher_in_bytes); 1745 } 1746 1747 /* Set user data to be crypto operation data struct */ 1748 job->user_data = op; 1749 1750 if (sgl) { 1751 1752 if (lb_sgl) 1753 return handle_sgl_linear(job, op, m_offset, session); 1754 1755 if (m_src->nb_segs <= MAX_NUM_SEGS) 1756 return single_sgl_job(job, op, oop, 1757 m_offset, m_src, m_dst, 1758 qp_data->sgl_segs); 1759 else 1760 return multi_sgl_job(job, op, oop, 1761 m_offset, m_src, m_dst, mb_mgr); 1762 } 1763 1764 return 0; 1765 } 1766 1767 /** 1768 * Process a crypto operation containing a security op and complete a 1769 * IMB_JOB job structure for submission to the multi buffer library for 1770 * processing. 1771 */ 1772 static inline int 1773 set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, 1774 struct rte_crypto_op *op, uint8_t *digest_idx) 1775 { 1776 struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp); 1777 struct rte_mbuf *m_src, *m_dst; 1778 struct rte_crypto_sym_op *sym; 1779 struct aesni_mb_session *session = NULL; 1780 1781 if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) { 1782 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1783 return -1; 1784 } 1785 session = SECURITY_GET_SESS_PRIV(op->sym->session); 1786 1787 if (unlikely(session == NULL)) { 1788 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; 1789 return -1; 1790 } 1791 /* Only DOCSIS protocol operations supported now */ 1792 if (session->template_job.cipher_mode != IMB_CIPHER_DOCSIS_SEC_BPI || 1793 session->template_job.hash_alg != IMB_AUTH_DOCSIS_CRC32) { 1794 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 1795 return -1; 1796 } 1797 1798 sym = op->sym; 1799 m_src = sym->m_src; 1800 1801 if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) { 1802 /* in-place operation */ 1803 m_dst = m_src; 1804 } else { 1805 /* out-of-place operation not supported */ 1806 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 1807 return -ENOTSUP; 1808 } 1809 1810 memcpy(job, &session->template_job, sizeof(IMB_JOB)); 1811 1812 /* Set cipher parameters */ 1813 job->enc_keys = session->cipher.expanded_aes_keys.encode; 1814 job->dec_keys = session->cipher.expanded_aes_keys.decode; 1815 1816 /* Set IV parameters */ 1817 job->iv = (uint8_t *)op + session->iv.offset; 1818 1819 /* Set digest output location */ 1820 job->auth_tag_output = qp_data->temp_digests[*digest_idx]; 1821 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; 1822 1823 /* Set data parameters */ 1824 job->src = rte_pktmbuf_mtod(m_src, uint8_t *); 1825 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, 1826 sym->cipher.data.offset); 1827 1828 job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset; 1829 job->msg_len_to_cipher_in_bytes = sym->cipher.data.length; 1830 1831 job->hash_start_src_offset_in_bytes = sym->auth.data.offset; 1832 job->msg_len_to_hash_in_bytes = sym->auth.data.length; 1833 1834 job->user_data = op; 1835 1836 return 0; 1837 } 1838 1839 static inline void 1840 verify_docsis_sec_crc(IMB_JOB *job, uint8_t *status) 1841 { 1842 uint16_t crc_offset; 1843 uint8_t *crc; 1844 1845 if (!job->msg_len_to_hash_in_bytes) 1846 return; 1847 1848 crc_offset = job->hash_start_src_offset_in_bytes + 1849 job->msg_len_to_hash_in_bytes - 1850 job->cipher_start_src_offset_in_bytes; 1851 crc = job->dst + crc_offset; 1852 1853 /* Verify CRC (at the end of the message) */ 1854 if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0) 1855 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; 1856 } 1857 1858 static inline void 1859 verify_digest(IMB_JOB *job, void *digest, uint16_t len, uint8_t *status) 1860 { 1861 /* Verify digest if required */ 1862 if (memcmp(job->auth_tag_output, digest, len) != 0) 1863 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; 1864 } 1865 1866 static inline void 1867 generate_digest(IMB_JOB *job, struct rte_crypto_op *op, 1868 struct aesni_mb_session *sess) 1869 { 1870 /* No extra copy needed */ 1871 if (likely(sess->auth.req_digest_len == job->auth_tag_output_len_in_bytes)) 1872 return; 1873 1874 /* 1875 * This can only happen for HMAC, so only digest 1876 * for authentication algos is required 1877 */ 1878 memcpy(op->sym->auth.digest.data, job->auth_tag_output, 1879 sess->auth.req_digest_len); 1880 } 1881 1882 static void 1883 post_process_sgl_linear(struct rte_crypto_op *op, IMB_JOB *job, 1884 struct aesni_mb_session *sess, uint8_t *linear_buf) 1885 { 1886 1887 int lb_offset = 0; 1888 struct rte_mbuf *m_dst = op->sym->m_dst == NULL ? 1889 op->sym->m_src : op->sym->m_dst; 1890 uint16_t total_len, dst_len; 1891 uint64_t auth_len; 1892 uint8_t *dst; 1893 1894 total_len = sgl_linear_cipher_auth_len(job, &auth_len); 1895 1896 if (sess->auth.operation != RTE_CRYPTO_AUTH_OP_VERIFY) 1897 total_len += job->auth_tag_output_len_in_bytes; 1898 1899 for (; (m_dst != NULL) && (total_len - lb_offset > 0); m_dst = m_dst->next) { 1900 dst = rte_pktmbuf_mtod(m_dst, uint8_t *); 1901 dst_len = RTE_MIN(m_dst->data_len, total_len - lb_offset); 1902 rte_memcpy(dst, linear_buf + lb_offset, dst_len); 1903 lb_offset += dst_len; 1904 } 1905 } 1906 1907 /** 1908 * Process a completed job and return rte_mbuf which job processed 1909 * 1910 * @param qp Queue Pair to process 1911 * @param job IMB_JOB job to process 1912 * 1913 * @return 1914 * - Returns processed crypto operation. 1915 * - Returns NULL on invalid job 1916 */ 1917 static inline struct rte_crypto_op * 1918 post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) 1919 { 1920 struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data; 1921 struct aesni_mb_session *sess = NULL; 1922 uint8_t *linear_buf = NULL; 1923 int sgl = 0; 1924 uint8_t oop = 0; 1925 uint8_t is_docsis_sec = 0; 1926 1927 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1928 /* 1929 * Assuming at this point that if it's a security type op, that 1930 * this is for DOCSIS 1931 */ 1932 is_docsis_sec = 1; 1933 sess = SECURITY_GET_SESS_PRIV(op->sym->session); 1934 } else 1935 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 1936 1937 if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) { 1938 switch (job->status) { 1939 case IMB_STATUS_COMPLETED: 1940 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1941 1942 if ((op->sym->m_src->nb_segs > 1 || 1943 (op->sym->m_dst != NULL && 1944 op->sym->m_dst->nb_segs > 1)) && 1945 !imb_lib_support_sgl_algo(job->cipher_mode)) { 1946 linear_buf = (uint8_t *) job->user_data2; 1947 sgl = 1; 1948 1949 post_process_sgl_linear(op, job, sess, linear_buf); 1950 } 1951 1952 if (job->hash_alg == IMB_AUTH_NULL) 1953 break; 1954 1955 if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { 1956 if (is_aead_algo(job->hash_alg, 1957 job->cipher_mode)) 1958 verify_digest(job, 1959 op->sym->aead.digest.data, 1960 sess->auth.req_digest_len, 1961 &op->status); 1962 else if (is_docsis_sec) 1963 verify_docsis_sec_crc(job, 1964 &op->status); 1965 else 1966 verify_digest(job, 1967 op->sym->auth.digest.data, 1968 sess->auth.req_digest_len, 1969 &op->status); 1970 } else { 1971 if (!op->sym->m_dst || op->sym->m_dst == op->sym->m_src) { 1972 /* in-place operation */ 1973 oop = 0; 1974 } else { /* out-of-place operation */ 1975 oop = 1; 1976 } 1977 1978 /* Enable digest check */ 1979 if (op->sym->m_src->nb_segs == 1 && op->sym->m_dst != NULL 1980 && !is_aead_algo(job->hash_alg, sess->template_job.cipher_mode) && 1981 aesni_mb_digest_appended_in_src(op, job, oop) != NULL) { 1982 unsigned int auth_size, cipher_size; 1983 int unencrypted_bytes = 0; 1984 if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || 1985 job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN || 1986 job->cipher_mode == IMB_CIPHER_ZUC_EEA3) { 1987 cipher_size = (op->sym->cipher.data.offset >> 3) + 1988 (op->sym->cipher.data.length >> 3); 1989 } else { 1990 cipher_size = (op->sym->cipher.data.offset) + 1991 (op->sym->cipher.data.length); 1992 } 1993 if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN || 1994 job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || 1995 job->hash_alg == IMB_AUTH_KASUMI_UIA1 || 1996 job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) { 1997 auth_size = (op->sym->auth.data.offset >> 3) + 1998 (op->sym->auth.data.length >> 3); 1999 } else { 2000 auth_size = (op->sym->auth.data.offset) + 2001 (op->sym->auth.data.length); 2002 } 2003 /* Check for unencrypted bytes in partial digest cases */ 2004 if (job->cipher_mode != IMB_CIPHER_NULL) { 2005 unencrypted_bytes = auth_size + 2006 job->auth_tag_output_len_in_bytes - cipher_size; 2007 } 2008 if (unencrypted_bytes > 0) 2009 rte_memcpy( 2010 rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, 2011 cipher_size), 2012 rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, 2013 cipher_size), 2014 unencrypted_bytes); 2015 } 2016 generate_digest(job, op, sess); 2017 } 2018 break; 2019 default: 2020 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 2021 } 2022 if (sgl) 2023 rte_free(linear_buf); 2024 } 2025 2026 /* Free session if a session-less crypto op */ 2027 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 2028 memset(sess, 0, sizeof(struct aesni_mb_session)); 2029 rte_mempool_put(qp->sess_mp, op->sym->session); 2030 op->sym->session = NULL; 2031 } 2032 2033 return op; 2034 } 2035 2036 static inline void 2037 post_process_mb_sync_job(IMB_JOB *job) 2038 { 2039 uint32_t *st; 2040 2041 st = job->user_data; 2042 st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG; 2043 } 2044 2045 static inline uint32_t 2046 handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr) 2047 { 2048 uint32_t i; 2049 2050 for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr)) 2051 post_process_mb_sync_job(job); 2052 2053 return i; 2054 } 2055 2056 static inline uint32_t 2057 flush_mb_sync_mgr(IMB_MGR *mb_mgr) 2058 { 2059 IMB_JOB *job; 2060 2061 job = IMB_FLUSH_JOB(mb_mgr); 2062 return handle_completed_sync_jobs(job, mb_mgr); 2063 } 2064 2065 static inline IMB_JOB * 2066 set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op) 2067 { 2068 job->chain_order = IMB_ORDER_HASH_CIPHER; 2069 job->cipher_mode = IMB_CIPHER_NULL; 2070 job->hash_alg = IMB_AUTH_NULL; 2071 job->cipher_direction = IMB_DIR_DECRYPT; 2072 2073 /* Set user data to be crypto operation data struct */ 2074 job->user_data = op; 2075 2076 return job; 2077 } 2078 2079 uint16_t 2080 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, 2081 uint16_t nb_ops) 2082 { 2083 struct ipsec_mb_qp *qp = queue_pair; 2084 IMB_MGR *mb_mgr = qp->mb_mgr; 2085 struct rte_crypto_op *op; 2086 struct rte_crypto_op *deqd_ops[IMB_MAX_BURST_SIZE]; 2087 IMB_JOB *job; 2088 int retval, processed_jobs = 0; 2089 uint16_t i, nb_jobs; 2090 IMB_JOB *jobs[IMB_MAX_BURST_SIZE] = {NULL}; 2091 pid_t pid; 2092 2093 if (unlikely(nb_ops == 0 || mb_mgr == NULL)) 2094 return 0; 2095 2096 uint8_t digest_idx = qp->digest_idx; 2097 uint16_t burst_sz = (nb_ops > IMB_MAX_BURST_SIZE) ? 2098 IMB_MAX_BURST_SIZE : nb_ops; 2099 2100 /* 2101 * If nb_ops is greater than the max supported 2102 * ipsec_mb burst size, then process in bursts of 2103 * IMB_MAX_BURST_SIZE until all operations are submitted 2104 */ 2105 while (nb_ops) { 2106 uint16_t nb_submit_ops; 2107 uint16_t n = (nb_ops / burst_sz) ? 2108 burst_sz : nb_ops; 2109 2110 if (unlikely((IMB_GET_NEXT_BURST(mb_mgr, n, jobs)) < n)) { 2111 /* 2112 * Not enough free jobs in the queue 2113 * Flush n jobs until enough jobs available 2114 */ 2115 nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); 2116 for (i = 0; i < nb_jobs; i++) { 2117 job = jobs[i]; 2118 2119 op = post_process_mb_job(qp, job); 2120 if (op) { 2121 ops[processed_jobs++] = op; 2122 qp->stats.dequeued_count++; 2123 } else { 2124 qp->stats.dequeue_err_count++; 2125 break; 2126 } 2127 } 2128 nb_ops -= i; 2129 continue; 2130 } 2131 2132 if (!RTE_PER_LCORE(pid)) 2133 RTE_PER_LCORE(pid) = getpid(); 2134 2135 pid = RTE_PER_LCORE(pid); 2136 2137 /* 2138 * Get the next operations to process from ingress queue. 2139 * There is no need to return the job to the IMB_MGR 2140 * if there are no more operations to process, since 2141 * the IMB_MGR can use that pointer again in next 2142 * get_next calls. 2143 */ 2144 nb_submit_ops = rte_ring_dequeue_burst(qp->ingress_queue, 2145 (void **)deqd_ops, n, NULL); 2146 for (i = 0; i < nb_submit_ops; i++) { 2147 job = jobs[i]; 2148 op = deqd_ops[i]; 2149 2150 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 2151 retval = set_sec_mb_job_params(job, qp, op, 2152 &digest_idx); 2153 else 2154 retval = set_mb_job_params(job, qp, op, 2155 &digest_idx, mb_mgr, pid); 2156 2157 if (unlikely(retval != 0)) { 2158 qp->stats.dequeue_err_count++; 2159 set_job_null_op(job, op); 2160 } 2161 } 2162 2163 /* Submit jobs to multi-buffer for processing */ 2164 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 2165 int err = 0; 2166 2167 nb_jobs = IMB_SUBMIT_BURST(mb_mgr, nb_submit_ops, jobs); 2168 err = imb_get_errno(mb_mgr); 2169 if (err) 2170 IPSEC_MB_LOG(ERR, "%s", imb_get_strerror(err)); 2171 #else 2172 nb_jobs = IMB_SUBMIT_BURST_NOCHECK(mb_mgr, 2173 nb_submit_ops, jobs); 2174 #endif 2175 for (i = 0; i < nb_jobs; i++) { 2176 job = jobs[i]; 2177 2178 op = post_process_mb_job(qp, job); 2179 if (op) { 2180 ops[processed_jobs++] = op; 2181 qp->stats.dequeued_count++; 2182 } else { 2183 qp->stats.dequeue_err_count++; 2184 break; 2185 } 2186 } 2187 2188 qp->digest_idx = digest_idx; 2189 2190 if (processed_jobs < 1) { 2191 nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); 2192 2193 for (i = 0; i < nb_jobs; i++) { 2194 job = jobs[i]; 2195 2196 op = post_process_mb_job(qp, job); 2197 if (op) { 2198 ops[processed_jobs++] = op; 2199 qp->stats.dequeued_count++; 2200 } else { 2201 qp->stats.dequeue_err_count++; 2202 break; 2203 } 2204 } 2205 } 2206 nb_ops -= n; 2207 } 2208 2209 return processed_jobs; 2210 } 2211 2212 static inline int 2213 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl) 2214 { 2215 /* no multi-seg support with current AESNI-MB PMD */ 2216 if (sgl->num != 1) 2217 return -ENOTSUP; 2218 else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len) 2219 return -EINVAL; 2220 return 0; 2221 } 2222 2223 static inline IMB_JOB * 2224 submit_sync_job(IMB_MGR *mb_mgr) 2225 { 2226 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG 2227 return IMB_SUBMIT_JOB(mb_mgr); 2228 #else 2229 return IMB_SUBMIT_JOB_NOCHECK(mb_mgr); 2230 #endif 2231 } 2232 2233 static inline uint32_t 2234 generate_sync_dgst(struct rte_crypto_sym_vec *vec, 2235 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len) 2236 { 2237 uint32_t i, k; 2238 2239 for (i = 0, k = 0; i != vec->num; i++) { 2240 if (vec->status[i] == 0) { 2241 memcpy(vec->digest[i].va, dgst[i], len); 2242 k++; 2243 } 2244 } 2245 2246 return k; 2247 } 2248 2249 static inline uint32_t 2250 verify_sync_dgst(struct rte_crypto_sym_vec *vec, 2251 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len) 2252 { 2253 uint32_t i, k; 2254 2255 for (i = 0, k = 0; i != vec->num; i++) { 2256 if (vec->status[i] == 0) { 2257 if (memcmp(vec->digest[i].va, dgst[i], len) != 0) 2258 vec->status[i] = EBADMSG; 2259 else 2260 k++; 2261 } 2262 } 2263 2264 return k; 2265 } 2266 2267 uint32_t 2268 aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused, 2269 struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs, 2270 struct rte_crypto_sym_vec *vec) 2271 { 2272 int32_t ret; 2273 uint32_t i, j, k, len; 2274 void *buf; 2275 IMB_JOB *job; 2276 IMB_MGR *mb_mgr; 2277 struct aesni_mb_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 2278 uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX]; 2279 2280 /* get per-thread MB MGR, create one if needed */ 2281 mb_mgr = get_per_thread_mb_mgr(); 2282 if (unlikely(mb_mgr == NULL)) 2283 return 0; 2284 2285 for (i = 0, j = 0, k = 0; i != vec->num; i++) { 2286 ret = check_crypto_sgl(sofs, vec->src_sgl + i); 2287 if (ret != 0) { 2288 vec->status[i] = ret; 2289 continue; 2290 } 2291 2292 buf = vec->src_sgl[i].vec[0].base; 2293 len = vec->src_sgl[i].vec[0].len; 2294 2295 job = IMB_GET_NEXT_JOB(mb_mgr); 2296 if (job == NULL) { 2297 k += flush_mb_sync_mgr(mb_mgr); 2298 job = IMB_GET_NEXT_JOB(mb_mgr); 2299 RTE_ASSERT(job != NULL); 2300 } 2301 2302 /* Submit job for processing */ 2303 set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i], 2304 &vec->aad[i], tmp_dgst[i], &vec->status[i]); 2305 job = submit_sync_job(mb_mgr); 2306 j++; 2307 2308 /* handle completed jobs */ 2309 k += handle_completed_sync_jobs(job, mb_mgr); 2310 } 2311 2312 /* flush remaining jobs */ 2313 while (k != j) 2314 k += flush_mb_sync_mgr(mb_mgr); 2315 2316 /* finish processing for successful jobs: check/update digest */ 2317 if (k != 0) { 2318 if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) 2319 k = verify_sync_dgst(vec, 2320 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst, 2321 s->auth.req_digest_len); 2322 else 2323 k = generate_sync_dgst(vec, 2324 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst, 2325 s->auth.req_digest_len); 2326 } 2327 2328 return k; 2329 } 2330 2331 struct rte_cryptodev_ops aesni_mb_pmd_ops = { 2332 .dev_configure = ipsec_mb_config, 2333 .dev_start = ipsec_mb_start, 2334 .dev_stop = ipsec_mb_stop, 2335 .dev_close = ipsec_mb_close, 2336 2337 .stats_get = ipsec_mb_stats_get, 2338 .stats_reset = ipsec_mb_stats_reset, 2339 2340 .dev_infos_get = ipsec_mb_info_get, 2341 2342 .queue_pair_setup = ipsec_mb_qp_setup, 2343 .queue_pair_release = ipsec_mb_qp_release, 2344 2345 .sym_cpu_process = aesni_mb_process_bulk, 2346 2347 .sym_session_get_size = ipsec_mb_sym_session_get_size, 2348 .sym_session_configure = ipsec_mb_sym_session_configure, 2349 .sym_session_clear = ipsec_mb_sym_session_clear 2350 }; 2351 2352 /** 2353 * Configure a aesni multi-buffer session from a security session 2354 * configuration 2355 */ 2356 static int 2357 aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf, 2358 struct rte_security_session *sess) 2359 { 2360 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 2361 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 2362 int ret; 2363 2364 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL || 2365 conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) { 2366 IPSEC_MB_LOG(ERR, "Invalid security protocol"); 2367 return -EINVAL; 2368 } 2369 2370 ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf, 2371 sess_private_data); 2372 2373 if (ret != 0) { 2374 IPSEC_MB_LOG(ERR, "Failed to configure session parameters"); 2375 return ret; 2376 } 2377 2378 return ret; 2379 } 2380 2381 /** Clear the memory of session so it does not leave key material behind */ 2382 static int 2383 aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused, 2384 struct rte_security_session *sess) 2385 { 2386 void *sess_priv = SECURITY_GET_SESS_PRIV(sess); 2387 2388 if (sess_priv) { 2389 memset(sess_priv, 0, sizeof(struct aesni_mb_session)); 2390 } 2391 return 0; 2392 } 2393 2394 static unsigned int 2395 aesni_mb_pmd_sec_sess_get_size(void *device __rte_unused) 2396 { 2397 return sizeof(struct aesni_mb_session); 2398 } 2399 2400 /** Get security capabilities for aesni multi-buffer */ 2401 static const struct rte_security_capability * 2402 aesni_mb_pmd_sec_capa_get(void *device __rte_unused) 2403 { 2404 return aesni_mb_pmd_security_cap; 2405 } 2406 2407 static struct rte_security_ops aesni_mb_pmd_sec_ops = { 2408 .session_create = aesni_mb_pmd_sec_sess_create, 2409 .session_update = NULL, 2410 .session_get_size = aesni_mb_pmd_sec_sess_get_size, 2411 .session_stats_get = NULL, 2412 .session_destroy = aesni_mb_pmd_sec_sess_destroy, 2413 .set_pkt_metadata = NULL, 2414 .capabilities_get = aesni_mb_pmd_sec_capa_get 2415 }; 2416 2417 struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops; 2418 2419 static int 2420 aesni_mb_configure_dev(struct rte_cryptodev *dev) 2421 { 2422 struct rte_security_ctx *security_instance; 2423 2424 security_instance = rte_malloc("aesni_mb_sec", 2425 sizeof(struct rte_security_ctx), 2426 RTE_CACHE_LINE_SIZE); 2427 if (security_instance != NULL) { 2428 security_instance->device = (void *)dev; 2429 security_instance->ops = rte_aesni_mb_pmd_sec_ops; 2430 security_instance->sess_cnt = 0; 2431 dev->security_ctx = security_instance; 2432 2433 return 0; 2434 } 2435 2436 return -ENOMEM; 2437 } 2438 2439 static int 2440 aesni_mb_probe(struct rte_vdev_device *vdev) 2441 { 2442 return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB); 2443 } 2444 2445 static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = { 2446 .probe = aesni_mb_probe, 2447 .remove = ipsec_mb_remove 2448 }; 2449 2450 static struct cryptodev_driver aesni_mb_crypto_drv; 2451 2452 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, 2453 cryptodev_aesni_mb_pmd_drv); 2454 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd); 2455 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD, 2456 "max_nb_queue_pairs=<int> socket_id=<int>"); 2457 RTE_PMD_REGISTER_CRYPTO_DRIVER( 2458 aesni_mb_crypto_drv, 2459 cryptodev_aesni_mb_pmd_drv.driver, 2460 pmd_driver_id_aesni_mb); 2461 2462 /* Constructor function to register aesni-mb PMD */ 2463 RTE_INIT(ipsec_mb_register_aesni_mb) 2464 { 2465 struct ipsec_mb_internals *aesni_mb_data = 2466 &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB]; 2467 2468 aesni_mb_data->caps = aesni_mb_capabilities; 2469 aesni_mb_data->dequeue_burst = aesni_mb_dequeue_burst; 2470 aesni_mb_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 2471 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 2472 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | 2473 RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO | 2474 RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA | 2475 RTE_CRYPTODEV_FF_SYM_SESSIONLESS | 2476 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 2477 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 2478 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 2479 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 2480 RTE_CRYPTODEV_FF_SECURITY | 2481 RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED; 2482 2483 aesni_mb_data->internals_priv_size = 0; 2484 aesni_mb_data->ops = &aesni_mb_pmd_ops; 2485 aesni_mb_data->qp_priv_size = sizeof(struct aesni_mb_qp_data); 2486 aesni_mb_data->queue_pair_configure = NULL; 2487 aesni_mb_data->security_ops = &aesni_mb_pmd_sec_ops; 2488 aesni_mb_data->dev_config = aesni_mb_configure_dev; 2489 aesni_mb_data->session_configure = aesni_mb_session_configure; 2490 aesni_mb_data->session_priv_size = sizeof(struct aesni_mb_session); 2491 } 2492