/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_ether.h>
#include <rte_ip.h>

#include "cperf_ops.h"
#include "cperf_test_vectors.h"

static int
cperf_set_ops_asym(struct rte_crypto_op **ops,
		uint32_t src_buf_offset __rte_unused,
		uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
		struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused,
		uint16_t iv_offset __rte_unused,
		uint32_t *imix_idx __rte_unused,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	void *asym_sess = (void *)sess;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_asym_op *asym_op = ops[i]->asym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		asym_op->modex.base.data = options->modex_data->base.data;
		asym_op->modex.base.length = options->modex_data->base.len;
		asym_op->modex.result.data = options->modex_data->result.data;
		asym_op->modex.result.length = options->modex_data->result.len;
		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
	}
	return 0;
}
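
/*
 * Note: each op above computes result = base ^ exponent mod modulus, where
 * modulus and exponent come from the session xform (see cperf_create_session())
 * and only the base/result buffers are per-op. A minimal standalone sketch of
 * the same op setup, assuming a pre-built session "asym_sess" and an op "op"
 * allocated from an RTE_CRYPTO_OP_TYPE_ASYMMETRIC pool (hypothetical names):
 *
 *	op->asym->modex.base.data = base_buf;       // input integer
 *	op->asym->modex.base.length = base_len;
 *	op->asym->modex.result.data = result_buf;   // output, modulus-sized
 *	op->asym->modex.result.length = result_len;
 *	rte_crypto_op_attach_asym_session(op, asym_sess);
 */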

#ifdef RTE_LIB_SECURITY
static void
test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
			const struct cperf_test_vector *test_vector)
{
	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);

	if ((options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ||
			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
		memcpy(ip, test_vector->plaintext.data,
				sizeof(struct rte_ipv4_hdr));

		ip->total_length = rte_cpu_to_be_16(m->data_len);
	}
}

static int
cperf_set_ops_security(struct rte_crypto_op **ops,
		uint32_t src_buf_offset,
		uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		struct rte_security_session *sec_sess =
			(struct rte_security_session *)sess;
		uint32_t buf_sz;

		/* Per-packet HFN lives in the op private area (at iv_offset);
		 * it is zeroed when the session-based HFN is enabled.
		 */
		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
					uint32_t *, iv_offset);
		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_security_attach_session(ops[i], sec_sess);
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		if (options->op_type == CPERF_PDCP) {
			sym_op->m_src->buf_len = options->segment_sz;
			sym_op->m_src->data_len = options->test_buffer_size;
			sym_op->m_src->pkt_len = sym_op->m_src->data_len;
		}

		if (options->op_type == CPERF_DOCSIS) {
			if (options->imix_distribution_count) {
				buf_sz = options->imix_buffer_sizes[*imix_idx];
				*imix_idx = (*imix_idx + 1) % options->pool_sz;
			} else
				buf_sz = options->test_buffer_size;

			sym_op->m_src->buf_len = options->segment_sz;
			sym_op->m_src->data_len = buf_sz;
			sym_op->m_src->pkt_len = buf_sz;

			/* DOCSIS header is not CRC'ed */
			sym_op->auth.data.offset = options->docsis_hdr_sz;
			sym_op->auth.data.length = buf_sz -
				sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
			/*
			 * DOCSIS header and SRC and DST MAC addresses are not
			 * ciphered
			 */
			sym_op->cipher.data.offset = sym_op->auth.data.offset +
				RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
			sym_op->cipher.data.length = buf_sz -
				sym_op->cipher.data.offset;
		}

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0).
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);
	}

	RTE_SET_USED(tsc_start);
	RTE_SET_USED(test_vector);

	return 0;
}
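
/*
 * Worked example of the DOCSIS offsets above, assuming docsis_hdr_sz = 6 and
 * buf_sz = 1024 (illustrative values, not defaults):
 *
 *	auth.data.offset   = 6                     // skip DOCSIS header
 *	auth.data.length   = 1024 - 6 - 4 = 1014   // exclude 4-byte CRC
 *	cipher.data.offset = 6 + 14 - 2   = 18     // also skip DST/SRC MACs
 *	cipher.data.length = 1024 - 18    = 1006   // the CRC is ciphered
 *
 * (RTE_ETHER_HDR_LEN = 14, RTE_ETHER_TYPE_LEN = 2, RTE_ETHER_CRC_LEN = 4.)
 */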

static int
cperf_set_ops_security_ipsec(struct rte_crypto_op **ops,
		uint32_t src_buf_offset,
		uint32_t dst_buf_offset __rte_unused,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start)
{
	struct rte_security_session *sec_sess =
			(struct rte_security_session *)sess;
	const uint32_t test_buffer_size = options->test_buffer_size;
	const uint32_t headroom_sz = options->headroom_sz;
	const uint32_t segment_sz = options->segment_sz;
	uint64_t tsc_start_temp, tsc_end_temp;
	uint16_t i = 0;

	RTE_SET_USED(imix_idx);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		struct rte_mbuf *m;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_security_attach_session(ops[i], sec_sess);
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);
		m = sym_op->m_src;

		/* In case of IPsec, headroom is consumed by the PMD,
		 * hence reset it.
		 */
		m->data_off = headroom_sz;

		m->buf_len = segment_sz;
		m->data_len = test_buffer_size;
		m->pkt_len = test_buffer_size;

		sym_op->m_dst = NULL;
	}

	if (options->test_file != NULL)
		return 0;

	/* Populating the packet headers is not crypto work; measure it
	 * separately so the caller can exclude these cycles.
	 */
	tsc_start_temp = rte_rdtsc_precise();

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		struct rte_mbuf *m = sym_op->m_src;

		test_ipsec_vec_populate(m, options, test_vector);
	}

	tsc_end_temp = rte_rdtsc_precise();
	*tsc_start += tsc_end_temp - tsc_start_temp;

	return 0;
}

#endif
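
/*
 * Layout note (sketch): test ops and their mbufs are carved out of the same
 * mempool element, so the populate functions recover the source/destination
 * mbufs from the op pointer itself:
 *
 *	+---------------+--------------------+--------------------+
 *	| rte_crypto_op | src mbuf + data    | dst mbuf + data    |
 *	+---------------+--------------------+--------------------+
 *	^               ^ src_buf_offset     ^ dst_buf_offset
 *	ops[i]
 *
 * A dst_buf_offset of 0 means no separate destination buffer was laid out,
 * i.e. the operation is in-place and m_dst stays NULL.
 */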

static int
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0).
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;
		sym_op->cipher.data.offset = 0;
	}

	return 0;
}

static int
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0).
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* auth parameters */
		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;
		sym_op->auth.data.offset = 0;
	}

	return 0;
}

static int
cperf_set_ops_cipher(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0).
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		/* These wireless algorithms take data lengths in bits */
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
		}
	}

	return 0;
}
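
/*
 * SNOW3G/KASUMI/ZUC note: the cryptodev API expresses cipher and auth data
 * lengths for these algorithms in bits, hence the "<<= 3" above; e.g. a
 * 1024-byte test buffer is described as an 8192-bit region starting at
 * offset 0.
 */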

static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0).
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		if (test_vector->auth_iv.length) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
								uint8_t *,
								iv_offset);
			memcpy(iv_ptr, test_vector->auth_iv.data,
					test_vector->auth_iv.length);
		}

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in the segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		/* These wireless algorithms take data lengths in bits */
		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		if (test_vector->auth_iv.length) {
			for (i = 0; i < nb_ops; i++) {
				uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
						uint8_t *, iv_offset);

				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}
	}
	return 0;
}
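
/*
 * Digest placement example for the segment walk above, assuming a chained
 * mbuf with segment_sz = 512, test_buffer_size = 1200 and digest_sz = 16
 * (illustrative values): the walk skips two full 512-byte segments
 * (offset 1200 -> 688 -> 176) and lands in the third segment at offset 176.
 * Since 512 - 176 >= 16 the digest fits there; otherwise it would be moved
 * to offset 0 of the following segment.
 */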

static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0).
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		/* In auth-then-cipher, the generated digest is ciphered too */
		if ((options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) &&
				(options->op_type == CPERF_AUTH_THEN_CIPHER))
			sym_op->cipher.data.length += options->digest_sz;

		/* These wireless algorithms take data lengths in bits */
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in the segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
			if (test_vector->auth_iv.length) {
				/* The auth IV is stored right after the
				 * cipher IV.
				 */
				iv_ptr += test_vector->cipher_iv.length;
				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}
	}

	return 0;
}
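
/*
 * IV layout note (sketch): both IVs live in the op private area, cipher IV
 * first at iv_offset, auth IV immediately after it; e.g. with a 16-byte
 * cipher IV:
 *
 *	op + iv_offset:      cipher IV (16 B)
 *	op + iv_offset + 16: auth IV
 */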

static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	/* AAD is placed after the IV */
	uint16_t aad_offset = iv_offset +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0).
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* AEAD parameters */
		if (options->imix_distribution_count) {
			sym_op->aead.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->aead.data.length = options->test_buffer_size;
		sym_op->aead.data.offset = 0;

		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, aad_offset);
		sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
					aad_offset);

		if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
			sym_op->aead.digest.data = test_vector->digest.data;
			sym_op->aead.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			uint32_t offset = sym_op->aead.data.length +
					sym_op->aead.data.offset;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in the segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->aead.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}
	}

	if ((options->test == CPERF_TEST_TYPE_VERIFY) ||
			(options->test == CPERF_TEST_TYPE_LATENCY)) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			/*
			 * If doing AES-CCM, the nonce is copied one byte
			 * after the start of the IV field, and the AAD is
			 * copied 18 bytes after the start of the AAD field.
			 */
			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
						test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data + 18,
						test_vector->aad.data,
						test_vector->aad.length);
			} else {
				memcpy(iv_ptr, test_vector->aead_iv.data,
						test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data,
						test_vector->aad.data,
						test_vector->aad.length);
			}
		}
	}

	return 0;
}
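
/*
 * AAD placement example: with a 12-byte AEAD IV, RTE_ALIGN_CEIL(12, 16)
 * yields 16, so the AAD starts 16 bytes after iv_offset; the 16-byte
 * alignment keeps the AAD address suitable for PMDs with alignment
 * requirements. For AES-CCM, the extra offsets used above (nonce at IV + 1,
 * AAD at AAD + 18) leave room for the CCM B0/flags formatting that the
 * cryptodev API describes for that algorithm.
 */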

static struct rte_cryptodev_sym_session *
create_ipsec_session(struct rte_mempool *sess_mp,
		struct rte_mempool *priv_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform = {0};
	struct rte_crypto_sym_xform auth_xform = {0};

	if (options->aead_algo != 0) {
		/* Setup AEAD Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform.next = NULL;
		xform.aead.algo = options->aead_algo;
		xform.aead.op = options->aead_op;
		xform.aead.iv.offset = iv_offset;
		xform.aead.key.data = test_vector->aead_key.data;
		xform.aead.key.length = test_vector->aead_key.length;
		xform.aead.iv.length = test_vector->aead_iv.length;
		xform.aead.digest_length = options->digest_sz;
		xform.aead.aad_length = options->aead_aad_sz;
	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
		/* Setup Cipher Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform.next = NULL;
		xform.cipher.algo = options->cipher_algo;
		xform.cipher.op = options->cipher_op;
		xform.cipher.iv.offset = iv_offset;
		xform.cipher.iv.length = test_vector->cipher_iv.length;
		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			xform.cipher.key.data = test_vector->cipher_key.data;
			xform.cipher.key.length =
				test_vector->cipher_key.length;
		} else {
			xform.cipher.key.data = NULL;
			xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset +
				xform.cipher.iv.length;
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.key.length =
				test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		xform.next = &auth_xform;
	} else {
		return NULL;
	}

#define CPERF_IPSEC_SRC_IP	0x01010101
#define CPERF_IPSEC_DST_IP	0x02020202
#define CPERF_IPSEC_SALT	0x0
#define CPERF_IPSEC_DEFTTL	64
	struct rte_security_ipsec_tunnel_param tunnel = {
		.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
		{.ipv4 = {
			.src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
			.dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
			.dscp = 0,
			.df = 0,
			.ttl = CPERF_IPSEC_DEFTTL,
		} },
	};
	struct rte_security_session_conf sess_conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = rte_lcore_id(),
			/**< For testing purposes, lcore_id is used as the SPI
			 * so that each core creates a different session.
			 */
			.salt = CPERF_IPSEC_SALT,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction =
				((options->cipher_op ==
					RTE_CRYPTO_CIPHER_OP_ENCRYPT) &&
				(options->auth_op ==
					RTE_CRYPTO_AUTH_OP_GENERATE)) ||
				(options->aead_op ==
					RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = tunnel,
		} },
		.userdata = NULL,
		.crypto_xform = &xform
	};

	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

	/* Create security session */
	return (void *)rte_security_session_create(ctx,
				&sess_conf, sess_mp, priv_mp);
}
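
/*
 * Direction recap for the session above: encrypt-side configurations
 * (cipher encrypt + auth generate, or AEAD encrypt) build an egress
 * (outbound) SA, everything else an ingress (inbound) SA. The tunnel
 * endpoints 1.1.1.1 -> 2.2.2.2 come from the CPERF_IPSEC_* defines above.
 */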

static struct rte_cryptodev_sym_session *
cperf_create_session(struct rte_mempool *sess_mp,
		struct rte_mempool *priv_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_crypto_sym_xform auth_xform;
	struct rte_crypto_sym_xform aead_xform;
	struct rte_cryptodev_sym_session *sess = NULL;
	void *asym_sess = NULL;
	struct rte_crypto_asym_xform xform = {0};
	int ret;

	if (options->op_type == CPERF_ASYM_MODEX) {
		xform.next = NULL;
		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
		xform.modex.modulus.data = options->modex_data->modulus.data;
		xform.modex.modulus.length = options->modex_data->modulus.len;
		xform.modex.exponent.data = options->modex_data->exponent.data;
		xform.modex.exponent.length = options->modex_data->exponent.len;

		ret = rte_cryptodev_asym_session_create(dev_id, &xform,
				sess_mp, &asym_sess);
		if (ret < 0) {
			RTE_LOG(ERR, USER1, "Asym session create failed\n");
			return NULL;
		}
		return asym_sess;
	}
#ifdef RTE_LIB_SECURITY
	/*
	 * security only
	 */
	if (options->op_type == CPERF_PDCP) {
		/* Setup Cipher Parameters */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		cipher_xform.cipher.iv.length = 4;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
				test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
				test_vector->cipher_key.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		if (options->auth_algo != 0) {
			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			auth_xform.next = NULL;
			auth_xform.auth.algo = options->auth_algo;
			auth_xform.auth.op = options->auth_op;
			auth_xform.auth.iv.offset = iv_offset +
				cipher_xform.cipher.iv.length;

			/* auth different than null */
			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
				auth_xform.auth.digest_length =
					options->digest_sz;
				auth_xform.auth.key.length =
					test_vector->auth_key.length;
				auth_xform.auth.key.data =
					test_vector->auth_key.data;
				auth_xform.auth.iv.length =
					test_vector->auth_iv.length;
			} else {
				auth_xform.auth.digest_length = 0;
				auth_xform.auth.key.length = 0;
				auth_xform.auth.key.data = NULL;
				auth_xform.auth.iv.length = 0;
			}

			cipher_xform.next = &auth_xform;
		} else {
			cipher_xform.next = NULL;
		}

		struct rte_security_session_conf sess_conf = {
			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
			{.pdcp = {
				.bearer = 0x16,
				.domain = options->pdcp_domain,
				.pkt_dir = 0,
				.sn_size = options->pdcp_sn_sz,
				.hfn = options->pdcp_ses_hfn_en ?
					PDCP_DEFAULT_HFN : 0,
				.hfn_threshold = 0x70C0A,
				.sdap_enabled = options->pdcp_sdap,
				.hfn_ovrd = !(options->pdcp_ses_hfn_en),
			} },
			.crypto_xform = &cipher_xform
		};

		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx,
					&sess_conf, sess_mp, priv_mp);
	}

	if (options->op_type == CPERF_IPSEC) {
		return create_ipsec_session(sess_mp, priv_mp, dev_id,
				options, test_vector, iv_offset);
	}

	if (options->op_type == CPERF_DOCSIS) {
		enum rte_security_docsis_direction direction;

		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
				test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
				test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
				test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}

		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			direction = RTE_SECURITY_DOCSIS_DOWNLINK;
		else
			direction = RTE_SECURITY_DOCSIS_UPLINK;

		struct rte_security_session_conf sess_conf = {
			.action_type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
			{.docsis = {
				.direction = direction,
			} },
			.crypto_xform = &cipher_xform
		};
		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx,
					&sess_conf, sess_mp, priv_mp);
	}
#endif
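
	/*
	 * Symmetric sessions in this DPDK version are created in two steps:
	 * allocate the session shell from the session mempool, then bind it
	 * to this device's private data with rte_cryptodev_sym_session_init()
	 * and the xform chain built below.
	 */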
	sess = rte_cryptodev_sym_session_create(sess_mp);
	/*
	 * cipher only
	 */
	if (options->op_type == CPERF_CIPHER_ONLY) {
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}
		/* create crypto session */
		rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
				priv_mp);
	/*
	 * auth only
	 */
	} else if (options->op_type == CPERF_AUTH_ONLY) {
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length =
					options->digest_sz;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length =
					test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}
		/* create crypto session */
		rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
				priv_mp);
	/*
	 * cipher and auth
	 */
	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
			|| options->op_type == CPERF_AUTH_THEN_CIPHER) {
		/*
		 * cipher
		 */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}

		/*
		 * auth
		 */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset +
				cipher_xform.cipher.iv.length;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.iv.length =
					test_vector->auth_iv.length;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data =
					test_vector->auth_key.data;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* cipher then auth */
		if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
			cipher_xform.next = &auth_xform;
			/* create crypto session */
			rte_cryptodev_sym_session_init(dev_id,
					sess, &cipher_xform, priv_mp);
		} else { /* auth then cipher */
			auth_xform.next = &cipher_xform;
			/* create crypto session */
			rte_cryptodev_sym_session_init(dev_id,
					sess, &auth_xform, priv_mp);
		}
	} else { /* options->op_type == CPERF_AEAD */
		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		aead_xform.next = NULL;
		aead_xform.aead.algo = options->aead_algo;
		aead_xform.aead.op = options->aead_op;
		aead_xform.aead.iv.offset = iv_offset;

		aead_xform.aead.key.data =
					test_vector->aead_key.data;
		aead_xform.aead.key.length =
					test_vector->aead_key.length;
		aead_xform.aead.iv.length = test_vector->aead_iv.length;

		aead_xform.aead.digest_length = options->digest_sz;
		aead_xform.aead.aad_length =
					options->aead_aad_sz;

		/* Create crypto session */
		rte_cryptodev_sym_session_init(dev_id,
				sess, &aead_xform, priv_mp);
	}

	return sess;
}
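
/*
 * Xform chaining recap for cperf_create_session(): the op type decides the
 * head of the chain, e.g.
 *
 *	CPERF_CIPHER_THEN_AUTH: cipher_xform -> auth_xform
 *	CPERF_AUTH_THEN_CIPHER: auth_xform -> cipher_xform
 *
 * The device processes the chain in order, so the same two xforms express
 * encrypt-then-MAC or MAC-then-encrypt depending on which one comes first.
 */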

int
cperf_get_op_functions(const struct cperf_options *options,
		struct cperf_op_fns *op_fns)
{
	memset(op_fns, 0, sizeof(struct cperf_op_fns));

	op_fns->sess_create = cperf_create_session;

	switch (options->op_type) {
	case CPERF_AEAD:
		op_fns->populate_ops = cperf_set_ops_aead;
		break;
	case CPERF_AUTH_THEN_CIPHER:
	case CPERF_CIPHER_THEN_AUTH:
		op_fns->populate_ops = cperf_set_ops_cipher_auth;
		break;
	case CPERF_AUTH_ONLY:
		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
			op_fns->populate_ops = cperf_set_ops_null_auth;
		else
			op_fns->populate_ops = cperf_set_ops_auth;
		break;
	case CPERF_CIPHER_ONLY:
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
			op_fns->populate_ops = cperf_set_ops_null_cipher;
		else
			op_fns->populate_ops = cperf_set_ops_cipher;
		break;
	case CPERF_ASYM_MODEX:
		op_fns->populate_ops = cperf_set_ops_asym;
		break;
#ifdef RTE_LIB_SECURITY
	case CPERF_PDCP:
	case CPERF_DOCSIS:
		op_fns->populate_ops = cperf_set_ops_security;
		break;
	case CPERF_IPSEC:
		op_fns->populate_ops = cperf_set_ops_security_ipsec;
		break;
#endif
	default:
		return -1;
	}

	return 0;
}
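
/*
 * Usage sketch (hypothetical caller, mirroring what the cperf test runners
 * do; variable names are illustrative):
 *
 *	struct cperf_op_fns fns;
 *
 *	if (cperf_get_op_functions(opts, &fns) < 0)
 *		return -EINVAL;
 *	sess = fns.sess_create(sess_mp, priv_mp, dev_id, opts, vec, iv_off);
 *	fns.populate_ops(ops, src_off, dst_off, burst_sz, sess, opts, vec,
 *			 iv_off, &imix_idx, &tsc_start);
 *	rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, burst_sz);
 */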