1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2016-2017 Intel Corporation 3 */ 4 5 #include <rte_cryptodev.h> 6 #include <rte_ether.h> 7 #include <rte_ip.h> 8 9 #include "cperf_ops.h" 10 #include "cperf_test_vectors.h" 11 12 static void 13 cperf_set_ops_asym(struct rte_crypto_op **ops, 14 uint32_t src_buf_offset __rte_unused, 15 uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops, 16 void *sess, 17 const struct cperf_options *options, 18 const struct cperf_test_vector *test_vector __rte_unused, 19 uint16_t iv_offset __rte_unused, 20 uint32_t *imix_idx __rte_unused, 21 uint64_t *tsc_start __rte_unused) 22 { 23 uint16_t i; 24 void *asym_sess = (void *)sess; 25 26 for (i = 0; i < nb_ops; i++) { 27 struct rte_crypto_asym_op *asym_op = ops[i]->asym; 28 29 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; 30 asym_op->modex.base.data = options->modex_data->base.data; 31 asym_op->modex.base.length = options->modex_data->base.len; 32 asym_op->modex.result.data = options->modex_data->result.data; 33 asym_op->modex.result.length = options->modex_data->result.len; 34 rte_crypto_op_attach_asym_session(ops[i], asym_sess); 35 } 36 } 37 38 #ifdef RTE_LIB_SECURITY 39 static void 40 test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options, 41 const struct cperf_test_vector *test_vector) 42 { 43 struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *); 44 45 if (options->is_outbound) { 46 memcpy(ip, test_vector->plaintext.data, 47 sizeof(struct rte_ipv4_hdr)); 48 49 ip->total_length = rte_cpu_to_be_16(m->data_len); 50 } 51 } 52 53 static void 54 cperf_set_ops_security(struct rte_crypto_op **ops, 55 uint32_t src_buf_offset __rte_unused, 56 uint32_t dst_buf_offset __rte_unused, 57 uint16_t nb_ops, void *sess, 58 const struct cperf_options *options, 59 const struct cperf_test_vector *test_vector, 60 uint16_t iv_offset __rte_unused, uint32_t *imix_idx, 61 uint64_t *tsc_start) 62 { 63 uint16_t i; 64 65 for (i = 0; i < nb_ops; i++) { 66 
struct rte_crypto_sym_op *sym_op = ops[i]->sym; 67 void *sec_sess = (void *)sess; 68 uint32_t buf_sz; 69 70 uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i], 71 uint32_t *, iv_offset); 72 *per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN; 73 74 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; 75 rte_security_attach_session(ops[i], sec_sess); 76 sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] + 77 src_buf_offset); 78 79 if (options->op_type == CPERF_PDCP) { 80 sym_op->m_src->buf_len = options->segment_sz; 81 sym_op->m_src->data_len = options->test_buffer_size; 82 sym_op->m_src->pkt_len = sym_op->m_src->data_len; 83 } 84 85 if (options->op_type == CPERF_DOCSIS) { 86 if (options->imix_distribution_count) { 87 buf_sz = options->imix_buffer_sizes[*imix_idx]; 88 *imix_idx = (*imix_idx + 1) % options->pool_sz; 89 } else 90 buf_sz = options->test_buffer_size; 91 92 sym_op->m_src->buf_len = options->segment_sz; 93 sym_op->m_src->data_len = buf_sz; 94 sym_op->m_src->pkt_len = buf_sz; 95 96 /* DOCSIS header is not CRC'ed */ 97 sym_op->auth.data.offset = options->docsis_hdr_sz; 98 sym_op->auth.data.length = buf_sz - 99 sym_op->auth.data.offset - RTE_ETHER_CRC_LEN; 100 /* 101 * DOCSIS header and SRC and DST MAC addresses are not 102 * ciphered 103 */ 104 sym_op->cipher.data.offset = sym_op->auth.data.offset + 105 RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN; 106 sym_op->cipher.data.length = buf_sz - 107 sym_op->cipher.data.offset; 108 } 109 110 /* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */ 111 if (dst_buf_offset == 0) 112 sym_op->m_dst = NULL; 113 else 114 sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] + 115 dst_buf_offset); 116 } 117 118 RTE_SET_USED(tsc_start); 119 RTE_SET_USED(test_vector); 120 } 121 122 static void 123 cperf_set_ops_security_ipsec(struct rte_crypto_op **ops, 124 uint32_t src_buf_offset __rte_unused, 125 uint32_t dst_buf_offset __rte_unused, 126 uint16_t nb_ops, void *sess, 127 const struct cperf_options 
				*options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start)
{
	void *sec_sess = sess;
	const uint32_t test_buffer_size = options->test_buffer_size;
	const uint32_t headroom_sz = options->headroom_sz;
	const uint32_t segment_sz = options->segment_sz;
	uint64_t tsc_start_temp, tsc_end_temp;
	uint16_t i = 0;

	RTE_SET_USED(imix_idx);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		/* NOTE(review): 'm' is captured before m_src is (re)written
		 * below; presumably both point at the same pre-initialized
		 * mbuf behind the op — confirm against the op-pool setup.
		 */
		struct rte_mbuf *m = sym_op->m_src;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_security_attach_session(ops[i], sec_sess);
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* In case of IPsec, headroom is consumed by PMD,
		 * hence resetting it.
		 */
		m->data_off = headroom_sz;

		m->buf_len = segment_sz;
		m->data_len = test_buffer_size;
		m->pkt_len = test_buffer_size;

		/* IPsec output is always produced in-place */
		sym_op->m_dst = NULL;
	}

	if (options->test_file != NULL)
		return;

	/* Vector population is not part of the measured work: time it
	 * separately and credit it back to the caller via *tsc_start.
	 */
	tsc_start_temp = rte_rdtsc_precise();

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		struct rte_mbuf *m = sym_op->m_src;

		test_ipsec_vec_populate(m, options, test_vector);
	}

	tsc_end_temp = rte_rdtsc_precise();
	*tsc_start += tsc_end_temp - tsc_start_temp;
}

#endif

/* Populate ops for NULL cipher: attach session, wire src/dst mbufs and
 * set the cipher region over the whole test buffer.
 */
static void
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		/* mbufs are laid out at fixed offsets behind each op */
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;
		sym_op->cipher.data.offset = 0;
	}
}

/* Populate ops for NULL auth: same mbuf wiring as NULL cipher, but the
 * auth region covers the test buffer instead of the cipher region.
 */
static void
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* auth parameters */
		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;
		sym_op->auth.data.offset = 0;
	}
}

/* Populate ops for cipher-only sessions. */
static void
cperf_set_ops_cipher(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t
			dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		/* Wireless algorithms express the cipher length in bits */
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;
	}

	/* Verify mode needs the known IV so output can be checked */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);

		}
	}
}

/* Populate ops for auth-only sessions. */
static void
cperf_set_ops_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op
					*sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		if (test_vector->auth_iv.length) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
								uint8_t *,
								iv_offset);
			memcpy(iv_ptr, test_vector->auth_iv.data,
					test_vector->auth_iv.length);
		}

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			/* Verify against the vector's precomputed digest */
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			/* Generate: place the digest just after the payload */
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				/* Walk the chain to the segment containing
				 * 'offset'
				 */
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);

		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		/* Wireless algorithms express the auth length in bits */
		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo
						== RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	/* Verify mode needs the known auth IV so output can be checked */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		if (test_vector->auth_iv.length) {
			for (i = 0; i < nb_ops; i++) {
				uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
						uint8_t *, iv_offset);

				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}
	}
}

/* Populate ops for chained cipher+auth sessions (both orderings). */
static void
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		/* Auth-then-cipher generate: the digest is ciphered too */
		if ((options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) &&
				(options->op_type == CPERF_AUTH_THEN_CIPHER))
			sym_op->cipher.data.length += options->digest_sz;

		/* Wireless algorithms express the cipher length in bits */
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset =
			0;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			/* Verify against the vector's precomputed digest */
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			/* Generate: place the digest just after the payload */
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				/* Walk the chain to the segment containing
				 * 'offset'
				 */
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		/* Wireless algorithms express the auth length in bits */
		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	/* Verify mode needs the known IVs so output can be checked */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
			if (test_vector->auth_iv.length) {
				/*
				 * Copy IV after the crypto operation and
				 * the cipher IV
				 */
				iv_ptr += test_vector->cipher_iv.length;
				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}

	}
}

static
void
/* Populate ops for AEAD sessions; AAD lives after the (16B-aligned) IV. */
cperf_set_ops_aead(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	/* AAD is placed after the IV */
	uint16_t aad_offset = iv_offset +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* AEAD parameters */
		if (options->imix_distribution_count) {
			sym_op->aead.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->aead.data.length = options->test_buffer_size;
		sym_op->aead.data.offset = 0;

		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, aad_offset);
		sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
					aad_offset);

		if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
			/* Decrypt verifies the vector's precomputed tag */
			sym_op->aead.digest.data = test_vector->digest.data;
			sym_op->aead.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			/* Encrypt: tag goes right after the AEAD payload */
			uint32_t offset = sym_op->aead.data.length +
					sym_op->aead.data.offset;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->aead.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}
	}

	/* Verify/latency runs use the known IV/AAD from the vector */
	if ((options->test == CPERF_TEST_TYPE_VERIFY) ||
			(options->test == CPERF_TEST_TYPE_LATENCY)) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			/*
			 * If doing AES-CCM, nonce is copied one byte
			 * after the start of IV field, and AAD is copied
			 * 18 bytes after the start of the AAD field.
			 */
			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data + 18,
					test_vector->aad.data,
					test_vector->aad.length);
			} else {
				memcpy(iv_ptr, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data,
					test_vector->aad.data,
					test_vector->aad.length);
			}
		}
	}
}

/*
 * Build an IPsec security session: AEAD if configured, otherwise a
 * cipher+auth chain ordered by direction. Returns NULL if neither an
 * AEAD nor a cipher+auth pair is configured.
 */
static void *
create_ipsec_session(struct rte_mempool *sess_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform auth_xform = {0};
	struct rte_crypto_sym_xform *crypto_xform;
	struct rte_crypto_sym_xform xform = {0};

	if (options->aead_algo != 0) {
		/* Setup AEAD Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform.next = NULL;
		xform.aead.algo = options->aead_algo;
		xform.aead.op = options->aead_op;
		xform.aead.iv.offset = iv_offset;
		xform.aead.key.data = test_vector->aead_key.data;
		xform.aead.key.length = test_vector->aead_key.length;
		xform.aead.iv.length = test_vector->aead_iv.length;
		xform.aead.digest_length = options->digest_sz;
		xform.aead.aad_length = options->aead_aad_sz;
		crypto_xform = &xform;
	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
		/* Setup Cipher Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform.cipher.algo = options->cipher_algo;
		xform.cipher.op = options->cipher_op;
		xform.cipher.iv.offset = iv_offset;
		xform.cipher.iv.length = test_vector->cipher_iv.length;
		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			xform.cipher.key.data = test_vector->cipher_key.data;
			xform.cipher.key.length =
				test_vector->cipher_key.length;
		} else {
			xform.cipher.key.data = NULL;
			xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters; auth IV follows the cipher IV */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset +
				xform.cipher.iv.length;
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.key.length =
				test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* Chain order depends on direction: egress ciphers first */
		if (options->is_outbound) {
			crypto_xform = &xform;
			xform.next = &auth_xform;
			auth_xform.next = NULL;
		} else {
			crypto_xform = &auth_xform;
			auth_xform.next = &xform;
			xform.next = NULL;
		}
	} else {
		return NULL;
	}

#define CPERF_IPSEC_SRC_IP	0x01010101
#define CPERF_IPSEC_DST_IP	0x02020202
#define CPERF_IPSEC_SALT	0x0
#define CPERF_IPSEC_DEFTTL	64
	struct
		rte_security_ipsec_tunnel_param tunnel = {
		.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
		{.ipv4 = {
			.src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
			.dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
			.dscp = 0,
			.df = 0,
			.ttl = CPERF_IPSEC_DEFTTL,
		} },
	};
	struct rte_security_session_conf sess_conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = rte_lcore_id() + 1,
			/**< For testing sake, lcore_id is taken as SPI so that
			 * for every core a different session is created.
			 */
			.salt = CPERF_IPSEC_SALT,
			.options = { 0 },
			.replay_win_sz = 0,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = tunnel,
		} },
		.userdata = NULL,
		.crypto_xform = crypto_xform,
	};

	if (options->is_outbound)
		sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	else
		sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

	/* Create security session */
	return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
}

/*
 * Session factory used by cperf_op_fns.sess_create: builds the proper
 * session type (asym, PDCP, IPsec, DOCSIS or plain symmetric) from the
 * configured op type. Returns NULL on failure.
 */
static void *
cperf_create_session(struct rte_mempool *sess_mp,
	uint8_t dev_id,
	const struct cperf_options *options,
	const struct cperf_test_vector *test_vector,
	uint16_t iv_offset)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_crypto_sym_xform auth_xform;
	struct rte_crypto_sym_xform aead_xform;
	void *sess = NULL;
	void *asym_sess = NULL;
	struct rte_crypto_asym_xform xform = {0};
	int ret;

	if (options->op_type == CPERF_ASYM_MODEX) {
		xform.next = NULL;
		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
		xform.modex.modulus.data = options->modex_data->modulus.data;
		xform.modex.modulus.length = options->modex_data->modulus.len;
		xform.modex.exponent.data = options->modex_data->exponent.data;
		xform.modex.exponent.length = options->modex_data->exponent.len;

		ret = rte_cryptodev_asym_session_create(dev_id, &xform,
				sess_mp, &asym_sess);
		if (ret < 0) {
			RTE_LOG(ERR, USER1, "Asym session create failed\n");
			return NULL;
		}
		return asym_sess;
	}
#ifdef RTE_LIB_SECURITY
	/*
	 * security only
	 */
	if (options->op_type == CPERF_PDCP) {
		/* Setup Cipher Parameters */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		/* PDCP uses a 4-byte count as IV */
		cipher_xform.cipher.iv.length = 4;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data = test_vector->cipher_key.data;
			cipher_xform.cipher.key.length = test_vector->cipher_key.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		if (options->auth_algo != 0) {
			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			auth_xform.next = NULL;
			auth_xform.auth.algo = options->auth_algo;
			auth_xform.auth.op = options->auth_op;
			auth_xform.auth.iv.offset = iv_offset +
				cipher_xform.cipher.iv.length;

			/* auth different than null */
			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
				auth_xform.auth.digest_length = options->digest_sz;
				auth_xform.auth.key.length = test_vector->auth_key.length;
				auth_xform.auth.key.data = test_vector->auth_key.data;
				auth_xform.auth.iv.length = test_vector->auth_iv.length;
			} else {
				auth_xform.auth.digest_length = 0;
				auth_xform.auth.key.length = 0;
				auth_xform.auth.key.data = NULL;
				auth_xform.auth.iv.length = 0;
			}

			cipher_xform.next = &auth_xform;
		} else {
			cipher_xform.next = NULL;
		}

		struct rte_security_session_conf sess_conf = {
			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
			{.pdcp = {
				.bearer = 0x16,
				.domain = options->pdcp_domain,
				.pkt_dir = 0,
				.sn_size = options->pdcp_sn_sz,
				.hfn = options->pdcp_ses_hfn_en ?
					PDCP_DEFAULT_HFN : 0,
				.hfn_threshold = 0x70C0A,
				.sdap_enabled = options->pdcp_sdap,
				/* Per-packet HFN override when session HFN
				 * is disabled
				 */
				.hfn_ovrd = !(options->pdcp_ses_hfn_en),
			} },
			.crypto_xform = &cipher_xform
		};

		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
	}

	if (options->op_type == CPERF_IPSEC) {
		return create_ipsec_session(sess_mp, dev_id,
				options, test_vector, iv_offset);
	}

	if (options->op_type == CPERF_DOCSIS) {
		enum rte_security_docsis_direction direction;

		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
				test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
				test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
				test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}
		/* NOTE(review): redundant — .next was already set to NULL
		 * above and never changed in between.
		 */
		cipher_xform.next = NULL;

		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			direction = RTE_SECURITY_DOCSIS_DOWNLINK;
		else
			direction = RTE_SECURITY_DOCSIS_UPLINK;

		struct rte_security_session_conf sess_conf = {
			.action_type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
			{.docsis
				= {
				.direction = direction,
			} },
			.crypto_xform = &cipher_xform
		};
		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
	}
#endif
	/*
	 * cipher only
	 */
	if (options->op_type == CPERF_CIPHER_ONLY) {
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}
		/* create crypto session */
		sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform,
				sess_mp);
	/*
	 * auth only
	 */
	} else if (options->op_type == CPERF_AUTH_ONLY) {
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length =
					options->digest_sz;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length =
					test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}
		/* create crypto session */
		sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform,
				sess_mp);
	/*
	 * cipher and auth
	 */
	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
			|| options->op_type == CPERF_AUTH_THEN_CIPHER) {
		/*
		 * cipher
		 */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}

		/*
		 * auth (IV placed right after the cipher IV)
		 */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset +
				cipher_xform.cipher.iv.length;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data =
					test_vector->auth_key.data;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* cipher then auth */
		if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
			cipher_xform.next = &auth_xform;
			/* create crypto session */
			sess = rte_cryptodev_sym_session_create(dev_id,
				&cipher_xform,
					sess_mp);
		} else { /* auth then cipher */
			auth_xform.next = &cipher_xform;
			/* create crypto session */
			sess = rte_cryptodev_sym_session_create(dev_id,
				&auth_xform, sess_mp);
		}
	} else { /* options->op_type == CPERF_AEAD */
		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		aead_xform.next = NULL;
		aead_xform.aead.algo = options->aead_algo;
		aead_xform.aead.op = options->aead_op;
		aead_xform.aead.iv.offset = iv_offset;

		aead_xform.aead.key.data =
				test_vector->aead_key.data;
		aead_xform.aead.key.length =
				test_vector->aead_key.length;
		aead_xform.aead.iv.length = test_vector->aead_iv.length;

		aead_xform.aead.digest_length = options->digest_sz;
		aead_xform.aead.aad_length =
				options->aead_aad_sz;

		/* Create crypto session */
		sess = rte_cryptodev_sym_session_create(dev_id, &aead_xform,
				sess_mp);
	}

	return sess;
}

/*
 * Map the configured op type to its session-create and op-populate
 * callbacks. Returns 0 on success, -1 for unknown op types.
 */
int
cperf_get_op_functions(const struct cperf_options *options,
		struct cperf_op_fns *op_fns)
{
	memset(op_fns, 0, sizeof(struct cperf_op_fns));

	op_fns->sess_create = cperf_create_session;

	switch (options->op_type) {
	case CPERF_AEAD:
		op_fns->populate_ops = cperf_set_ops_aead;
		break;

	case CPERF_AUTH_THEN_CIPHER:
	case CPERF_CIPHER_THEN_AUTH:
		op_fns->populate_ops = cperf_set_ops_cipher_auth;
		break;
	case CPERF_AUTH_ONLY:
		/* NULL algos use the lightweight populate variants */
		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
			op_fns->populate_ops = cperf_set_ops_null_auth;
		else
			op_fns->populate_ops = cperf_set_ops_auth;
		break;
	case CPERF_CIPHER_ONLY:
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
			op_fns->populate_ops = cperf_set_ops_null_cipher;
		else
			op_fns->populate_ops = cperf_set_ops_cipher;
		break;
	case CPERF_ASYM_MODEX:
		op_fns->populate_ops = cperf_set_ops_asym;
		break;
#ifdef RTE_LIB_SECURITY
	case CPERF_PDCP:
	case CPERF_DOCSIS:
		/* PDCP and DOCSIS share the generic security populate fn */
		op_fns->populate_ops = cperf_set_ops_security;
		break;
	case CPERF_IPSEC:
		op_fns->populate_ops = cperf_set_ops_security_ipsec;
		break;
#endif
	default:
		return -1;
	}

	return 0;
}