/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_verify.h"
#include "cperf_ops.h"

struct cperf_verify_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint8_t lcore_id;

	struct rte_mempool *pkt_mbuf_pool_in;
	struct rte_mempool *pkt_mbuf_pool_out;
	struct rte_mbuf **mbufs_in;
	struct rte_mbuf **mbufs_out;

	struct rte_mempool *crypto_op_pool;

	struct rte_cryptodev_sym_session *sess;

	cperf_populate_ops_t populate_ops;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
};

struct cperf_op_result {
	enum rte_crypto_op_status status;
};

static void
cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
{
	uint32_t i;

	if (ctx) {
		if (ctx->sess) {
			rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
			rte_cryptodev_sym_session_free(ctx->sess);
		}

		if (ctx->mbufs_in) {
			for (i = 0; i < mbuf_nb; i++)
				rte_pktmbuf_free(ctx->mbufs_in[i]);

			rte_free(ctx->mbufs_in);
		}

		if (ctx->mbufs_out) {
			for (i = 0; i < mbuf_nb; i++) {
				if (ctx->mbufs_out[i] != NULL)
					rte_pktmbuf_free(ctx->mbufs_out[i]);
			}

			rte_free(ctx->mbufs_out);
		}

		if (ctx->pkt_mbuf_pool_in)
			rte_mempool_free(ctx->pkt_mbuf_pool_in);

		if (ctx->pkt_mbuf_pool_out)
			rte_mempool_free(ctx->pkt_mbuf_pool_out);

		if (ctx->crypto_op_pool)
			rte_mempool_free(ctx->crypto_op_pool);

		rte_free(ctx);
	}
}

static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
		uint32_t segments_nb,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	struct rte_mbuf *mbuf;
	uint32_t segment_sz = options->max_buffer_size / segments_nb;
	uint32_t last_sz = options->max_buffer_size %
			segments_nb;
	uint8_t *mbuf_data;
	uint8_t *test_data =
			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					test_vector->plaintext.data :
					test_vector->ciphertext.data;

	mbuf = rte_pktmbuf_alloc(mempool);
	if (mbuf == NULL)
		goto error;

	mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
	if (mbuf_data == NULL)
		goto error;

	memcpy(mbuf_data, test_data, segment_sz);
	test_data += segment_sz;
	segments_nb--;

	while (segments_nb) {
		struct rte_mbuf *m;

		m = rte_pktmbuf_alloc(mempool);
		if (m == NULL)
			goto error;

		rte_pktmbuf_chain(mbuf, m);

		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
		if (mbuf_data == NULL)
			goto error;

		memcpy(mbuf_data, test_data, segment_sz);
		test_data += segment_sz;
		segments_nb--;
	}

	if (last_sz) {
		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
		if (mbuf_data == NULL)
			goto error;

		memcpy(mbuf_data, test_data, last_sz);
	}

	if (options->op_type != CPERF_CIPHER_ONLY) {
		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
				options->digest_sz);
		if (mbuf_data == NULL)
			goto error;
	}

	if (options->op_type == CPERF_AEAD) {
		uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
				RTE_ALIGN_CEIL(options->aead_aad_sz, 16));

		if (aead == NULL)
			goto error;

		memcpy(aead, test_vector->aad.data, test_vector->aad.length);
	}

	return mbuf;
error:
	if (mbuf != NULL)
		rte_pktmbuf_free(mbuf);

	return NULL;
}

void *
cperf_verify_test_constructor(struct rte_mempool *sess_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_verify_ctx *ctx = NULL;
	unsigned int mbuf_idx = 0;
	char pool_name[32] = "";

	ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
			iv_offset);
	if (ctx->sess == NULL)
		goto err;

	snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
			dev_id);

	ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
			options->pool_sz * options->segments_nb, 0, 0,
			RTE_PKTMBUF_HEADROOM +
			RTE_CACHE_LINE_ROUNDUP(
				(options->max_buffer_size / options->segments_nb) +
				(options->max_buffer_size % options->segments_nb) +
				options->digest_sz),
			rte_socket_id());

	if (ctx->pkt_mbuf_pool_in == NULL)
		goto err;

	/* Generate mbufs_in with plaintext populated for test */
	ctx->mbufs_in = rte_malloc(NULL,
			(sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);

	for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
		ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
				ctx->pkt_mbuf_pool_in, options->segments_nb,
				options, test_vector);
		if (ctx->mbufs_in[mbuf_idx] == NULL)
			goto err;
	}

	if (options->out_of_place == 1) {

		snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
				dev_id);

		ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
				pool_name, options->pool_sz, 0, 0,
				RTE_PKTMBUF_HEADROOM +
				RTE_CACHE_LINE_ROUNDUP(
					options->max_buffer_size +
					options->digest_sz),
				rte_socket_id());

		if (ctx->pkt_mbuf_pool_out == NULL)
			goto err;
	}

	ctx->mbufs_out = rte_malloc(NULL,
			(sizeof(struct rte_mbuf *) *
			ctx->options->pool_sz), 0);

	for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
		if (options->out_of_place == 1) {
			ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
					ctx->pkt_mbuf_pool_out, 1,
					options, test_vector);
			if (ctx->mbufs_out[mbuf_idx] == NULL)
				goto err;
		} else {
			ctx->mbufs_out[mbuf_idx] = NULL;
		}
	}

	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
			dev_id);

	uint16_t priv_size = test_vector->cipher_iv.length +
		test_vector->auth_iv.length;
	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
			512, priv_size, rte_socket_id());
	if (ctx->crypto_op_pool == NULL)
		goto err;

	return ctx;
err:
	cperf_verify_test_free(ctx, mbuf_idx);

	return NULL;
}

static int
cperf_verify_op(struct rte_crypto_op *op,
		const struct cperf_options *options,
		const struct cperf_test_vector *vector)
{
	const struct rte_mbuf *m;
	uint32_t len;
	uint16_t nb_segs;
	uint8_t *data;
	uint32_t cipher_offset, auth_offset;
	uint8_t cipher, auth;
	int res = 0;

	if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
		return 1;

	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	data = rte_malloc(NULL, len, 0);
	if (data == NULL)
		return 1;

	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
				m->data_len);
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	switch (options->op_type) {
	case CPERF_CIPHER_ONLY:
		cipher = 1;
		cipher_offset = 0;
		auth = 0;
		auth_offset = 0;
		break;
	case CPERF_CIPHER_THEN_AUTH:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AUTH_ONLY:
		cipher = 0;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AUTH_THEN_CIPHER:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AEAD:
		cipher = 1;
		cipher_offset = vector->aad.length;
		auth = 1;
		auth_offset = vector->aad.length + options->test_buffer_size;
		break;
	}

	if (cipher == 1) {
		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			res += memcmp(data + cipher_offset,
					vector->ciphertext.data,
					options->test_buffer_size);
		else
			res += memcmp(data + cipher_offset,
					vector->plaintext.data,
					options->test_buffer_size);
	}

	if (auth == 1) {
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
			res += memcmp(data + auth_offset,
					vector->digest.data,
					options->digest_sz);
	}

	/* Free the linearized copy of the buffer used for comparison */
	rte_free(data);

	return !!res;
}

int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	static int only_once;

	uint64_t i, m_idx = 0;
	uint16_t ops_unused = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segments_nb > 1) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {

		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate crypto ops from pool */
		if (ops_needed != rte_crypto_op_bulk_alloc(
				ctx->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops, ops_needed)) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
				&ctx->mbufs_out[m_idx],
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;

		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		m_idx += ops_needed;
		if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
			m_idx = 0;

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
			/* free crypto ops so they can be reused. We don't free
			 * the mbufs here as we don't want to reuse them, as
			 * the crypto operation will have changed the data and
			 * would cause failures.
			 */
			rte_crypto_op_free(ops_processed[i]);
		}
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Sending 0 length burst to flush sw crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
			/* free crypto ops so they can be reused. We don't free
			 * the mbufs here as we don't want to reuse them, as
			 * the crypto operation will have changed the data and
			 * would cause failures.
			 */
			rte_crypto_op_free(ops_processed[i]);
		}
		ops_deqd_total += ops_deqd;
	}

	if (!ctx->options->csv) {
		if (!only_once)
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");
		only_once = 1;

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (!only_once)
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");
		only_once = 1;

		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}

void
cperf_verify_test_destructor(void *arg)
{
	struct cperf_verify_ctx *ctx = arg;

	if (ctx == NULL)
		return;

	rte_cryptodev_stop(ctx->dev_id);

	cperf_verify_test_free(ctx, ctx->options->pool_sz);
}