/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <setjmp.h>
#include <stdarg.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_ip.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_per_lcore.h>
#include <rte_prefetch.h>
#include <rte_random.h>
#include <rte_ring.h>

enum cdev_type {
	CDEV_TYPE_ANY,
	CDEV_TYPE_HW,
	CDEV_TYPE_SW
};

#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1

#define NB_MBUF 8192

#define MAX_STR_LEN 32
#define MAX_KEY_SIZE 128
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint64_t l2fwd_enabled_port_mask;
static uint64_t l2fwd_enabled_crypto_mask;

/* list of enabled ports */
static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
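
/*
 * Per-port staging buffers: mbufs waiting to be sent in an Ethernet TX burst
 * and crypto operations waiting to be enqueued to a crypto device in a burst.
 */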

struct pkt_buffer {
	unsigned len;
	struct rte_mbuf *buffer[MAX_PKT_BURST];
};

struct op_buffer {
	unsigned len;
	struct rte_crypto_op *buffer[MAX_PKT_BURST];
};

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16

enum l2fwd_crypto_xform_chain {
	L2FWD_CRYPTO_CIPHER_HASH,
	L2FWD_CRYPTO_HASH_CIPHER,
	L2FWD_CRYPTO_CIPHER_ONLY,
	L2FWD_CRYPTO_HASH_ONLY
};

struct l2fwd_key {
	uint8_t *data;
	uint32_t length;
	phys_addr_t phys_addr;
};

/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
	unsigned portmask;
	unsigned nb_ports_per_lcore;
	unsigned refresh_period;
	unsigned single_lcore:1;

	enum cdev_type type;
	unsigned sessionless:1;

	enum l2fwd_crypto_xform_chain xform_chain;

	struct rte_crypto_sym_xform cipher_xform;
	unsigned ckey_param;

	struct l2fwd_key iv;
	unsigned iv_param;

	struct rte_crypto_sym_xform auth_xform;
	uint8_t akey_param;

	struct l2fwd_key aad;
	unsigned aad_param;

	uint16_t block_size;
	char string_auth_algo[MAX_STR_LEN];
	char string_cipher_algo[MAX_STR_LEN];
	char string_type[MAX_STR_LEN];
};

/** l2fwd crypto lcore params */
struct l2fwd_crypto_params {
	uint8_t dev_id;
	uint8_t qp_id;

	unsigned digest_length;
	unsigned block_size;

	struct l2fwd_key iv;
	struct l2fwd_key aad;
	struct rte_cryptodev_sym_session *session;

	uint8_t do_cipher;
	uint8_t do_hash;
	uint8_t hash_verify;
};

/** lcore configuration */
struct lcore_queue_conf {
	unsigned nb_rx_ports;
	unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];

	unsigned nb_crypto_devs;
	unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];

	struct op_buffer op_buf[RTE_MAX_ETHPORTS];
	struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;

struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_NONE,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

struct rte_mempool *l2fwd_pktmbuf_pool;
struct rte_mempool *l2fwd_crypto_op_pool;

/* Per-port statistics struct */
struct l2fwd_port_statistics {
	uint64_t tx;
	uint64_t rx;

	uint64_t crypto_enqueued;
	uint64_t crypto_dequeued;

	uint64_t dropped;
} __rte_cache_aligned;

struct l2fwd_crypto_statistics {
	uint64_t enqueued;
	uint64_t dequeued;

	uint64_t errors;
} __rte_cache_aligned;

struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];

/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 GHz */
#define MAX_TIMER_PERIOD 86400UL /* 1 day max */

/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;

/* Print out statistics on packets dropped */
static void
print_stats(void)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	uint64_t total_packets_enqueued, total_packets_dequeued,
		total_packets_errors;
	unsigned portid;
	uint64_t cdevid;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;
	total_packets_enqueued = 0;
	total_packets_dequeued = 0;
	total_packets_errors = 0;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("\nPort statistics ====================================");

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		/* skip disabled ports */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;
		printf("\nStatistics for port %u ------------------------------"
			"\nPackets sent: %32"PRIu64
			"\nPackets received: %28"PRIu64
			"\nPackets dropped: %29"PRIu64,
			portid,
			port_statistics[portid].tx,
			port_statistics[portid].rx,
			port_statistics[portid].dropped);

		total_packets_dropped += port_statistics[portid].dropped;
		total_packets_tx += port_statistics[portid].tx;
		total_packets_rx += port_statistics[portid].rx;
	}
	printf("\nCrypto statistics ==================================");

	for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) {
		/* skip disabled crypto devices */
		if ((l2fwd_enabled_crypto_mask & (1lu << cdevid)) == 0)
			continue;
		printf("\nStatistics for cryptodev %"PRIu64
			" -------------------------"
			"\nPackets enqueued: %28"PRIu64
			"\nPackets dequeued: %28"PRIu64
			"\nPackets errors: %30"PRIu64,
			cdevid,
			crypto_statistics[cdevid].enqueued,
			crypto_statistics[cdevid].dequeued,
			crypto_statistics[cdevid].errors);

		total_packets_enqueued += crypto_statistics[cdevid].enqueued;
		total_packets_dequeued += crypto_statistics[cdevid].dequeued;
		total_packets_errors += crypto_statistics[cdevid].errors;
	}
	printf("\nAggregate statistics ==============================="
		"\nTotal packets received: %22"PRIu64
		"\nTotal packets enqueued: %22"PRIu64
		"\nTotal packets dequeued: %22"PRIu64
		"\nTotal packets sent: %26"PRIu64
		"\nTotal packets dropped: %23"PRIu64
		"\nTotal packets crypto errors: %17"PRIu64,
		total_packets_rx,
		total_packets_enqueued,
		total_packets_dequeued,
		total_packets_tx,
		total_packets_dropped,
		total_packets_errors);
	printf("\n====================================================\n");
}

static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
		struct l2fwd_crypto_params *cparams)
{
	struct rte_crypto_op **op_buffer;
	unsigned ret;

	op_buffer = (struct rte_crypto_op **)
			qconf->op_buf[cparams->dev_id].buffer;

	ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
			cparams->qp_id, op_buffer, (uint16_t) n);

	crypto_statistics[cparams->dev_id].enqueued += ret;
	if (unlikely(ret < n)) {
		crypto_statistics[cparams->dev_id].errors += (n - ret);
		do {
			rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
			rte_crypto_op_free(op_buffer[ret]);
		} while (++ret < n);
	}

	return 0;
}
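
/*
 * Buffer a crypto op for its target crypto device; once MAX_PKT_BURST ops
 * have accumulated the buffer is flushed with l2fwd_crypto_send_burst().
 * This mirrors the Ethernet TX buffering done in l2fwd_send_packet() below.
 */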

static int
l2fwd_crypto_enqueue(struct rte_crypto_op *op,
		struct l2fwd_crypto_params *cparams)
{
	unsigned lcore_id, len;
	struct lcore_queue_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_queue_conf[lcore_id];
	len = qconf->op_buf[cparams->dev_id].len;
	qconf->op_buf[cparams->dev_id].buffer[len] = op;
	len++;

	/* enough ops to be sent */
	if (len == MAX_PKT_BURST) {
		l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
		len = 0;
	}

	qconf->op_buf[cparams->dev_id].len = len;
	return 0;
}

static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
		struct rte_crypto_op *op,
		struct l2fwd_crypto_params *cparams)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ip_hdr;

	unsigned ipdata_offset, pad_len, data_len;
	char *padding;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (eth_hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_IPv4))
		return -1;

	ipdata_offset = sizeof(struct ether_hdr);

	ip_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
			ipdata_offset);

	ipdata_offset += (ip_hdr->version_ihl & IPV4_HDR_IHL_MASK)
			* IPV4_IHL_MULTIPLIER;

	/* Zero pad data to be crypto'd so it is block aligned */
	data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
	pad_len = data_len % cparams->block_size ? cparams->block_size -
			(data_len % cparams->block_size) : 0;

	if (pad_len) {
		padding = rte_pktmbuf_append(m, pad_len);
		if (unlikely(!padding))
			return -1;

		data_len += pad_len;
		memset(padding, 0, pad_len);
	}

	/* Set crypto operation data parameters */
	rte_crypto_op_attach_sym_session(op, cparams->session);

	if (cparams->do_hash) {
		/* Append space for digest to end of packet */
		op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
				cparams->digest_length);

		op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
				rte_pktmbuf_pkt_len(m) - cparams->digest_length);
		op->sym->auth.digest.length = cparams->digest_length;

		op->sym->auth.data.offset = ipdata_offset;
		op->sym->auth.data.length = data_len;

		if (cparams->aad.length) {
			op->sym->auth.aad.data = cparams->aad.data;
			op->sym->auth.aad.phys_addr = cparams->aad.phys_addr;
			op->sym->auth.aad.length = cparams->aad.length;
		}
	}

	if (cparams->do_cipher) {
		op->sym->cipher.iv.data = cparams->iv.data;
		op->sym->cipher.iv.phys_addr = cparams->iv.phys_addr;
		op->sym->cipher.iv.length = cparams->iv.length;

		op->sym->cipher.data.offset = ipdata_offset;
		if (cparams->do_hash && cparams->hash_verify)
			/* Do not cipher the hash tag */
			op->sym->cipher.data.length = data_len -
					cparams->digest_length;
		else
			op->sym->cipher.data.length = data_len;
	}

	op->sym->m_src = m;

	return l2fwd_crypto_enqueue(op, cparams);
}

/* Send the burst of packets on an output interface */
static int
l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
		uint8_t port)
{
	struct rte_mbuf **pkt_buffer;
	unsigned ret;

	pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;

	ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
	port_statistics[port].tx += ret;
	if (unlikely(ret < n)) {
		port_statistics[port].dropped += (n - ret);
		do {
			rte_pktmbuf_free(pkt_buffer[ret]);
		} while (++ret < n);
	}

	return 0;
}

/* Enqueue packets for TX and prepare them to be sent */
static int
l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
{
	unsigned lcore_id, len;
	struct lcore_queue_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_queue_conf[lcore_id];
	len = qconf->pkt_buf[port].len;
	qconf->pkt_buf[port].buffer[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->pkt_buf[port].len = len;
	return 0;
}

static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
	struct ether_hdr *eth;
	void *tmp;
	unsigned dst_port;

	dst_port = l2fwd_dst_ports[portid];
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);

	l2fwd_send_packet(m, (uint8_t) dst_port);
}

/** Generate random key */
static void
generate_random_key(uint8_t *key, unsigned length)
{
	unsigned i;

	for (i = 0; i < length; i++)
		key[i] = rand() % 0xff;
}

static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
		uint8_t cdev_id)
{
	struct rte_crypto_sym_xform *first_xform;

	if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
		first_xform = &options->cipher_xform;
		first_xform->next = &options->auth_xform;
	} else if (options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER) {
		first_xform = &options->auth_xform;
		first_xform->next = &options->cipher_xform;
	} else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
		first_xform = &options->cipher_xform;
	} else {
		first_xform = &options->auth_xform;
	}

	/* Create the symmetric session with the chosen xform chain */
	return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}

static void
l2fwd_crypto_options_print(struct l2fwd_crypto_options *options);
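
/*
 * Per-lcore forwarding loop: drain pending TX buffers on a timer, read a
 * burst of packets from each assigned RX port, attach a symmetric crypto op
 * and session to every IPv4 packet and enqueue it to the paired cryptodev,
 * then dequeue completed ops and L2-forward the processed packets to the
 * destination port.
 */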

/* main processing loop */
static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
	struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
	struct rte_crypto_op *ops_burst[MAX_PKT_BURST];

	unsigned lcore_id = rte_lcore_id();
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	unsigned i, j, portid, nb_rx;
	struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
			US_PER_S * BURST_TX_DRAIN_US;
	struct l2fwd_crypto_params *cparams;
	struct l2fwd_crypto_params port_cparams[qconf->nb_crypto_devs];

	if (qconf->nb_rx_ports == 0) {
		RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
		return;
	}

	RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);

	l2fwd_crypto_options_print(options);

	for (i = 0; i < qconf->nb_rx_ports; i++) {

		portid = qconf->rx_port_list[i];
		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

	for (i = 0; i < qconf->nb_crypto_devs; i++) {
		port_cparams[i].do_cipher = 0;
		port_cparams[i].do_hash = 0;

		switch (options->xform_chain) {
		case L2FWD_CRYPTO_CIPHER_HASH:
		case L2FWD_CRYPTO_HASH_CIPHER:
			port_cparams[i].do_cipher = 1;
			port_cparams[i].do_hash = 1;
			break;
		case L2FWD_CRYPTO_HASH_ONLY:
			port_cparams[i].do_hash = 1;
			break;
		case L2FWD_CRYPTO_CIPHER_ONLY:
			port_cparams[i].do_cipher = 1;
			break;
		}

		port_cparams[i].dev_id = qconf->cryptodev_list[i];
		port_cparams[i].qp_id = 0;

		port_cparams[i].block_size = options->block_size;

		if (port_cparams[i].do_hash) {
			port_cparams[i].digest_length =
					options->auth_xform.auth.digest_length;
			if (options->auth_xform.auth.add_auth_data_length) {
				port_cparams[i].aad.data = options->aad.data;
				port_cparams[i].aad.length =
					options->auth_xform.auth.add_auth_data_length;
				port_cparams[i].aad.phys_addr = options->aad.phys_addr;
				if (!options->aad_param)
					generate_random_key(port_cparams[i].aad.data,
						port_cparams[i].aad.length);
			}

			if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
				port_cparams[i].hash_verify = 1;
			else
				port_cparams[i].hash_verify = 0;
		}

		if (port_cparams[i].do_cipher) {
			port_cparams[i].iv.data = options->iv.data;
			port_cparams[i].iv.length = options->iv.length;
			port_cparams[i].iv.phys_addr = options->iv.phys_addr;
			if (!options->iv_param)
				generate_random_key(port_cparams[i].iv.data,
						port_cparams[i].iv.length);
		}

		port_cparams[i].session = initialize_crypto_session(options,
				port_cparams[i].dev_id);

		if (port_cparams[i].session == NULL)
			return;
		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u cryptoid=%u\n", lcore_id,
				port_cparams[i].dev_id);
	}

	while (1) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if (qconf->pkt_buf[portid].len == 0)
					continue;
				l2fwd_send_burst(&lcore_queue_conf[lcore_id],
						qconf->pkt_buf[portid].len,
						(uint8_t) portid);
				qconf->pkt_buf[portid].len = 0;
			}

			/* if timer is enabled */
			if (timer_period > 0) {

				/* advance the timer */
				timer_tsc += diff_tsc;

				/* if timer has reached its timeout */
				if (unlikely(timer_tsc >=
						(uint64_t)timer_period)) {

					/* do this only on master core */
					if (lcore_id == rte_get_master_lcore()
						&& options->refresh_period) {
						print_stats();
						timer_tsc = 0;
					}
				}
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->nb_rx_ports; i++) {
			portid = qconf->rx_port_list[i];

			cparams = &port_cparams[i];

			nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
					pkts_burst, MAX_PKT_BURST);

			port_statistics[portid].rx += nb_rx;

			if (nb_rx) {
				/*
				 * If we can't allocate a crypto_ops, then drop
				 * the rest of the burst and dequeue and
				 * process the packets to free offload structs
				 */
				if (rte_crypto_op_bulk_alloc(
						l2fwd_crypto_op_pool,
						RTE_CRYPTO_OP_TYPE_SYMMETRIC,
						ops_burst, nb_rx) !=
								nb_rx) {
					for (j = 0; j < nb_rx; j++)
						rte_pktmbuf_free(pkts_burst[j]);

					nb_rx = 0;
				}

				/* Enqueue packets to the crypto device */
				for (j = 0; j < nb_rx; j++) {
					m = pkts_burst[j];

					l2fwd_simple_crypto_enqueue(m,
							ops_burst[j], cparams);
				}
			}

			/* Dequeue packets from Crypto device */
			do {
				nb_rx = rte_cryptodev_dequeue_burst(
						cparams->dev_id, cparams->qp_id,
						ops_burst, MAX_PKT_BURST);

				crypto_statistics[cparams->dev_id].dequeued +=
						nb_rx;

				/* Forward crypto'd packets */
				for (j = 0; j < nb_rx; j++) {
					m = ops_burst[j]->sym->m_src;

					rte_crypto_op_free(ops_burst[j]);
					l2fwd_simple_forward(m, portid);
				}
			} while (nb_rx == MAX_PKT_BURST);
		}
	}
}

static int
l2fwd_launch_one_lcore(void *arg)
{
	l2fwd_main_loop((struct l2fwd_crypto_options *)arg);
	return 0;
}

/* Display command line arguments usage */
static void
l2fwd_crypto_usage(const char *prgname)
{
	printf("%s [EAL options] -- --cdev_type TYPE [optional parameters]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -q NQ: number of queues (=ports) per lcore (default is 1)\n"
		"  -s manage all ports from a single lcore\n"
		"  -t PERIOD: statistics will be refreshed each PERIOD seconds"
		" (0 to disable, 10 default, 86400 maximum)\n"

		"  --cdev_type HW / SW / ANY\n"
		"  --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY / HASH_ONLY\n"

		"  --cipher_algo ALGO\n"
		"  --cipher_op ENCRYPT / DECRYPT\n"
		"  --cipher_key KEY\n"
		"  --iv IV\n"

		"  --auth_algo ALGO\n"
		"  --auth_op GENERATE / VERIFY\n"
		"  --auth_key KEY\n"
		"  --aad AAD\n"

		"  --sessionless\n",
		prgname);
}

/** Parse crypto device type command line argument */
static int
parse_cryptodev_type(enum cdev_type *type, char *optarg)
{
	if (strcmp("HW", optarg) == 0) {
		*type = CDEV_TYPE_HW;
		return 0;
	} else if (strcmp("SW", optarg) == 0) {
		*type = CDEV_TYPE_SW;
		return 0;
	} else if (strcmp("ANY", optarg) == 0) {
		*type = CDEV_TYPE_ANY;
		return 0;
	}

	return -1;
}

/** Parse crypto chain xform command line argument */
static int
parse_crypto_opt_chain(struct l2fwd_crypto_options *options, char *optarg)
{
	if (strcmp("CIPHER_HASH", optarg) == 0) {
		options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
		return 0;
	} else if (strcmp("HASH_CIPHER", optarg) == 0) {
		options->xform_chain = L2FWD_CRYPTO_HASH_CIPHER;
		return 0;
	} else if (strcmp("CIPHER_ONLY", optarg) == 0) {
		options->xform_chain = L2FWD_CRYPTO_CIPHER_ONLY;
		return 0;
	} else if (strcmp("HASH_ONLY", optarg) == 0) {
		options->xform_chain = L2FWD_CRYPTO_HASH_ONLY;
		return 0;
	}

	return -1;
}

/** Parse crypto cipher algo option command line argument */
static int
parse_cipher_algo(enum rte_crypto_cipher_algorithm *algo, char *optarg)
{
	if (strcmp("AES_CBC", optarg) == 0) {
		*algo = RTE_CRYPTO_CIPHER_AES_CBC;
		return 0;
	} else if (strcmp("AES_GCM", optarg) == 0) {
		*algo = RTE_CRYPTO_CIPHER_AES_GCM;
		return 0;
	}

	printf("Cipher algorithm not supported!\n");
	return -1;
}

/** Parse crypto cipher operation command line argument */
static int
parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
{
	if (strcmp("ENCRYPT", optarg) == 0) {
		*op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		return 0;
	} else if (strcmp("DECRYPT", optarg) == 0) {
		*op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		return 0;
	}

	printf("Cipher operation not supported!\n");
	return -1;
}
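
/*
 * Keys, IVs and AAD are supplied on the command line as colon-separated
 * hexadecimal bytes, e.g. --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
 * (an illustrative value only, not a recommended key).
 */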

/** Parse crypto key command line argument */
static int
parse_key(uint8_t *data, char *input_arg)
{
	unsigned byte_count;
	char *token;

	for (byte_count = 0, token = strtok(input_arg, ":");
			(byte_count < MAX_KEY_SIZE) && (token != NULL);
			token = strtok(NULL, ":")) {

		errno = 0;
		int number = (int)strtol(token, NULL, 16);

		if (errno == EINVAL || errno == ERANGE ||
				number > 0xFF)
			return -1;

		data[byte_count++] = (uint8_t)number;
	}

	return 0;
}

/** Parse crypto authentication algo option command line argument */
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
	if (strcmp("MD5_HMAC", optarg) == 0) {
		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
		return 0;
	} else if (strcmp("SHA1_HMAC", optarg) == 0) {
		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
		return 0;
	} else if (strcmp("SHA224_HMAC", optarg) == 0) {
		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
		return 0;
	} else if (strcmp("SHA256_HMAC", optarg) == 0) {
		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
		return 0;
	} else if (strcmp("SHA384_HMAC", optarg) == 0) {
		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
		return 0;
	} else if (strcmp("SHA512_HMAC", optarg) == 0) {
		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
		return 0;
	}

	printf("Authentication algorithm specified not supported!\n");
	return -1;
}

/** Parse crypto authentication operation command line argument */
static int
parse_auth_op(enum rte_crypto_auth_operation *op, char *optarg)
{
	if (strcmp("VERIFY", optarg) == 0) {
		*op = RTE_CRYPTO_AUTH_OP_VERIFY;
		return 0;
	} else if (strcmp("GENERATE", optarg) == 0) {
		*op = RTE_CRYPTO_AUTH_OP_GENERATE;
		return 0;
	}

	printf("Authentication operation specified not supported!\n");
	return -1;
}

/** Parse long options */
static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
		struct option *lgopts, int option_index)
{
	int retval;

	if (strcmp(lgopts[option_index].name, "cdev_type") == 0)
		return parse_cryptodev_type(&options->type, optarg);

	else if (strcmp(lgopts[option_index].name, "chain") == 0)
		return parse_crypto_opt_chain(options, optarg);

	/* Cipher options */
	else if (strcmp(lgopts[option_index].name, "cipher_algo") == 0) {
		retval = parse_cipher_algo(&options->cipher_xform.cipher.algo,
				optarg);
		if (retval == 0)
			strcpy(options->string_cipher_algo, optarg);
		return retval;
	}

	else if (strcmp(lgopts[option_index].name, "cipher_op") == 0)
		return parse_cipher_op(&options->cipher_xform.cipher.op,
				optarg);

	else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
		options->ckey_param = 1;
		return parse_key(options->cipher_xform.cipher.key.data, optarg);
	}

	else if (strcmp(lgopts[option_index].name, "iv") == 0) {
		options->iv_param = 1;
		return parse_key(options->iv.data, optarg);
	}

	/* Authentication options */
	else if (strcmp(lgopts[option_index].name, "auth_algo") == 0) {
		retval = parse_auth_algo(&options->auth_xform.auth.algo,
				optarg);
		if (retval == 0)
			strcpy(options->string_auth_algo, optarg);
		return retval;
	}

	else if (strcmp(lgopts[option_index].name, "auth_op") == 0)
		return parse_auth_op(&options->auth_xform.auth.op,
				optarg);

	else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
		options->akey_param = 1;
		return parse_key(options->auth_xform.auth.key.data, optarg);
	}

	else if (strcmp(lgopts[option_index].name, "aad") == 0) {
		options->aad_param = 1;
		return parse_key(options->aad.data, optarg);
	}

	else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
		options->sessionless = 1;
		return 0;
	}

	return -1;
}

/** Parse port mask */
static int
l2fwd_crypto_parse_portmask(struct l2fwd_crypto_options *options,
		const char *q_arg)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(q_arg, &end, 16);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		pm = 0;

	options->portmask = pm;
	if (options->portmask == 0) {
		printf("invalid portmask specified\n");
		return -1;
	}

	return pm;
}

/** Parse number of queues */
static int
l2fwd_crypto_parse_nqueue(struct l2fwd_crypto_options *options,
		const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		n = 0;
	else if (n >= MAX_RX_QUEUE_PER_LCORE)
		n = 0;

	options->nb_ports_per_lcore = n;
	if (options->nb_ports_per_lcore == 0) {
		printf("invalid number of ports selected\n");
		return -1;
	}

	return 0;
}

/** Parse timer period */
static int
l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
		const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse number string */
	n = (unsigned)strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		n = 0;

	if (n >= MAX_TIMER_PERIOD) {
		printf("Warning: refresh period %lu is greater than "
				"max value %lu! Using max value\n",
				n, MAX_TIMER_PERIOD);
		n = MAX_TIMER_PERIOD;
	}

	options->refresh_period = n * 1000 * TIMER_MILLISECOND;

	return 0;
}

/** Generate default options for application */
static void
l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
{
	srand(time(NULL));

	options->portmask = 0xffffffff;
	options->nb_ports_per_lcore = 1;
	options->refresh_period = 10000;
	options->single_lcore = 0;
	options->sessionless = 0;

	options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;

	/* Cipher Data */
	options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	options->cipher_xform.next = NULL;
	options->ckey_param = 0;
	options->iv_param = 0;

	options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;

	/* Authentication Data */
	options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	options->auth_xform.next = NULL;
	options->akey_param = 0;
	options->aad_param = 0;

	options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;

	options->type = CDEV_TYPE_ANY;
}

static void
l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
{
	printf("Options:-\n");
	printf("portmask: %x\n", options->portmask);
	printf("ports per lcore: %u\n", options->nb_ports_per_lcore);
	printf("refresh period : %u\n", options->refresh_period);
	printf("single lcore mode: %s\n",
			options->single_lcore ? "enabled" : "disabled");
	printf("stats_printing: %s\n",
			options->refresh_period == 0 ? "disabled" : "enabled");

	printf("sessionless crypto: %s\n",
			options->sessionless ? "enabled" : "disabled");
}

/* Parse the argument given in the command line of the application */
static int
l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
		int argc, char **argv)
{
	int opt, retval, option_index;
	char **argvopt = argv, *prgname = argv[0];

	static struct option lgopts[] = {
		{ "sessionless", no_argument, 0, 0 },

		{ "cdev_type", required_argument, 0, 0 },
		{ "chain", required_argument, 0, 0 },

		{ "cipher_algo", required_argument, 0, 0 },
		{ "cipher_op", required_argument, 0, 0 },
		{ "cipher_key", required_argument, 0, 0 },

		{ "auth_algo", required_argument, 0, 0 },
		{ "auth_op", required_argument, 0, 0 },
		{ "auth_key", required_argument, 0, 0 },

		{ "iv", required_argument, 0, 0 },
		{ "aad", required_argument, 0, 0 },

		{ NULL, 0, 0, 0 }
	};

	l2fwd_crypto_default_options(options);

	while ((opt = getopt_long(argc, argvopt, "p:q:st:", lgopts,
			&option_index)) != EOF) {
		switch (opt) {
		/* long options */
		case 0:
			retval = l2fwd_crypto_parse_args_long_options(options,
					lgopts, option_index);
			if (retval < 0) {
				l2fwd_crypto_usage(prgname);
				return -1;
			}
			break;

		/* portmask */
		case 'p':
			retval = l2fwd_crypto_parse_portmask(options, optarg);
			if (retval < 0) {
				l2fwd_crypto_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			retval = l2fwd_crypto_parse_nqueue(options, optarg);
			if (retval < 0) {
				l2fwd_crypto_usage(prgname);
				return -1;
			}
			break;

		/* single lcore */
		case 's':
			options->single_lcore = 1;
			break;

		/* timer period */
		case 't':
			retval = l2fwd_crypto_parse_timer_period(options,
					optarg);
			if (retval < 0) {
				l2fwd_crypto_usage(prgname);
				return -1;
			}
			break;

		default:
			l2fwd_crypto_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	retval = optind-1;
	optind = 0; /* reset getopt lib */

	return retval;
}

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}

/* Check whether the crypto device matches the preferred type (HW/SW/ANY) */
static int
check_type(struct l2fwd_crypto_options *options, struct rte_cryptodev_info *dev_info)
{
	if (options->type == CDEV_TYPE_HW &&
			(dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
		return 0;
	if (options->type == CDEV_TYPE_SW &&
			!(dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
		return 0;
	if (options->type == CDEV_TYPE_ANY)
		return 0;

	return -1;
}

static int
initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
		uint8_t *enabled_cdevs)
{
	unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
	const struct rte_cryptodev_capabilities *cap;
	enum rte_crypto_auth_algorithm cap_auth_algo;
	enum rte_crypto_auth_algorithm opt_auth_algo;
	enum rte_crypto_cipher_algorithm cap_cipher_algo;
	enum rte_crypto_cipher_algorithm opt_cipher_algo;
	int retval;

	cdev_count = rte_cryptodev_count();
	if (cdev_count == 0) {
		printf("No crypto devices available\n");
		return -1;
	}

	for (cdev_id = 0; cdev_id < cdev_count && enabled_cdev_count < nb_ports;
			cdev_id++) {
		struct rte_cryptodev_qp_conf qp_conf;
		struct rte_cryptodev_info dev_info;

		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = 1,
			.socket_id = SOCKET_ID_ANY,
			.session_mp = {
				.nb_objs = 2048,
				.cache_size = 64
			}
		};

		rte_cryptodev_info_get(cdev_id, &dev_info);

		/* Set cipher parameters */
		if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
				options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
				options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
			/* Check if device supports cipher algo */
			i = 0;
			opt_cipher_algo = options->cipher_xform.cipher.algo;
			cap = &dev_info.capabilities[i];
			while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
				cap_cipher_algo = cap->sym.cipher.algo;
				if (cap->sym.xform_type ==
						RTE_CRYPTO_SYM_XFORM_CIPHER) {
					if (cap_cipher_algo == opt_cipher_algo) {
						if (check_type(options, &dev_info) == 0)
							break;
					}
				}
				cap = &dev_info.capabilities[++i];
			}

			if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
				printf("Algorithm %s not supported by cryptodev %u"
					" or device not of preferred type (%s)\n",
					options->string_cipher_algo, cdev_id,
					options->string_type);
				continue;
			}

			options->block_size = cap->sym.cipher.block_size;
			options->iv.length = cap->sym.cipher.iv_size.min;
			options->cipher_xform.cipher.key.length =
					cap->sym.cipher.key_size.min;
			if (!options->ckey_param)
				generate_random_key(
					options->cipher_xform.cipher.key.data,
					options->cipher_xform.cipher.key.length);
		}

		/* Set auth parameters */
		if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
				options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
				options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
			/* Check if device supports auth algo */
			i = 0;
			opt_auth_algo = options->auth_xform.auth.algo;
			cap = &dev_info.capabilities[i];
			while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
				cap_auth_algo = cap->sym.auth.algo;
				if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
						(cap_auth_algo == opt_auth_algo) &&
						(check_type(options, &dev_info) == 0)) {
					break;
				}
				cap = &dev_info.capabilities[++i];
			}

			if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
				printf("Algorithm %s not supported by cryptodev %u"
					" or device not of preferred type (%s)\n",
					options->string_auth_algo, cdev_id,
					options->string_type);
				continue;
			}

			options->block_size = cap->sym.auth.block_size;
			options->auth_xform.auth.add_auth_data_length =
					cap->sym.auth.aad_size.min;
			options->auth_xform.auth.digest_length =
					cap->sym.auth.digest_size.min;
			options->auth_xform.auth.key.length =
					cap->sym.auth.key_size.min;

			if (!options->akey_param)
				generate_random_key(
					options->auth_xform.auth.key.data,
					options->auth_xform.auth.key.length);
		}

		retval = rte_cryptodev_configure(cdev_id, &conf);
		if (retval < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -1;
		}

		qp_conf.nb_descriptors = 2048;

		retval = rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
				SOCKET_ID_ANY);
		if (retval < 0) {
			printf("Failed to setup queue pair %u on cryptodev %u\n",
					0, cdev_id);
			return -1;
		}

		l2fwd_enabled_crypto_mask |= (1ULL << cdev_id);

		enabled_cdevs[cdev_id] = 1;
		enabled_cdev_count++;
	}

	return enabled_cdev_count;
}

static int
initialize_ports(struct l2fwd_crypto_options *options)
{
	uint8_t last_portid, portid;
	unsigned enabled_portcount = 0;
	unsigned nb_ports = rte_eth_dev_count();

	if (nb_ports == 0) {
		printf("No Ethernet ports - bye\n");
		return -1;
	}

	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	/* Reset l2fwd_dst_ports */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		l2fwd_dst_ports[portid] = 0;

	for (last_portid = 0, portid = 0; portid < nb_ports; portid++) {
		int retval;

		/* Skip ports that are not enabled */
		if ((options->portmask & (1 << portid)) == 0)
			continue;

		/* init port */
		printf("Initializing port %u... ", (unsigned) portid);
		fflush(stdout);
		retval = rte_eth_dev_configure(portid, 1, 1, &port_conf);
		if (retval < 0) {
			printf("Cannot configure device: err=%d, port=%u\n",
				retval, (unsigned) portid);
			return -1;
		}

		/* init one RX queue */
		fflush(stdout);
		retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
				rte_eth_dev_socket_id(portid),
				NULL, l2fwd_pktmbuf_pool);
		if (retval < 0) {
			printf("rte_eth_rx_queue_setup:err=%d, port=%u\n",
				retval, (unsigned) portid);
			return -1;
		}

		/* init one TX queue on each port */
		fflush(stdout);
		retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid),
				NULL);
		if (retval < 0) {
			printf("rte_eth_tx_queue_setup:err=%d, port=%u\n",
				retval, (unsigned) portid);

			return -1;
		}

		/* Start device */
		retval = rte_eth_dev_start(portid);
		if (retval < 0) {
			printf("rte_eth_dev_start:err=%d, port=%u\n",
				retval, (unsigned) portid);
			return -1;
		}

		rte_eth_promiscuous_enable(portid);

		rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);

		printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
				(unsigned) portid,
				l2fwd_ports_eth_addr[portid].addr_bytes[0],
				l2fwd_ports_eth_addr[portid].addr_bytes[1],
				l2fwd_ports_eth_addr[portid].addr_bytes[2],
				l2fwd_ports_eth_addr[portid].addr_bytes[3],
				l2fwd_ports_eth_addr[portid].addr_bytes[4],
				l2fwd_ports_eth_addr[portid].addr_bytes[5]);

		/* initialize port stats */
		memset(&port_statistics, 0, sizeof(port_statistics));

		/* Setup port forwarding table */
		if (enabled_portcount % 2) {
			l2fwd_dst_ports[portid] = last_portid;
			l2fwd_dst_ports[last_portid] = portid;
		} else {
			last_portid = portid;
		}

		l2fwd_enabled_port_mask |= (1 << portid);
		enabled_portcount++;
	}

	if (enabled_portcount == 1) {
		l2fwd_dst_ports[last_portid] = last_portid;
	} else if (enabled_portcount % 2) {
		printf("odd number of ports in portmask - bye\n");
		return -1;
	}

	check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);

	return enabled_portcount;
}

static void
reserve_key_memory(struct l2fwd_crypto_options *options)
{
	options->cipher_xform.cipher.key.data = rte_malloc("crypto key",
						MAX_KEY_SIZE, 0);
	if (options->cipher_xform.cipher.key.data == NULL)
		rte_exit(EXIT_FAILURE, "Failed to allocate memory for cipher key");

	options->auth_xform.auth.key.data = rte_malloc("auth key",
						MAX_KEY_SIZE, 0);
	if (options->auth_xform.auth.key.data == NULL)
		rte_exit(EXIT_FAILURE, "Failed to allocate memory for auth key");

	options->iv.data = rte_malloc("iv", MAX_KEY_SIZE, 0);
	if (options->iv.data == NULL)
		rte_exit(EXIT_FAILURE, "Failed to allocate memory for IV");
	options->iv.phys_addr = rte_malloc_virt2phy(options->iv.data);

	options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
	if (options->aad.data == NULL)
		rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
	options->aad.phys_addr = rte_malloc_virt2phy(options->aad.data);
}
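
/*
 * Application entry point: initialize the EAL, parse the l2fwd-crypto
 * options, create the mbuf and crypto op pools, bring up the Ethernet ports
 * and crypto devices, map ports and cryptodevs to lcores, and launch
 * l2fwd_main_loop() on every lcore.
 *
 * A possible invocation (illustrative only; the EAL core/memory options and
 * the crypto PMD to load depend on the target system):
 *
 *   ./l2fwd-crypto -c 0x3 -n 4 -- -p 0x3 --chain CIPHER_HASH \
 *       --cipher_algo AES_CBC --cipher_op ENCRYPT \
 *       --auth_algo SHA1_HMAC --auth_op GENERATE
 */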

int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct l2fwd_crypto_options options;

	uint8_t nb_ports, nb_cryptodevs, portid, cdev_id;
	unsigned lcore_id, rx_lcore_id;
	int ret, enabled_cdevcount, enabled_portcount;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = {0};

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	argc -= ret;
	argv += ret;

	/* reserve memory for Cipher/Auth key and IV */
	reserve_key_memory(&options);

	/* parse application arguments (after the EAL ones) */
	ret = l2fwd_crypto_parse_args(&options, argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");

	/* create the mbuf pool */
	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
			sizeof(struct rte_crypto_op),
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (l2fwd_pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* create crypto op pool */
	l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
			rte_socket_id());
	if (l2fwd_crypto_op_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");

	/* Enable Ethernet ports */
	enabled_portcount = initialize_ports(&options);
	if (enabled_portcount < 1)
		rte_exit(EXIT_FAILURE, "Failed to initialize Ethernet ports\n");

	nb_ports = rte_eth_dev_count();
	/* Initialize the port/queue configuration of each logical core */
	for (rx_lcore_id = 0, qconf = NULL, portid = 0;
			portid < nb_ports; portid++) {

		/* skip ports that are not enabled */
		if ((options.portmask & (1 << portid)) == 0)
			continue;

		if (options.single_lcore && qconf == NULL) {
			while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
				rx_lcore_id++;
				if (rx_lcore_id >= RTE_MAX_LCORE)
					rte_exit(EXIT_FAILURE,
							"Not enough cores\n");
			}
		} else if (!options.single_lcore) {
			/* get the lcore_id for this port */
			while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
					lcore_queue_conf[rx_lcore_id].nb_rx_ports ==
					options.nb_ports_per_lcore) {
				rx_lcore_id++;
				if (rx_lcore_id >= RTE_MAX_LCORE)
					rte_exit(EXIT_FAILURE,
							"Not enough cores\n");
			}
		}

		/* Assigned a new logical core in the loop above. */
		if (qconf != &lcore_queue_conf[rx_lcore_id])
			qconf = &lcore_queue_conf[rx_lcore_id];

		qconf->rx_port_list[qconf->nb_rx_ports] = portid;
		qconf->nb_rx_ports++;

		printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned)portid);
	}

	/* Enable Crypto devices */
	enabled_cdevcount = initialize_cryptodevs(&options, enabled_portcount,
			enabled_cdevs);
	if (enabled_cdevcount < 0)
		rte_exit(EXIT_FAILURE, "Failed to initialize crypto devices\n");

	if (enabled_cdevcount < enabled_portcount)
		rte_exit(EXIT_FAILURE, "Number of capable crypto devices (%d) "
				"has to be equal to or greater than the number of ports (%d)\n",
				enabled_cdevcount, enabled_portcount);

	nb_cryptodevs = rte_cryptodev_count();

	/* Initialize the port/cryptodev configuration of each logical core */
	for (rx_lcore_id = 0, qconf = NULL, cdev_id = 0;
			cdev_id < nb_cryptodevs && enabled_cdevcount;
			cdev_id++) {
		/* Crypto op not supported by crypto device */
		if (!enabled_cdevs[cdev_id])
			continue;

		if (options.single_lcore && qconf == NULL) {
			while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
				rx_lcore_id++;
				if (rx_lcore_id >= RTE_MAX_LCORE)
					rte_exit(EXIT_FAILURE,
							"Not enough cores\n");
			}
		} else if (!options.single_lcore) {
			/* get the lcore_id for this cryptodev */
			while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
					lcore_queue_conf[rx_lcore_id].nb_crypto_devs ==
					options.nb_ports_per_lcore) {
				rx_lcore_id++;
				if (rx_lcore_id >= RTE_MAX_LCORE)
					rte_exit(EXIT_FAILURE,
							"Not enough cores\n");
			}
		}

		/* Assigned a new logical core in the loop above. */
		if (qconf != &lcore_queue_conf[rx_lcore_id])
			qconf = &lcore_queue_conf[rx_lcore_id];

		qconf->cryptodev_list[qconf->nb_crypto_devs] = cdev_id;
		qconf->nb_crypto_devs++;

		enabled_cdevcount--;

		printf("Lcore %u: cryptodev %u\n", rx_lcore_id,
				(unsigned)cdev_id);
	}

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)&options,
			CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}