1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2015-2016 Intel Corporation 3 */ 4 5 #include <time.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <string.h> 9 #include <stdint.h> 10 #include <inttypes.h> 11 #include <sys/types.h> 12 #include <sys/queue.h> 13 #include <netinet/in.h> 14 #include <setjmp.h> 15 #include <stdarg.h> 16 #include <ctype.h> 17 #include <errno.h> 18 #include <getopt.h> 19 #include <fcntl.h> 20 #include <unistd.h> 21 22 #include <rte_atomic.h> 23 #include <rte_branch_prediction.h> 24 #include <rte_common.h> 25 #include <rte_cryptodev.h> 26 #include <rte_cycles.h> 27 #include <rte_debug.h> 28 #include <rte_eal.h> 29 #include <rte_ether.h> 30 #include <rte_ethdev.h> 31 #include <rte_interrupts.h> 32 #include <rte_ip.h> 33 #include <rte_launch.h> 34 #include <rte_lcore.h> 35 #include <rte_log.h> 36 #include <rte_malloc.h> 37 #include <rte_mbuf.h> 38 #include <rte_memcpy.h> 39 #include <rte_memory.h> 40 #include <rte_mempool.h> 41 #include <rte_per_lcore.h> 42 #include <rte_prefetch.h> 43 #include <rte_random.h> 44 #include <rte_hexdump.h> 45 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER 46 #include <rte_cryptodev_scheduler.h> 47 #endif 48 49 enum cdev_type { 50 CDEV_TYPE_ANY, 51 CDEV_TYPE_HW, 52 CDEV_TYPE_SW 53 }; 54 55 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1 56 57 #define NB_MBUF 8192 58 59 #define MAX_STR_LEN 32 60 #define MAX_KEY_SIZE 128 61 #define MAX_IV_SIZE 16 62 #define MAX_AAD_SIZE 65535 63 #define MAX_PKT_BURST 32 64 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ 65 #define SESSION_POOL_CACHE_SIZE 0 66 67 #define MAXIMUM_IV_LENGTH 16 68 #define IV_OFFSET (sizeof(struct rte_crypto_op) + \ 69 sizeof(struct rte_crypto_sym_op)) 70 71 /* 72 * Configurable number of RX/TX ring descriptors 73 */ 74 #define RTE_TEST_RX_DESC_DEFAULT 1024 75 #define RTE_TEST_TX_DESC_DEFAULT 1024 76 77 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; 78 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; 79 80 /* ethernet addresses of ports */ 81 static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS]; 82 83 /* mask of enabled ports */ 84 static uint64_t l2fwd_enabled_port_mask; 85 static uint64_t l2fwd_enabled_crypto_mask; 86 87 /* list of enabled ports */ 88 static uint16_t l2fwd_dst_ports[RTE_MAX_ETHPORTS]; 89 90 91 struct pkt_buffer { 92 unsigned len; 93 struct rte_mbuf *buffer[MAX_PKT_BURST]; 94 }; 95 96 struct op_buffer { 97 unsigned len; 98 struct rte_crypto_op *buffer[MAX_PKT_BURST]; 99 }; 100 101 #define MAX_RX_QUEUE_PER_LCORE 16 102 #define MAX_TX_QUEUE_PER_PORT 16 103 104 enum l2fwd_crypto_xform_chain { 105 L2FWD_CRYPTO_CIPHER_HASH, 106 L2FWD_CRYPTO_HASH_CIPHER, 107 L2FWD_CRYPTO_CIPHER_ONLY, 108 L2FWD_CRYPTO_HASH_ONLY, 109 L2FWD_CRYPTO_AEAD 110 }; 111 112 struct l2fwd_key { 113 uint8_t *data; 114 uint32_t length; 115 rte_iova_t phys_addr; 116 }; 117 118 struct l2fwd_iv { 119 uint8_t *data; 120 uint16_t length; 121 }; 122 123 /** l2fwd crypto application command line options */ 124 struct l2fwd_crypto_options { 125 unsigned portmask; 126 unsigned nb_ports_per_lcore; 127 unsigned refresh_period; 128 unsigned single_lcore:1; 129 130 enum cdev_type type; 131 unsigned sessionless:1; 132 133 enum l2fwd_crypto_xform_chain xform_chain; 134 135 struct rte_crypto_sym_xform cipher_xform; 136 unsigned ckey_param; 137 int ckey_random_size; 138 139 struct l2fwd_iv cipher_iv; 140 unsigned int cipher_iv_param; 141 int cipher_iv_random_size; 142 143 struct rte_crypto_sym_xform auth_xform; 144 uint8_t akey_param; 145 int akey_random_size; 146 147 struct 
l2fwd_iv auth_iv; 148 unsigned int auth_iv_param; 149 int auth_iv_random_size; 150 151 struct rte_crypto_sym_xform aead_xform; 152 unsigned int aead_key_param; 153 int aead_key_random_size; 154 155 struct l2fwd_iv aead_iv; 156 unsigned int aead_iv_param; 157 int aead_iv_random_size; 158 159 struct l2fwd_key aad; 160 unsigned aad_param; 161 int aad_random_size; 162 163 int digest_size; 164 165 uint16_t block_size; 166 char string_type[MAX_STR_LEN]; 167 168 uint64_t cryptodev_mask; 169 170 unsigned int mac_updating; 171 }; 172 173 /** l2fwd crypto lcore params */ 174 struct l2fwd_crypto_params { 175 uint8_t dev_id; 176 uint8_t qp_id; 177 178 unsigned digest_length; 179 unsigned block_size; 180 181 struct l2fwd_iv cipher_iv; 182 struct l2fwd_iv auth_iv; 183 struct l2fwd_iv aead_iv; 184 struct l2fwd_key aad; 185 struct rte_cryptodev_sym_session *session; 186 187 uint8_t do_cipher; 188 uint8_t do_hash; 189 uint8_t do_aead; 190 uint8_t hash_verify; 191 192 enum rte_crypto_cipher_algorithm cipher_algo; 193 enum rte_crypto_auth_algorithm auth_algo; 194 enum rte_crypto_aead_algorithm aead_algo; 195 }; 196 197 /** lcore configuration */ 198 struct lcore_queue_conf { 199 unsigned nb_rx_ports; 200 uint16_t rx_port_list[MAX_RX_QUEUE_PER_LCORE]; 201 202 unsigned nb_crypto_devs; 203 unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE]; 204 205 struct op_buffer op_buf[RTE_CRYPTO_MAX_DEVS]; 206 struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS]; 207 } __rte_cache_aligned; 208 209 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; 210 211 static struct rte_eth_conf port_conf = { 212 .rxmode = { 213 .mq_mode = ETH_MQ_RX_NONE, 214 .max_rx_pkt_len = ETHER_MAX_LEN, 215 .split_hdr_size = 0, 216 }, 217 .txmode = { 218 .mq_mode = ETH_MQ_TX_NONE, 219 }, 220 }; 221 222 struct rte_mempool *l2fwd_pktmbuf_pool; 223 struct rte_mempool *l2fwd_crypto_op_pool; 224 static struct { 225 struct rte_mempool *sess_mp; 226 struct rte_mempool *priv_mp; 227 } session_pool_socket[RTE_MAX_NUMA_NODES]; 228 229 /* Per-port statistics struct */ 230 struct l2fwd_port_statistics { 231 uint64_t tx; 232 uint64_t rx; 233 234 uint64_t crypto_enqueued; 235 uint64_t crypto_dequeued; 236 237 uint64_t dropped; 238 } __rte_cache_aligned; 239 240 struct l2fwd_crypto_statistics { 241 uint64_t enqueued; 242 uint64_t dequeued; 243 244 uint64_t errors; 245 } __rte_cache_aligned; 246 247 struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS]; 248 struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS]; 249 250 /* A tsc-based timer responsible for triggering statistics printout */ 251 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */ 252 #define MAX_TIMER_PERIOD 86400UL /* 1 day max */ 253 254 /* default period is 10 seconds */ 255 static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; 256 257 /* Print out statistics on packets dropped */ 258 static void 259 print_stats(void) 260 { 261 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; 262 uint64_t total_packets_enqueued, total_packets_dequeued, 263 total_packets_errors; 264 uint16_t portid; 265 uint64_t cdevid; 266 267 total_packets_dropped = 0; 268 total_packets_tx = 0; 269 total_packets_rx = 0; 270 total_packets_enqueued = 0; 271 total_packets_dequeued = 0; 272 total_packets_errors = 0; 273 274 const char clr[] = { 27, '[', '2', 'J', '\0' }; 275 const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 276 277 /* Clear screen and move to top left */ 278 printf("%s%s", clr, topLeft); 279 280 printf("\nPort statistics 
===================================="); 281 282 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { 283 /* skip disabled ports */ 284 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) 285 continue; 286 printf("\nStatistics for port %u ------------------------------" 287 "\nPackets sent: %32"PRIu64 288 "\nPackets received: %28"PRIu64 289 "\nPackets dropped: %29"PRIu64, 290 portid, 291 port_statistics[portid].tx, 292 port_statistics[portid].rx, 293 port_statistics[portid].dropped); 294 295 total_packets_dropped += port_statistics[portid].dropped; 296 total_packets_tx += port_statistics[portid].tx; 297 total_packets_rx += port_statistics[portid].rx; 298 } 299 printf("\nCrypto statistics =================================="); 300 301 for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) { 302 /* skip disabled ports */ 303 if ((l2fwd_enabled_crypto_mask & (((uint64_t)1) << cdevid)) == 0) 304 continue; 305 printf("\nStatistics for cryptodev %"PRIu64 306 " -------------------------" 307 "\nPackets enqueued: %28"PRIu64 308 "\nPackets dequeued: %28"PRIu64 309 "\nPackets errors: %30"PRIu64, 310 cdevid, 311 crypto_statistics[cdevid].enqueued, 312 crypto_statistics[cdevid].dequeued, 313 crypto_statistics[cdevid].errors); 314 315 total_packets_enqueued += crypto_statistics[cdevid].enqueued; 316 total_packets_dequeued += crypto_statistics[cdevid].dequeued; 317 total_packets_errors += crypto_statistics[cdevid].errors; 318 } 319 printf("\nAggregate statistics ===============================" 320 "\nTotal packets received: %22"PRIu64 321 "\nTotal packets enqueued: %22"PRIu64 322 "\nTotal packets dequeued: %22"PRIu64 323 "\nTotal packets sent: %26"PRIu64 324 "\nTotal packets dropped: %23"PRIu64 325 "\nTotal packets crypto errors: %17"PRIu64, 326 total_packets_rx, 327 total_packets_enqueued, 328 total_packets_dequeued, 329 total_packets_tx, 330 total_packets_dropped, 331 total_packets_errors); 332 printf("\n====================================================\n"); 333 } 334 335 static int 336 l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n, 337 struct l2fwd_crypto_params *cparams) 338 { 339 struct rte_crypto_op **op_buffer; 340 unsigned ret; 341 342 op_buffer = (struct rte_crypto_op **) 343 qconf->op_buf[cparams->dev_id].buffer; 344 345 ret = rte_cryptodev_enqueue_burst(cparams->dev_id, 346 cparams->qp_id, op_buffer, (uint16_t) n); 347 348 crypto_statistics[cparams->dev_id].enqueued += ret; 349 if (unlikely(ret < n)) { 350 crypto_statistics[cparams->dev_id].errors += (n - ret); 351 do { 352 rte_pktmbuf_free(op_buffer[ret]->sym->m_src); 353 rte_crypto_op_free(op_buffer[ret]); 354 } while (++ret < n); 355 } 356 357 return 0; 358 } 359 360 static int 361 l2fwd_crypto_enqueue(struct rte_crypto_op *op, 362 struct l2fwd_crypto_params *cparams) 363 { 364 unsigned lcore_id, len; 365 struct lcore_queue_conf *qconf; 366 367 lcore_id = rte_lcore_id(); 368 369 qconf = &lcore_queue_conf[lcore_id]; 370 len = qconf->op_buf[cparams->dev_id].len; 371 qconf->op_buf[cparams->dev_id].buffer[len] = op; 372 len++; 373 374 /* enough ops to be sent */ 375 if (len == MAX_PKT_BURST) { 376 l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams); 377 len = 0; 378 } 379 380 qconf->op_buf[cparams->dev_id].len = len; 381 return 0; 382 } 383 384 static int 385 l2fwd_simple_crypto_enqueue(struct rte_mbuf *m, 386 struct rte_crypto_op *op, 387 struct l2fwd_crypto_params *cparams) 388 { 389 struct ether_hdr *eth_hdr; 390 struct ipv4_hdr *ip_hdr; 391 392 uint32_t ipdata_offset, data_len; 393 uint32_t pad_len = 0; 394 
char *padding; 395 396 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); 397 398 if (eth_hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_IPv4)) 399 return -1; 400 401 ipdata_offset = sizeof(struct ether_hdr); 402 403 ip_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) + 404 ipdata_offset); 405 406 ipdata_offset += (ip_hdr->version_ihl & IPV4_HDR_IHL_MASK) 407 * IPV4_IHL_MULTIPLIER; 408 409 410 /* Zero pad data to be crypto'd so it is block aligned */ 411 data_len = rte_pktmbuf_data_len(m) - ipdata_offset; 412 413 if ((cparams->do_hash || cparams->do_aead) && cparams->hash_verify) 414 data_len -= cparams->digest_length; 415 416 if (cparams->do_cipher) { 417 /* 418 * Following algorithms are block cipher algorithms, 419 * and might need padding 420 */ 421 switch (cparams->cipher_algo) { 422 case RTE_CRYPTO_CIPHER_AES_CBC: 423 case RTE_CRYPTO_CIPHER_AES_ECB: 424 case RTE_CRYPTO_CIPHER_DES_CBC: 425 case RTE_CRYPTO_CIPHER_3DES_CBC: 426 case RTE_CRYPTO_CIPHER_3DES_ECB: 427 if (data_len % cparams->block_size) 428 pad_len = cparams->block_size - 429 (data_len % cparams->block_size); 430 break; 431 default: 432 pad_len = 0; 433 } 434 435 if (pad_len) { 436 padding = rte_pktmbuf_append(m, pad_len); 437 if (unlikely(!padding)) 438 return -1; 439 440 data_len += pad_len; 441 memset(padding, 0, pad_len); 442 } 443 } 444 445 /* Set crypto operation data parameters */ 446 rte_crypto_op_attach_sym_session(op, cparams->session); 447 448 if (cparams->do_hash) { 449 if (cparams->auth_iv.length) { 450 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, 451 uint8_t *, 452 IV_OFFSET + 453 cparams->cipher_iv.length); 454 /* 455 * Copy IV at the end of the crypto operation, 456 * after the cipher IV, if added 457 */ 458 rte_memcpy(iv_ptr, cparams->auth_iv.data, 459 cparams->auth_iv.length); 460 } 461 if (!cparams->hash_verify) { 462 /* Append space for digest to end of packet */ 463 op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m, 464 cparams->digest_length); 465 } else { 466 op->sym->auth.digest.data = rte_pktmbuf_mtod(m, 467 uint8_t *) + ipdata_offset + data_len; 468 } 469 470 op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, 471 rte_pktmbuf_pkt_len(m) - cparams->digest_length); 472 473 /* For wireless algorithms, offset/length must be in bits */ 474 if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 475 cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 || 476 cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) { 477 op->sym->auth.data.offset = ipdata_offset << 3; 478 op->sym->auth.data.length = data_len << 3; 479 } else { 480 op->sym->auth.data.offset = ipdata_offset; 481 op->sym->auth.data.length = data_len; 482 } 483 } 484 485 if (cparams->do_cipher) { 486 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 487 IV_OFFSET); 488 /* Copy IV at the end of the crypto operation */ 489 rte_memcpy(iv_ptr, cparams->cipher_iv.data, 490 cparams->cipher_iv.length); 491 492 /* For wireless algorithms, offset/length must be in bits */ 493 if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 494 cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 || 495 cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 496 op->sym->cipher.data.offset = ipdata_offset << 3; 497 op->sym->cipher.data.length = data_len << 3; 498 } else { 499 op->sym->cipher.data.offset = ipdata_offset; 500 op->sym->cipher.data.length = data_len; 501 } 502 } 503 504 if (cparams->do_aead) { 505 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 506 IV_OFFSET); 507 /* Copy IV at the end of the crypto operation */ 
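/*
 * Note: IV_OFFSET (defined above as sizeof(struct rte_crypto_op) +
 * sizeof(struct rte_crypto_sym_op)) points just past the crypto op and its
 * symmetric op, so every enqueued operation carries its own copy of the
 * cipher/auth/AEAD IV alongside the descriptor. The xform iv.offset fields
 * configured in l2fwd_main_loop() use this same layout.
 */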
508 /* 509 * If doing AES-CCM, nonce is copied one byte 510 * after the start of IV field 511 */ 512 if (cparams->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) 513 rte_memcpy(iv_ptr + 1, cparams->aead_iv.data, 514 cparams->aead_iv.length); 515 else 516 rte_memcpy(iv_ptr, cparams->aead_iv.data, 517 cparams->aead_iv.length); 518 519 op->sym->aead.data.offset = ipdata_offset; 520 op->sym->aead.data.length = data_len; 521 522 if (!cparams->hash_verify) { 523 /* Append space for digest to end of packet */ 524 op->sym->aead.digest.data = (uint8_t *)rte_pktmbuf_append(m, 525 cparams->digest_length); 526 } else { 527 op->sym->aead.digest.data = rte_pktmbuf_mtod(m, 528 uint8_t *) + ipdata_offset + data_len; 529 } 530 531 op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m, 532 rte_pktmbuf_pkt_len(m) - cparams->digest_length); 533 534 if (cparams->aad.length) { 535 op->sym->aead.aad.data = cparams->aad.data; 536 op->sym->aead.aad.phys_addr = cparams->aad.phys_addr; 537 } 538 } 539 540 op->sym->m_src = m; 541 542 return l2fwd_crypto_enqueue(op, cparams); 543 } 544 545 546 /* Send the burst of packets on an output interface */ 547 static int 548 l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, 549 uint16_t port) 550 { 551 struct rte_mbuf **pkt_buffer; 552 unsigned ret; 553 554 pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer; 555 556 ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n); 557 port_statistics[port].tx += ret; 558 if (unlikely(ret < n)) { 559 port_statistics[port].dropped += (n - ret); 560 do { 561 rte_pktmbuf_free(pkt_buffer[ret]); 562 } while (++ret < n); 563 } 564 565 return 0; 566 } 567 568 /* Enqueue packets for TX and prepare them to be sent */ 569 static int 570 l2fwd_send_packet(struct rte_mbuf *m, uint16_t port) 571 { 572 unsigned lcore_id, len; 573 struct lcore_queue_conf *qconf; 574 575 lcore_id = rte_lcore_id(); 576 577 qconf = &lcore_queue_conf[lcore_id]; 578 len = qconf->pkt_buf[port].len; 579 qconf->pkt_buf[port].buffer[len] = m; 580 len++; 581 582 /* enough pkts to be sent */ 583 if (unlikely(len == MAX_PKT_BURST)) { 584 l2fwd_send_burst(qconf, MAX_PKT_BURST, port); 585 len = 0; 586 } 587 588 qconf->pkt_buf[port].len = len; 589 return 0; 590 } 591 592 static void 593 l2fwd_mac_updating(struct rte_mbuf *m, uint16_t dest_portid) 594 { 595 struct ether_hdr *eth; 596 void *tmp; 597 598 eth = rte_pktmbuf_mtod(m, struct ether_hdr *); 599 600 /* 02:00:00:00:00:xx */ 601 tmp = &eth->d_addr.addr_bytes[0]; 602 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40); 603 604 /* src addr */ 605 ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], &eth->s_addr); 606 } 607 608 static void 609 l2fwd_simple_forward(struct rte_mbuf *m, uint16_t portid, 610 struct l2fwd_crypto_options *options) 611 { 612 uint16_t dst_port; 613 614 dst_port = l2fwd_dst_ports[portid]; 615 616 if (options->mac_updating) 617 l2fwd_mac_updating(m, dst_port); 618 619 l2fwd_send_packet(m, dst_port); 620 } 621 622 /** Generate random key */ 623 static void 624 generate_random_key(uint8_t *key, unsigned length) 625 { 626 int fd; 627 int ret; 628 629 fd = open("/dev/urandom", O_RDONLY); 630 if (fd < 0) 631 rte_exit(EXIT_FAILURE, "Failed to generate random key\n"); 632 633 ret = read(fd, key, length); 634 close(fd); 635 636 if (ret != (signed)length) 637 rte_exit(EXIT_FAILURE, "Failed to generate random key\n"); 638 } 639 640 static struct rte_cryptodev_sym_session * 641 initialize_crypto_session(struct l2fwd_crypto_options *options, uint8_t cdev_id) 642 { 643 struct
rte_crypto_sym_xform *first_xform; 644 struct rte_cryptodev_sym_session *session; 645 int retval = rte_cryptodev_socket_id(cdev_id); 646 647 if (retval < 0) 648 return NULL; 649 650 uint8_t socket_id = (uint8_t) retval; 651 652 if (options->xform_chain == L2FWD_CRYPTO_AEAD) { 653 first_xform = &options->aead_xform; 654 } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) { 655 first_xform = &options->cipher_xform; 656 first_xform->next = &options->auth_xform; 657 } else if (options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER) { 658 first_xform = &options->auth_xform; 659 first_xform->next = &options->cipher_xform; 660 } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) { 661 first_xform = &options->cipher_xform; 662 } else { 663 first_xform = &options->auth_xform; 664 } 665 666 session = rte_cryptodev_sym_session_create( 667 session_pool_socket[socket_id].sess_mp); 668 if (session == NULL) 669 return NULL; 670 671 if (rte_cryptodev_sym_session_init(cdev_id, session, 672 first_xform, 673 session_pool_socket[socket_id].priv_mp) < 0) 674 return NULL; 675 676 return session; 677 } 678 679 static void 680 l2fwd_crypto_options_print(struct l2fwd_crypto_options *options); 681 682 /* main processing loop */ 683 static void 684 l2fwd_main_loop(struct l2fwd_crypto_options *options) 685 { 686 struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST]; 687 struct rte_crypto_op *ops_burst[MAX_PKT_BURST]; 688 689 unsigned lcore_id = rte_lcore_id(); 690 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 691 unsigned int i, j, nb_rx, len; 692 uint16_t portid; 693 struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id]; 694 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / 695 US_PER_S * BURST_TX_DRAIN_US; 696 struct l2fwd_crypto_params *cparams; 697 struct l2fwd_crypto_params port_cparams[qconf->nb_crypto_devs]; 698 struct rte_cryptodev_sym_session *session; 699 700 if (qconf->nb_rx_ports == 0) { 701 RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id); 702 return; 703 } 704 705 RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id); 706 707 for (i = 0; i < qconf->nb_rx_ports; i++) { 708 709 portid = qconf->rx_port_list[i]; 710 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id, 711 portid); 712 } 713 714 for (i = 0; i < qconf->nb_crypto_devs; i++) { 715 port_cparams[i].do_cipher = 0; 716 port_cparams[i].do_hash = 0; 717 port_cparams[i].do_aead = 0; 718 719 switch (options->xform_chain) { 720 case L2FWD_CRYPTO_AEAD: 721 port_cparams[i].do_aead = 1; 722 break; 723 case L2FWD_CRYPTO_CIPHER_HASH: 724 case L2FWD_CRYPTO_HASH_CIPHER: 725 port_cparams[i].do_cipher = 1; 726 port_cparams[i].do_hash = 1; 727 break; 728 case L2FWD_CRYPTO_HASH_ONLY: 729 port_cparams[i].do_hash = 1; 730 break; 731 case L2FWD_CRYPTO_CIPHER_ONLY: 732 port_cparams[i].do_cipher = 1; 733 break; 734 } 735 736 port_cparams[i].dev_id = qconf->cryptodev_list[i]; 737 port_cparams[i].qp_id = 0; 738 739 port_cparams[i].block_size = options->block_size; 740 741 if (port_cparams[i].do_hash) { 742 port_cparams[i].auth_iv.data = options->auth_iv.data; 743 port_cparams[i].auth_iv.length = options->auth_iv.length; 744 if (!options->auth_iv_param) 745 generate_random_key(port_cparams[i].auth_iv.data, 746 port_cparams[i].auth_iv.length); 747 if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) 748 port_cparams[i].hash_verify = 1; 749 else 750 port_cparams[i].hash_verify = 0; 751 752 port_cparams[i].auth_algo = options->auth_xform.auth.algo; 753 port_cparams[i].digest_length = 754 
options->auth_xform.auth.digest_length; 755 /* Set IV parameters */ 756 if (options->auth_iv.length) { 757 options->auth_xform.auth.iv.offset = 758 IV_OFFSET + options->cipher_iv.length; 759 options->auth_xform.auth.iv.length = 760 options->auth_iv.length; 761 } 762 } 763 764 if (port_cparams[i].do_aead) { 765 port_cparams[i].aead_iv.data = options->aead_iv.data; 766 port_cparams[i].aead_iv.length = options->aead_iv.length; 767 if (!options->aead_iv_param) 768 generate_random_key(port_cparams[i].aead_iv.data, 769 port_cparams[i].aead_iv.length); 770 port_cparams[i].aead_algo = options->aead_xform.aead.algo; 771 port_cparams[i].digest_length = 772 options->aead_xform.aead.digest_length; 773 if (options->aead_xform.aead.aad_length) { 774 port_cparams[i].aad.data = options->aad.data; 775 port_cparams[i].aad.phys_addr = options->aad.phys_addr; 776 port_cparams[i].aad.length = options->aad.length; 777 if (!options->aad_param) 778 generate_random_key(port_cparams[i].aad.data, 779 port_cparams[i].aad.length); 780 /* 781 * If doing AES-CCM, first 18 bytes has to be reserved, 782 * and actual AAD should start from byte 18 783 */ 784 if (port_cparams[i].aead_algo == RTE_CRYPTO_AEAD_AES_CCM) 785 memmove(port_cparams[i].aad.data + 18, 786 port_cparams[i].aad.data, 787 port_cparams[i].aad.length); 788 789 } else 790 port_cparams[i].aad.length = 0; 791 792 if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_DECRYPT) 793 port_cparams[i].hash_verify = 1; 794 else 795 port_cparams[i].hash_verify = 0; 796 797 /* Set IV parameters */ 798 options->aead_xform.aead.iv.offset = IV_OFFSET; 799 options->aead_xform.aead.iv.length = options->aead_iv.length; 800 } 801 802 if (port_cparams[i].do_cipher) { 803 port_cparams[i].cipher_iv.data = options->cipher_iv.data; 804 port_cparams[i].cipher_iv.length = options->cipher_iv.length; 805 if (!options->cipher_iv_param) 806 generate_random_key(port_cparams[i].cipher_iv.data, 807 port_cparams[i].cipher_iv.length); 808 809 port_cparams[i].cipher_algo = options->cipher_xform.cipher.algo; 810 /* Set IV parameters */ 811 options->cipher_xform.cipher.iv.offset = IV_OFFSET; 812 options->cipher_xform.cipher.iv.length = 813 options->cipher_iv.length; 814 } 815 816 session = initialize_crypto_session(options, 817 port_cparams[i].dev_id); 818 if (session == NULL) 819 rte_exit(EXIT_FAILURE, "Failed to initialize crypto session\n"); 820 821 port_cparams[i].session = session; 822 823 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u cryptoid=%u\n", lcore_id, 824 port_cparams[i].dev_id); 825 } 826 827 l2fwd_crypto_options_print(options); 828 829 /* 830 * Initialize previous tsc timestamp before the loop, 831 * to avoid showing the port statistics immediately, 832 * so user can see the crypto information. 
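 * (Seeding prev_tsc with the current TSC also means the first buffered-op/TX
 * drain interval starts from this point rather than from TSC zero.)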
833 */ 834 prev_tsc = rte_rdtsc(); 835 while (1) { 836 837 cur_tsc = rte_rdtsc(); 838 839 /* 840 * Crypto device/TX burst queue drain 841 */ 842 diff_tsc = cur_tsc - prev_tsc; 843 if (unlikely(diff_tsc > drain_tsc)) { 844 /* Enqueue all crypto ops remaining in buffers */ 845 for (i = 0; i < qconf->nb_crypto_devs; i++) { 846 cparams = &port_cparams[i]; 847 len = qconf->op_buf[cparams->dev_id].len; 848 l2fwd_crypto_send_burst(qconf, len, cparams); 849 qconf->op_buf[cparams->dev_id].len = 0; 850 } 851 /* Transmit all packets remaining in buffers */ 852 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { 853 if (qconf->pkt_buf[portid].len == 0) 854 continue; 855 l2fwd_send_burst(&lcore_queue_conf[lcore_id], 856 qconf->pkt_buf[portid].len, 857 portid); 858 qconf->pkt_buf[portid].len = 0; 859 } 860 861 /* if timer is enabled */ 862 if (timer_period > 0) { 863 864 /* advance the timer */ 865 timer_tsc += diff_tsc; 866 867 /* if timer has reached its timeout */ 868 if (unlikely(timer_tsc >= 869 (uint64_t)timer_period)) { 870 871 /* do this only on master core */ 872 if (lcore_id == rte_get_master_lcore() 873 && options->refresh_period) { 874 print_stats(); 875 timer_tsc = 0; 876 } 877 } 878 } 879 880 prev_tsc = cur_tsc; 881 } 882 883 /* 884 * Read packet from RX queues 885 */ 886 for (i = 0; i < qconf->nb_rx_ports; i++) { 887 portid = qconf->rx_port_list[i]; 888 889 cparams = &port_cparams[i]; 890 891 nb_rx = rte_eth_rx_burst(portid, 0, 892 pkts_burst, MAX_PKT_BURST); 893 894 port_statistics[portid].rx += nb_rx; 895 896 if (nb_rx) { 897 /* 898 * If we can't allocate a crypto_ops, then drop 899 * the rest of the burst and dequeue and 900 * process the packets to free offload structs 901 */ 902 if (rte_crypto_op_bulk_alloc( 903 l2fwd_crypto_op_pool, 904 RTE_CRYPTO_OP_TYPE_SYMMETRIC, 905 ops_burst, nb_rx) != 906 nb_rx) { 907 for (j = 0; j < nb_rx; j++) 908 rte_pktmbuf_free(pkts_burst[j]); 909 910 nb_rx = 0; 911 } 912 913 /* Enqueue packets from Crypto device*/ 914 for (j = 0; j < nb_rx; j++) { 915 m = pkts_burst[j]; 916 917 l2fwd_simple_crypto_enqueue(m, 918 ops_burst[j], cparams); 919 } 920 } 921 922 /* Dequeue packets from Crypto device */ 923 do { 924 nb_rx = rte_cryptodev_dequeue_burst( 925 cparams->dev_id, cparams->qp_id, 926 ops_burst, MAX_PKT_BURST); 927 928 crypto_statistics[cparams->dev_id].dequeued += 929 nb_rx; 930 931 /* Forward crypto'd packets */ 932 for (j = 0; j < nb_rx; j++) { 933 m = ops_burst[j]->sym->m_src; 934 935 rte_crypto_op_free(ops_burst[j]); 936 l2fwd_simple_forward(m, portid, 937 options); 938 } 939 } while (nb_rx == MAX_PKT_BURST); 940 } 941 } 942 } 943 944 static int 945 l2fwd_launch_one_lcore(void *arg) 946 { 947 l2fwd_main_loop((struct l2fwd_crypto_options *)arg); 948 return 0; 949 } 950 951 /* Display command line arguments usage */ 952 static void 953 l2fwd_crypto_usage(const char *prgname) 954 { 955 printf("%s [EAL options] --\n" 956 " -p PORTMASK: hexadecimal bitmask of ports to configure\n" 957 " -q NQ: number of queue (=ports) per lcore (default is 1)\n" 958 " -s manage all ports from single lcore\n" 959 " -T PERIOD: statistics will be refreshed each PERIOD seconds" 960 " (0 to disable, 10 default, 86400 maximum)\n" 961 962 " --cdev_type HW / SW / ANY\n" 963 " --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY /" 964 " HASH_ONLY / AEAD\n" 965 966 " --cipher_algo ALGO\n" 967 " --cipher_op ENCRYPT / DECRYPT\n" 968 " --cipher_key KEY (bytes separated with \":\")\n" 969 " --cipher_key_random_size SIZE: size of cipher key when generated randomly\n" 970 " --cipher_iv 
IV (bytes separated with \":\")\n" 971 " --cipher_iv_random_size SIZE: size of cipher IV when generated randomly\n" 972 973 " --auth_algo ALGO\n" 974 " --auth_op GENERATE / VERIFY\n" 975 " --auth_key KEY (bytes separated with \":\")\n" 976 " --auth_key_random_size SIZE: size of auth key when generated randomly\n" 977 " --auth_iv IV (bytes separated with \":\")\n" 978 " --auth_iv_random_size SIZE: size of auth IV when generated randomly\n" 979 980 " --aead_algo ALGO\n" 981 " --aead_op ENCRYPT / DECRYPT\n" 982 " --aead_key KEY (bytes separated with \":\")\n" 983 " --aead_key_random_size SIZE: size of AEAD key when generated randomly\n" 984 " --aead_iv IV (bytes separated with \":\")\n" 985 " --aead_iv_random_size SIZE: size of AEAD IV when generated randomly\n" 986 " --aad AAD (bytes separated with \":\")\n" 987 " --aad_random_size SIZE: size of AAD when generated randomly\n" 988 989 " --digest_size SIZE: size of digest to be generated/verified\n" 990 991 " --sessionless\n" 992 " --cryptodev_mask MASK: hexadecimal bitmask of crypto devices to configure\n" 993 994 " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n" 995 " When enabled:\n" 996 " - The source MAC address is replaced by the TX port MAC address\n" 997 " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n", 998 prgname); 999 } 1000 1001 /** Parse crypto device type command line argument */ 1002 static int 1003 parse_cryptodev_type(enum cdev_type *type, char *optarg) 1004 { 1005 if (strcmp("HW", optarg) == 0) { 1006 *type = CDEV_TYPE_HW; 1007 return 0; 1008 } else if (strcmp("SW", optarg) == 0) { 1009 *type = CDEV_TYPE_SW; 1010 return 0; 1011 } else if (strcmp("ANY", optarg) == 0) { 1012 *type = CDEV_TYPE_ANY; 1013 return 0; 1014 } 1015 1016 return -1; 1017 } 1018 1019 /** Parse crypto chain xform command line argument */ 1020 static int 1021 parse_crypto_opt_chain(struct l2fwd_crypto_options *options, char *optarg) 1022 { 1023 if (strcmp("CIPHER_HASH", optarg) == 0) { 1024 options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH; 1025 return 0; 1026 } else if (strcmp("HASH_CIPHER", optarg) == 0) { 1027 options->xform_chain = L2FWD_CRYPTO_HASH_CIPHER; 1028 return 0; 1029 } else if (strcmp("CIPHER_ONLY", optarg) == 0) { 1030 options->xform_chain = L2FWD_CRYPTO_CIPHER_ONLY; 1031 return 0; 1032 } else if (strcmp("HASH_ONLY", optarg) == 0) { 1033 options->xform_chain = L2FWD_CRYPTO_HASH_ONLY; 1034 return 0; 1035 } else if (strcmp("AEAD", optarg) == 0) { 1036 options->xform_chain = L2FWD_CRYPTO_AEAD; 1037 return 0; 1038 } 1039 1040 return -1; 1041 } 1042 1043 /** Parse crypto cipher algo option command line argument */ 1044 static int 1045 parse_cipher_algo(enum rte_crypto_cipher_algorithm *algo, char *optarg) 1046 { 1047 1048 if (rte_cryptodev_get_cipher_algo_enum(algo, optarg) < 0) { 1049 RTE_LOG(ERR, USER1, "Cipher algorithm specified " 1050 "not supported!\n"); 1051 return -1; 1052 } 1053 1054 return 0; 1055 } 1056 1057 /** Parse crypto cipher operation command line argument */ 1058 static int 1059 parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg) 1060 { 1061 if (strcmp("ENCRYPT", optarg) == 0) { 1062 *op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; 1063 return 0; 1064 } else if (strcmp("DECRYPT", optarg) == 0) { 1065 *op = RTE_CRYPTO_CIPHER_OP_DECRYPT; 1066 return 0; 1067 } 1068 1069 printf("Cipher operation not supported!\n"); 1070 return -1; 1071 } 1072 1073 /** Parse bytes from command line argument */ 1074 static int 1075 parse_bytes(uint8_t *data, char *input_arg, uint16_t 
max_size) 1076 { 1077 unsigned byte_count; 1078 char *token; 1079 1080 errno = 0; 1081 for (byte_count = 0, token = strtok(input_arg, ":"); 1082 (byte_count < max_size) && (token != NULL); 1083 token = strtok(NULL, ":")) { 1084 1085 int number = (int)strtol(token, NULL, 16); 1086 1087 if (errno == EINVAL || errno == ERANGE || number > 0xFF) 1088 return -1; 1089 1090 data[byte_count++] = (uint8_t)number; 1091 } 1092 1093 return byte_count; 1094 } 1095 1096 /** Parse size param*/ 1097 static int 1098 parse_size(int *size, const char *q_arg) 1099 { 1100 char *end = NULL; 1101 unsigned long n; 1102 1103 /* parse hexadecimal string */ 1104 n = strtoul(q_arg, &end, 10); 1105 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 1106 n = 0; 1107 1108 if (n == 0) { 1109 printf("invalid size\n"); 1110 return -1; 1111 } 1112 1113 *size = n; 1114 return 0; 1115 } 1116 1117 /** Parse crypto cipher operation command line argument */ 1118 static int 1119 parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg) 1120 { 1121 if (rte_cryptodev_get_auth_algo_enum(algo, optarg) < 0) { 1122 RTE_LOG(ERR, USER1, "Authentication algorithm specified " 1123 "not supported!\n"); 1124 return -1; 1125 } 1126 1127 return 0; 1128 } 1129 1130 static int 1131 parse_auth_op(enum rte_crypto_auth_operation *op, char *optarg) 1132 { 1133 if (strcmp("VERIFY", optarg) == 0) { 1134 *op = RTE_CRYPTO_AUTH_OP_VERIFY; 1135 return 0; 1136 } else if (strcmp("GENERATE", optarg) == 0) { 1137 *op = RTE_CRYPTO_AUTH_OP_GENERATE; 1138 return 0; 1139 } 1140 1141 printf("Authentication operation specified not supported!\n"); 1142 return -1; 1143 } 1144 1145 static int 1146 parse_aead_algo(enum rte_crypto_aead_algorithm *algo, char *optarg) 1147 { 1148 if (rte_cryptodev_get_aead_algo_enum(algo, optarg) < 0) { 1149 RTE_LOG(ERR, USER1, "AEAD algorithm specified " 1150 "not supported!\n"); 1151 return -1; 1152 } 1153 1154 return 0; 1155 } 1156 1157 static int 1158 parse_aead_op(enum rte_crypto_aead_operation *op, char *optarg) 1159 { 1160 if (strcmp("ENCRYPT", optarg) == 0) { 1161 *op = RTE_CRYPTO_AEAD_OP_ENCRYPT; 1162 return 0; 1163 } else if (strcmp("DECRYPT", optarg) == 0) { 1164 *op = RTE_CRYPTO_AEAD_OP_DECRYPT; 1165 return 0; 1166 } 1167 1168 printf("AEAD operation specified not supported!\n"); 1169 return -1; 1170 } 1171 static int 1172 parse_cryptodev_mask(struct l2fwd_crypto_options *options, 1173 const char *q_arg) 1174 { 1175 char *end = NULL; 1176 uint64_t pm; 1177 1178 /* parse hexadecimal string */ 1179 pm = strtoul(q_arg, &end, 16); 1180 if ((pm == '\0') || (end == NULL) || (*end != '\0')) 1181 pm = 0; 1182 1183 options->cryptodev_mask = pm; 1184 if (options->cryptodev_mask == 0) { 1185 printf("invalid cryptodev_mask specified\n"); 1186 return -1; 1187 } 1188 1189 return 0; 1190 } 1191 1192 /** Parse long options */ 1193 static int 1194 l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options, 1195 struct option *lgopts, int option_index) 1196 { 1197 int retval; 1198 1199 if (strcmp(lgopts[option_index].name, "cdev_type") == 0) { 1200 retval = parse_cryptodev_type(&options->type, optarg); 1201 if (retval == 0) 1202 snprintf(options->string_type, MAX_STR_LEN, 1203 "%s", optarg); 1204 return retval; 1205 } 1206 1207 else if (strcmp(lgopts[option_index].name, "chain") == 0) 1208 return parse_crypto_opt_chain(options, optarg); 1209 1210 /* Cipher options */ 1211 else if (strcmp(lgopts[option_index].name, "cipher_algo") == 0) 1212 return parse_cipher_algo(&options->cipher_xform.cipher.algo, 1213 optarg); 
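/*
 * The key/IV/AAD options handled below ("cipher_key", "cipher_iv",
 * "auth_key", "auth_iv", "aead_key", "aead_iv", "aad") take ":"-separated
 * hex bytes, converted one byte per token by parse_bytes() above.
 * For example (hypothetical key value):
 *   --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
 * Explicitly supplied values take precedence over the matching
 * *_random_size options (see l2fwd_crypto_options_print()).
 */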
1214 1215 else if (strcmp(lgopts[option_index].name, "cipher_op") == 0) 1216 return parse_cipher_op(&options->cipher_xform.cipher.op, 1217 optarg); 1218 1219 else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) { 1220 options->ckey_param = 1; 1221 options->cipher_xform.cipher.key.length = 1222 parse_bytes(options->cipher_xform.cipher.key.data, optarg, 1223 MAX_KEY_SIZE); 1224 if (options->cipher_xform.cipher.key.length > 0) 1225 return 0; 1226 else 1227 return -1; 1228 } 1229 1230 else if (strcmp(lgopts[option_index].name, "cipher_key_random_size") == 0) 1231 return parse_size(&options->ckey_random_size, optarg); 1232 1233 else if (strcmp(lgopts[option_index].name, "cipher_iv") == 0) { 1234 options->cipher_iv_param = 1; 1235 options->cipher_iv.length = 1236 parse_bytes(options->cipher_iv.data, optarg, MAX_IV_SIZE); 1237 if (options->cipher_iv.length > 0) 1238 return 0; 1239 else 1240 return -1; 1241 } 1242 1243 else if (strcmp(lgopts[option_index].name, "cipher_iv_random_size") == 0) 1244 return parse_size(&options->cipher_iv_random_size, optarg); 1245 1246 /* Authentication options */ 1247 else if (strcmp(lgopts[option_index].name, "auth_algo") == 0) { 1248 return parse_auth_algo(&options->auth_xform.auth.algo, 1249 optarg); 1250 } 1251 1252 else if (strcmp(lgopts[option_index].name, "auth_op") == 0) 1253 return parse_auth_op(&options->auth_xform.auth.op, 1254 optarg); 1255 1256 else if (strcmp(lgopts[option_index].name, "auth_key") == 0) { 1257 options->akey_param = 1; 1258 options->auth_xform.auth.key.length = 1259 parse_bytes(options->auth_xform.auth.key.data, optarg, 1260 MAX_KEY_SIZE); 1261 if (options->auth_xform.auth.key.length > 0) 1262 return 0; 1263 else 1264 return -1; 1265 } 1266 1267 else if (strcmp(lgopts[option_index].name, "auth_key_random_size") == 0) { 1268 return parse_size(&options->akey_random_size, optarg); 1269 } 1270 1271 else if (strcmp(lgopts[option_index].name, "auth_iv") == 0) { 1272 options->auth_iv_param = 1; 1273 options->auth_iv.length = 1274 parse_bytes(options->auth_iv.data, optarg, MAX_IV_SIZE); 1275 if (options->auth_iv.length > 0) 1276 return 0; 1277 else 1278 return -1; 1279 } 1280 1281 else if (strcmp(lgopts[option_index].name, "auth_iv_random_size") == 0) 1282 return parse_size(&options->auth_iv_random_size, optarg); 1283 1284 /* AEAD options */ 1285 else if (strcmp(lgopts[option_index].name, "aead_algo") == 0) { 1286 return parse_aead_algo(&options->aead_xform.aead.algo, 1287 optarg); 1288 } 1289 1290 else if (strcmp(lgopts[option_index].name, "aead_op") == 0) 1291 return parse_aead_op(&options->aead_xform.aead.op, 1292 optarg); 1293 1294 else if (strcmp(lgopts[option_index].name, "aead_key") == 0) { 1295 options->aead_key_param = 1; 1296 options->aead_xform.aead.key.length = 1297 parse_bytes(options->aead_xform.aead.key.data, optarg, 1298 MAX_KEY_SIZE); 1299 if (options->aead_xform.aead.key.length > 0) 1300 return 0; 1301 else 1302 return -1; 1303 } 1304 1305 else if (strcmp(lgopts[option_index].name, "aead_key_random_size") == 0) 1306 return parse_size(&options->aead_key_random_size, optarg); 1307 1308 1309 else if (strcmp(lgopts[option_index].name, "aead_iv") == 0) { 1310 options->aead_iv_param = 1; 1311 options->aead_iv.length = 1312 parse_bytes(options->aead_iv.data, optarg, MAX_IV_SIZE); 1313 if (options->aead_iv.length > 0) 1314 return 0; 1315 else 1316 return -1; 1317 } 1318 1319 else if (strcmp(lgopts[option_index].name, "aead_iv_random_size") == 0) 1320 return parse_size(&options->aead_iv_random_size, optarg); 1321 1322 else if 
(strcmp(lgopts[option_index].name, "aad") == 0) { 1323 options->aad_param = 1; 1324 options->aad.length = 1325 parse_bytes(options->aad.data, optarg, MAX_AAD_SIZE); 1326 if (options->aad.length > 0) 1327 return 0; 1328 else 1329 return -1; 1330 } 1331 1332 else if (strcmp(lgopts[option_index].name, "aad_random_size") == 0) { 1333 return parse_size(&options->aad_random_size, optarg); 1334 } 1335 1336 else if (strcmp(lgopts[option_index].name, "digest_size") == 0) { 1337 return parse_size(&options->digest_size, optarg); 1338 } 1339 1340 else if (strcmp(lgopts[option_index].name, "sessionless") == 0) { 1341 options->sessionless = 1; 1342 return 0; 1343 } 1344 1345 else if (strcmp(lgopts[option_index].name, "cryptodev_mask") == 0) 1346 return parse_cryptodev_mask(options, optarg); 1347 1348 else if (strcmp(lgopts[option_index].name, "mac-updating") == 0) { 1349 options->mac_updating = 1; 1350 return 0; 1351 } 1352 1353 else if (strcmp(lgopts[option_index].name, "no-mac-updating") == 0) { 1354 options->mac_updating = 0; 1355 return 0; 1356 } 1357 1358 return -1; 1359 } 1360 1361 /** Parse port mask */ 1362 static int 1363 l2fwd_crypto_parse_portmask(struct l2fwd_crypto_options *options, 1364 const char *q_arg) 1365 { 1366 char *end = NULL; 1367 unsigned long pm; 1368 1369 /* parse hexadecimal string */ 1370 pm = strtoul(q_arg, &end, 16); 1371 if ((pm == '\0') || (end == NULL) || (*end != '\0')) 1372 pm = 0; 1373 1374 options->portmask = pm; 1375 if (options->portmask == 0) { 1376 printf("invalid portmask specified\n"); 1377 return -1; 1378 } 1379 1380 return pm; 1381 } 1382 1383 /** Parse number of queues */ 1384 static int 1385 l2fwd_crypto_parse_nqueue(struct l2fwd_crypto_options *options, 1386 const char *q_arg) 1387 { 1388 char *end = NULL; 1389 unsigned long n; 1390 1391 /* parse hexadecimal string */ 1392 n = strtoul(q_arg, &end, 10); 1393 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 1394 n = 0; 1395 else if (n >= MAX_RX_QUEUE_PER_LCORE) 1396 n = 0; 1397 1398 options->nb_ports_per_lcore = n; 1399 if (options->nb_ports_per_lcore == 0) { 1400 printf("invalid number of ports selected\n"); 1401 return -1; 1402 } 1403 1404 return 0; 1405 } 1406 1407 /** Parse timer period */ 1408 static int 1409 l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options, 1410 const char *q_arg) 1411 { 1412 char *end = NULL; 1413 unsigned long n; 1414 1415 /* parse number string */ 1416 n = (unsigned)strtol(q_arg, &end, 10); 1417 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 1418 n = 0; 1419 1420 if (n >= MAX_TIMER_PERIOD) { 1421 printf("Warning refresh period specified %lu is greater than " 1422 "max value %lu! 
using max value", 1423 n, MAX_TIMER_PERIOD); 1424 n = MAX_TIMER_PERIOD; 1425 } 1426 1427 options->refresh_period = n * 1000 * TIMER_MILLISECOND; 1428 1429 return 0; 1430 } 1431 1432 /** Generate default options for application */ 1433 static void 1434 l2fwd_crypto_default_options(struct l2fwd_crypto_options *options) 1435 { 1436 options->portmask = 0xffffffff; 1437 options->nb_ports_per_lcore = 1; 1438 options->refresh_period = 10000; 1439 options->single_lcore = 0; 1440 options->sessionless = 0; 1441 1442 options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH; 1443 1444 /* Cipher Data */ 1445 options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; 1446 options->cipher_xform.next = NULL; 1447 options->ckey_param = 0; 1448 options->ckey_random_size = -1; 1449 options->cipher_xform.cipher.key.length = 0; 1450 options->cipher_iv_param = 0; 1451 options->cipher_iv_random_size = -1; 1452 options->cipher_iv.length = 0; 1453 1454 options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; 1455 options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; 1456 1457 /* Authentication Data */ 1458 options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; 1459 options->auth_xform.next = NULL; 1460 options->akey_param = 0; 1461 options->akey_random_size = -1; 1462 options->auth_xform.auth.key.length = 0; 1463 options->auth_iv_param = 0; 1464 options->auth_iv_random_size = -1; 1465 options->auth_iv.length = 0; 1466 1467 options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC; 1468 options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; 1469 1470 /* AEAD Data */ 1471 options->aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD; 1472 options->aead_xform.next = NULL; 1473 options->aead_key_param = 0; 1474 options->aead_key_random_size = -1; 1475 options->aead_xform.aead.key.length = 0; 1476 options->aead_iv_param = 0; 1477 options->aead_iv_random_size = -1; 1478 options->aead_iv.length = 0; 1479 1480 options->aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM; 1481 options->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT; 1482 1483 options->aad_param = 0; 1484 options->aad_random_size = -1; 1485 options->aad.length = 0; 1486 1487 options->digest_size = -1; 1488 1489 options->type = CDEV_TYPE_ANY; 1490 options->cryptodev_mask = UINT64_MAX; 1491 1492 options->mac_updating = 1; 1493 } 1494 1495 static void 1496 display_cipher_info(struct l2fwd_crypto_options *options) 1497 { 1498 printf("\n---- Cipher information ---\n"); 1499 printf("Algorithm: %s\n", 1500 rte_crypto_cipher_algorithm_strings[options->cipher_xform.cipher.algo]); 1501 rte_hexdump(stdout, "Cipher key:", 1502 options->cipher_xform.cipher.key.data, 1503 options->cipher_xform.cipher.key.length); 1504 rte_hexdump(stdout, "IV:", options->cipher_iv.data, options->cipher_iv.length); 1505 } 1506 1507 static void 1508 display_auth_info(struct l2fwd_crypto_options *options) 1509 { 1510 printf("\n---- Authentication information ---\n"); 1511 printf("Algorithm: %s\n", 1512 rte_crypto_auth_algorithm_strings[options->auth_xform.auth.algo]); 1513 rte_hexdump(stdout, "Auth key:", 1514 options->auth_xform.auth.key.data, 1515 options->auth_xform.auth.key.length); 1516 rte_hexdump(stdout, "IV:", options->auth_iv.data, options->auth_iv.length); 1517 } 1518 1519 static void 1520 display_aead_info(struct l2fwd_crypto_options *options) 1521 { 1522 printf("\n---- AEAD information ---\n"); 1523 printf("Algorithm: %s\n", 1524 rte_crypto_aead_algorithm_strings[options->aead_xform.aead.algo]); 1525 rte_hexdump(stdout, "AEAD key:", 1526 options->aead_xform.aead.key.data, 1527 
options->aead_xform.aead.key.length); 1528 rte_hexdump(stdout, "IV:", options->aead_iv.data, options->aead_iv.length); 1529 rte_hexdump(stdout, "AAD:", options->aad.data, options->aad.length); 1530 } 1531 1532 static void 1533 l2fwd_crypto_options_print(struct l2fwd_crypto_options *options) 1534 { 1535 char string_cipher_op[MAX_STR_LEN]; 1536 char string_auth_op[MAX_STR_LEN]; 1537 char string_aead_op[MAX_STR_LEN]; 1538 1539 if (options->cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) 1540 strcpy(string_cipher_op, "Encrypt"); 1541 else 1542 strcpy(string_cipher_op, "Decrypt"); 1543 1544 if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) 1545 strcpy(string_auth_op, "Auth generate"); 1546 else 1547 strcpy(string_auth_op, "Auth verify"); 1548 1549 if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) 1550 strcpy(string_aead_op, "Authenticated encryption"); 1551 else 1552 strcpy(string_aead_op, "Authenticated decryption"); 1553 1554 1555 printf("Options:-\n"); 1556 printf("portmask: %x\n", options->portmask); 1557 printf("ports per lcore: %u\n", options->nb_ports_per_lcore); 1558 printf("refresh period : %u\n", options->refresh_period); 1559 printf("single lcore mode: %s\n", 1560 options->single_lcore ? "enabled" : "disabled"); 1561 printf("stats_printing: %s\n", 1562 options->refresh_period == 0 ? "disabled" : "enabled"); 1563 1564 printf("sessionless crypto: %s\n", 1565 options->sessionless ? "enabled" : "disabled"); 1566 1567 if (options->ckey_param && (options->ckey_random_size != -1)) 1568 printf("Cipher key already parsed, ignoring size of random key\n"); 1569 1570 if (options->akey_param && (options->akey_random_size != -1)) 1571 printf("Auth key already parsed, ignoring size of random key\n"); 1572 1573 if (options->cipher_iv_param && (options->cipher_iv_random_size != -1)) 1574 printf("Cipher IV already parsed, ignoring size of random IV\n"); 1575 1576 if (options->auth_iv_param && (options->auth_iv_random_size != -1)) 1577 printf("Auth IV already parsed, ignoring size of random IV\n"); 1578 1579 if (options->aad_param && (options->aad_random_size != -1)) 1580 printf("AAD already parsed, ignoring size of random AAD\n"); 1581 1582 printf("\nCrypto chain: "); 1583 switch (options->xform_chain) { 1584 case L2FWD_CRYPTO_AEAD: 1585 printf("Input --> %s --> Output\n", string_aead_op); 1586 display_aead_info(options); 1587 break; 1588 case L2FWD_CRYPTO_CIPHER_HASH: 1589 printf("Input --> %s --> %s --> Output\n", 1590 string_cipher_op, string_auth_op); 1591 display_cipher_info(options); 1592 display_auth_info(options); 1593 break; 1594 case L2FWD_CRYPTO_HASH_CIPHER: 1595 printf("Input --> %s --> %s --> Output\n", 1596 string_auth_op, string_cipher_op); 1597 display_cipher_info(options); 1598 display_auth_info(options); 1599 break; 1600 case L2FWD_CRYPTO_HASH_ONLY: 1601 printf("Input --> %s --> Output\n", string_auth_op); 1602 display_auth_info(options); 1603 break; 1604 case L2FWD_CRYPTO_CIPHER_ONLY: 1605 printf("Input --> %s --> Output\n", string_cipher_op); 1606 display_cipher_info(options); 1607 break; 1608 } 1609 } 1610 1611 /* Parse the argument given in the command line of the application */ 1612 static int 1613 l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options, 1614 int argc, char **argv) 1615 { 1616 int opt, retval, option_index; 1617 char **argvopt = argv, *prgname = argv[0]; 1618 1619 static struct option lgopts[] = { 1620 { "sessionless", no_argument, 0, 0 }, 1621 1622 { "cdev_type", required_argument, 0, 0 }, 1623 { "chain",
required_argument, 0, 0 }, 1624 1625 { "cipher_algo", required_argument, 0, 0 }, 1626 { "cipher_op", required_argument, 0, 0 }, 1627 { "cipher_key", required_argument, 0, 0 }, 1628 { "cipher_key_random_size", required_argument, 0, 0 }, 1629 { "cipher_iv", required_argument, 0, 0 }, 1630 { "cipher_iv_random_size", required_argument, 0, 0 }, 1631 1632 { "auth_algo", required_argument, 0, 0 }, 1633 { "auth_op", required_argument, 0, 0 }, 1634 { "auth_key", required_argument, 0, 0 }, 1635 { "auth_key_random_size", required_argument, 0, 0 }, 1636 { "auth_iv", required_argument, 0, 0 }, 1637 { "auth_iv_random_size", required_argument, 0, 0 }, 1638 1639 { "aead_algo", required_argument, 0, 0 }, 1640 { "aead_op", required_argument, 0, 0 }, 1641 { "aead_key", required_argument, 0, 0 }, 1642 { "aead_key_random_size", required_argument, 0, 0 }, 1643 { "aead_iv", required_argument, 0, 0 }, 1644 { "aead_iv_random_size", required_argument, 0, 0 }, 1645 1646 { "aad", required_argument, 0, 0 }, 1647 { "aad_random_size", required_argument, 0, 0 }, 1648 1649 { "digest_size", required_argument, 0, 0 }, 1650 1651 { "sessionless", no_argument, 0, 0 }, 1652 { "cryptodev_mask", required_argument, 0, 0}, 1653 1654 { "mac-updating", no_argument, 0, 0}, 1655 { "no-mac-updating", no_argument, 0, 0}, 1656 1657 { NULL, 0, 0, 0 } 1658 }; 1659 1660 l2fwd_crypto_default_options(options); 1661 1662 while ((opt = getopt_long(argc, argvopt, "p:q:sT:", lgopts, 1663 &option_index)) != EOF) { 1664 switch (opt) { 1665 /* long options */ 1666 case 0: 1667 retval = l2fwd_crypto_parse_args_long_options(options, 1668 lgopts, option_index); 1669 if (retval < 0) { 1670 l2fwd_crypto_usage(prgname); 1671 return -1; 1672 } 1673 break; 1674 1675 /* portmask */ 1676 case 'p': 1677 retval = l2fwd_crypto_parse_portmask(options, optarg); 1678 if (retval < 0) { 1679 l2fwd_crypto_usage(prgname); 1680 return -1; 1681 } 1682 break; 1683 1684 /* nqueue */ 1685 case 'q': 1686 retval = l2fwd_crypto_parse_nqueue(options, optarg); 1687 if (retval < 0) { 1688 l2fwd_crypto_usage(prgname); 1689 return -1; 1690 } 1691 break; 1692 1693 /* single */ 1694 case 's': 1695 options->single_lcore = 1; 1696 1697 break; 1698 1699 /* timer period */ 1700 case 'T': 1701 retval = l2fwd_crypto_parse_timer_period(options, 1702 optarg); 1703 if (retval < 0) { 1704 l2fwd_crypto_usage(prgname); 1705 return -1; 1706 } 1707 break; 1708 1709 default: 1710 l2fwd_crypto_usage(prgname); 1711 return -1; 1712 } 1713 } 1714 1715 1716 if (optind >= 0) 1717 argv[optind-1] = prgname; 1718 1719 retval = optind-1; 1720 optind = 1; /* reset getopt lib */ 1721 1722 return retval; 1723 } 1724 1725 /* Check the link status of all ports in up to 9s, and print them finally */ 1726 static void 1727 check_all_ports_link_status(uint32_t port_mask) 1728 { 1729 #define CHECK_INTERVAL 100 /* 100ms */ 1730 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1731 uint16_t portid; 1732 uint8_t count, all_ports_up, print_flag = 0; 1733 struct rte_eth_link link; 1734 1735 printf("\nChecking link status"); 1736 fflush(stdout); 1737 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1738 all_ports_up = 1; 1739 RTE_ETH_FOREACH_DEV(portid) { 1740 if ((port_mask & (1 << portid)) == 0) 1741 continue; 1742 memset(&link, 0, sizeof(link)); 1743 rte_eth_link_get_nowait(portid, &link); 1744 /* print link status if flag set */ 1745 if (print_flag == 1) { 1746 if (link.link_status) 1747 printf( 1748 "Port%d Link Up. 
Speed %u Mbps - %s\n", 1749 portid, link.link_speed, 1750 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1751 ("full-duplex") : ("half-duplex")); 1752 else 1753 printf("Port %d Link Down\n", portid); 1754 continue; 1755 } 1756 /* clear all_ports_up flag if any link down */ 1757 if (link.link_status == ETH_LINK_DOWN) { 1758 all_ports_up = 0; 1759 break; 1760 } 1761 } 1762 /* after finally printing all link status, get out */ 1763 if (print_flag == 1) 1764 break; 1765 1766 if (all_ports_up == 0) { 1767 printf("."); 1768 fflush(stdout); 1769 rte_delay_ms(CHECK_INTERVAL); 1770 } 1771 1772 /* set the print_flag if all ports up or timeout */ 1773 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1774 print_flag = 1; 1775 printf("done\n"); 1776 } 1777 } 1778 } 1779 1780 /* Check if device has to be HW/SW or any */ 1781 static int 1782 check_type(const struct l2fwd_crypto_options *options, 1783 const struct rte_cryptodev_info *dev_info) 1784 { 1785 if (options->type == CDEV_TYPE_HW && 1786 (dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED)) 1787 return 0; 1788 if (options->type == CDEV_TYPE_SW && 1789 !(dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED)) 1790 return 0; 1791 if (options->type == CDEV_TYPE_ANY) 1792 return 0; 1793 1794 return -1; 1795 } 1796 1797 static const struct rte_cryptodev_capabilities * 1798 check_device_support_cipher_algo(const struct l2fwd_crypto_options *options, 1799 const struct rte_cryptodev_info *dev_info, 1800 uint8_t cdev_id) 1801 { 1802 unsigned int i = 0; 1803 const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0]; 1804 enum rte_crypto_cipher_algorithm cap_cipher_algo; 1805 enum rte_crypto_cipher_algorithm opt_cipher_algo = 1806 options->cipher_xform.cipher.algo; 1807 1808 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) { 1809 cap_cipher_algo = cap->sym.cipher.algo; 1810 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 1811 if (cap_cipher_algo == opt_cipher_algo) { 1812 if (check_type(options, dev_info) == 0) 1813 break; 1814 } 1815 } 1816 cap = &dev_info->capabilities[++i]; 1817 } 1818 1819 if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) { 1820 printf("Algorithm %s not supported by cryptodev %u" 1821 " or device not of preferred type (%s)\n", 1822 rte_crypto_cipher_algorithm_strings[opt_cipher_algo], 1823 cdev_id, 1824 options->string_type); 1825 return NULL; 1826 } 1827 1828 return cap; 1829 } 1830 1831 static const struct rte_cryptodev_capabilities * 1832 check_device_support_auth_algo(const struct l2fwd_crypto_options *options, 1833 const struct rte_cryptodev_info *dev_info, 1834 uint8_t cdev_id) 1835 { 1836 unsigned int i = 0; 1837 const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0]; 1838 enum rte_crypto_auth_algorithm cap_auth_algo; 1839 enum rte_crypto_auth_algorithm opt_auth_algo = 1840 options->auth_xform.auth.algo; 1841 1842 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) { 1843 cap_auth_algo = cap->sym.auth.algo; 1844 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) { 1845 if (cap_auth_algo == opt_auth_algo) { 1846 if (check_type(options, dev_info) == 0) 1847 break; 1848 } 1849 } 1850 cap = &dev_info->capabilities[++i]; 1851 } 1852 1853 if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) { 1854 printf("Algorithm %s not supported by cryptodev %u" 1855 " or device not of preferred type (%s)\n", 1856 rte_crypto_auth_algorithm_strings[opt_auth_algo], 1857 cdev_id, 1858 options->string_type); 1859 return NULL; 1860 } 1861 1862 return cap; 1863 } 1864 1865 static const struct
rte_cryptodev_capabilities * 1866 check_device_support_aead_algo(const struct l2fwd_crypto_options *options, 1867 const struct rte_cryptodev_info *dev_info, 1868 uint8_t cdev_id) 1869 { 1870 unsigned int i = 0; 1871 const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0]; 1872 enum rte_crypto_aead_algorithm cap_aead_algo; 1873 enum rte_crypto_aead_algorithm opt_aead_algo = 1874 options->aead_xform.aead.algo; 1875 1876 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) { 1877 cap_aead_algo = cap->sym.aead.algo; 1878 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) { 1879 if (cap_aead_algo == opt_aead_algo) { 1880 if (check_type(options, dev_info) == 0) 1881 break; 1882 } 1883 } 1884 cap = &dev_info->capabilities[++i]; 1885 } 1886 1887 if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) { 1888 printf("Algorithm %s not supported by cryptodev %u" 1889 " or device not of preferred type (%s)\n", 1890 rte_crypto_aead_algorithm_strings[opt_aead_algo], 1891 cdev_id, 1892 options->string_type); 1893 return NULL; 1894 } 1895 1896 return cap; 1897 } 1898 1899 /* Check if the device is enabled by cryptodev_mask */ 1900 static int 1901 check_cryptodev_mask(struct l2fwd_crypto_options *options, 1902 uint8_t cdev_id) 1903 { 1904 if (options->cryptodev_mask & (1 << cdev_id)) 1905 return 0; 1906 1907 return -1; 1908 } 1909 1910 static inline int 1911 check_supported_size(uint16_t length, uint16_t min, uint16_t max, 1912 uint16_t increment) 1913 { 1914 uint16_t supp_size; 1915 1916 /* Single value */ 1917 if (increment == 0) { 1918 if (length == min) 1919 return 0; 1920 else 1921 return -1; 1922 } 1923 1924 /* Range of values */ 1925 for (supp_size = min; supp_size <= max; supp_size += increment) { 1926 if (length == supp_size) 1927 return 0; 1928 } 1929 1930 return -1; 1931 } 1932 1933 static int 1934 check_iv_param(const struct rte_crypto_param_range *iv_range_size, 1935 unsigned int iv_param, int iv_random_size, 1936 uint16_t iv_length) 1937 { 1938 /* 1939 * Check if length of provided IV is supported 1940 * by the algorithm chosen. 1941 */ 1942 if (iv_param) { 1943 if (check_supported_size(iv_length, 1944 iv_range_size->min, 1945 iv_range_size->max, 1946 iv_range_size->increment) 1947 != 0) 1948 return -1; 1949 /* 1950 * Check if length of IV to be randomly generated 1951 * is supported by the algorithm chosen. 1952 */ 1953 } else if (iv_random_size != -1) { 1954 if (check_supported_size(iv_random_size, 1955 iv_range_size->min, 1956 iv_range_size->max, 1957 iv_range_size->increment) 1958 != 0) 1959 return -1; 1960 } 1961 1962 return 0; 1963 } 1964 1965 static int 1966 check_capabilities(struct l2fwd_crypto_options *options, uint8_t cdev_id) 1967 { 1968 struct rte_cryptodev_info dev_info; 1969 const struct rte_cryptodev_capabilities *cap; 1970 1971 rte_cryptodev_info_get(cdev_id, &dev_info); 1972 1973 /* Set AEAD parameters */ 1974 if (options->xform_chain == L2FWD_CRYPTO_AEAD) { 1975 /* Check if device supports AEAD algo */ 1976 cap = check_device_support_aead_algo(options, &dev_info, 1977 cdev_id); 1978 if (cap == NULL) 1979 return -1; 1980 1981 if (check_iv_param(&cap->sym.aead.iv_size, 1982 options->aead_iv_param, 1983 options->aead_iv_random_size, 1984 options->aead_iv.length) != 0) { 1985 RTE_LOG(DEBUG, USER1, 1986 "Device %u does not support IV length\n", 1987 cdev_id); 1988 return -1; 1989 } 1990 1991 /* 1992 * Check if length of provided AEAD key is supported 1993 * by the algorithm chosen. 

static inline int
check_supported_size(uint16_t length, uint16_t min, uint16_t max,
		uint16_t increment)
{
	uint16_t supp_size;

	/* Single value */
	if (increment == 0) {
		if (length == min)
			return 0;
		else
			return -1;
	}

	/* Range of values */
	for (supp_size = min; supp_size <= max; supp_size += increment) {
		if (length == supp_size)
			return 0;
	}

	return -1;
}
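
/*
 * Worked example of the range semantics above: a capability range of
 * {min = 16, max = 32, increment = 8} accepts lengths 16, 24 and 32,
 * whereas {min = 12, max = 12, increment = 0} accepts only 12.
 */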

static int
check_iv_param(const struct rte_crypto_param_range *iv_range_size,
		unsigned int iv_param, int iv_random_size,
		uint16_t iv_length)
{
	/*
	 * Check if length of provided IV is supported
	 * by the algorithm chosen.
	 */
	if (iv_param) {
		if (check_supported_size(iv_length,
				iv_range_size->min,
				iv_range_size->max,
				iv_range_size->increment)
					!= 0)
			return -1;
	/*
	 * Check if length of IV to be randomly generated
	 * is supported by the algorithm chosen.
	 */
	} else if (iv_random_size != -1) {
		if (check_supported_size(iv_random_size,
				iv_range_size->min,
				iv_range_size->max,
				iv_range_size->increment)
					!= 0)
			return -1;
	}

	return 0;
}
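
/*
 * check_iv_param() is shared by the cipher, auth and AEAD paths below:
 * iv_param is non-zero when the user supplied the IV bytes explicitly,
 * otherwise iv_random_size (-1 meaning "unset") gives the length of the
 * IV that will be generated at random.
 */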

static int
check_capabilities(struct l2fwd_crypto_options *options, uint8_t cdev_id)
{
	struct rte_cryptodev_info dev_info;
	const struct rte_cryptodev_capabilities *cap;

	rte_cryptodev_info_get(cdev_id, &dev_info);

	/* Set AEAD parameters */
	if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
		/* Check if device supports AEAD algo */
		cap = check_device_support_aead_algo(options, &dev_info,
						cdev_id);
		if (cap == NULL)
			return -1;

		if (check_iv_param(&cap->sym.aead.iv_size,
				options->aead_iv_param,
				options->aead_iv_random_size,
				options->aead_iv.length) != 0) {
			RTE_LOG(DEBUG, USER1,
				"Device %u does not support IV length\n",
				cdev_id);
			return -1;
		}

		/*
		 * Check if length of provided AEAD key is supported
		 * by the algorithm chosen.
		 */
		if (options->aead_key_param) {
			if (check_supported_size(
					options->aead_xform.aead.key.length,
					cap->sym.aead.key_size.min,
					cap->sym.aead.key_size.max,
					cap->sym.aead.key_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support "
					"AEAD key length\n",
					cdev_id);
				return -1;
			}
		/*
		 * Check if length of the aead key to be randomly generated
		 * is supported by the algorithm chosen.
		 */
		} else if (options->aead_key_random_size != -1) {
			if (check_supported_size(options->aead_key_random_size,
					cap->sym.aead.key_size.min,
					cap->sym.aead.key_size.max,
					cap->sym.aead.key_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support "
					"AEAD key length\n",
					cdev_id);
				return -1;
			}
		}


		/*
		 * Check if length of provided AAD is supported
		 * by the algorithm chosen.
		 */
		if (options->aad_param) {
			if (check_supported_size(options->aad.length,
					cap->sym.aead.aad_size.min,
					cap->sym.aead.aad_size.max,
					cap->sym.aead.aad_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support "
					"AAD length\n",
					cdev_id);
				return -1;
			}
		/*
		 * Check if length of AAD to be randomly generated
		 * is supported by the algorithm chosen.
		 */
		} else if (options->aad_random_size != -1) {
			if (check_supported_size(options->aad_random_size,
					cap->sym.aead.aad_size.min,
					cap->sym.aead.aad_size.max,
					cap->sym.aead.aad_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support "
					"AAD length\n",
					cdev_id);
				return -1;
			}
		}

		/* Check if digest size is supported by the algorithm. */
		if (options->digest_size != -1) {
			if (check_supported_size(options->digest_size,
					cap->sym.aead.digest_size.min,
					cap->sym.aead.digest_size.max,
					cap->sym.aead.digest_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support "
					"digest length\n",
					cdev_id);
				return -1;
			}
		}
	}

	/* Set cipher parameters */
	if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
			options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
			options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
		/* Check if device supports cipher algo */
		cap = check_device_support_cipher_algo(options, &dev_info,
						cdev_id);
		if (cap == NULL)
			return -1;

		if (check_iv_param(&cap->sym.cipher.iv_size,
				options->cipher_iv_param,
				options->cipher_iv_random_size,
				options->cipher_iv.length) != 0) {
			RTE_LOG(DEBUG, USER1,
				"Device %u does not support IV length\n",
				cdev_id);
			return -1;
		}

		/*
		 * Check if length of provided cipher key is supported
		 * by the algorithm chosen.
		 */
		if (options->ckey_param) {
			if (check_supported_size(
					options->cipher_xform.cipher.key.length,
					cap->sym.cipher.key_size.min,
					cap->sym.cipher.key_size.max,
					cap->sym.cipher.key_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support cipher "
					"key length\n",
					cdev_id);
				return -1;
			}
		/*
		 * Check if length of the cipher key to be randomly generated
		 * is supported by the algorithm chosen.
		 */
		} else if (options->ckey_random_size != -1) {
			if (check_supported_size(options->ckey_random_size,
					cap->sym.cipher.key_size.min,
					cap->sym.cipher.key_size.max,
					cap->sym.cipher.key_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support cipher "
					"key length\n",
					cdev_id);
				return -1;
			}
		}
	}

	/* Set auth parameters */
	if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
			options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
			options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
		/* Check if device supports auth algo */
		cap = check_device_support_auth_algo(options, &dev_info,
						cdev_id);
		if (cap == NULL)
			return -1;

		if (check_iv_param(&cap->sym.auth.iv_size,
				options->auth_iv_param,
				options->auth_iv_random_size,
				options->auth_iv.length) != 0) {
			RTE_LOG(DEBUG, USER1,
				"Device %u does not support IV length\n",
				cdev_id);
			return -1;
		}
		/*
		 * Check if length of provided auth key is supported
		 * by the algorithm chosen.
		 */
		if (options->akey_param) {
			if (check_supported_size(
					options->auth_xform.auth.key.length,
					cap->sym.auth.key_size.min,
					cap->sym.auth.key_size.max,
					cap->sym.auth.key_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support auth "
					"key length\n",
					cdev_id);
				return -1;
			}
		/*
		 * Check if length of the auth key to be randomly generated
		 * is supported by the algorithm chosen.
		 */
		} else if (options->akey_random_size != -1) {
			if (check_supported_size(options->akey_random_size,
					cap->sym.auth.key_size.min,
					cap->sym.auth.key_size.max,
					cap->sym.auth.key_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support auth "
					"key length\n",
					cdev_id);
				return -1;
			}
		}

		/* Check if digest size is supported by the algorithm. */
		if (options->digest_size != -1) {
			if (check_supported_size(options->digest_size,
					cap->sym.auth.digest_size.min,
					cap->sym.auth.digest_size.max,
					cap->sym.auth.digest_size.increment)
						!= 0) {
				RTE_LOG(DEBUG, USER1,
					"Device %u does not support "
					"digest length\n",
					cdev_id);
				return -1;
			}
		}
	}

	return 0;
}
printf("Allocated pool \"%s\" on socket %d\n", 2320 mp_name, socket_id); 2321 } 2322 2323 /* Set AEAD parameters */ 2324 if (options->xform_chain == L2FWD_CRYPTO_AEAD) { 2325 cap = check_device_support_aead_algo(options, &dev_info, 2326 cdev_id); 2327 2328 options->block_size = cap->sym.aead.block_size; 2329 2330 /* Set IV if not provided from command line */ 2331 if (options->aead_iv_param == 0) { 2332 if (options->aead_iv_random_size != -1) 2333 options->aead_iv.length = 2334 options->aead_iv_random_size; 2335 /* No size provided, use minimum size. */ 2336 else 2337 options->aead_iv.length = 2338 cap->sym.aead.iv_size.min; 2339 } 2340 2341 /* Set key if not provided from command line */ 2342 if (options->aead_key_param == 0) { 2343 if (options->aead_key_random_size != -1) 2344 options->aead_xform.aead.key.length = 2345 options->aead_key_random_size; 2346 /* No size provided, use minimum size. */ 2347 else 2348 options->aead_xform.aead.key.length = 2349 cap->sym.aead.key_size.min; 2350 2351 generate_random_key( 2352 options->aead_xform.aead.key.data, 2353 options->aead_xform.aead.key.length); 2354 } 2355 2356 /* Set AAD if not provided from command line */ 2357 if (options->aad_param == 0) { 2358 if (options->aad_random_size != -1) 2359 options->aad.length = 2360 options->aad_random_size; 2361 /* No size provided, use minimum size. */ 2362 else 2363 options->aad.length = 2364 cap->sym.auth.aad_size.min; 2365 } 2366 2367 options->aead_xform.aead.aad_length = 2368 options->aad.length; 2369 2370 /* Set digest size if not provided from command line */ 2371 if (options->digest_size != -1) 2372 options->aead_xform.aead.digest_length = 2373 options->digest_size; 2374 /* No size provided, use minimum size. */ 2375 else 2376 options->aead_xform.aead.digest_length = 2377 cap->sym.aead.digest_size.min; 2378 } 2379 2380 /* Set cipher parameters */ 2381 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH || 2382 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER || 2383 options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) { 2384 cap = check_device_support_cipher_algo(options, &dev_info, 2385 cdev_id); 2386 options->block_size = cap->sym.cipher.block_size; 2387 2388 /* Set IV if not provided from command line */ 2389 if (options->cipher_iv_param == 0) { 2390 if (options->cipher_iv_random_size != -1) 2391 options->cipher_iv.length = 2392 options->cipher_iv_random_size; 2393 /* No size provided, use minimum size. */ 2394 else 2395 options->cipher_iv.length = 2396 cap->sym.cipher.iv_size.min; 2397 } 2398 2399 /* Set key if not provided from command line */ 2400 if (options->ckey_param == 0) { 2401 if (options->ckey_random_size != -1) 2402 options->cipher_xform.cipher.key.length = 2403 options->ckey_random_size; 2404 /* No size provided, use minimum size. */ 2405 else 2406 options->cipher_xform.cipher.key.length = 2407 cap->sym.cipher.key_size.min; 2408 2409 generate_random_key( 2410 options->cipher_xform.cipher.key.data, 2411 options->cipher_xform.cipher.key.length); 2412 } 2413 } 2414 2415 /* Set auth parameters */ 2416 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH || 2417 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER || 2418 options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) { 2419 cap = check_device_support_auth_algo(options, &dev_info, 2420 cdev_id); 2421 2422 /* Set IV if not provided from command line */ 2423 if (options->auth_iv_param == 0) { 2424 if (options->auth_iv_random_size != -1) 2425 options->auth_iv.length = 2426 options->auth_iv_random_size; 2427 /* No size provided, use minimum size. 

static int
initialize_ports(struct l2fwd_crypto_options *options)
{
	uint16_t last_portid = 0, portid;
	unsigned enabled_portcount = 0;
	unsigned nb_ports = rte_eth_dev_count_avail();

	if (nb_ports == 0) {
		printf("No Ethernet ports - bye\n");
		return -1;
	}

	/* Reset l2fwd_dst_ports */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		l2fwd_dst_ports[portid] = 0;

	RTE_ETH_FOREACH_DEV(portid) {
		int retval;
		struct rte_eth_dev_info dev_info;
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_txconf txq_conf;
		struct rte_eth_conf local_port_conf = port_conf;

		/* Skip ports that are not enabled */
		if ((options->portmask & (1 << portid)) == 0)
			continue;

		/* init port */
		printf("Initializing port %u... ", portid);
		fflush(stdout);
		rte_eth_dev_info_get(portid, &dev_info);
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
		if (retval < 0) {
			printf("Cannot configure device: err=%d, port=%u\n",
				  retval, portid);
			return -1;
		}

		retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
							  &nb_txd);
		if (retval < 0) {
			printf("Cannot adjust number of descriptors: err=%d, port=%u\n",
				retval, portid);
			return -1;
		}

		/* init one RX queue */
		fflush(stdout);
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
						rte_eth_dev_socket_id(portid),
						&rxq_conf, l2fwd_pktmbuf_pool);
		if (retval < 0) {
			printf("rte_eth_rx_queue_setup:err=%d, port=%u\n",
					retval, portid);
			return -1;
		}

		/* init one TX queue on each port */
		fflush(stdout);
		txq_conf = dev_info.default_txconf;
		txq_conf.offloads = local_port_conf.txmode.offloads;
		retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid),
				&txq_conf);
		if (retval < 0) {
			printf("rte_eth_tx_queue_setup:err=%d, port=%u\n",
				retval, portid);
			return -1;
		}

		/* Start device */
		retval = rte_eth_dev_start(portid);
		if (retval < 0) {
			printf("rte_eth_dev_start:err=%d, port=%u\n",
					retval, portid);
			return -1;
		}

		rte_eth_promiscuous_enable(portid);

		rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);

		printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
				portid,
				l2fwd_ports_eth_addr[portid].addr_bytes[0],
				l2fwd_ports_eth_addr[portid].addr_bytes[1],
				l2fwd_ports_eth_addr[portid].addr_bytes[2],
				l2fwd_ports_eth_addr[portid].addr_bytes[3],
				l2fwd_ports_eth_addr[portid].addr_bytes[4],
				l2fwd_ports_eth_addr[portid].addr_bytes[5]);

		/* initialize port stats */
		memset(&port_statistics, 0, sizeof(port_statistics));

		/* Setup port forwarding table */
		if (enabled_portcount % 2) {
			l2fwd_dst_ports[portid] = last_portid;
			l2fwd_dst_ports[last_portid] = portid;
		} else {
			last_portid = portid;
		}

		l2fwd_enabled_port_mask |= (1 << portid);
		enabled_portcount++;
	}

	if (enabled_portcount == 1) {
		l2fwd_dst_ports[last_portid] = last_portid;
	} else if (enabled_portcount % 2) {
		printf("odd number of ports in portmask - bye\n");
		return -1;
	}

	check_all_ports_link_status(l2fwd_enabled_port_mask);

	return enabled_portcount;
}
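
/*
 * The forwarding table built above pairs ports in the order they are
 * enabled: the 1st and 2nd enabled ports forward to each other, the 3rd
 * and 4th likewise, and so on. A single enabled port forwards to itself;
 * any other odd port count is rejected.
 */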
", portid); 2517 fflush(stdout); 2518 rte_eth_dev_info_get(portid, &dev_info); 2519 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) 2520 local_port_conf.txmode.offloads |= 2521 DEV_TX_OFFLOAD_MBUF_FAST_FREE; 2522 retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf); 2523 if (retval < 0) { 2524 printf("Cannot configure device: err=%d, port=%u\n", 2525 retval, portid); 2526 return -1; 2527 } 2528 2529 retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, 2530 &nb_txd); 2531 if (retval < 0) { 2532 printf("Cannot adjust number of descriptors: err=%d, port=%u\n", 2533 retval, portid); 2534 return -1; 2535 } 2536 2537 /* init one RX queue */ 2538 fflush(stdout); 2539 rxq_conf = dev_info.default_rxconf; 2540 rxq_conf.offloads = local_port_conf.rxmode.offloads; 2541 retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd, 2542 rte_eth_dev_socket_id(portid), 2543 &rxq_conf, l2fwd_pktmbuf_pool); 2544 if (retval < 0) { 2545 printf("rte_eth_rx_queue_setup:err=%d, port=%u\n", 2546 retval, portid); 2547 return -1; 2548 } 2549 2550 /* init one TX queue on each port */ 2551 fflush(stdout); 2552 txq_conf = dev_info.default_txconf; 2553 txq_conf.offloads = local_port_conf.txmode.offloads; 2554 retval = rte_eth_tx_queue_setup(portid, 0, nb_txd, 2555 rte_eth_dev_socket_id(portid), 2556 &txq_conf); 2557 if (retval < 0) { 2558 printf("rte_eth_tx_queue_setup:err=%d, port=%u\n", 2559 retval, portid); 2560 2561 return -1; 2562 } 2563 2564 /* Start device */ 2565 retval = rte_eth_dev_start(portid); 2566 if (retval < 0) { 2567 printf("rte_eth_dev_start:err=%d, port=%u\n", 2568 retval, portid); 2569 return -1; 2570 } 2571 2572 rte_eth_promiscuous_enable(portid); 2573 2574 rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]); 2575 2576 printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n", 2577 portid, 2578 l2fwd_ports_eth_addr[portid].addr_bytes[0], 2579 l2fwd_ports_eth_addr[portid].addr_bytes[1], 2580 l2fwd_ports_eth_addr[portid].addr_bytes[2], 2581 l2fwd_ports_eth_addr[portid].addr_bytes[3], 2582 l2fwd_ports_eth_addr[portid].addr_bytes[4], 2583 l2fwd_ports_eth_addr[portid].addr_bytes[5]); 2584 2585 /* initialize port stats */ 2586 memset(&port_statistics, 0, sizeof(port_statistics)); 2587 2588 /* Setup port forwarding table */ 2589 if (enabled_portcount % 2) { 2590 l2fwd_dst_ports[portid] = last_portid; 2591 l2fwd_dst_ports[last_portid] = portid; 2592 } else { 2593 last_portid = portid; 2594 } 2595 2596 l2fwd_enabled_port_mask |= (1 << portid); 2597 enabled_portcount++; 2598 } 2599 2600 if (enabled_portcount == 1) { 2601 l2fwd_dst_ports[last_portid] = last_portid; 2602 } else if (enabled_portcount % 2) { 2603 printf("odd number of ports in portmask- bye\n"); 2604 return -1; 2605 } 2606 2607 check_all_ports_link_status(l2fwd_enabled_port_mask); 2608 2609 return enabled_portcount; 2610 } 2611 2612 static void 2613 reserve_key_memory(struct l2fwd_crypto_options *options) 2614 { 2615 options->cipher_xform.cipher.key.data = rte_malloc("crypto key", 2616 MAX_KEY_SIZE, 0); 2617 if (options->cipher_xform.cipher.key.data == NULL) 2618 rte_exit(EXIT_FAILURE, "Failed to allocate memory for cipher key"); 2619 2620 options->auth_xform.auth.key.data = rte_malloc("auth key", 2621 MAX_KEY_SIZE, 0); 2622 if (options->auth_xform.auth.key.data == NULL) 2623 rte_exit(EXIT_FAILURE, "Failed to allocate memory for auth key"); 2624 2625 options->aead_xform.aead.key.data = rte_malloc("aead key", 2626 MAX_KEY_SIZE, 0); 2627 if (options->aead_xform.aead.key.data == NULL) 2628 rte_exit(EXIT_FAILURE, 
"Failed to allocate memory for AEAD key"); 2629 2630 options->cipher_iv.data = rte_malloc("cipher iv", MAX_KEY_SIZE, 0); 2631 if (options->cipher_iv.data == NULL) 2632 rte_exit(EXIT_FAILURE, "Failed to allocate memory for cipher IV"); 2633 2634 options->auth_iv.data = rte_malloc("auth iv", MAX_KEY_SIZE, 0); 2635 if (options->auth_iv.data == NULL) 2636 rte_exit(EXIT_FAILURE, "Failed to allocate memory for auth IV"); 2637 2638 options->aead_iv.data = rte_malloc("aead_iv", MAX_KEY_SIZE, 0); 2639 if (options->aead_iv.data == NULL) 2640 rte_exit(EXIT_FAILURE, "Failed to allocate memory for AEAD iv"); 2641 2642 options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0); 2643 if (options->aad.data == NULL) 2644 rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD"); 2645 options->aad.phys_addr = rte_malloc_virt2iova(options->aad.data); 2646 } 2647 2648 int 2649 main(int argc, char **argv) 2650 { 2651 struct lcore_queue_conf *qconf = NULL; 2652 struct l2fwd_crypto_options options; 2653 2654 uint8_t nb_cryptodevs, cdev_id; 2655 uint16_t portid; 2656 unsigned lcore_id, rx_lcore_id = 0; 2657 int ret, enabled_cdevcount, enabled_portcount; 2658 uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = {0}; 2659 2660 /* init EAL */ 2661 ret = rte_eal_init(argc, argv); 2662 if (ret < 0) 2663 rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n"); 2664 argc -= ret; 2665 argv += ret; 2666 2667 /* reserve memory for Cipher/Auth key and IV */ 2668 reserve_key_memory(&options); 2669 2670 /* parse application arguments (after the EAL ones) */ 2671 ret = l2fwd_crypto_parse_args(&options, argc, argv); 2672 if (ret < 0) 2673 rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n"); 2674 2675 printf("MAC updating %s\n", 2676 options.mac_updating ? "enabled" : "disabled"); 2677 2678 /* create the mbuf pool */ 2679 l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512, 2680 sizeof(struct rte_crypto_op), 2681 RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); 2682 if (l2fwd_pktmbuf_pool == NULL) 2683 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); 2684 2685 /* create crypto op pool */ 2686 l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool", 2687 RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, MAXIMUM_IV_LENGTH, 2688 rte_socket_id()); 2689 if (l2fwd_crypto_op_pool == NULL) 2690 rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n"); 2691 2692 /* Enable Ethernet ports */ 2693 enabled_portcount = initialize_ports(&options); 2694 if (enabled_portcount < 1) 2695 rte_exit(EXIT_FAILURE, "Failed to initial Ethernet ports\n"); 2696 2697 /* Initialize the port/queue configuration of each logical core */ 2698 RTE_ETH_FOREACH_DEV(portid) { 2699 2700 /* skip ports that are not enabled */ 2701 if ((options.portmask & (1 << portid)) == 0) 2702 continue; 2703 2704 if (options.single_lcore && qconf == NULL) { 2705 while (rte_lcore_is_enabled(rx_lcore_id) == 0) { 2706 rx_lcore_id++; 2707 if (rx_lcore_id >= RTE_MAX_LCORE) 2708 rte_exit(EXIT_FAILURE, 2709 "Not enough cores\n"); 2710 } 2711 } else if (!options.single_lcore) { 2712 /* get the lcore_id for this port */ 2713 while (rte_lcore_is_enabled(rx_lcore_id) == 0 || 2714 lcore_queue_conf[rx_lcore_id].nb_rx_ports == 2715 options.nb_ports_per_lcore) { 2716 rx_lcore_id++; 2717 if (rx_lcore_id >= RTE_MAX_LCORE) 2718 rte_exit(EXIT_FAILURE, 2719 "Not enough cores\n"); 2720 } 2721 } 2722 2723 /* Assigned a new logical core in the loop above. 

	/* Enable Crypto devices */
	enabled_cdevcount = initialize_cryptodevs(&options, enabled_portcount,
			enabled_cdevs);
	if (enabled_cdevcount < 0)
		rte_exit(EXIT_FAILURE, "Failed to initialize crypto devices\n");

	if (enabled_cdevcount < enabled_portcount)
		rte_exit(EXIT_FAILURE, "Number of capable crypto devices (%d) "
				"has to be greater than or equal to the number of ports (%d)\n",
				enabled_cdevcount, enabled_portcount);

	nb_cryptodevs = rte_cryptodev_count();

	/* Initialize the port/cryptodev configuration of each logical core */
	for (rx_lcore_id = 0, qconf = NULL, cdev_id = 0;
			cdev_id < nb_cryptodevs && enabled_cdevcount;
			cdev_id++) {
		/* Crypto op not supported by crypto device */
		if (!enabled_cdevs[cdev_id])
			continue;

		if (options.single_lcore && qconf == NULL) {
			while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
				rx_lcore_id++;
				if (rx_lcore_id >= RTE_MAX_LCORE)
					rte_exit(EXIT_FAILURE,
							"Not enough cores\n");
			}
		} else if (!options.single_lcore) {
			/* get the lcore_id for this port */
			while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
					lcore_queue_conf[rx_lcore_id].nb_crypto_devs ==
					options.nb_ports_per_lcore) {
				rx_lcore_id++;
				if (rx_lcore_id >= RTE_MAX_LCORE)
					rte_exit(EXIT_FAILURE,
							"Not enough cores\n");
			}
		}

		/* Assigned a new logical core in the loop above. */
		if (qconf != &lcore_queue_conf[rx_lcore_id])
			qconf = &lcore_queue_conf[rx_lcore_id];

		qconf->cryptodev_list[qconf->nb_crypto_devs] = cdev_id;
		qconf->nb_crypto_devs++;

		enabled_cdevcount--;

		printf("Lcore %u: cryptodev %u\n", rx_lcore_id,
				(unsigned)cdev_id);
	}

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)&options,
			CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}
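
/*
 * Usage sketch (see the l2fwd-crypto section of the DPDK Sample
 * Applications User Guide for the authoritative option list; the vdev
 * names below assume the AESNI-MB crypto PMD is available):
 *
 *   ./l2fwd-crypto -l 0-1 -n 4 --vdev "crypto_aesni_mb0" \
 *       --vdev "crypto_aesni_mb1" -- -p 0x3 --chain CIPHER_HASH \
 *       --cipher_op ENCRYPT --cipher_algo aes-cbc \
 *       --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \
 *       --auth_op GENERATE --auth_algo sha1-hmac \
 *       --auth_key 10:11:12:13:14:15:16:17:18:19:1a:1b:1c:1d:1e:1f
 */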