/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2021 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_vect.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>

#include <cmdline_parse.h>
#include <cmdline_parse_etheraddr.h>

#include "l3fwd.h"
#include "l3fwd_event.h"
#include "l3fwd_route.h"

#define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_LCORE_PARAMS 1024

/* A full Rx burst is replenished from the mempool cache, so the cache must
 * be able to hold at least one burst worth of mbufs.
 */
static_assert(MEMPOOL_CACHE_SIZE >= MAX_PKT_BURST, "MAX_PKT_BURST should be at most MEMPOOL_CACHE_SIZE");

/* Ring-size / burst / cache tunables; defaults come from l3fwd.h and may be
 * overridden on the command line (--rx-queue-size, --tx-queue-size, --burst,
 * --mbcache).
 */
uint16_t nb_rxd = RX_DESC_DEFAULT;
uint16_t nb_txd = TX_DESC_DEFAULT;
uint32_t nb_pkt_per_burst = DEFAULT_PKT_BURST;
uint32_t mb_mempool_cache_size = MEMPOOL_CACHE_SIZE;

/**< Ports set in promiscuous mode off by default. */
static int promiscuous_on;

/* Select Longest-Prefix, Exact match, Forwarding Information Base or Access Control. */
enum L3FWD_LOOKUP_MODE {
	L3FWD_LOOKUP_DEFAULT,	/* nothing chosen yet; resolved to LPM in parse_args() */
	L3FWD_LOOKUP_LPM,
	L3FWD_LOOKUP_EM,
	L3FWD_LOOKUP_FIB,
	L3FWD_LOOKUP_ACL
};
static enum L3FWD_LOOKUP_MODE lookup_mode;

/* Global variables. */
static int numa_on = 1; /**< NUMA is enabled by default. */
static int parse_ptype; /**< Parse packet type using rx callback, and */
			/**< disabled by default */
static int disable_rss; /**< Disable RSS mode */
static int relax_rx_offload; /**< Relax Rx offload mode, disabled by default */
static int per_port_pool; /**< Use separate buffer pools per port; disabled */
			  /**< by default */

/* Set by signal_handler() on SIGINT/SIGTERM; polled by the worker loops. */
volatile bool force_quit;

/* ethernet addresses of ports */
uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* Per-port pre-built Ethernet header words used on the fast path. */
xmm_t val_eth[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
uint32_t enabled_port_mask;

/* Used only in exact match mode. */
int ipv6; /**< ipv6 is false by default. */

struct lcore_conf lcore_conf[RTE_MAX_LCORE];

struct parm_cfg parm_config;

/* One (port, queue, lcore) Rx binding; the array of these drives polling. */
struct __rte_cache_aligned lcore_params {
	uint16_t port_id;
	uint16_t queue_id;
	uint32_t lcore_id;
};

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
/* Default Rx (port, queue, lcore) mapping used when --config is not given. */
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

/* Points at the default table until parse_config() installs a user mapping. */
static struct lcore_params * lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);

/* Base port configuration; rss_hf is widened later for ACL mode. */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = RTE_ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};

uint32_t max_pkt_len;

#ifdef RTE_LIB_EVENTDEV
static struct rte_mempool *vector_pool[RTE_MAX_ETHPORTS];
#endif
static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
/* Tracks which sockets already ran l3fwd_lkp.setup() (one setup per socket). */
static uint8_t lkp_per_socket[NB_SOCKETS];

/* Virtual function table abstracting the lookup method (EM/LPM/FIB/ACL). */
struct l3fwd_lkp_mode {
	void  (*read_config_files)(void);
	void  (*setup)(int);
	int   (*check_ptype)(int);
	rte_rx_callback_fn cb_parse_ptype;
	int   (*main_loop)(void *);
	void* (*get_ipv4_lookup_struct)(int);
	void* (*get_ipv6_lookup_struct)(int);
	void  (*free_routes)(void);
};

/* Active method table; filled in by setup_l3fwd_lookup_tables(). */
static struct l3fwd_lkp_mode l3fwd_lkp;

static struct l3fwd_lkp_mode l3fwd_em_lkp = {
	.read_config_files		= read_config_files_em,
	.setup                  = setup_hash,
	.check_ptype		= em_check_ptype,
	.cb_parse_ptype		= em_cb_parse_ptype,
	.main_loop              = em_main_loop,
	.get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
	.get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
	.free_routes		= em_free_routes,
};

static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
	.read_config_files		= read_config_files_lpm,
	.setup                  = setup_lpm,
	.check_ptype		= lpm_check_ptype,
	.cb_parse_ptype		= lpm_cb_parse_ptype,
	.main_loop              = lpm_main_loop,
	.get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
	.get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
	.free_routes		= lpm_free_routes,
};

/* FIB shares the LPM route-file format and ptype helpers. */
static struct l3fwd_lkp_mode l3fwd_fib_lkp = {
	.read_config_files		= read_config_files_lpm,
	.setup                  = setup_fib,
	.check_ptype		= lpm_check_ptype,
	.cb_parse_ptype		= lpm_cb_parse_ptype,
	.main_loop              = fib_main_loop,
	.get_ipv4_lookup_struct = fib_get_ipv4_l3fwd_lookup_struct,
	.get_ipv6_lookup_struct = fib_get_ipv6_l3fwd_lookup_struct,
	.free_routes		= lpm_free_routes,
};

/* ACL reuses the EM ptype helpers. */
static struct l3fwd_lkp_mode l3fwd_acl_lkp = {
	.read_config_files		= read_config_files_acl,
	.setup                  = setup_acl,
	.check_ptype		= em_check_ptype,
	.cb_parse_ptype		= em_cb_parse_ptype,
	.main_loop              = acl_main_loop,
	.get_ipv4_lookup_struct = acl_get_ipv4_l3fwd_lookup_struct,
	.get_ipv6_lookup_struct = acl_get_ipv6_l3fwd_lookup_struct,
	.free_routes		= acl_free_routes,
};

/*
 * 198.18.0.0/16 are set aside for RFC2544 benchmarking (RFC5735).
 * 198.18.{0-15}.0/24 = Port {0-15}
 */
const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{RTE_IPV4(198, 18, 0, 0), 24, 0},
	{RTE_IPV4(198, 18, 1, 0), 24, 1},
	{RTE_IPV4(198, 18, 2, 0), 24, 2},
	{RTE_IPV4(198, 18, 3, 0), 24, 3},
	{RTE_IPV4(198, 18, 4, 0), 24, 4},
	{RTE_IPV4(198, 18, 5, 0), 24, 5},
	{RTE_IPV4(198, 18, 6, 0), 24, 6},
	{RTE_IPV4(198, 18, 7, 0), 24, 7},
	{RTE_IPV4(198, 18, 8, 0), 24, 8},
	{RTE_IPV4(198, 18, 9, 0), 24, 9},
	{RTE_IPV4(198, 18, 10, 0), 24, 10},
	{RTE_IPV4(198, 18, 11, 0), 24, 11},
	{RTE_IPV4(198, 18, 12, 0), 24, 12},
	{RTE_IPV4(198, 18, 13, 0), 24, 13},
	{RTE_IPV4(198, 18, 14, 0), 24, 14},
	{RTE_IPV4(198, 18, 15, 0), 24, 15},
};

/*
 * 2001:200::/48 is IANA reserved range for IPv6 benchmarking (RFC5180).
232 * 2001:200:0:{0-f}::/64 = Port {0-15} 233 */ 234 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = { 235 {RTE_IPV6(0x2001, 0x0200, 0, 0x0, 0, 0, 0, 0), 64, 0}, 236 {RTE_IPV6(0x2001, 0x0200, 0, 0x1, 0, 0, 0, 0), 64, 1}, 237 {RTE_IPV6(0x2001, 0x0200, 0, 0x2, 0, 0, 0, 0), 64, 2}, 238 {RTE_IPV6(0x2001, 0x0200, 0, 0x3, 0, 0, 0, 0), 64, 3}, 239 {RTE_IPV6(0x2001, 0x0200, 0, 0x4, 0, 0, 0, 0), 64, 4}, 240 {RTE_IPV6(0x2001, 0x0200, 0, 0x5, 0, 0, 0, 0), 64, 5}, 241 {RTE_IPV6(0x2001, 0x0200, 0, 0x6, 0, 0, 0, 0), 64, 6}, 242 {RTE_IPV6(0x2001, 0x0200, 0, 0x7, 0, 0, 0, 0), 64, 7}, 243 {RTE_IPV6(0x2001, 0x0200, 0, 0x8, 0, 0, 0, 0), 64, 8}, 244 {RTE_IPV6(0x2001, 0x0200, 0, 0x9, 0, 0, 0, 0), 64, 9}, 245 {RTE_IPV6(0x2001, 0x0200, 0, 0xa, 0, 0, 0, 0), 64, 10}, 246 {RTE_IPV6(0x2001, 0x0200, 0, 0xb, 0, 0, 0, 0), 64, 11}, 247 {RTE_IPV6(0x2001, 0x0200, 0, 0xc, 0, 0, 0, 0), 64, 12}, 248 {RTE_IPV6(0x2001, 0x0200, 0, 0xd, 0, 0, 0, 0), 64, 13}, 249 {RTE_IPV6(0x2001, 0x0200, 0, 0xe, 0, 0, 0, 0), 64, 14}, 250 {RTE_IPV6(0x2001, 0x0200, 0, 0xf, 0, 0, 0, 0), 64, 15}, 251 }; 252 253 /* 254 * API's called during initialization to setup ACL/EM/LPM rules. 255 */ 256 void 257 l3fwd_set_rule_ipv4_name(const char *optarg) 258 { 259 parm_config.rule_ipv4_name = optarg; 260 } 261 262 void 263 l3fwd_set_rule_ipv6_name(const char *optarg) 264 { 265 parm_config.rule_ipv6_name = optarg; 266 } 267 268 void 269 l3fwd_set_alg(const char *optarg) 270 { 271 parm_config.alg = parse_acl_alg(optarg); 272 } 273 274 /* 275 * Setup lookup methods for forwarding. 276 * Currently exact-match, longest-prefix-match and forwarding information 277 * base are the supported ones. 278 */ 279 static void 280 setup_l3fwd_lookup_tables(void) 281 { 282 /* Setup HASH lookup functions. */ 283 if (lookup_mode == L3FWD_LOOKUP_EM) 284 l3fwd_lkp = l3fwd_em_lkp; 285 /* Setup FIB lookup functions. */ 286 else if (lookup_mode == L3FWD_LOOKUP_FIB) 287 l3fwd_lkp = l3fwd_fib_lkp; 288 /* Setup ACL lookup functions. 
*/ 289 else if (lookup_mode == L3FWD_LOOKUP_ACL) 290 l3fwd_lkp = l3fwd_acl_lkp; 291 /* Setup LPM lookup functions. */ 292 else 293 l3fwd_lkp = l3fwd_lpm_lkp; 294 } 295 296 static int 297 check_lcore_params(void) 298 { 299 uint16_t queue, i; 300 uint32_t lcore; 301 int socketid; 302 303 for (i = 0; i < nb_lcore_params; ++i) { 304 queue = lcore_params[i].queue_id; 305 if (queue >= MAX_RX_QUEUE_PER_PORT) { 306 printf("invalid queue number: %" PRIu16 "\n", queue); 307 return -1; 308 } 309 lcore = lcore_params[i].lcore_id; 310 if (!rte_lcore_is_enabled(lcore)) { 311 printf("error: lcore %u is not enabled in lcore mask\n", lcore); 312 return -1; 313 } 314 if ((socketid = rte_lcore_to_socket_id(lcore) != 0) && 315 (numa_on == 0)) { 316 printf("warning: lcore %u is on socket %d with numa off\n", 317 lcore, socketid); 318 } 319 } 320 return 0; 321 } 322 323 static int 324 check_port_config(void) 325 { 326 uint16_t portid; 327 uint16_t i; 328 329 for (i = 0; i < nb_lcore_params; ++i) { 330 portid = lcore_params[i].port_id; 331 if ((enabled_port_mask & (1 << portid)) == 0) { 332 printf("port %u is not enabled in port mask\n", portid); 333 return -1; 334 } 335 if (!rte_eth_dev_is_valid_port(portid)) { 336 printf("port %u is not present on the board\n", portid); 337 return -1; 338 } 339 } 340 return 0; 341 } 342 343 static uint16_t 344 get_port_n_rx_queues(const uint16_t port) 345 { 346 int queue = -1; 347 uint16_t i; 348 349 for (i = 0; i < nb_lcore_params; ++i) { 350 if (lcore_params[i].port_id == port) { 351 if (lcore_params[i].queue_id == queue+1) 352 queue = lcore_params[i].queue_id; 353 else 354 rte_exit(EXIT_FAILURE, "queue ids of the port %d must be" 355 " in sequence and must start with 0\n", 356 lcore_params[i].port_id); 357 } 358 } 359 return (uint16_t)(++queue); 360 } 361 362 static int 363 init_lcore_rx_queues(void) 364 { 365 uint16_t i, nb_rx_queue; 366 uint32_t lcore; 367 368 for (i = 0; i < nb_lcore_params; ++i) { 369 lcore = lcore_params[i].lcore_id; 370 
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned int)nb_rx_queue + 1, lcore);
			return -1;
		} else {
			/* Append this (port, queue) pair to the lcore's poll list. */
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
				lcore_params[i].port_id;
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
				lcore_params[i].queue_id;
			lcore_conf[lcore].n_rx_queue++;
		}
	}
	return 0;
}

/* display usage */
static void
print_usage(const char *prgname)
{
	char alg[PATH_MAX];

	usage_acl_alg(alg, sizeof(alg));
	fprintf(stderr, "%s [EAL options] --"
		" -p PORTMASK"
		" --rule_ipv4=FILE"
		" --rule_ipv6=FILE"
		" [-P]"
		" [--lookup]"
		" --config (port,queue,lcore)[,(port,queue,lcore)]"
		" [--rx-queue-size NPKTS]"
		" [--tx-queue-size NPKTS]"
		" [--burst NPKTS]"
		" [--mbcache CACHESZ]"
		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
		" [--max-pkt-len PKTLEN]"
		" [--no-numa]"
		" [--ipv6]"
		" [--parse-ptype]"
		" [--per-port-pool]"
		" [--mode]"
#ifdef RTE_LIB_EVENTDEV
		" [--eventq-sched]"
		" [--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]"
#endif
		" [-E]"
		" [-L]\n\n"

		" -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
		" -P : Enable promiscuous mode\n"
		" --lookup: Select the lookup method\n"
		" Default: lpm\n"
		" Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base),\n"
		" acl (Access Control List)\n"
		" --config (port,queue,lcore): Rx queue configuration\n"
		" --rx-queue-size NPKTS: Rx queue size in decimal\n"
		" Default: %d\n"
		" --tx-queue-size NPKTS: Tx queue size in decimal\n"
		" Default: %d\n"
		" --burst NPKTS: Burst size in decimal\n"
		" Default: %d\n"
		" --mbcache CACHESZ: Mbuf cache size in decimal\n"
		" Default: %d\n"
		" --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
		" --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
		" --no-numa: Disable numa awareness\n"
		" --ipv6: Set if running ipv6 packets\n"
		" --parse-ptype: Set to use software to analyze packet type\n"
		" --per-port-pool: Use separate buffer pool per port\n"
		" --mode: Packet transfer mode for I/O, poll or eventdev\n"
		" Default mode = poll\n"
#ifdef RTE_LIB_EVENTDEV
		" --eventq-sched: Event queue synchronization method\n"
		" ordered, atomic or parallel.\n"
		" Default: atomic\n"
		" Valid only if --mode=eventdev\n"
		" --event-eth-rxqs: Number of ethernet RX queues per device.\n"
		" Default: 1\n"
		" Valid only if --mode=eventdev\n"
		" --event-vector: Enable event vectorization.\n"
		" --event-vector-size: Max vector size if event vectorization is enabled.\n"
		" --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n"
#endif
		" -E : Enable exact match, legacy flag please use --lookup=em instead\n"
		" -L : Enable longest prefix match, legacy flag please use --lookup=lpm instead\n"
		" --rule_ipv4=FILE: Specify the ipv4 rules entries file.\n"
		" Each rule occupies one line.\n"
		" 2 kinds of rules are supported.\n"
		" One is ACL entry at while line leads with character '%c',\n"
		" another is route entry at while line leads with character '%c'.\n"
		" --rule_ipv6=FILE: Specify the ipv6 rules entries file.\n"
		" --alg: ACL classify method to use, one of: %s.\n\n",
		prgname, RX_DESC_DEFAULT, TX_DESC_DEFAULT, DEFAULT_PKT_BURST, MEMPOOL_CACHE_SIZE,
		ACL_LEAD_CHAR, ROUTE_LEAD_CHAR, alg);
}

/* Parse --max-pkt-len: decimal, > 0.  Returns the length or -1 on error. */
static int
parse_max_pkt_len(const char *pktlen)
{
	char *end = NULL;
	unsigned long len;

	/* parse decimal string */
	len = strtoul(pktlen, &end, 10);
	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (len == 0)
		return -1;

	return len;
}

/* Parse -p: hexadecimal port bitmask.  Returns 0 on any parse error,
 * which the caller treats as an invalid mask.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/*
 * Parse --config "(port,queue,lcore)[,(port,queue,lcore)...]" into
 * lcore_params_array and switch lcore_params to point at it.
 * Each field is range-checked against max_fld.  Returns 0 / -1.
 */
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;
	/* Upper bound (exclusive check below is '>') for each tuple field. */
	uint16_t max_fld[_NUM_FLD] = {
		RTE_MAX_ETHPORTS,
		RTE_MAX_QUEUES_PER_PORT,
		RTE_MAX_LCORE
	};

	nb_lcore_params = 0;

	while ((p = strchr(p0,'(')) != NULL) {
		++p;
		if((p0 = strchr(p,')')) == NULL)
			return -1;

		size = p0 - p;
		if(size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++){
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id =
			(uint16_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint16_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint32_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}

/*
 * Parse --eth-dest=PORT,MM:MM:MM:MM:MM:MM and store the destination MAC
 * in dest_eth_addr[] and the fast-path header word val_eth[].
 * Exits the application on any parse error.
 */
static void
parse_eth_dest(const char *optarg)
{
	uint16_t portid;
	char *port_end;
	uint8_t c, *dest, peer_addr[6];

	errno = 0;
	portid = strtoul(optarg, &port_end, 10);
	if (errno != 0 || port_end == optarg || *port_end++ != ',')
		rte_exit(EXIT_FAILURE,
		"Invalid eth-dest: %s", optarg);
	if (portid >= RTE_MAX_ETHPORTS)
		rte_exit(EXIT_FAILURE,
		"eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
		portid, RTE_MAX_ETHPORTS);

	if (cmdline_parse_etheraddr(NULL, port_end,
		&peer_addr, sizeof(peer_addr)) < 0)
		rte_exit(EXIT_FAILURE,
		"Invalid ethernet address: %s\n",
		port_end);
	dest = (uint8_t *)&dest_eth_addr[portid];
	for (c = 0; c < 6; c++)
		dest[c] = peer_addr[c];
	/* Mirror into the per-port header template used on the fast path. */
	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
}

/* Parse --mode: "poll" or "eventdev"; no-op without eventdev support. */
static void
parse_mode(const char *optarg __rte_unused)
{
#ifdef RTE_LIB_EVENTDEV
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	if (!strcmp(optarg, "poll"))
		evt_rsrc->enabled = false;
	else if (!strcmp(optarg, "eventdev"))
		evt_rsrc->enabled = true;
#endif
}

/*
 * Parse --rx-queue-size / --tx-queue-size (rx selects the error message)
 * into *queue_size.  Exits on invalid or out-of-range values.
 */
static void
parse_queue_size(const char *queue_size_arg, uint16_t *queue_size, int rx)
{
	char *end = NULL;
	unsigned long value;

	/* parse decimal string */
	value = strtoul(queue_size_arg, &end, 10);
	if ((queue_size_arg[0] == '\0') || (end == NULL) ||
		(*end != '\0') || (value == 0)) {
		if (rx == 1)
			rte_exit(EXIT_FAILURE, "Invalid rx-queue-size\n");
		else
			rte_exit(EXIT_FAILURE, "Invalid tx-queue-size\n");

		return;
	}

	if (value > UINT16_MAX) {
		if (rx == 1)
			rte_exit(EXIT_FAILURE, "rx-queue-size %lu > %d\n",
				value, UINT16_MAX);
		else
			rte_exit(EXIT_FAILURE, "tx-queue-size %lu > %d\n",
				value, UINT16_MAX);

		return;
	}

	*queue_size = value;
}

#ifdef RTE_LIB_EVENTDEV
/* Parse --eventq-sched: ordered / atomic / parallel; unknown keeps prior. */
static void
parse_eventq_sched(const char *optarg)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	if (!strcmp(optarg, "ordered"))
		evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
	if (!strcmp(optarg, "atomic"))
		evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
	if (!strcmp(optarg, "parallel"))
		evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
}

/* Parse --event-eth-rxqs; silently ignores invalid or zero values. */
static void
parse_event_eth_rx_queues(const char *eth_rx_queues)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	char *end = NULL;
	uint16_t num_eth_rx_queues;

	/* parse decimal string */
	num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
	if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
		return;

	if (num_eth_rx_queues == 0)
		return;

	evt_rsrc->eth_rx_queues = num_eth_rx_queues;
}
#endif

/* Parse --lookup: em / lpm / fib / acl.  Returns 0, or -1 after logging. */
static int
parse_lookup(const char *optarg)
{
	if (!strcmp(optarg, "em"))
		lookup_mode = L3FWD_LOOKUP_EM;
	else if (!strcmp(optarg, "lpm"))
		lookup_mode = L3FWD_LOOKUP_LPM;
	else if (!strcmp(optarg, "fib"))
		lookup_mode = L3FWD_LOOKUP_FIB;
	else if (!strcmp(optarg, "acl"))
		lookup_mode = L3FWD_LOOKUP_ACL;
	else {
		fprintf(stderr, "Invalid lookup option! Accepted options: acl, em, lpm, fib\n");
		return -1;
	}
	return 0;
}

/* Parse --mbcache; accepts 0..RTE_MEMPOOL_CACHE_MAX_SIZE, exits above that. */
static void
parse_mbcache_size(const char *optarg)
{
	unsigned long mb_cache_size;
	char *end = NULL;

	mb_cache_size = strtoul(optarg, &end, 10);
	if ((optarg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return;
	if (mb_cache_size <= RTE_MEMPOOL_CACHE_MAX_SIZE)
		mb_mempool_cache_size = (uint32_t)mb_cache_size;
	else
		rte_exit(EXIT_FAILURE, "mbcache must be >= 0 and <= %d\n",
			 RTE_MEMPOOL_CACHE_MAX_SIZE);
}

/*
 * Parse --burst.  A value in [1, MAX_PKT_BURST] is used directly; 0 asks
 * port 0's PMD for its recommended Rx burst size (falling back to the
 * default when the PMD has no recommendation or it is out of range).
 */
static void
parse_pkt_burst(const char *optarg)
{
	struct rte_eth_dev_info dev_info;
	unsigned long pkt_burst;
	uint16_t burst_size;
	char *end = NULL;
	int ret;

	/* parse decimal string */
	pkt_burst = strtoul(optarg, &end, 10);
	if ((optarg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return;

	if (pkt_burst > MAX_PKT_BURST) {
		RTE_LOG(INFO, L3FWD, "User provided burst must be <= %d. Using default value %d\n",
			MAX_PKT_BURST, nb_pkt_per_burst);
		return;
	} else if (pkt_burst > 0) {
		nb_pkt_per_burst = (uint32_t)pkt_burst;
		return;
	}

	/* If user gives a value of zero, query the PMD for its recommended Rx burst size. */
	ret = rte_eth_dev_info_get(0, &dev_info);
	if (ret != 0)
		return;
	burst_size = dev_info.default_rxportconf.burst_size;
	if (burst_size == 0) {
		RTE_LOG(INFO, L3FWD, "PMD does not recommend a burst size. Using default value %d. "
			"User provided value must be in [1, %d]\n",
			nb_pkt_per_burst, MAX_PKT_BURST);
		return;
	} else if (burst_size > MAX_PKT_BURST) {
		RTE_LOG(INFO, L3FWD, "PMD recommended burst size %d exceeds maximum value %d. "
			"Using default value %d\n",
			burst_size, MAX_PKT_BURST, nb_pkt_per_burst);
		return;
	}
	nb_pkt_per_burst = burst_size;
	RTE_LOG(INFO, L3FWD, "Using PMD-provided burst value %d\n", burst_size);
}

#define MAX_JUMBO_PKT_LEN 9600

static const char short_options[] =
	"p:"  /* portmask */
	"P"   /* promiscuous */
	"L"   /* legacy enable long prefix match */
	"E"   /* legacy enable exact match */
	;

/* Long option names; each maps to a CMD_LINE_OPT_*_NUM value below. */
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_RX_QUEUE_SIZE "rx-queue-size"
#define CMD_LINE_OPT_TX_QUEUE_SIZE "tx-queue-size"
#define CMD_LINE_OPT_ETH_DEST "eth-dest"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
#define CMD_LINE_OPT_IPV6 "ipv6"
#define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
#define CMD_LINE_OPT_DISABLE_RSS "disable-rss"
#define CMD_LINE_OPT_RELAX_RX_OFFLOAD "relax-rx-offload"
#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
#define CMD_LINE_OPT_MODE "mode"
#define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
#define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
#define CMD_LINE_OPT_LOOKUP "lookup"
#define CMD_LINE_OPT_ENABLE_VECTOR "event-vector"
#define CMD_LINE_OPT_VECTOR_SIZE "event-vector-size"
#define CMD_LINE_OPT_VECTOR_TMO_NS "event-vector-tmo"
#define CMD_LINE_OPT_RULE_IPV4 "rule_ipv4"
#define CMD_LINE_OPT_RULE_IPV6 "rule_ipv6"
#define CMD_LINE_OPT_ALG "alg"
#define CMD_LINE_OPT_PKT_BURST "burst"
#define CMD_LINE_OPT_MB_CACHE_SIZE "mbcache"

enum {
	/* long options mapped to a short option */

	/* first long only option value must be >= 256, so that we won't
	 * conflict with short options */
	CMD_LINE_OPT_MIN_NUM = 256,
	CMD_LINE_OPT_CONFIG_NUM,
	CMD_LINE_OPT_RX_QUEUE_SIZE_NUM,
	CMD_LINE_OPT_TX_QUEUE_SIZE_NUM,
	CMD_LINE_OPT_ETH_DEST_NUM,
	CMD_LINE_OPT_NO_NUMA_NUM,
	CMD_LINE_OPT_IPV6_NUM,
	CMD_LINE_OPT_MAX_PKT_LEN_NUM,
	CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
	CMD_LINE_OPT_PARSE_PTYPE_NUM,
	CMD_LINE_OPT_DISABLE_RSS_NUM,
	CMD_LINE_OPT_RELAX_RX_OFFLOAD_NUM,
	CMD_LINE_OPT_RULE_IPV4_NUM,
	CMD_LINE_OPT_RULE_IPV6_NUM,
	CMD_LINE_OPT_ALG_NUM,
	CMD_LINE_OPT_PARSE_PER_PORT_POOL,
	CMD_LINE_OPT_MODE_NUM,
	CMD_LINE_OPT_EVENTQ_SYNC_NUM,
	CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
	CMD_LINE_OPT_LOOKUP_NUM,
	CMD_LINE_OPT_ENABLE_VECTOR_NUM,
	CMD_LINE_OPT_VECTOR_SIZE_NUM,
	CMD_LINE_OPT_VECTOR_TMO_NS_NUM,
	CMD_LINE_OPT_PKT_BURST_NUM,
	CMD_LINE_OPT_MB_CACHE_SIZE_NUM,
};

/* getopt_long() option table; third field 0 means "return the value". */
static const struct option lgopts[] = {
	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
	{CMD_LINE_OPT_RX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_RX_QUEUE_SIZE_NUM},
	{CMD_LINE_OPT_TX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_TX_QUEUE_SIZE_NUM},
	{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
	{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
	{CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
	{CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
	{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
	{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
	{CMD_LINE_OPT_RELAX_RX_OFFLOAD, 0, 0, CMD_LINE_OPT_RELAX_RX_OFFLOAD_NUM},
	{CMD_LINE_OPT_DISABLE_RSS, 0, 0, CMD_LINE_OPT_DISABLE_RSS_NUM},
	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
	{CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
	{CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
	{CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
					CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
	{CMD_LINE_OPT_LOOKUP, 1, 0, CMD_LINE_OPT_LOOKUP_NUM},
	{CMD_LINE_OPT_ENABLE_VECTOR, 0, 0, CMD_LINE_OPT_ENABLE_VECTOR_NUM},
	{CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
	{CMD_LINE_OPT_VECTOR_TMO_NS, 1, 0, CMD_LINE_OPT_VECTOR_TMO_NS_NUM},
	{CMD_LINE_OPT_RULE_IPV4, 1, 0, CMD_LINE_OPT_RULE_IPV4_NUM},
	{CMD_LINE_OPT_RULE_IPV6, 1, 0, CMD_LINE_OPT_RULE_IPV6_NUM},
	{CMD_LINE_OPT_ALG, 1, 0, CMD_LINE_OPT_ALG_NUM},
	{CMD_LINE_OPT_PKT_BURST, 1, 0, CMD_LINE_OPT_PKT_BURST_NUM},
	{CMD_LINE_OPT_MB_CACHE_SIZE, 1, 0, CMD_LINE_OPT_MB_CACHE_SIZE_NUM},
	{NULL, 0, 0, 0}
};

/*
 * This expression is used to calculate the number of mbufs needed
 * depending on user input, taking into account memory for rx and
 * tx hardware rings, cache per lcore and mtable per port per lcore.
838 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum 839 * value of 8192 840 */ 841 #define NB_MBUF(nports) RTE_MAX( \ 842 (nports*nb_rx_queue*nb_rxd + \ 843 nports*nb_lcores*MAX_PKT_BURST + \ 844 nports*n_tx_queue*nb_txd + \ 845 nb_lcores*MEMPOOL_CACHE_SIZE), \ 846 (unsigned)8192) 847 848 /* Parse the argument given in the command line of the application */ 849 static int 850 parse_args(int argc, char **argv) 851 { 852 int opt, ret; 853 char **argvopt; 854 int option_index; 855 char *prgname = argv[0]; 856 uint8_t lcore_params = 0; 857 #ifdef RTE_LIB_EVENTDEV 858 uint8_t eventq_sched = 0; 859 uint8_t eth_rx_q = 0; 860 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); 861 #endif 862 863 argvopt = argv; 864 865 /* Error or normal output strings. */ 866 while ((opt = getopt_long(argc, argvopt, short_options, 867 lgopts, &option_index)) != EOF) { 868 869 switch (opt) { 870 /* portmask */ 871 case 'p': 872 enabled_port_mask = parse_portmask(optarg); 873 if (enabled_port_mask == 0) { 874 fprintf(stderr, "Invalid portmask\n"); 875 print_usage(prgname); 876 return -1; 877 } 878 break; 879 880 case 'P': 881 promiscuous_on = 1; 882 break; 883 884 case 'E': 885 if (lookup_mode != L3FWD_LOOKUP_DEFAULT) { 886 fprintf(stderr, "Only one lookup mode is allowed at a time!\n"); 887 return -1; 888 } 889 lookup_mode = L3FWD_LOOKUP_EM; 890 break; 891 892 case 'L': 893 if (lookup_mode != L3FWD_LOOKUP_DEFAULT) { 894 fprintf(stderr, "Only one lookup mode is allowed at a time!\n"); 895 return -1; 896 } 897 lookup_mode = L3FWD_LOOKUP_LPM; 898 break; 899 900 /* long options */ 901 case CMD_LINE_OPT_CONFIG_NUM: 902 ret = parse_config(optarg); 903 if (ret) { 904 fprintf(stderr, "Invalid config\n"); 905 print_usage(prgname); 906 return -1; 907 } 908 lcore_params = 1; 909 break; 910 911 case CMD_LINE_OPT_RX_QUEUE_SIZE_NUM: 912 parse_queue_size(optarg, &nb_rxd, 1); 913 break; 914 915 case CMD_LINE_OPT_TX_QUEUE_SIZE_NUM: 916 parse_queue_size(optarg, &nb_txd, 
0); 917 break; 918 919 case CMD_LINE_OPT_PKT_BURST_NUM: 920 parse_pkt_burst(optarg); 921 break; 922 923 case CMD_LINE_OPT_MB_CACHE_SIZE_NUM: 924 parse_mbcache_size(optarg); 925 break; 926 927 case CMD_LINE_OPT_ETH_DEST_NUM: 928 parse_eth_dest(optarg); 929 break; 930 931 case CMD_LINE_OPT_NO_NUMA_NUM: 932 numa_on = 0; 933 break; 934 935 case CMD_LINE_OPT_IPV6_NUM: 936 ipv6 = 1; 937 break; 938 939 case CMD_LINE_OPT_MAX_PKT_LEN_NUM: 940 max_pkt_len = parse_max_pkt_len(optarg); 941 break; 942 943 case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM: 944 fprintf(stderr, "Hash entry number will be ignored\n"); 945 break; 946 947 case CMD_LINE_OPT_PARSE_PTYPE_NUM: 948 printf("soft parse-ptype is enabled\n"); 949 parse_ptype = 1; 950 break; 951 952 case CMD_LINE_OPT_RELAX_RX_OFFLOAD_NUM: 953 printf("Rx offload is relaxed\n"); 954 relax_rx_offload = 1; 955 break; 956 957 case CMD_LINE_OPT_DISABLE_RSS_NUM: 958 printf("RSS is disabled\n"); 959 disable_rss = 1; 960 break; 961 962 case CMD_LINE_OPT_PARSE_PER_PORT_POOL: 963 printf("per port buffer pool is enabled\n"); 964 per_port_pool = 1; 965 break; 966 967 case CMD_LINE_OPT_MODE_NUM: 968 parse_mode(optarg); 969 break; 970 971 #ifdef RTE_LIB_EVENTDEV 972 case CMD_LINE_OPT_EVENTQ_SYNC_NUM: 973 parse_eventq_sched(optarg); 974 eventq_sched = 1; 975 break; 976 977 case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM: 978 parse_event_eth_rx_queues(optarg); 979 eth_rx_q = 1; 980 break; 981 982 case CMD_LINE_OPT_ENABLE_VECTOR_NUM: 983 printf("event vectorization is enabled\n"); 984 evt_rsrc->vector_enabled = 1; 985 break; 986 987 case CMD_LINE_OPT_VECTOR_SIZE_NUM: 988 evt_rsrc->vector_size = strtol(optarg, NULL, 10); 989 break; 990 991 case CMD_LINE_OPT_VECTOR_TMO_NS_NUM: 992 evt_rsrc->vector_tmo_ns = strtoull(optarg, NULL, 10); 993 break; 994 #endif 995 996 case CMD_LINE_OPT_LOOKUP_NUM: 997 if (lookup_mode != L3FWD_LOOKUP_DEFAULT) { 998 fprintf(stderr, "Only one lookup mode is allowed at a time!\n"); 999 return -1; 1000 } 1001 ret = parse_lookup(optarg); 1002 
/* 1003 * If parse_lookup was passed an invalid lookup type 1004 * then return -1. Error log included within 1005 * parse_lookup for simplicity. 1006 */ 1007 if (ret) 1008 return -1; 1009 break; 1010 1011 case CMD_LINE_OPT_RULE_IPV4_NUM: 1012 l3fwd_set_rule_ipv4_name(optarg); 1013 break; 1014 case CMD_LINE_OPT_RULE_IPV6_NUM: 1015 l3fwd_set_rule_ipv6_name(optarg); 1016 break; 1017 case CMD_LINE_OPT_ALG_NUM: 1018 l3fwd_set_alg(optarg); 1019 break; 1020 default: 1021 print_usage(prgname); 1022 return -1; 1023 } 1024 } 1025 1026 RTE_SET_USED(lcore_params); /* needed if no eventdev block */ 1027 #ifdef RTE_LIB_EVENTDEV 1028 if (evt_rsrc->enabled && lcore_params) { 1029 fprintf(stderr, "lcore config is not valid when event mode is selected\n"); 1030 return -1; 1031 } 1032 1033 if (!evt_rsrc->enabled && eth_rx_q) { 1034 fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n"); 1035 return -1; 1036 } 1037 1038 if (!evt_rsrc->enabled && eventq_sched) { 1039 fprintf(stderr, "eventq_sched is valid only when event mode is selected\n"); 1040 return -1; 1041 } 1042 1043 if (evt_rsrc->vector_enabled && !evt_rsrc->vector_size) { 1044 evt_rsrc->vector_size = VECTOR_SIZE_DEFAULT; 1045 fprintf(stderr, "vector size set to default (%" PRIu16 ")\n", 1046 evt_rsrc->vector_size); 1047 } 1048 1049 if (evt_rsrc->vector_enabled && !evt_rsrc->vector_tmo_ns) { 1050 evt_rsrc->vector_tmo_ns = VECTOR_TMO_NS_DEFAULT; 1051 fprintf(stderr, 1052 "vector timeout set to default (%" PRIu64 " ns)\n", 1053 evt_rsrc->vector_tmo_ns); 1054 } 1055 #endif 1056 1057 /* 1058 * Nothing is selected, pick longest-prefix match 1059 * as default match. 
1060 */ 1061 if (lookup_mode == L3FWD_LOOKUP_DEFAULT) { 1062 fprintf(stderr, "Neither ACL, LPM, EM, or FIB selected, defaulting to LPM\n"); 1063 lookup_mode = L3FWD_LOOKUP_LPM; 1064 } 1065 1066 /* For ACL, update port config rss hash filter */ 1067 if (lookup_mode == L3FWD_LOOKUP_ACL) { 1068 port_conf.rx_adv_conf.rss_conf.rss_hf |= 1069 RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP; 1070 } 1071 1072 if (optind >= 0) 1073 argv[optind-1] = prgname; 1074 1075 ret = optind-1; 1076 optind = 1; /* reset getopt lib */ 1077 return ret; 1078 } 1079 1080 static void 1081 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr) 1082 { 1083 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 1084 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 1085 printf("%s%s", name, buf); 1086 } 1087 1088 int 1089 init_mem(uint16_t portid, unsigned int nb_mbuf) 1090 { 1091 #ifdef RTE_LIB_EVENTDEV 1092 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); 1093 #endif 1094 struct lcore_conf *qconf; 1095 int socketid; 1096 unsigned lcore_id; 1097 char s[64]; 1098 1099 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { 1100 if (rte_lcore_is_enabled(lcore_id) == 0) 1101 continue; 1102 1103 if (numa_on) 1104 socketid = rte_lcore_to_socket_id(lcore_id); 1105 else 1106 socketid = 0; 1107 1108 if (socketid >= NB_SOCKETS) { 1109 rte_exit(EXIT_FAILURE, 1110 "Socket %d of lcore %u is out of range %d\n", 1111 socketid, lcore_id, NB_SOCKETS); 1112 } 1113 1114 if (pktmbuf_pool[portid][socketid] == NULL) { 1115 snprintf(s, sizeof(s), "mbuf_pool_%d:%d", 1116 portid, socketid); 1117 pktmbuf_pool[portid][socketid] = 1118 rte_pktmbuf_pool_create(s, nb_mbuf, 1119 mb_mempool_cache_size, 0, 1120 RTE_MBUF_DEFAULT_BUF_SIZE, socketid); 1121 if (pktmbuf_pool[portid][socketid] == NULL) 1122 rte_exit(EXIT_FAILURE, 1123 "Cannot init mbuf pool on socket %d\n", 1124 socketid); 1125 else 1126 printf("Allocated mbuf pool on socket %d\n", 1127 socketid); 1128 1129 /* Setup ACL, LPM, 
EM (e.g. Hash) or FIB. But, only once per
			 * available socket.
			 */
			if (!lkp_per_socket[socketid]) {
				l3fwd_lkp.setup(socketid);
				lkp_per_socket[socketid] = 1;
			}
		}

#ifdef RTE_LIB_EVENTDEV
		/* Event vector mode needs a per-port event vector pool. */
		if (evt_rsrc->vector_enabled && vector_pool[portid] == NULL) {
			unsigned int nb_vec;

			/* Enough vectors to hold all mbufs, rounded up. */
			nb_vec = (nb_mbuf + evt_rsrc->vector_size - 1) /
				evt_rsrc->vector_size;
			nb_vec = RTE_MAX(512U, nb_vec);
			nb_vec += rte_lcore_count() * 32;
			snprintf(s, sizeof(s), "vector_pool_%d", portid);
			vector_pool[portid] = rte_event_vector_pool_create(
				s, nb_vec, 32, evt_rsrc->vector_size, socketid);
			if (vector_pool[portid] == NULL)
				rte_exit(EXIT_FAILURE,
					 "Failed to create vector pool for port %d\n",
					 portid);
			else
				printf("Allocated vector pool for port %d\n",
				       portid);
		}
#endif

		qconf = &lcore_conf[lcore_id];
		qconf->ipv4_lookup_struct =
			l3fwd_lkp.get_ipv4_lookup_struct(socketid);
		qconf->ipv6_lookup_struct =
			l3fwd_lkp.get_ipv6_lookup_struct(socketid);
	}
	return 0;
}

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		if (force_quit)
			return;
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if (force_quit)
				return;
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status_text,
					sizeof(link_status_text), &link);
				printf("Port %d %s\n", portid,
				       link_status_text);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}

/* SIGINT/SIGTERM handler: request a graceful shutdown via force_quit. */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
				signum);
		force_quit = true;
	}
}

/*
 * Ensure packet type information is available for the given Rx queue:
 * with --parse-ptype, install the lookup method's software ptype-parsing
 * Rx callback; otherwise verify the device itself can parse the needed
 * packet types. Returns 1 on success, 0 on failure (callers treat 0 as
 * fatal).
 */
static int
prepare_ptype_parser(uint16_t portid, uint16_t queueid)
{
	if (parse_ptype) {
		printf("Port %d: softly parse packet type info\n", portid);
		/* rte_eth_add_rx_callback() returns non-NULL on success. */
		if (rte_eth_add_rx_callback(portid, queueid,
					    l3fwd_lkp.cb_parse_ptype,
					    NULL))
			return 1;

		printf("Failed to add rx callback: port=%d\n", portid);
		return 0;
	}

	if (l3fwd_lkp.check_ptype(portid))
		return 1;

	printf("port %d cannot parse packet type, please add --%s\n",
	       portid, CMD_LINE_OPT_PARSE_PTYPE);
	return 0;
}

/*
 * Derive the L2 overhead (header + trailer) of the device: the difference
 * between max Rx packet length and max MTU when the driver reports a
 * usable max_mtu, otherwise assume Ethernet header plus CRC.
 */
static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/*
 * Translate the user-supplied --max-pkt-len into an MTU for the port
 * configuration, enabling multi-segment Tx for jumbo sizes.
 * Returns 0 on success (or when no length was requested), -1 when the
 * requested length is outside [RTE_ETHER_MIN_LEN, MAX_JUMBO_PKT_LEN].
 */
int
config_port_max_pkt_len(struct rte_eth_conf *conf,
		struct rte_eth_dev_info *dev_info)
{
	uint32_t overhead_len;

	if (max_pkt_len == 0)
		return 0;

	if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
		return -1;

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	conf->rxmode.mtu = max_pkt_len - overhead_len;

	/* Jumbo MTU may produce mbuf chains on Tx. */
	if (conf->rxmode.mtu > RTE_ETHER_MTU)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	return 0;
}

/*
 * Poll-mode resource setup: validate the lcore/port configuration, then for
 * every enabled port configure the device, allocate mbuf pools and create
 * one Tx queue per (lcore, port) pair and the per-lcore Rx queues.
 * Exits the application on any failure.
 */
static void
l3fwd_poll_resource_setup(void)
{
	uint8_t socketid;
	uint16_t nb_rx_queue, queue;
	struct rte_eth_dev_info dev_info;
	uint32_t n_tx_queue, nb_lcores;
	struct rte_eth_txconf *txconf;
	struct lcore_conf *qconf;
	uint16_t queueid, portid;
	unsigned int nb_ports;
	unsigned int lcore_id;
	int ret;

	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_ports = rte_eth_dev_count_avail();

	if (check_port_config() < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");

	nb_lcores = rte_lcore_count();

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_conf local_port_conf = port_conf;

		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid );
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		/* One Tx queue per lcore, capped by the per-port maximum. */
		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)n_tx_queue );

		ret = rte_eth_dev_info_get(portid, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error during getting device (port %u) info: %s\n",
				portid, strerror(-ret));

		ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Invalid max packet length: %u (port %u)\n",
				max_pkt_len, portid);

		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

		/* Keep only RSS hash types the hardware supports. */
		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;

		if (disable_rss == 1 || dev_info.max_rx_queues == 1)
			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;

		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on hardware support,"
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				portid,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		/* Relax Rx offload requirement */
		if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
				local_port_conf.rxmode.offloads) {
			printf("Port %u requested Rx offloads 0x%"PRIx64
				" does not match Rx offloads capabilities 0x%"PRIx64"\n",
				portid, local_port_conf.rxmode.offloads,
				dev_info.rx_offload_capa);
			if (relax_rx_offload) {
				local_port_conf.rxmode.offloads &= dev_info.rx_offload_capa;
				printf("Warning: modified Rx offload to 0x%"PRIx64
					" based on device capability\n",
					local_port_conf.rxmode.offloads);
			}
}

		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)n_tx_queue, &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		/* Let the driver round descriptor counts to its limits. */
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot adjust number of descriptors: err=%d, "
				 "port=%d\n", ret, portid);

		ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot get MAC address: err=%d, port=%d\n",
				 ret, portid);

		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");
		print_ethaddr("Destination:",
			(const struct rte_ether_addr *)&dest_eth_addr[portid]);
		printf(", ");

		/*
		 * prepare src MACs for each port.
		 */
		rte_ether_addr_copy(&ports_eth_addr[portid],
			(struct rte_ether_addr *)(val_eth + portid) + 1);

		/* init memory */
		if (!per_port_pool) {
			/* portid = 0; this is *not* signifying the first port,
			 * rather, it signifies that portid is ignored.
			 */
			ret = init_mem(0, NB_MBUF(nb_ports));
		} else {
			ret = init_mem(portid, NB_MBUF(1));
		}
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "init_mem failed\n");

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			if (numa_on)
				socketid =
				(uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
			fflush(stdout);

			txconf = &dev_info.default_txconf;
			txconf->offloads = local_port_conf.txmode.offloads;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						     socketid, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			/* Record this lcore's private Tx queue on the port. */
			qconf = &lcore_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;

			qconf->tx_port_id[qconf->n_tx_port] = portid;
			qconf->n_tx_port++;
		}
		printf("\n");
	}

	/* Rx queues were distributed to lcores by init_lcore_rx_queues(). */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		qconf = &lcore_conf[lcore_id];
		printf("\nInitializing rx queues on lcore %u ... 
", lcore_id ); 1477 fflush(stdout); 1478 /* init RX queues */ 1479 for(queue = 0; queue < qconf->n_rx_queue; ++queue) { 1480 struct rte_eth_conf local_conf; 1481 struct rte_eth_rxconf rxq_conf; 1482 1483 portid = qconf->rx_queue_list[queue].port_id; 1484 queueid = qconf->rx_queue_list[queue].queue_id; 1485 1486 if (numa_on) 1487 socketid = 1488 (uint8_t)rte_lcore_to_socket_id(lcore_id); 1489 else 1490 socketid = 0; 1491 1492 printf("rxq=%d,%d,%d ", portid, queueid, socketid); 1493 fflush(stdout); 1494 1495 ret = rte_eth_dev_info_get(portid, &dev_info); 1496 if (ret != 0) 1497 rte_exit(EXIT_FAILURE, 1498 "Error during getting device (port %u) info: %s\n", 1499 portid, strerror(-ret)); 1500 1501 ret = rte_eth_dev_conf_get(portid, &local_conf); 1502 if (ret != 0) 1503 rte_exit(EXIT_FAILURE, 1504 "Error during getting device (port %u) configuration: %s\n", 1505 portid, strerror(-ret)); 1506 1507 rxq_conf = dev_info.default_rxconf; 1508 rxq_conf.offloads = local_conf.rxmode.offloads; 1509 if (!per_port_pool) 1510 ret = rte_eth_rx_queue_setup(portid, queueid, 1511 nb_rxd, socketid, 1512 &rxq_conf, 1513 pktmbuf_pool[0][socketid]); 1514 else 1515 ret = rte_eth_rx_queue_setup(portid, queueid, 1516 nb_rxd, socketid, 1517 &rxq_conf, 1518 pktmbuf_pool[portid][socketid]); 1519 if (ret < 0) 1520 rte_exit(EXIT_FAILURE, 1521 "rte_eth_rx_queue_setup: err=%d, port=%d\n", 1522 ret, portid); 1523 } 1524 } 1525 } 1526 1527 static inline int 1528 l3fwd_service_enable(uint32_t service_id) 1529 { 1530 uint8_t min_service_count = UINT8_MAX; 1531 uint32_t slcore_array[RTE_MAX_LCORE]; 1532 unsigned int slcore = 0; 1533 uint8_t service_count; 1534 int32_t slcore_count; 1535 1536 if (!rte_service_lcore_count()) 1537 return -ENOENT; 1538 1539 slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE); 1540 if (slcore_count < 0) 1541 return -ENOENT; 1542 /* Get the core which has least number of services running. 
*/ 1543 while (slcore_count--) { 1544 /* Reset default mapping */ 1545 if (rte_service_map_lcore_set(service_id, 1546 slcore_array[slcore_count], 0) != 0) 1547 return -ENOENT; 1548 service_count = rte_service_lcore_count_services( 1549 slcore_array[slcore_count]); 1550 if (service_count < min_service_count) { 1551 slcore = slcore_array[slcore_count]; 1552 min_service_count = service_count; 1553 } 1554 } 1555 if (rte_service_map_lcore_set(service_id, slcore, 1)) 1556 return -ENOENT; 1557 rte_service_lcore_start(slcore); 1558 1559 return 0; 1560 } 1561 1562 #ifdef RTE_LIB_EVENTDEV 1563 static void 1564 l3fwd_event_service_setup(void) 1565 { 1566 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); 1567 struct rte_event_dev_info evdev_info; 1568 uint32_t service_id, caps; 1569 int ret, i; 1570 1571 rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info); 1572 if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) { 1573 ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id, 1574 &service_id); 1575 if (ret != -ESRCH && ret != 0) 1576 rte_exit(EXIT_FAILURE, 1577 "Error in starting eventdev service\n"); 1578 l3fwd_service_enable(service_id); 1579 } 1580 1581 for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) { 1582 ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id, 1583 evt_rsrc->rx_adptr.rx_adptr[i], &caps); 1584 if (ret < 0) 1585 rte_exit(EXIT_FAILURE, 1586 "Failed to get Rx adapter[%d] caps\n", 1587 evt_rsrc->rx_adptr.rx_adptr[i]); 1588 ret = rte_event_eth_rx_adapter_service_id_get( 1589 evt_rsrc->event_d_id, 1590 &service_id); 1591 if (ret != -ESRCH && ret != 0) 1592 rte_exit(EXIT_FAILURE, 1593 "Error in starting Rx adapter[%d] service\n", 1594 evt_rsrc->rx_adptr.rx_adptr[i]); 1595 l3fwd_service_enable(service_id); 1596 } 1597 1598 for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) { 1599 ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id, 1600 evt_rsrc->tx_adptr.tx_adptr[i], &caps); 1601 if (ret < 0) 1602 
rte_exit(EXIT_FAILURE,
				 "Failed to get Rx adapter[%d] caps\n",
				 evt_rsrc->tx_adptr.tx_adptr[i]);
		/* NOTE(review): the error strings in this loop say "Rx
		 * adapter" although they report Tx adapter failures.
		 */
		ret = rte_event_eth_tx_adapter_service_id_get(
				evt_rsrc->event_d_id,
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_exit(EXIT_FAILURE,
				 "Error in starting Rx adapter[%d] service\n",
				 evt_rsrc->tx_adptr.tx_adptr[i]);
		l3fwd_service_enable(service_id);
	}
}
#endif

int
main(int argc, char **argv)
{
#ifdef RTE_LIB_EVENTDEV
	struct l3fwd_event_resources *evt_rsrc;
	int i;
#endif
	struct lcore_conf *qconf;
	uint16_t queueid, portid;
	unsigned int lcore_id;
	uint16_t queue;
	int ret;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	force_quit = false;
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		dest_eth_addr[portid] =
			RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
	}

#ifdef RTE_LIB_EVENTDEV
	evt_rsrc = l3fwd_get_eventdev_rsrc();
#endif
	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");

	/* Setup function pointers for lookup method. */
	setup_l3fwd_lookup_tables();

	/* Add the config file rules */
	l3fwd_lkp.read_config_files();

#ifdef RTE_LIB_EVENTDEV
	evt_rsrc->per_port_pool = per_port_pool;
	evt_rsrc->pkt_pool = pktmbuf_pool;
	evt_rsrc->vec_pool = vector_pool;
	evt_rsrc->port_mask = enabled_port_mask;
	/* Configure eventdev parameters if user has requested */
	if (evt_rsrc->enabled) {
		l3fwd_event_resource_setup(&port_conf);
		/* Pick the event-driven loop matching the lookup mode. */
		if (lookup_mode == L3FWD_LOOKUP_EM)
			l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
		else if (lookup_mode == L3FWD_LOOKUP_FIB)
			l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop;
		else
			l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
	} else
#endif
		l3fwd_poll_resource_setup();

	/* start ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
			continue;
		}
		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_start: err=%d, port=%d\n",
				 ret, portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
*/
		if (promiscuous_on) {
			ret = rte_eth_promiscuous_enable(portid);
			if (ret != 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_promiscuous_enable: err=%s, port=%u\n",
					rte_strerror(-ret), portid);
		}
	}

#ifdef RTE_LIB_EVENTDEV
	if (evt_rsrc->enabled)
		l3fwd_event_service_setup();
#endif

	printf("\n");

	/* Verify ptype availability on every configured Rx queue. */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		qconf = &lcore_conf[lcore_id];
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;
			if (prepare_ptype_parser(portid, queueid) == 0)
				rte_exit(EXIT_FAILURE, "ptype check fails\n");
		}
	}

	check_all_ports_link_status(enabled_port_mask);

	ret = 0;
	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);

#ifdef RTE_LIB_EVENTDEV
	if (evt_rsrc->enabled) {
		/* Event mode: stop adapters and ports before draining lcores,
		 * then close ports and the event device.
		 */
		for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
			rte_event_eth_rx_adapter_stop(
					evt_rsrc->rx_adptr.rx_adptr[i]);
		for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
			rte_event_eth_tx_adapter_stop(
					evt_rsrc->tx_adptr.tx_adptr[i]);

		RTE_ETH_FOREACH_DEV(portid) {
			if ((enabled_port_mask & (1 << portid)) == 0)
				continue;
			ret = rte_eth_dev_stop(portid);
			if (ret != 0)
				printf("rte_eth_dev_stop: err=%d, port=%u\n",
				       ret, portid);
		}

		rte_eal_mp_wait_lcore();
		RTE_ETH_FOREACH_DEV(portid) {
			if ((enabled_port_mask & (1 << portid)) == 0)
				continue;
			rte_eth_dev_close(portid);
		}

		rte_event_dev_stop(evt_rsrc->event_d_id);
		rte_event_dev_close(evt_rsrc->event_d_id);

	} else
#endif
	{
		/* Poll mode: wait for the forwarding loops to return, then
		 * stop and close every enabled port.
		 */
		rte_eal_mp_wait_lcore();

		RTE_ETH_FOREACH_DEV(portid) {
			if ((enabled_port_mask & (1 << portid)) == 0)
				continue;
			printf("Closing port %d...", portid);
			ret = rte_eth_dev_stop(portid);
			if (ret != 0)
				printf("rte_eth_dev_stop: err=%d, port=%u\n",
				       ret, portid);
			rte_eth_dev_close(portid);
			printf(" Done\n");
		}
	}

	/* clean up config file routes */
	l3fwd_lkp.free_routes();

	/* clean up the EAL */
	rte_eal_cleanup();

	printf("Bye...\n");

	return ret;
}