1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_mtr.h> 42 #include <rte_errno.h> 43 #ifdef RTE_NET_IXGBE 44 #include <rte_pmd_ixgbe.h> 45 #endif 46 #ifdef RTE_NET_I40E 47 #include <rte_pmd_i40e.h> 48 #endif 49 #ifdef RTE_NET_BNXT 50 #include <rte_pmd_bnxt.h> 51 #endif 52 #include <rte_gro.h> 53 #include <rte_hexdump.h> 54 55 #include "testpmd.h" 56 #include "cmdline_mtr.h" 57 58 #define ETHDEV_FWVERS_LEN 32 59 60 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 62 #else 63 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 64 #endif 65 66 #define NS_PER_SEC 1E9 67 68 static char *flowtype_to_str(uint16_t flow_type); 69 70 static const struct { 71 enum tx_pkt_split split; 72 const char *name; 73 } tx_split_name[] = { 74 { 75 .split = TX_PKT_SPLIT_OFF, 76 .name = "off", 77 }, 78 { 79 .split = TX_PKT_SPLIT_ON, 80 .name = "on", 81 }, 82 { 83 .split = TX_PKT_SPLIT_RND, 84 .name = "rand", 85 }, 86 }; 87 88 const struct rss_type_info rss_type_table[] = { 89 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP | 90 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD | 91 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP | 92 ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS}, 93 { "none", 0 }, 94 { "eth", ETH_RSS_ETH }, 95 { "l2-src-only", ETH_RSS_L2_SRC_ONLY }, 96 { "l2-dst-only", ETH_RSS_L2_DST_ONLY }, 97 { "vlan", ETH_RSS_VLAN }, 98 { "s-vlan", ETH_RSS_S_VLAN }, 99 { "c-vlan", ETH_RSS_C_VLAN }, 100 { "ipv4", ETH_RSS_IPV4 }, 101 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 102 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 103 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 104 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 105 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 106 { "ipv6", ETH_RSS_IPV6 }, 107 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 108 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 109 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 110 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 111 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 112 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 113 { "ipv6-ex", ETH_RSS_IPV6_EX }, 114 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 115 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 116 { "port", ETH_RSS_PORT }, 117 { "vxlan", ETH_RSS_VXLAN }, 118 { "geneve", ETH_RSS_GENEVE }, 119 { "nvgre", ETH_RSS_NVGRE }, 120 { "ip", ETH_RSS_IP }, 121 { "udp", ETH_RSS_UDP }, 122 { "tcp", ETH_RSS_TCP }, 123 { "sctp", ETH_RSS_SCTP }, 124 { "tunnel", ETH_RSS_TUNNEL }, 125 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 126 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 127 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 128 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 129 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 130 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 131 { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, 132 { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, 133 { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, 134 { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, 135 { "esp", ETH_RSS_ESP }, 136 { "ah", ETH_RSS_AH }, 137 { "l2tpv3", ETH_RSS_L2TPV3 }, 138 { "pfcp", ETH_RSS_PFCP }, 139 { "pppoe", ETH_RSS_PPPOE }, 140 { "gtpu", ETH_RSS_GTPU }, 141 { "ecpri", ETH_RSS_ECPRI }, 142 { "mpls", ETH_RSS_MPLS }, 143 { NULL, 0 }, 144 }; 145 146 static const struct { 147 enum rte_eth_fec_mode mode; 148 const char *name; 149 } fec_mode_name[] = { 150 { 151 .mode = RTE_ETH_FEC_NOFEC, 152 .name = "off", 153 }, 154 { 155 .mode = RTE_ETH_FEC_AUTO, 156 .name = "auto", 157 }, 158 { 159 .mode = RTE_ETH_FEC_BASER, 160 .name = "baser", 161 }, 162 { 163 .mode = RTE_ETH_FEC_RS, 164 .name = "rs", 165 }, 166 }; 167 168 static void 169 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 170 { 171 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 172 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 173 printf("%s%s", name, buf); 174 } 175 176 void 177 nic_stats_display(portid_t port_id) 178 { 179 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 180 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 181 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 182 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 183 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 184 struct timespec cur_time; 185 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 186 diff_ns; 187 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 188 struct rte_eth_stats stats; 189 190 static const char *nic_stats_border = "########################"; 191 192 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 193 print_valid_ports(); 194 return; 195 } 196 rte_eth_stats_get(port_id, &stats); 197 printf("\n %s NIC statistics for port %-2d %s\n", 198 nic_stats_border, port_id, nic_stats_border); 199 200 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 201 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 202 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 203 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 204 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 205 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 206 207 diff_ns = 0; 208 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 209 uint64_t ns; 210 211 ns = cur_time.tv_sec * NS_PER_SEC; 212 ns += cur_time.tv_nsec; 213 214 if (prev_ns[port_id] != 0) 215 diff_ns = ns - prev_ns[port_id]; 216 prev_ns[port_id] = ns; 217 } 218 219 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 220 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 221 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 222 (stats.opackets - prev_pkts_tx[port_id]) : 0; 223 prev_pkts_rx[port_id] = stats.ipackets; 224 prev_pkts_tx[port_id] = stats.opackets; 225 mpps_rx = diff_ns > 0 ? 226 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 227 mpps_tx = diff_ns > 0 ? 228 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 229 230 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 231 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 232 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 233 (stats.obytes - prev_bytes_tx[port_id]) : 0; 234 prev_bytes_rx[port_id] = stats.ibytes; 235 prev_bytes_tx[port_id] = stats.obytes; 236 mbps_rx = diff_ns > 0 ? 237 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 238 mbps_tx = diff_ns > 0 ? 239 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 240 241 printf("\n Throughput (since last show)\n"); 242 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 243 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 244 mpps_tx, mbps_tx * 8); 245 246 printf(" %s############################%s\n", 247 nic_stats_border, nic_stats_border); 248 } 249 250 void 251 nic_stats_clear(portid_t port_id) 252 { 253 int ret; 254 255 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 256 print_valid_ports(); 257 return; 258 } 259 260 ret = rte_eth_stats_reset(port_id); 261 if (ret != 0) { 262 fprintf(stderr, 263 "%s: Error: failed to reset stats (port %u): %s", 264 __func__, port_id, strerror(-ret)); 265 return; 266 } 267 268 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 269 if (ret != 0) { 270 if (ret < 0) 271 ret = -ret; 272 fprintf(stderr, 273 "%s: Error: failed to get stats (port %u): %s", 274 __func__, port_id, strerror(ret)); 275 return; 276 } 277 printf("\n NIC statistics for port %d cleared\n", port_id); 278 } 279 280 void 281 nic_xstats_display(portid_t port_id) 282 { 283 struct rte_eth_xstat *xstats; 284 int cnt_xstats, idx_xstat; 285 struct rte_eth_xstat_name *xstats_names; 286 287 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 288 print_valid_ports(); 289 return; 290 } 291 printf("###### NIC extended statistics for port %-2d\n", port_id); 292 if (!rte_eth_dev_is_valid_port(port_id)) { 293 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 294 return; 295 } 296 297 /* Get count */ 298 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 299 if (cnt_xstats < 0) { 300 fprintf(stderr, "Error: Cannot get count of xstats\n"); 301 return; 302 } 303 304 /* Get id-name lookup table */ 305 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 306 if (xstats_names == NULL) { 307 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 308 return; 309 } 310 if (cnt_xstats != rte_eth_xstats_get_names( 311 port_id, xstats_names, cnt_xstats)) { 312 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 313 free(xstats_names); 314 return; 315 } 316 317 /* Get stats themselves */ 318 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 319 if (xstats == NULL) { 320 fprintf(stderr, "Cannot allocate memory for xstats\n"); 321 free(xstats_names); 322 return; 323 } 324 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 325 fprintf(stderr, "Error: Unable to get xstats\n"); 326 free(xstats_names); 327 free(xstats); 328 return; 329 } 330 331 /* Display xstats */ 332 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 333 if (xstats_hide_zero && !xstats[idx_xstat].value) 334 continue; 335 printf("%s: %"PRIu64"\n", 336 xstats_names[idx_xstat].name, 337 xstats[idx_xstat].value); 338 } 339 free(xstats_names); 340 free(xstats); 341 } 342 343 void 344 nic_xstats_clear(portid_t port_id) 345 { 346 int ret; 347 348 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 349 print_valid_ports(); 350 return; 351 } 352 353 ret = rte_eth_xstats_reset(port_id); 354 if (ret != 0) { 355 fprintf(stderr, 356 "%s: Error: failed to reset xstats (port %u): %s\n", 357 __func__, port_id, strerror(-ret)); 358 return; 359 } 360 361 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 362 if (ret != 0) { 363 if (ret < 0) 364 ret = -ret; 365 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 366 __func__, port_id, strerror(ret)); 367 return; 368 } 369 } 370 371 static const char * 372 get_queue_state_name(uint8_t queue_state) 373 { 374 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 375 return "stopped"; 376 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 377 return "started"; 378 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 379 return "hairpin"; 380 else 381 return "unknown"; 382 } 383 384 void 385 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 386 { 387 struct rte_eth_burst_mode mode; 388 struct rte_eth_rxq_info qinfo; 389 int32_t rc; 390 static const char *info_border = "*********************"; 391 392 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 393 if (rc != 0) { 394 fprintf(stderr, 395 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 396 port_id, queue_id, strerror(-rc), rc); 397 return; 398 } 399 400 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 401 info_border, port_id, queue_id, info_border); 402 403 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 404 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 405 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 406 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 407 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 408 printf("\nRX drop packets: %s", 409 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 410 printf("\nRX deferred start: %s", 411 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 412 printf("\nRX scattered packets: %s", 413 (qinfo.scattered_rx != 0) ? "on" : "off"); 414 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 415 if (qinfo.rx_buf_size != 0) 416 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 417 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 418 419 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 420 printf("\nBurst mode: %s%s", 421 mode.info, 422 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 423 " (per queue)" : ""); 424 425 printf("\n"); 426 } 427 428 void 429 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 430 { 431 struct rte_eth_burst_mode mode; 432 struct rte_eth_txq_info qinfo; 433 int32_t rc; 434 static const char *info_border = "*********************"; 435 436 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 437 if (rc != 0) { 438 fprintf(stderr, 439 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 440 port_id, queue_id, strerror(-rc), rc); 441 return; 442 } 443 444 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 445 info_border, port_id, queue_id, info_border); 446 447 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 448 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 449 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 450 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 451 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 452 printf("\nTX deferred start: %s", 453 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 454 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 455 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 456 457 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 458 printf("\nBurst mode: %s%s", 459 mode.info, 460 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 461 " (per queue)" : ""); 462 463 printf("\n"); 464 } 465 466 static int bus_match_all(const struct rte_bus *bus, const void *data) 467 { 468 RTE_SET_USED(bus); 469 RTE_SET_USED(data); 470 return 0; 471 } 472 473 static void 474 device_infos_display_speeds(uint32_t speed_capa) 475 { 476 printf("\n\tDevice speed capability:"); 477 if (speed_capa == ETH_LINK_SPEED_AUTONEG) 478 printf(" Autonegotiate (all speeds)"); 479 if (speed_capa & ETH_LINK_SPEED_FIXED) 480 printf(" Disable autonegotiate (fixed speed) "); 481 if (speed_capa & ETH_LINK_SPEED_10M_HD) 482 printf(" 10 Mbps half-duplex "); 483 if (speed_capa & ETH_LINK_SPEED_10M) 484 printf(" 10 Mbps full-duplex "); 485 if (speed_capa & ETH_LINK_SPEED_100M_HD) 486 printf(" 100 Mbps half-duplex "); 487 if (speed_capa & ETH_LINK_SPEED_100M) 488 printf(" 100 Mbps full-duplex "); 489 if (speed_capa & ETH_LINK_SPEED_1G) 490 printf(" 1 Gbps "); 491 if (speed_capa & ETH_LINK_SPEED_2_5G) 492 printf(" 2.5 Gbps "); 493 if (speed_capa & ETH_LINK_SPEED_5G) 494 printf(" 5 Gbps "); 495 if (speed_capa & ETH_LINK_SPEED_10G) 496 printf(" 10 Gbps "); 497 if (speed_capa & ETH_LINK_SPEED_20G) 498 printf(" 20 Gbps "); 499 if (speed_capa & ETH_LINK_SPEED_25G) 500 printf(" 25 Gbps "); 501 if (speed_capa & ETH_LINK_SPEED_40G) 502 printf(" 40 Gbps "); 503 if (speed_capa & ETH_LINK_SPEED_50G) 504 printf(" 50 Gbps "); 505 if (speed_capa & ETH_LINK_SPEED_56G) 506 printf(" 56 Gbps "); 507 if (speed_capa & ETH_LINK_SPEED_100G) 508 printf(" 100 Gbps "); 509 if (speed_capa & ETH_LINK_SPEED_200G) 510 printf(" 200 Gbps "); 511 } 512 513 void 514 device_infos_display(const char *identifier) 515 { 516 static const char *info_border = "*********************"; 517 struct rte_bus *start = NULL, *next; 518 struct rte_dev_iterator dev_iter; 519 char name[RTE_ETH_NAME_MAX_LEN]; 520 struct rte_ether_addr mac_addr; 521 struct rte_device *dev; 522 struct rte_devargs da; 523 portid_t port_id; 524 struct rte_eth_dev_info dev_info; 525 char devstr[128]; 526 527 memset(&da, 0, sizeof(da)); 528 if (!identifier) 529 goto skip_parse; 530 531 if (rte_devargs_parsef(&da, "%s", identifier)) { 532 fprintf(stderr, "cannot parse identifier\n"); 533 return; 534 } 535 536 skip_parse: 537 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 538 539 start = next; 540 if (identifier && da.bus != next) 541 continue; 542 543 /* Skip buses that don't have iterate method */ 544 if (!next->dev_iterate) 545 continue; 546 547 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 548 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 549 550 if (!dev->driver) 551 continue; 552 /* Check for matching device if identifier is present */ 553 if (identifier && 554 strncmp(da.name, dev->name, strlen(dev->name))) 555 continue; 556 printf("\n%s Infos for device %s %s\n", 557 info_border, dev->name, info_border); 558 printf("Bus name: %s", dev->bus->name); 559 printf("\nDriver name: %s", dev->driver->name); 560 printf("\nDevargs: %s", 561 dev->devargs ? dev->devargs->args : ""); 562 printf("\nConnect to socket: %d", dev->numa_node); 563 printf("\n"); 564 565 /* List ports with matching device name */ 566 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 567 printf("\n\tPort id: %-2d", port_id); 568 if (eth_macaddr_get_print_err(port_id, 569 &mac_addr) == 0) 570 print_ethaddr("\n\tMAC address: ", 571 &mac_addr); 572 rte_eth_dev_get_name_by_port(port_id, name); 573 printf("\n\tDevice name: %s", name); 574 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 575 device_infos_display_speeds(dev_info.speed_capa); 576 printf("\n"); 577 } 578 } 579 }; 580 rte_devargs_reset(&da); 581 } 582 583 void 584 port_infos_display(portid_t port_id) 585 { 586 struct rte_port *port; 587 struct rte_ether_addr mac_addr; 588 struct rte_eth_link link; 589 struct rte_eth_dev_info dev_info; 590 int vlan_offload; 591 struct rte_mempool * mp; 592 static const char *info_border = "*********************"; 593 uint16_t mtu; 594 char name[RTE_ETH_NAME_MAX_LEN]; 595 int ret; 596 char fw_version[ETHDEV_FWVERS_LEN]; 597 598 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 599 print_valid_ports(); 600 return; 601 } 602 port = &ports[port_id]; 603 ret = eth_link_get_nowait_print_err(port_id, &link); 604 if (ret < 0) 605 return; 606 607 ret = eth_dev_info_get_print_err(port_id, &dev_info); 608 if (ret != 0) 609 return; 610 611 printf("\n%s Infos for port %-2d %s\n", 612 info_border, port_id, info_border); 613 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 614 print_ethaddr("MAC address: ", &mac_addr); 615 rte_eth_dev_get_name_by_port(port_id, name); 616 printf("\nDevice name: %s", name); 617 printf("\nDriver name: %s", dev_info.driver_name); 618 619 if (rte_eth_dev_fw_version_get(port_id, fw_version, 620 ETHDEV_FWVERS_LEN) == 0) 621 printf("\nFirmware-version: %s", fw_version); 622 else 623 printf("\nFirmware-version: %s", "not available"); 624 625 if (dev_info.device->devargs && dev_info.device->devargs->args) 626 printf("\nDevargs: %s", dev_info.device->devargs->args); 627 printf("\nConnect to socket: %u", port->socket_id); 628 629 if (port_numa[port_id] != NUMA_NO_CONFIG) { 630 mp = mbuf_pool_find(port_numa[port_id], 0); 631 if (mp) 632 printf("\nmemory allocation on the socket: %d", 633 port_numa[port_id]); 634 } else 635 printf("\nmemory allocation on the socket: %u",port->socket_id); 636 637 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 638 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 639 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 640 ("full-duplex") : ("half-duplex")); 641 printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ? 642 ("On") : ("Off")); 643 644 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 645 printf("MTU: %u\n", mtu); 646 647 printf("Promiscuous mode: %s\n", 648 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 649 printf("Allmulticast mode: %s\n", 650 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 651 printf("Maximum number of MAC addresses: %u\n", 652 (unsigned int)(port->dev_info.max_mac_addrs)); 653 printf("Maximum number of MAC addresses of hash filtering: %u\n", 654 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 655 656 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 657 if (vlan_offload >= 0){ 658 printf("VLAN offload: \n"); 659 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 660 printf(" strip on, "); 661 else 662 printf(" strip off, "); 663 664 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 665 printf("filter on, "); 666 else 667 printf("filter off, "); 668 669 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 670 printf("extend on, "); 671 else 672 printf("extend off, "); 673 674 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 675 printf("qinq strip on\n"); 676 else 677 printf("qinq strip off\n"); 678 } 679 680 if (dev_info.hash_key_size > 0) 681 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 682 if (dev_info.reta_size > 0) 683 printf("Redirection table size: %u\n", dev_info.reta_size); 684 if (!dev_info.flow_type_rss_offloads) 685 printf("No RSS offload flow type is supported.\n"); 686 else { 687 uint16_t i; 688 char *p; 689 690 printf("Supported RSS offload flow types:\n"); 691 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 692 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 693 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 694 continue; 695 p = flowtype_to_str(i); 696 if (p) 697 printf(" %s\n", p); 698 else 699 printf(" user defined %d\n", i); 700 } 701 } 702 703 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 704 printf("Maximum configurable length of RX packet: %u\n", 705 dev_info.max_rx_pktlen); 706 printf("Maximum configurable size of LRO aggregated packet: %u\n", 707 dev_info.max_lro_pkt_size); 708 if (dev_info.max_vfs) 709 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 710 if (dev_info.max_vmdq_pools) 711 printf("Maximum number of VMDq pools: %u\n", 712 dev_info.max_vmdq_pools); 713 714 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 715 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 716 printf("Max possible number of RXDs per queue: %hu\n", 717 dev_info.rx_desc_lim.nb_max); 718 printf("Min possible number of RXDs per queue: %hu\n", 719 dev_info.rx_desc_lim.nb_min); 720 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 721 722 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 723 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 724 printf("Max possible number of TXDs per queue: %hu\n", 725 dev_info.tx_desc_lim.nb_max); 726 printf("Min possible number of TXDs per queue: %hu\n", 727 dev_info.tx_desc_lim.nb_min); 728 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 729 printf("Max segment number per packet: %hu\n", 730 dev_info.tx_desc_lim.nb_seg_max); 731 printf("Max segment number per MTU/TSO: %hu\n", 732 dev_info.tx_desc_lim.nb_mtu_seg_max); 733 734 /* Show switch info only if valid switch domain and port id is set */ 735 if (dev_info.switch_info.domain_id != 736 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 737 if (dev_info.switch_info.name) 738 printf("Switch name: %s\n", dev_info.switch_info.name); 739 740 printf("Switch domain Id: %u\n", 741 dev_info.switch_info.domain_id); 742 printf("Switch Port Id: %u\n", 743 dev_info.switch_info.port_id); 744 } 745 } 746 747 void 748 port_summary_header_display(void) 749 { 750 uint16_t port_number; 751 752 port_number = rte_eth_dev_count_avail(); 753 printf("Number of available ports: %i\n", port_number); 754 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 755 "Driver", "Status", "Link"); 756 } 757 758 void 759 port_summary_display(portid_t port_id) 760 { 761 struct rte_ether_addr mac_addr; 762 struct rte_eth_link link; 763 struct rte_eth_dev_info dev_info; 764 char name[RTE_ETH_NAME_MAX_LEN]; 765 int ret; 766 767 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 768 print_valid_ports(); 769 return; 770 } 771 772 ret = eth_link_get_nowait_print_err(port_id, &link); 773 if (ret < 0) 774 return; 775 776 ret = eth_dev_info_get_print_err(port_id, &dev_info); 777 if (ret != 0) 778 return; 779 780 rte_eth_dev_get_name_by_port(port_id, name); 781 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 782 if (ret != 0) 783 return; 784 785 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n", 786 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 787 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 788 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 789 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 790 rte_eth_link_speed_to_str(link.link_speed)); 791 } 792 793 void 794 port_eeprom_display(portid_t port_id) 795 { 796 struct rte_dev_eeprom_info einfo; 797 int ret; 798 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 799 print_valid_ports(); 800 return; 801 } 802 803 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 804 if (len_eeprom < 0) { 805 switch (len_eeprom) { 806 case -ENODEV: 807 fprintf(stderr, "port index %d invalid\n", port_id); 808 break; 809 case -ENOTSUP: 810 fprintf(stderr, "operation not supported by device\n"); 811 break; 812 case -EIO: 813 fprintf(stderr, "device is removed\n"); 814 break; 815 default: 816 fprintf(stderr, "Unable to get EEPROM: %d\n", 817 len_eeprom); 818 break; 819 } 820 return; 821 } 822 823 char buf[len_eeprom]; 824 einfo.offset = 0; 825 einfo.length = len_eeprom; 826 einfo.data = buf; 827 828 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 829 if (ret != 0) { 830 switch (ret) { 831 case -ENODEV: 832 fprintf(stderr, "port index %d invalid\n", port_id); 833 break; 834 case -ENOTSUP: 835 fprintf(stderr, "operation not supported by device\n"); 836 break; 837 case -EIO: 838 fprintf(stderr, "device is removed\n"); 839 break; 840 default: 841 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 842 break; 843 } 844 return; 845 } 846 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 847 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 848 } 849 850 void 851 port_module_eeprom_display(portid_t port_id) 852 { 853 struct rte_eth_dev_module_info minfo; 854 struct rte_dev_eeprom_info einfo; 855 int ret; 856 857 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 858 print_valid_ports(); 859 return; 860 } 861 862 863 ret = rte_eth_dev_get_module_info(port_id, &minfo); 864 if (ret != 0) { 865 switch (ret) { 866 case -ENODEV: 867 fprintf(stderr, "port index %d invalid\n", port_id); 868 break; 869 case -ENOTSUP: 870 fprintf(stderr, "operation not supported by device\n"); 871 break; 872 case -EIO: 873 fprintf(stderr, "device is removed\n"); 874 break; 875 default: 876 fprintf(stderr, "Unable to get module EEPROM: %d\n", 877 ret); 878 break; 879 } 880 return; 881 } 882 883 char buf[minfo.eeprom_len]; 884 einfo.offset = 0; 885 einfo.length = minfo.eeprom_len; 886 einfo.data = buf; 887 888 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 889 if (ret != 0) { 890 switch (ret) { 891 case -ENODEV: 892 fprintf(stderr, "port index %d invalid\n", port_id); 893 break; 894 case -ENOTSUP: 895 fprintf(stderr, "operation not supported by device\n"); 896 break; 897 case -EIO: 898 fprintf(stderr, "device is removed\n"); 899 break; 900 default: 901 fprintf(stderr, "Unable to get module EEPROM: %d\n", 902 ret); 903 break; 904 } 905 return; 906 } 907 908 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 909 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 910 } 911 912 int 913 port_id_is_invalid(portid_t port_id, enum print_warning warning) 914 { 915 uint16_t pid; 916 917 if (port_id == (portid_t)RTE_PORT_ALL) 918 return 0; 919 920 RTE_ETH_FOREACH_DEV(pid) 921 if (port_id == pid) 922 return 0; 923 924 if (warning == ENABLED_WARN) 925 fprintf(stderr, "Invalid port %d\n", port_id); 926 927 return 1; 928 } 929 930 void print_valid_ports(void) 931 { 932 portid_t pid; 933 934 printf("The valid ports array is ["); 935 RTE_ETH_FOREACH_DEV(pid) { 936 printf(" %d", pid); 937 } 938 printf(" ]\n"); 939 } 940 941 static int 942 vlan_id_is_invalid(uint16_t vlan_id) 943 { 944 if (vlan_id < 4096) 945 return 0; 946 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 947 return 1; 948 } 949 950 static int 951 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 952 { 953 const struct rte_pci_device *pci_dev; 954 const struct rte_bus *bus; 955 uint64_t pci_len; 956 957 if (reg_off & 0x3) { 958 fprintf(stderr, 959 "Port register offset 0x%X not aligned on a 4-byte boundary\n", 960 (unsigned int)reg_off); 961 return 1; 962 } 963 964 if (!ports[port_id].dev_info.device) { 965 fprintf(stderr, "Invalid device\n"); 966 return 0; 967 } 968 969 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 970 if (bus && !strcmp(bus->name, "pci")) { 971 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 972 } else { 973 fprintf(stderr, "Not a PCI device\n"); 974 return 1; 975 } 976 977 pci_len = pci_dev->mem_resource[0].len; 978 if (reg_off >= pci_len) { 979 fprintf(stderr, 980 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", 981 port_id, (unsigned int)reg_off, (unsigned int)reg_off, 982 pci_len); 983 return 1; 984 } 985 return 0; 986 } 987 988 static int 989 reg_bit_pos_is_invalid(uint8_t bit_pos) 990 { 991 if (bit_pos <= 31) 992 return 0; 993 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); 994 return 1; 995 } 996 997 #define display_port_and_reg_off(port_id, reg_off) \ 998 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 999 1000 static inline void 1001 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1002 { 1003 display_port_and_reg_off(port_id, (unsigned)reg_off); 1004 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1005 } 1006 1007 void 1008 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1009 { 1010 uint32_t reg_v; 1011 1012 1013 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1014 return; 1015 if (port_reg_off_is_invalid(port_id, reg_off)) 1016 return; 1017 if (reg_bit_pos_is_invalid(bit_x)) 1018 return; 1019 reg_v = port_id_pci_reg_read(port_id, reg_off); 1020 display_port_and_reg_off(port_id, (unsigned)reg_off); 1021 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1022 } 1023 1024 void 1025 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1026 uint8_t bit1_pos, uint8_t bit2_pos) 1027 { 1028 uint32_t reg_v; 1029 uint8_t l_bit; 1030 uint8_t h_bit; 1031 1032 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1033 return; 1034 if (port_reg_off_is_invalid(port_id, reg_off)) 1035 return; 1036 if (reg_bit_pos_is_invalid(bit1_pos)) 1037 return; 1038 if (reg_bit_pos_is_invalid(bit2_pos)) 1039 return; 1040 if (bit1_pos > bit2_pos) 1041 l_bit = bit2_pos, h_bit = bit1_pos; 1042 else 1043 l_bit = bit1_pos, h_bit = bit2_pos; 1044 1045 reg_v = port_id_pci_reg_read(port_id, reg_off); 1046 reg_v >>= l_bit; 1047 if (h_bit < 31) 1048 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1049 display_port_and_reg_off(port_id, (unsigned)reg_off); 1050 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1051 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1052 } 1053 1054 void 1055 port_reg_display(portid_t port_id, uint32_t reg_off) 1056 { 1057 uint32_t reg_v; 1058 1059 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1060 return; 1061 if (port_reg_off_is_invalid(port_id, reg_off)) 1062 return; 1063 reg_v = port_id_pci_reg_read(port_id, reg_off); 1064 display_port_reg_value(port_id, reg_off, reg_v); 1065 } 1066 1067 void 1068 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1069 uint8_t bit_v) 1070 { 1071 uint32_t reg_v; 1072 1073 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1074 return; 1075 if (port_reg_off_is_invalid(port_id, reg_off)) 1076 return; 1077 if (reg_bit_pos_is_invalid(bit_pos)) 1078 return; 1079 if (bit_v > 1) { 1080 fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n", 1081 (int) bit_v); 1082 return; 1083 } 1084 reg_v = port_id_pci_reg_read(port_id, reg_off); 1085 if (bit_v == 0) 1086 reg_v &= ~(1 << bit_pos); 1087 else 1088 reg_v |= (1 << bit_pos); 1089 port_id_pci_reg_write(port_id, reg_off, reg_v); 1090 display_port_reg_value(port_id, reg_off, reg_v); 1091 } 1092 1093 void 1094 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1095 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1096 { 1097 uint32_t max_v; 1098 uint32_t reg_v; 1099 uint8_t l_bit; 1100 uint8_t h_bit; 1101 1102 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1103 return; 1104 if (port_reg_off_is_invalid(port_id, reg_off)) 1105 return; 1106 if (reg_bit_pos_is_invalid(bit1_pos)) 1107 return; 1108 if (reg_bit_pos_is_invalid(bit2_pos)) 1109 return; 1110 if (bit1_pos > bit2_pos) 1111 l_bit = bit2_pos, h_bit = bit1_pos; 1112 else 1113 l_bit = bit1_pos, h_bit = bit2_pos; 1114 1115 if ((h_bit - l_bit) < 31) 1116 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1117 else 1118 max_v = 0xFFFFFFFF; 1119 1120 if (value > max_v) { 1121 fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n", 1122 (unsigned)value, (unsigned)value, 1123 (unsigned)max_v, (unsigned)max_v); 1124 return; 1125 } 1126 reg_v = port_id_pci_reg_read(port_id, reg_off); 1127 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1128 reg_v |= (value << l_bit); /* Set changed bits */ 1129 port_id_pci_reg_write(port_id, reg_off, reg_v); 1130 display_port_reg_value(port_id, reg_off, reg_v); 1131 } 1132 1133 void 1134 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1135 { 1136 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1137 return; 1138 if (port_reg_off_is_invalid(port_id, reg_off)) 1139 return; 1140 port_id_pci_reg_write(port_id, reg_off, reg_v); 1141 display_port_reg_value(port_id, reg_off, reg_v); 1142 } 1143 1144 void 1145 port_mtu_set(portid_t port_id, uint16_t mtu) 1146 { 1147 int diag; 1148 struct rte_port *rte_port = &ports[port_id]; 1149 struct rte_eth_dev_info dev_info; 1150 uint16_t eth_overhead; 1151 int ret; 1152 1153 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1154 return; 1155 1156 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1157 if (ret != 0) 1158 return; 1159 1160 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1161 fprintf(stderr, 1162 "Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1163 mtu, dev_info.min_mtu, dev_info.max_mtu); 1164 return; 1165 } 1166 diag = rte_eth_dev_set_mtu(port_id, mtu); 1167 if (diag) 1168 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1169 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1170 /* 1171 * Ether overhead in driver is equal to the difference of 1172 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the 1173 * device supports jumbo frame. 1174 */ 1175 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; 1176 if (mtu > RTE_ETHER_MTU) { 1177 rte_port->dev_conf.rxmode.offloads |= 1178 DEV_RX_OFFLOAD_JUMBO_FRAME; 1179 rte_port->dev_conf.rxmode.max_rx_pkt_len = 1180 mtu + eth_overhead; 1181 } else 1182 rte_port->dev_conf.rxmode.offloads &= 1183 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1184 } 1185 } 1186 1187 /* Generic flow management functions. */ 1188 1189 static struct port_flow_tunnel * 1190 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1191 { 1192 struct port_flow_tunnel *flow_tunnel; 1193 1194 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1195 if (flow_tunnel->id == port_tunnel_id) 1196 goto out; 1197 } 1198 flow_tunnel = NULL; 1199 1200 out: 1201 return flow_tunnel; 1202 } 1203 1204 const char * 1205 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1206 { 1207 const char *type; 1208 switch (tunnel->type) { 1209 default: 1210 type = "unknown"; 1211 break; 1212 case RTE_FLOW_ITEM_TYPE_VXLAN: 1213 type = "vxlan"; 1214 break; 1215 } 1216 1217 return type; 1218 } 1219 1220 struct port_flow_tunnel * 1221 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1222 { 1223 struct rte_port *port = &ports[port_id]; 1224 struct port_flow_tunnel *flow_tunnel; 1225 1226 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1227 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1228 goto out; 1229 } 1230 flow_tunnel = NULL; 1231 1232 out: 1233 return flow_tunnel; 1234 } 1235 1236 void port_flow_tunnel_list(portid_t port_id) 1237 { 1238 struct rte_port *port = &ports[port_id]; 1239 struct port_flow_tunnel *flt; 1240 1241 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1242 printf("port %u tunnel #%u type=%s", 1243 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1244 if (flt->tunnel.tun_id) 1245 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1246 printf("\n"); 1247 } 1248 } 1249 1250 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1251 { 1252 struct rte_port *port = &ports[port_id]; 1253 struct port_flow_tunnel *flt; 1254 1255 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1256 if (flt->id == tunnel_id) 1257 break; 1258 } 1259 if (flt) { 1260 LIST_REMOVE(flt, chain); 1261 free(flt); 1262 printf("port %u: flow tunnel #%u destroyed\n", 1263 port_id, tunnel_id); 1264 } 1265 } 1266 1267 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1268 { 1269 struct rte_port *port = &ports[port_id]; 1270 enum rte_flow_item_type type; 1271 struct port_flow_tunnel *flt; 1272 1273 if (!strcmp(ops->type, "vxlan")) 1274 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1275 else { 1276 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1277 ops->type); 1278 return; 1279 } 1280 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1281 if (flt->tunnel.type == type) 1282 break; 1283 } 1284 if (!flt) { 1285 flt = calloc(1, sizeof(*flt)); 1286 if (!flt) { 1287 fprintf(stderr, "failed to allocate port flt object\n"); 1288 return; 1289 } 1290 flt->tunnel.type = type; 1291 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1292 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1293 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1294 } 1295 printf("port %d: flow tunnel #%u type %s\n", 1296 port_id, flt->id, ops->type); 1297 } 1298 1299 /** Generate a port_flow entry from attributes/pattern/actions. */ 1300 static struct port_flow * 1301 port_flow_new(const struct rte_flow_attr *attr, 1302 const struct rte_flow_item *pattern, 1303 const struct rte_flow_action *actions, 1304 struct rte_flow_error *error) 1305 { 1306 const struct rte_flow_conv_rule rule = { 1307 .attr_ro = attr, 1308 .pattern_ro = pattern, 1309 .actions_ro = actions, 1310 }; 1311 struct port_flow *pf; 1312 int ret; 1313 1314 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1315 if (ret < 0) 1316 return NULL; 1317 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1318 if (!pf) { 1319 rte_flow_error_set 1320 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1321 "calloc() failed"); 1322 return NULL; 1323 } 1324 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1325 error) >= 0) 1326 return pf; 1327 free(pf); 1328 return NULL; 1329 } 1330 1331 /** Print a message out of a flow error. */ 1332 static int 1333 port_flow_complain(struct rte_flow_error *error) 1334 { 1335 static const char *const errstrlist[] = { 1336 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1337 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1338 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1339 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1340 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1341 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1342 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1343 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1344 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1345 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1346 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1347 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1348 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1349 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1350 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1351 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1352 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1353 }; 1354 const char *errstr; 1355 char buf[32]; 1356 int err = rte_errno; 1357 1358 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1359 !errstrlist[error->type]) 1360 errstr = "unknown type"; 1361 else 1362 errstr = errstrlist[error->type]; 1363 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1364 __func__, error->type, errstr, 1365 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1366 error->cause), buf) : "", 1367 error->message ? error->message : "(no stated reason)", 1368 rte_strerror(err)); 1369 return -err; 1370 } 1371 1372 static void 1373 rss_config_display(struct rte_flow_action_rss *rss_conf) 1374 { 1375 uint8_t i; 1376 1377 if (rss_conf == NULL) { 1378 fprintf(stderr, "Invalid rule\n"); 1379 return; 1380 } 1381 1382 printf("RSS:\n" 1383 " queues:"); 1384 if (rss_conf->queue_num == 0) 1385 printf(" none"); 1386 for (i = 0; i < rss_conf->queue_num; i++) 1387 printf(" %d", rss_conf->queue[i]); 1388 printf("\n"); 1389 1390 printf(" function: "); 1391 switch (rss_conf->func) { 1392 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1393 printf("default\n"); 1394 break; 1395 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1396 printf("toeplitz\n"); 1397 break; 1398 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1399 printf("simple_xor\n"); 1400 break; 1401 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1402 printf("symmetric_toeplitz\n"); 1403 break; 1404 default: 1405 printf("Unknown function\n"); 1406 return; 1407 } 1408 1409 printf(" types:\n"); 1410 if (rss_conf->types == 0) { 1411 printf(" none\n"); 1412 return; 1413 } 1414 for (i = 0; rss_type_table[i].str; i++) { 1415 if ((rss_conf->types & 1416 rss_type_table[i].rss_type) == 1417 rss_type_table[i].rss_type && 1418 rss_type_table[i].rss_type != 0) 1419 printf(" %s\n", rss_type_table[i].str); 1420 } 1421 } 1422 1423 static struct port_indirect_action * 1424 action_get_by_id(portid_t port_id, uint32_t id) 1425 { 1426 struct rte_port *port; 1427 struct port_indirect_action **ppia; 1428 struct port_indirect_action *pia = NULL; 1429 1430 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1431 port_id == (portid_t)RTE_PORT_ALL) 1432 return NULL; 1433 port = &ports[port_id]; 1434 ppia = &port->actions_list; 1435 while (*ppia) { 1436 if ((*ppia)->id == id) { 1437 pia = *ppia; 1438 break; 1439 } 1440 ppia = &(*ppia)->next; 1441 } 1442 if (!pia) 1443 fprintf(stderr, 1444 "Failed to find indirect action #%u on port %u\n", 1445 id, port_id); 1446 return pia; 1447 } 1448 1449 static int 1450 action_alloc(portid_t port_id, uint32_t id, 1451 struct port_indirect_action **action) 1452 { 1453 struct rte_port *port; 1454 struct port_indirect_action **ppia; 1455 struct port_indirect_action *pia = NULL; 1456 1457 *action = NULL; 1458 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1459 port_id == (portid_t)RTE_PORT_ALL) 1460 return -EINVAL; 1461 port = &ports[port_id]; 1462 if (id == UINT32_MAX) { 1463 /* taking first available ID */ 1464 if (port->actions_list) { 1465 if (port->actions_list->id == UINT32_MAX - 1) { 1466 fprintf(stderr, 1467 "Highest indirect action ID is already assigned, delete it first\n"); 1468 return -ENOMEM; 1469 } 1470 id = port->actions_list->id + 1; 1471 } else { 1472 id = 0; 1473 } 1474 } 1475 pia = calloc(1, sizeof(*pia)); 1476 if (!pia) { 1477 fprintf(stderr, 1478 "Allocation of port %u indirect action failed\n", 1479 port_id); 1480 return -ENOMEM; 1481 } 1482 ppia = &port->actions_list; 1483 while (*ppia && (*ppia)->id > id) 1484 ppia = &(*ppia)->next; 1485 if (*ppia && (*ppia)->id == id) { 1486 fprintf(stderr, 1487 "Indirect action #%u is already assigned, delete it first\n", 1488 id); 1489 free(pia); 1490 return -EINVAL; 1491 } 1492 pia->next = *ppia; 1493 pia->id = id; 1494 *ppia = pia; 1495 *action = pia; 1496 return 0; 1497 } 1498 1499 /** Create indirect action */ 1500 int 1501 port_action_handle_create(portid_t port_id, uint32_t id, 1502 const struct rte_flow_indir_action_conf *conf, 1503 const struct rte_flow_action *action) 1504 { 1505 struct port_indirect_action *pia; 1506 int ret; 1507 struct rte_flow_error error; 1508 1509 ret = action_alloc(port_id, id, &pia); 1510 if (ret) 1511 return ret; 1512 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1513 struct rte_flow_action_age *age = 1514 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1515 1516 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1517 age->context = &pia->age_type; 1518 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1519 struct rte_flow_action_conntrack *ct = 1520 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1521 1522 memcpy(ct, &conntrack_context, sizeof(*ct)); 1523 } 1524 /* Poisoning to make sure PMDs update it in case of error. */ 1525 memset(&error, 0x22, sizeof(error)); 1526 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1527 &error); 1528 if (!pia->handle) { 1529 uint32_t destroy_id = pia->id; 1530 port_action_handle_destroy(port_id, 1, &destroy_id); 1531 return port_flow_complain(&error); 1532 } 1533 pia->type = action->type; 1534 printf("Indirect action #%u created\n", pia->id); 1535 return 0; 1536 } 1537 1538 /** Destroy indirect action */ 1539 int 1540 port_action_handle_destroy(portid_t port_id, 1541 uint32_t n, 1542 const uint32_t *actions) 1543 { 1544 struct rte_port *port; 1545 struct port_indirect_action **tmp; 1546 uint32_t c = 0; 1547 int ret = 0; 1548 1549 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1550 port_id == (portid_t)RTE_PORT_ALL) 1551 return -EINVAL; 1552 port = &ports[port_id]; 1553 tmp = &port->actions_list; 1554 while (*tmp) { 1555 uint32_t i; 1556 1557 for (i = 0; i != n; ++i) { 1558 struct rte_flow_error error; 1559 struct port_indirect_action *pia = *tmp; 1560 1561 if (actions[i] != pia->id) 1562 continue; 1563 /* 1564 * Poisoning to make sure PMDs update it in case 1565 * of error. 1566 */ 1567 memset(&error, 0x33, sizeof(error)); 1568 1569 if (pia->handle && rte_flow_action_handle_destroy( 1570 port_id, pia->handle, &error)) { 1571 ret = port_flow_complain(&error); 1572 continue; 1573 } 1574 *tmp = pia->next; 1575 printf("Indirect action #%u destroyed\n", pia->id); 1576 free(pia); 1577 break; 1578 } 1579 if (i == n) 1580 tmp = &(*tmp)->next; 1581 ++c; 1582 } 1583 return ret; 1584 } 1585 1586 1587 /** Get indirect action by port + id */ 1588 struct rte_flow_action_handle * 1589 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1590 { 1591 1592 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1593 1594 return (pia) ? pia->handle : NULL; 1595 } 1596 1597 /** Update indirect action */ 1598 int 1599 port_action_handle_update(portid_t port_id, uint32_t id, 1600 const struct rte_flow_action *action) 1601 { 1602 struct rte_flow_error error; 1603 struct rte_flow_action_handle *action_handle; 1604 struct port_indirect_action *pia; 1605 const void *update; 1606 1607 action_handle = port_action_handle_get_by_id(port_id, id); 1608 if (!action_handle) 1609 return -EINVAL; 1610 pia = action_get_by_id(port_id, id); 1611 if (!pia) 1612 return -EINVAL; 1613 switch (pia->type) { 1614 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1615 update = action->conf; 1616 break; 1617 default: 1618 update = action; 1619 break; 1620 } 1621 if (rte_flow_action_handle_update(port_id, action_handle, update, 1622 &error)) { 1623 return port_flow_complain(&error); 1624 } 1625 printf("Indirect action #%u updated\n", id); 1626 return 0; 1627 } 1628 1629 int 1630 port_action_handle_query(portid_t port_id, uint32_t id) 1631 { 1632 struct rte_flow_error error; 1633 struct port_indirect_action *pia; 1634 union { 1635 struct rte_flow_query_count count; 1636 struct rte_flow_query_age age; 1637 struct rte_flow_action_conntrack ct; 1638 } query; 1639 1640 pia = action_get_by_id(port_id, id); 1641 if (!pia) 1642 return -EINVAL; 1643 switch (pia->type) { 1644 case RTE_FLOW_ACTION_TYPE_AGE: 1645 case RTE_FLOW_ACTION_TYPE_COUNT: 1646 break; 1647 default: 1648 fprintf(stderr, 1649 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1650 id, pia->type, port_id); 1651 return -ENOTSUP; 1652 } 1653 /* Poisoning to make sure PMDs update it in case of error. */ 1654 memset(&error, 0x55, sizeof(error)); 1655 memset(&query, 0, sizeof(query)); 1656 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 1657 return port_flow_complain(&error); 1658 switch (pia->type) { 1659 case RTE_FLOW_ACTION_TYPE_AGE: 1660 printf("Indirect AGE action:\n" 1661 " aged: %u\n" 1662 " sec_since_last_hit_valid: %u\n" 1663 " sec_since_last_hit: %" PRIu32 "\n", 1664 query.age.aged, 1665 query.age.sec_since_last_hit_valid, 1666 query.age.sec_since_last_hit); 1667 break; 1668 case RTE_FLOW_ACTION_TYPE_COUNT: 1669 printf("Indirect COUNT action:\n" 1670 " hits_set: %u\n" 1671 " bytes_set: %u\n" 1672 " hits: %" PRIu64 "\n" 1673 " bytes: %" PRIu64 "\n", 1674 query.count.hits_set, 1675 query.count.bytes_set, 1676 query.count.hits, 1677 query.count.bytes); 1678 break; 1679 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1680 printf("Conntrack Context:\n" 1681 " Peer: %u, Flow dir: %s, Enable: %u\n" 1682 " Live: %u, SACK: %u, CACK: %u\n" 1683 " Packet dir: %s, Liberal: %u, State: %u\n" 1684 " Factor: %u, Retrans: %u, TCP flags: %u\n" 1685 " Last Seq: %u, Last ACK: %u\n" 1686 " Last Win: %u, Last End: %u\n", 1687 query.ct.peer_port, 1688 query.ct.is_original_dir ? "Original" : "Reply", 1689 query.ct.enable, query.ct.live_connection, 1690 query.ct.selective_ack, query.ct.challenge_ack_passed, 1691 query.ct.last_direction ? "Original" : "Reply", 1692 query.ct.liberal_mode, query.ct.state, 1693 query.ct.max_ack_window, query.ct.retransmission_limit, 1694 query.ct.last_index, query.ct.last_seq, 1695 query.ct.last_ack, query.ct.last_window, 1696 query.ct.last_end); 1697 printf(" Original Dir:\n" 1698 " scale: %u, fin: %u, ack seen: %u\n" 1699 " unacked data: %u\n Sent end: %u," 1700 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1701 query.ct.original_dir.scale, 1702 query.ct.original_dir.close_initiated, 1703 query.ct.original_dir.last_ack_seen, 1704 query.ct.original_dir.data_unacked, 1705 query.ct.original_dir.sent_end, 1706 query.ct.original_dir.reply_end, 1707 query.ct.original_dir.max_win, 1708 query.ct.original_dir.max_ack); 1709 printf(" Reply Dir:\n" 1710 " scale: %u, fin: %u, ack seen: %u\n" 1711 " unacked data: %u\n Sent end: %u," 1712 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1713 query.ct.reply_dir.scale, 1714 query.ct.reply_dir.close_initiated, 1715 query.ct.reply_dir.last_ack_seen, 1716 query.ct.reply_dir.data_unacked, 1717 query.ct.reply_dir.sent_end, 1718 query.ct.reply_dir.reply_end, 1719 query.ct.reply_dir.max_win, 1720 query.ct.reply_dir.max_ack); 1721 break; 1722 default: 1723 fprintf(stderr, 1724 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1725 id, pia->type, port_id); 1726 break; 1727 } 1728 return 0; 1729 } 1730 1731 static struct port_flow_tunnel * 1732 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1733 const struct rte_flow_item *pattern, 1734 const struct rte_flow_action *actions, 1735 const struct tunnel_ops *tunnel_ops) 1736 { 1737 int ret; 1738 struct rte_port *port; 1739 struct port_flow_tunnel *pft; 1740 struct rte_flow_error error; 1741 1742 port = &ports[port_id]; 1743 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 1744 if (!pft) { 1745 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 1746 tunnel_ops->id); 1747 return NULL; 1748 } 1749 if (tunnel_ops->actions) { 1750 uint32_t num_actions; 1751 const struct rte_flow_action *aptr; 1752 1753 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 1754 &pft->pmd_actions, 1755 &pft->num_pmd_actions, 1756 &error); 1757 if (ret) { 1758 port_flow_complain(&error); 1759 return NULL; 1760 } 1761 for (aptr = actions, num_actions = 1; 1762 aptr->type != RTE_FLOW_ACTION_TYPE_END; 1763 aptr++, num_actions++); 1764 pft->actions = malloc( 1765 (num_actions + pft->num_pmd_actions) * 1766 sizeof(actions[0])); 1767 if (!pft->actions) { 1768 rte_flow_tunnel_action_decap_release( 1769 port_id, pft->actions, 1770 pft->num_pmd_actions, &error); 1771 return NULL; 1772 } 1773 rte_memcpy(pft->actions, pft->pmd_actions, 1774 pft->num_pmd_actions * sizeof(actions[0])); 1775 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 1776 num_actions * sizeof(actions[0])); 1777 } 1778 if (tunnel_ops->items) { 1779 uint32_t num_items; 1780 const struct rte_flow_item *iptr; 1781 1782 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 1783 &pft->pmd_items, 1784 &pft->num_pmd_items, 1785 &error); 1786 if (ret) { 1787 port_flow_complain(&error); 1788 return NULL; 1789 } 1790 for (iptr = pattern, num_items = 1; 1791 iptr->type != RTE_FLOW_ITEM_TYPE_END; 1792 iptr++, num_items++); 1793 pft->items = malloc((num_items + pft->num_pmd_items) * 1794 sizeof(pattern[0])); 1795 if (!pft->items) { 1796 rte_flow_tunnel_item_release( 1797 port_id, pft->pmd_items, 1798 pft->num_pmd_items, &error); 1799 return NULL; 1800 } 1801 rte_memcpy(pft->items, pft->pmd_items, 1802 pft->num_pmd_items * sizeof(pattern[0])); 1803 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 1804 num_items * sizeof(pattern[0])); 1805 } 1806 1807 return pft; 1808 } 1809 1810 static void 1811 port_flow_tunnel_offload_cmd_release(portid_t port_id, 1812 const struct tunnel_ops *tunnel_ops, 1813 struct port_flow_tunnel *pft) 1814 { 1815 struct rte_flow_error error; 1816 1817 if (tunnel_ops->actions) { 1818 free(pft->actions); 1819 rte_flow_tunnel_action_decap_release( 1820 port_id, pft->pmd_actions, 1821 pft->num_pmd_actions, &error); 1822 pft->actions = NULL; 1823 pft->pmd_actions = NULL; 1824 } 1825 if (tunnel_ops->items) { 1826 free(pft->items); 1827 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 1828 pft->num_pmd_items, 1829 &error); 1830 pft->items = NULL; 1831 pft->pmd_items = NULL; 1832 } 1833 } 1834 1835 /** Add port meter policy */ 1836 int 1837 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 1838 const struct rte_flow_action *actions) 1839 { 1840 struct rte_mtr_error error; 1841 const struct rte_flow_action *act = actions; 1842 const struct rte_flow_action *start; 1843 struct rte_mtr_meter_policy_params policy; 1844 uint32_t i = 0, act_n; 1845 int ret; 1846 1847 for (i = 0; i < RTE_COLORS; i++) { 1848 for (act_n = 0, start = act; 1849 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 1850 act_n++; 1851 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 1852 policy.actions[i] = start; 1853 else 1854 policy.actions[i] = NULL; 1855 act++; 1856 } 1857 ret = rte_mtr_meter_policy_add(port_id, 1858 policy_id, 1859 &policy, &error); 1860 if (ret) 1861 print_mtr_err_msg(&error); 1862 return ret; 1863 } 1864 1865 /** Validate flow rule. */ 1866 int 1867 port_flow_validate(portid_t port_id, 1868 const struct rte_flow_attr *attr, 1869 const struct rte_flow_item *pattern, 1870 const struct rte_flow_action *actions, 1871 const struct tunnel_ops *tunnel_ops) 1872 { 1873 struct rte_flow_error error; 1874 struct port_flow_tunnel *pft = NULL; 1875 1876 /* Poisoning to make sure PMDs update it in case of error. */ 1877 memset(&error, 0x11, sizeof(error)); 1878 if (tunnel_ops->enabled) { 1879 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 1880 actions, tunnel_ops); 1881 if (!pft) 1882 return -ENOENT; 1883 if (pft->items) 1884 pattern = pft->items; 1885 if (pft->actions) 1886 actions = pft->actions; 1887 } 1888 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1889 return port_flow_complain(&error); 1890 if (tunnel_ops->enabled) 1891 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 1892 printf("Flow rule validated\n"); 1893 return 0; 1894 } 1895 1896 /** Return age action structure if exists, otherwise NULL. */ 1897 static struct rte_flow_action_age * 1898 age_action_get(const struct rte_flow_action *actions) 1899 { 1900 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1901 switch (actions->type) { 1902 case RTE_FLOW_ACTION_TYPE_AGE: 1903 return (struct rte_flow_action_age *) 1904 (uintptr_t)actions->conf; 1905 default: 1906 break; 1907 } 1908 } 1909 return NULL; 1910 } 1911 1912 /** Create flow rule. */ 1913 int 1914 port_flow_create(portid_t port_id, 1915 const struct rte_flow_attr *attr, 1916 const struct rte_flow_item *pattern, 1917 const struct rte_flow_action *actions, 1918 const struct tunnel_ops *tunnel_ops) 1919 { 1920 struct rte_flow *flow; 1921 struct rte_port *port; 1922 struct port_flow *pf; 1923 uint32_t id = 0; 1924 struct rte_flow_error error; 1925 struct port_flow_tunnel *pft = NULL; 1926 struct rte_flow_action_age *age = age_action_get(actions); 1927 1928 port = &ports[port_id]; 1929 if (port->flow_list) { 1930 if (port->flow_list->id == UINT32_MAX) { 1931 fprintf(stderr, 1932 "Highest rule ID is already assigned, delete it first"); 1933 return -ENOMEM; 1934 } 1935 id = port->flow_list->id + 1; 1936 } 1937 if (tunnel_ops->enabled) { 1938 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 1939 actions, tunnel_ops); 1940 if (!pft) 1941 return -ENOENT; 1942 if (pft->items) 1943 pattern = pft->items; 1944 if (pft->actions) 1945 actions = pft->actions; 1946 } 1947 pf = port_flow_new(attr, pattern, actions, &error); 1948 if (!pf) 1949 return port_flow_complain(&error); 1950 if (age) { 1951 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 1952 age->context = &pf->age_type; 1953 } 1954 /* Poisoning to make sure PMDs update it in case of error. */ 1955 memset(&error, 0x22, sizeof(error)); 1956 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1957 if (!flow) { 1958 if (tunnel_ops->enabled) 1959 port_flow_tunnel_offload_cmd_release(port_id, 1960 tunnel_ops, pft); 1961 free(pf); 1962 return port_flow_complain(&error); 1963 } 1964 pf->next = port->flow_list; 1965 pf->id = id; 1966 pf->flow = flow; 1967 port->flow_list = pf; 1968 if (tunnel_ops->enabled) 1969 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 1970 printf("Flow rule #%u created\n", pf->id); 1971 return 0; 1972 } 1973 1974 /** Destroy a number of flow rules. */ 1975 int 1976 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1977 { 1978 struct rte_port *port; 1979 struct port_flow **tmp; 1980 uint32_t c = 0; 1981 int ret = 0; 1982 1983 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1984 port_id == (portid_t)RTE_PORT_ALL) 1985 return -EINVAL; 1986 port = &ports[port_id]; 1987 tmp = &port->flow_list; 1988 while (*tmp) { 1989 uint32_t i; 1990 1991 for (i = 0; i != n; ++i) { 1992 struct rte_flow_error error; 1993 struct port_flow *pf = *tmp; 1994 1995 if (rule[i] != pf->id) 1996 continue; 1997 /* 1998 * Poisoning to make sure PMDs update it in case 1999 * of error. 2000 */ 2001 memset(&error, 0x33, sizeof(error)); 2002 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2003 ret = port_flow_complain(&error); 2004 continue; 2005 } 2006 printf("Flow rule #%u destroyed\n", pf->id); 2007 *tmp = pf->next; 2008 free(pf); 2009 break; 2010 } 2011 if (i == n) 2012 tmp = &(*tmp)->next; 2013 ++c; 2014 } 2015 return ret; 2016 } 2017 2018 /** Remove all flow rules. */ 2019 int 2020 port_flow_flush(portid_t port_id) 2021 { 2022 struct rte_flow_error error; 2023 struct rte_port *port; 2024 int ret = 0; 2025 2026 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2027 port_id == (portid_t)RTE_PORT_ALL) 2028 return -EINVAL; 2029 2030 port = &ports[port_id]; 2031 2032 if (port->flow_list == NULL) 2033 return ret; 2034 2035 /* Poisoning to make sure PMDs update it in case of error. */ 2036 memset(&error, 0x44, sizeof(error)); 2037 if (rte_flow_flush(port_id, &error)) { 2038 port_flow_complain(&error); 2039 } 2040 2041 while (port->flow_list) { 2042 struct port_flow *pf = port->flow_list->next; 2043 2044 free(port->flow_list); 2045 port->flow_list = pf; 2046 } 2047 return ret; 2048 } 2049 2050 /** Dump flow rules. */ 2051 int 2052 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 2053 const char *file_name) 2054 { 2055 int ret = 0; 2056 FILE *file = stdout; 2057 struct rte_flow_error error; 2058 struct rte_port *port; 2059 struct port_flow *pflow; 2060 struct rte_flow *tmpFlow = NULL; 2061 bool found = false; 2062 2063 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2064 port_id == (portid_t)RTE_PORT_ALL) 2065 return -EINVAL; 2066 2067 if (!dump_all) { 2068 port = &ports[port_id]; 2069 pflow = port->flow_list; 2070 while (pflow) { 2071 if (rule_id != pflow->id) { 2072 pflow = pflow->next; 2073 } else { 2074 tmpFlow = pflow->flow; 2075 if (tmpFlow) 2076 found = true; 2077 break; 2078 } 2079 } 2080 if (found == false) { 2081 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 2082 return -EINVAL; 2083 } 2084 } 2085 2086 if (file_name && strlen(file_name)) { 2087 file = fopen(file_name, "w"); 2088 if (!file) { 2089 fprintf(stderr, "Failed to create file %s: %s\n", 2090 file_name, strerror(errno)); 2091 return -errno; 2092 } 2093 } 2094 2095 if (!dump_all) 2096 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 2097 else 2098 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 2099 if (ret) { 2100 port_flow_complain(&error); 2101 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 2102 } else 2103 printf("Flow dump finished\n"); 2104 if (file_name && strlen(file_name)) 2105 fclose(file); 2106 return ret; 2107 } 2108 2109 /** Query a flow rule. */ 2110 int 2111 port_flow_query(portid_t port_id, uint32_t rule, 2112 const struct rte_flow_action *action) 2113 { 2114 struct rte_flow_error error; 2115 struct rte_port *port; 2116 struct port_flow *pf; 2117 const char *name; 2118 union { 2119 struct rte_flow_query_count count; 2120 struct rte_flow_action_rss rss_conf; 2121 struct rte_flow_query_age age; 2122 } query; 2123 int ret; 2124 2125 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2126 port_id == (portid_t)RTE_PORT_ALL) 2127 return -EINVAL; 2128 port = &ports[port_id]; 2129 for (pf = port->flow_list; pf; pf = pf->next) 2130 if (pf->id == rule) 2131 break; 2132 if (!pf) { 2133 fprintf(stderr, "Flow rule #%u not found\n", rule); 2134 return -ENOENT; 2135 } 2136 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2137 &name, sizeof(name), 2138 (void *)(uintptr_t)action->type, &error); 2139 if (ret < 0) 2140 return port_flow_complain(&error); 2141 switch (action->type) { 2142 case RTE_FLOW_ACTION_TYPE_COUNT: 2143 case RTE_FLOW_ACTION_TYPE_RSS: 2144 case RTE_FLOW_ACTION_TYPE_AGE: 2145 break; 2146 default: 2147 fprintf(stderr, "Cannot query action type %d (%s)\n", 2148 action->type, name); 2149 return -ENOTSUP; 2150 } 2151 /* Poisoning to make sure PMDs update it in case of error. */ 2152 memset(&error, 0x55, sizeof(error)); 2153 memset(&query, 0, sizeof(query)); 2154 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2155 return port_flow_complain(&error); 2156 switch (action->type) { 2157 case RTE_FLOW_ACTION_TYPE_COUNT: 2158 printf("%s:\n" 2159 " hits_set: %u\n" 2160 " bytes_set: %u\n" 2161 " hits: %" PRIu64 "\n" 2162 " bytes: %" PRIu64 "\n", 2163 name, 2164 query.count.hits_set, 2165 query.count.bytes_set, 2166 query.count.hits, 2167 query.count.bytes); 2168 break; 2169 case RTE_FLOW_ACTION_TYPE_RSS: 2170 rss_config_display(&query.rss_conf); 2171 break; 2172 case RTE_FLOW_ACTION_TYPE_AGE: 2173 printf("%s:\n" 2174 " aged: %u\n" 2175 " sec_since_last_hit_valid: %u\n" 2176 " sec_since_last_hit: %" PRIu32 "\n", 2177 name, 2178 query.age.aged, 2179 query.age.sec_since_last_hit_valid, 2180 query.age.sec_since_last_hit); 2181 break; 2182 default: 2183 fprintf(stderr, 2184 "Cannot display result for action type %d (%s)\n", 2185 action->type, name); 2186 break; 2187 } 2188 return 0; 2189 } 2190 2191 /** List simply and destroy all aged flows. */ 2192 void 2193 port_flow_aged(portid_t port_id, uint8_t destroy) 2194 { 2195 void **contexts; 2196 int nb_context, total = 0, idx; 2197 struct rte_flow_error error; 2198 enum age_action_context_type *type; 2199 union { 2200 struct port_flow *pf; 2201 struct port_indirect_action *pia; 2202 } ctx; 2203 2204 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2205 port_id == (portid_t)RTE_PORT_ALL) 2206 return; 2207 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2208 printf("Port %u total aged flows: %d\n", port_id, total); 2209 if (total < 0) { 2210 port_flow_complain(&error); 2211 return; 2212 } 2213 if (total == 0) 2214 return; 2215 contexts = malloc(sizeof(void *) * total); 2216 if (contexts == NULL) { 2217 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 2218 return; 2219 } 2220 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 2221 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2222 if (nb_context != total) { 2223 fprintf(stderr, 2224 "Port:%d get aged flows count(%d) != total(%d)\n", 2225 port_id, nb_context, total); 2226 free(contexts); 2227 return; 2228 } 2229 total = 0; 2230 for (idx = 0; idx < nb_context; idx++) { 2231 if (!contexts[idx]) { 2232 fprintf(stderr, "Error: get Null context in port %u\n", 2233 port_id); 2234 continue; 2235 } 2236 type = (enum age_action_context_type *)contexts[idx]; 2237 switch (*type) { 2238 case ACTION_AGE_CONTEXT_TYPE_FLOW: 2239 ctx.pf = container_of(type, struct port_flow, age_type); 2240 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 2241 "\t%c%c%c\t\n", 2242 "Flow", 2243 ctx.pf->id, 2244 ctx.pf->rule.attr->group, 2245 ctx.pf->rule.attr->priority, 2246 ctx.pf->rule.attr->ingress ? 'i' : '-', 2247 ctx.pf->rule.attr->egress ? 'e' : '-', 2248 ctx.pf->rule.attr->transfer ? 't' : '-'); 2249 if (destroy && !port_flow_destroy(port_id, 1, 2250 &ctx.pf->id)) 2251 total++; 2252 break; 2253 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 2254 ctx.pia = container_of(type, 2255 struct port_indirect_action, age_type); 2256 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 2257 ctx.pia->id); 2258 break; 2259 default: 2260 fprintf(stderr, "Error: invalid context type %u\n", 2261 port_id); 2262 break; 2263 } 2264 } 2265 printf("\n%d flows destroyed\n", total); 2266 free(contexts); 2267 } 2268 2269 /** List flow rules. */ 2270 void 2271 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2272 { 2273 struct rte_port *port; 2274 struct port_flow *pf; 2275 struct port_flow *list = NULL; 2276 uint32_t i; 2277 2278 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2279 port_id == (portid_t)RTE_PORT_ALL) 2280 return; 2281 port = &ports[port_id]; 2282 if (!port->flow_list) 2283 return; 2284 /* Sort flows by group, priority and ID. */ 2285 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2286 struct port_flow **tmp; 2287 const struct rte_flow_attr *curr = pf->rule.attr; 2288 2289 if (n) { 2290 /* Filter out unwanted groups. */ 2291 for (i = 0; i != n; ++i) 2292 if (curr->group == group[i]) 2293 break; 2294 if (i == n) 2295 continue; 2296 } 2297 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2298 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2299 2300 if (curr->group > comp->group || 2301 (curr->group == comp->group && 2302 curr->priority > comp->priority) || 2303 (curr->group == comp->group && 2304 curr->priority == comp->priority && 2305 pf->id > (*tmp)->id)) 2306 continue; 2307 break; 2308 } 2309 pf->tmp = *tmp; 2310 *tmp = pf; 2311 } 2312 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2313 for (pf = list; pf != NULL; pf = pf->tmp) { 2314 const struct rte_flow_item *item = pf->rule.pattern; 2315 const struct rte_flow_action *action = pf->rule.actions; 2316 const char *name; 2317 2318 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2319 pf->id, 2320 pf->rule.attr->group, 2321 pf->rule.attr->priority, 2322 pf->rule.attr->ingress ? 'i' : '-', 2323 pf->rule.attr->egress ? 'e' : '-', 2324 pf->rule.attr->transfer ? 't' : '-'); 2325 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2326 if ((uint32_t)item->type > INT_MAX) 2327 name = "PMD_INTERNAL"; 2328 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2329 &name, sizeof(name), 2330 (void *)(uintptr_t)item->type, 2331 NULL) <= 0) 2332 name = "[UNKNOWN]"; 2333 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2334 printf("%s ", name); 2335 ++item; 2336 } 2337 printf("=>"); 2338 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2339 if ((uint32_t)action->type > INT_MAX) 2340 name = "PMD_INTERNAL"; 2341 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2342 &name, sizeof(name), 2343 (void *)(uintptr_t)action->type, 2344 NULL) <= 0) 2345 name = "[UNKNOWN]"; 2346 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2347 printf(" %s", name); 2348 ++action; 2349 } 2350 printf("\n"); 2351 } 2352 } 2353 2354 /** Restrict ingress traffic to the defined flow rules. */ 2355 int 2356 port_flow_isolate(portid_t port_id, int set) 2357 { 2358 struct rte_flow_error error; 2359 2360 /* Poisoning to make sure PMDs update it in case of error. */ 2361 memset(&error, 0x66, sizeof(error)); 2362 if (rte_flow_isolate(port_id, set, &error)) 2363 return port_flow_complain(&error); 2364 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2365 port_id, 2366 set ? "now restricted" : "not restricted anymore"); 2367 return 0; 2368 } 2369 2370 /* 2371 * RX/TX ring descriptors display functions. 2372 */ 2373 int 2374 rx_queue_id_is_invalid(queueid_t rxq_id) 2375 { 2376 if (rxq_id < nb_rxq) 2377 return 0; 2378 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 2379 rxq_id, nb_rxq); 2380 return 1; 2381 } 2382 2383 int 2384 tx_queue_id_is_invalid(queueid_t txq_id) 2385 { 2386 if (txq_id < nb_txq) 2387 return 0; 2388 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 2389 txq_id, nb_txq); 2390 return 1; 2391 } 2392 2393 static int 2394 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2395 { 2396 struct rte_port *port = &ports[port_id]; 2397 struct rte_eth_rxq_info rx_qinfo; 2398 int ret; 2399 2400 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2401 if (ret == 0) { 2402 *ring_size = rx_qinfo.nb_desc; 2403 return ret; 2404 } 2405 2406 if (ret != -ENOTSUP) 2407 return ret; 2408 /* 2409 * If the rte_eth_rx_queue_info_get is not support for this PMD, 2410 * ring_size stored in testpmd will be used for validity verification. 2411 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 2412 * being 0, it will use a default value provided by PMDs to setup this 2413 * rxq. If the default value is 0, it will use the 2414 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 2415 */ 2416 if (port->nb_rx_desc[rxq_id]) 2417 *ring_size = port->nb_rx_desc[rxq_id]; 2418 else if (port->dev_info.default_rxportconf.ring_size) 2419 *ring_size = port->dev_info.default_rxportconf.ring_size; 2420 else 2421 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2422 return 0; 2423 } 2424 2425 static int 2426 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2427 { 2428 struct rte_port *port = &ports[port_id]; 2429 struct rte_eth_txq_info tx_qinfo; 2430 int ret; 2431 2432 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2433 if (ret == 0) { 2434 *ring_size = tx_qinfo.nb_desc; 2435 return ret; 2436 } 2437 2438 if (ret != -ENOTSUP) 2439 return ret; 2440 /* 2441 * If the rte_eth_tx_queue_info_get is not support for this PMD, 2442 * ring_size stored in testpmd will be used for validity verification. 2443 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 2444 * being 0, it will use a default value provided by PMDs to setup this 2445 * txq. If the default value is 0, it will use the 2446 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 2447 */ 2448 if (port->nb_tx_desc[txq_id]) 2449 *ring_size = port->nb_tx_desc[txq_id]; 2450 else if (port->dev_info.default_txportconf.ring_size) 2451 *ring_size = port->dev_info.default_txportconf.ring_size; 2452 else 2453 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2454 return 0; 2455 } 2456 2457 static int 2458 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2459 { 2460 uint16_t ring_size; 2461 int ret; 2462 2463 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2464 if (ret) 2465 return 1; 2466 2467 if (rxdesc_id < ring_size) 2468 return 0; 2469 2470 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 2471 rxdesc_id, ring_size); 2472 return 1; 2473 } 2474 2475 static int 2476 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2477 { 2478 uint16_t ring_size; 2479 int ret; 2480 2481 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2482 if (ret) 2483 return 1; 2484 2485 if (txdesc_id < ring_size) 2486 return 0; 2487 2488 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 2489 txdesc_id, ring_size); 2490 return 1; 2491 } 2492 2493 static const struct rte_memzone * 2494 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2495 { 2496 char mz_name[RTE_MEMZONE_NAMESIZE]; 2497 const struct rte_memzone *mz; 2498 2499 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2500 port_id, q_id, ring_name); 2501 mz = rte_memzone_lookup(mz_name); 2502 if (mz == NULL) 2503 fprintf(stderr, 2504 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 2505 ring_name, port_id, q_id, mz_name); 2506 return mz; 2507 } 2508 2509 union igb_ring_dword { 2510 uint64_t dword; 2511 struct { 2512 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2513 uint32_t lo; 2514 uint32_t hi; 2515 #else 2516 uint32_t hi; 2517 uint32_t lo; 2518 #endif 2519 } words; 2520 }; 2521 2522 struct igb_ring_desc_32_bytes { 2523 union igb_ring_dword lo_dword; 2524 union igb_ring_dword hi_dword; 2525 union igb_ring_dword resv1; 2526 union igb_ring_dword resv2; 2527 }; 2528 2529 struct igb_ring_desc_16_bytes { 2530 union igb_ring_dword lo_dword; 2531 union igb_ring_dword hi_dword; 2532 }; 2533 2534 static void 2535 ring_rxd_display_dword(union igb_ring_dword dword) 2536 { 2537 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2538 (unsigned)dword.words.hi); 2539 } 2540 2541 static void 2542 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2543 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2544 portid_t port_id, 2545 #else 2546 __rte_unused portid_t port_id, 2547 #endif 2548 uint16_t desc_id) 2549 { 2550 struct igb_ring_desc_16_bytes *ring = 2551 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2552 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2553 int ret; 2554 struct rte_eth_dev_info dev_info; 2555 2556 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2557 if (ret != 0) 2558 return; 2559 2560 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2561 /* 32 bytes RX descriptor, i40e only */ 2562 struct igb_ring_desc_32_bytes *ring = 2563 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2564 ring[desc_id].lo_dword.dword = 2565 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2566 ring_rxd_display_dword(ring[desc_id].lo_dword); 2567 ring[desc_id].hi_dword.dword = 2568 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2569 ring_rxd_display_dword(ring[desc_id].hi_dword); 2570 ring[desc_id].resv1.dword = 2571 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2572 ring_rxd_display_dword(ring[desc_id].resv1); 2573 ring[desc_id].resv2.dword = 2574 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2575 ring_rxd_display_dword(ring[desc_id].resv2); 2576 2577 return; 2578 } 2579 #endif 2580 /* 16 bytes RX descriptor */ 2581 ring[desc_id].lo_dword.dword = 2582 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2583 ring_rxd_display_dword(ring[desc_id].lo_dword); 2584 ring[desc_id].hi_dword.dword = 2585 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2586 ring_rxd_display_dword(ring[desc_id].hi_dword); 2587 } 2588 2589 static void 2590 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2591 { 2592 struct igb_ring_desc_16_bytes *ring; 2593 struct igb_ring_desc_16_bytes txd; 2594 2595 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2596 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2597 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2598 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2599 (unsigned)txd.lo_dword.words.lo, 2600 (unsigned)txd.lo_dword.words.hi, 2601 (unsigned)txd.hi_dword.words.lo, 2602 (unsigned)txd.hi_dword.words.hi); 2603 } 2604 2605 void 2606 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2607 { 2608 const struct rte_memzone *rx_mz; 2609 2610 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2611 return; 2612 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2613 if (rx_mz == NULL) 2614 return; 2615 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2616 } 2617 2618 void 2619 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2620 { 2621 const struct rte_memzone *tx_mz; 2622 2623 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2624 return; 2625 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2626 if (tx_mz == NULL) 2627 return; 2628 ring_tx_descriptor_display(tx_mz, txd_id); 2629 } 2630 2631 void 2632 fwd_lcores_config_display(void) 2633 { 2634 lcoreid_t lc_id; 2635 2636 printf("List of forwarding lcores:"); 2637 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2638 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2639 printf("\n"); 2640 } 2641 void 2642 rxtx_config_display(void) 2643 { 2644 portid_t pid; 2645 queueid_t qid; 2646 2647 printf(" %s packet forwarding%s packets/burst=%d\n", 2648 cur_fwd_eng->fwd_mode_name, 2649 retry_enabled == 0 ? "" : " with retry", 2650 nb_pkt_per_burst); 2651 2652 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2653 printf(" packet len=%u - nb packet segments=%d\n", 2654 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2655 2656 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2657 nb_fwd_lcores, nb_fwd_ports); 2658 2659 RTE_ETH_FOREACH_DEV(pid) { 2660 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2661 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2662 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2663 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2664 struct rte_eth_rxq_info rx_qinfo; 2665 struct rte_eth_txq_info tx_qinfo; 2666 uint16_t rx_free_thresh_tmp; 2667 uint16_t tx_free_thresh_tmp; 2668 uint16_t tx_rs_thresh_tmp; 2669 uint16_t nb_rx_desc_tmp; 2670 uint16_t nb_tx_desc_tmp; 2671 uint64_t offloads_tmp; 2672 uint8_t pthresh_tmp; 2673 uint8_t hthresh_tmp; 2674 uint8_t wthresh_tmp; 2675 int32_t rc; 2676 2677 /* per port config */ 2678 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2679 (unsigned int)pid, nb_rxq, nb_txq); 2680 2681 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2682 ports[pid].dev_conf.rxmode.offloads, 2683 ports[pid].dev_conf.txmode.offloads); 2684 2685 /* per rx queue config only for first queue to be less verbose */ 2686 for (qid = 0; qid < 1; qid++) { 2687 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2688 if (rc) { 2689 nb_rx_desc_tmp = nb_rx_desc[qid]; 2690 rx_free_thresh_tmp = 2691 rx_conf[qid].rx_free_thresh; 2692 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2693 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2694 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2695 offloads_tmp = rx_conf[qid].offloads; 2696 } else { 2697 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2698 rx_free_thresh_tmp = 2699 rx_qinfo.conf.rx_free_thresh; 2700 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2701 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2702 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2703 offloads_tmp = rx_qinfo.conf.offloads; 2704 } 2705 2706 printf(" RX queue: %d\n", qid); 2707 printf(" RX desc=%d - RX free threshold=%d\n", 2708 nb_rx_desc_tmp, rx_free_thresh_tmp); 2709 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2710 " wthresh=%d\n", 2711 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2712 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); 2713 } 2714 2715 /* per tx queue config only for first queue to be less verbose */ 2716 for (qid = 0; qid < 1; qid++) { 2717 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2718 if (rc) { 2719 nb_tx_desc_tmp = nb_tx_desc[qid]; 2720 tx_free_thresh_tmp = 2721 tx_conf[qid].tx_free_thresh; 2722 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2723 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2724 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2725 offloads_tmp = tx_conf[qid].offloads; 2726 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2727 } else { 2728 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2729 tx_free_thresh_tmp = 2730 tx_qinfo.conf.tx_free_thresh; 2731 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2732 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2733 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2734 offloads_tmp = tx_qinfo.conf.offloads; 2735 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2736 } 2737 2738 printf(" TX queue: %d\n", qid); 2739 printf(" TX desc=%d - TX free threshold=%d\n", 2740 nb_tx_desc_tmp, tx_free_thresh_tmp); 2741 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2742 " wthresh=%d\n", 2743 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2744 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2745 offloads_tmp, tx_rs_thresh_tmp); 2746 } 2747 } 2748 } 2749 2750 void 2751 port_rss_reta_info(portid_t port_id, 2752 struct rte_eth_rss_reta_entry64 *reta_conf, 2753 uint16_t nb_entries) 2754 { 2755 uint16_t i, idx, shift; 2756 int ret; 2757 2758 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2759 return; 2760 2761 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2762 if (ret != 0) { 2763 fprintf(stderr, 2764 "Failed to get RSS RETA info, return code = %d\n", 2765 ret); 2766 return; 2767 } 2768 2769 for (i = 0; i < nb_entries; i++) { 2770 idx = i / RTE_RETA_GROUP_SIZE; 2771 shift = i % RTE_RETA_GROUP_SIZE; 2772 if (!(reta_conf[idx].mask & (1ULL << shift))) 2773 continue; 2774 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2775 i, reta_conf[idx].reta[shift]); 2776 } 2777 } 2778 2779 /* 2780 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 2781 * key of the port. 2782 */ 2783 void 2784 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2785 { 2786 struct rte_eth_rss_conf rss_conf = {0}; 2787 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2788 uint64_t rss_hf; 2789 uint8_t i; 2790 int diag; 2791 struct rte_eth_dev_info dev_info; 2792 uint8_t hash_key_size; 2793 int ret; 2794 2795 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2796 return; 2797 2798 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2799 if (ret != 0) 2800 return; 2801 2802 if (dev_info.hash_key_size > 0 && 2803 dev_info.hash_key_size <= sizeof(rss_key)) 2804 hash_key_size = dev_info.hash_key_size; 2805 else { 2806 fprintf(stderr, 2807 "dev_info did not provide a valid hash key size\n"); 2808 return; 2809 } 2810 2811 /* Get RSS hash key if asked to display it */ 2812 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 2813 rss_conf.rss_key_len = hash_key_size; 2814 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2815 if (diag != 0) { 2816 switch (diag) { 2817 case -ENODEV: 2818 fprintf(stderr, "port index %d invalid\n", port_id); 2819 break; 2820 case -ENOTSUP: 2821 fprintf(stderr, "operation not supported by device\n"); 2822 break; 2823 default: 2824 fprintf(stderr, "operation failed - diag=%d\n", diag); 2825 break; 2826 } 2827 return; 2828 } 2829 rss_hf = rss_conf.rss_hf; 2830 if (rss_hf == 0) { 2831 printf("RSS disabled\n"); 2832 return; 2833 } 2834 printf("RSS functions:\n "); 2835 for (i = 0; rss_type_table[i].str; i++) { 2836 if (rss_hf & rss_type_table[i].rss_type) 2837 printf("%s ", rss_type_table[i].str); 2838 } 2839 printf("\n"); 2840 if (!show_rss_key) 2841 return; 2842 printf("RSS key:\n"); 2843 for (i = 0; i < hash_key_size; i++) 2844 printf("%02X", rss_key[i]); 2845 printf("\n"); 2846 } 2847 2848 void 2849 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2850 uint8_t hash_key_len) 2851 { 2852 struct rte_eth_rss_conf rss_conf; 2853 int diag; 2854 unsigned int i; 2855 2856 rss_conf.rss_key = NULL; 2857 rss_conf.rss_key_len = hash_key_len; 2858 rss_conf.rss_hf = 0; 2859 for (i = 0; rss_type_table[i].str; i++) { 2860 if (!strcmp(rss_type_table[i].str, rss_type)) 2861 rss_conf.rss_hf = rss_type_table[i].rss_type; 2862 } 2863 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2864 if (diag == 0) { 2865 rss_conf.rss_key = hash_key; 2866 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2867 } 2868 if (diag == 0) 2869 return; 2870 2871 switch (diag) { 2872 case -ENODEV: 2873 fprintf(stderr, "port index %d invalid\n", port_id); 2874 break; 2875 case -ENOTSUP: 2876 fprintf(stderr, "operation not supported by device\n"); 2877 break; 2878 default: 2879 fprintf(stderr, "operation failed - diag=%d\n", diag); 2880 break; 2881 } 2882 } 2883 2884 /* 2885 * Setup forwarding configuration for each logical core. 2886 */ 2887 static void 2888 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2889 { 2890 streamid_t nb_fs_per_lcore; 2891 streamid_t nb_fs; 2892 streamid_t sm_id; 2893 lcoreid_t nb_extra; 2894 lcoreid_t nb_fc; 2895 lcoreid_t nb_lc; 2896 lcoreid_t lc_id; 2897 2898 nb_fs = cfg->nb_fwd_streams; 2899 nb_fc = cfg->nb_fwd_lcores; 2900 if (nb_fs <= nb_fc) { 2901 nb_fs_per_lcore = 1; 2902 nb_extra = 0; 2903 } else { 2904 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2905 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2906 } 2907 2908 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2909 sm_id = 0; 2910 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2911 fwd_lcores[lc_id]->stream_idx = sm_id; 2912 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2913 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2914 } 2915 2916 /* 2917 * Assign extra remaining streams, if any. 2918 */ 2919 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2920 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2921 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2922 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2923 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2924 } 2925 } 2926 2927 static portid_t 2928 fwd_topology_tx_port_get(portid_t rxp) 2929 { 2930 static int warning_once = 1; 2931 2932 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2933 2934 switch (port_topology) { 2935 default: 2936 case PORT_TOPOLOGY_PAIRED: 2937 if ((rxp & 0x1) == 0) { 2938 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2939 return rxp + 1; 2940 if (warning_once) { 2941 fprintf(stderr, 2942 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 2943 warning_once = 0; 2944 } 2945 return rxp; 2946 } 2947 return rxp - 1; 2948 case PORT_TOPOLOGY_CHAINED: 2949 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2950 case PORT_TOPOLOGY_LOOP: 2951 return rxp; 2952 } 2953 } 2954 2955 static void 2956 simple_fwd_config_setup(void) 2957 { 2958 portid_t i; 2959 2960 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2961 cur_fwd_config.nb_fwd_streams = 2962 (streamid_t) cur_fwd_config.nb_fwd_ports; 2963 2964 /* reinitialize forwarding streams */ 2965 init_fwd_streams(); 2966 2967 /* 2968 * In the simple forwarding test, the number of forwarding cores 2969 * must be lower or equal to the number of forwarding ports. 2970 */ 2971 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2972 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2973 cur_fwd_config.nb_fwd_lcores = 2974 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2975 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2976 2977 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2978 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2979 fwd_streams[i]->rx_queue = 0; 2980 fwd_streams[i]->tx_port = 2981 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2982 fwd_streams[i]->tx_queue = 0; 2983 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2984 fwd_streams[i]->retry_enabled = retry_enabled; 2985 } 2986 } 2987 2988 /** 2989 * For the RSS forwarding test all streams distributed over lcores. Each stream 2990 * being composed of a RX queue to poll on a RX port for input messages, 2991 * associated with a TX queue of a TX port where to send forwarded packets. 2992 */ 2993 static void 2994 rss_fwd_config_setup(void) 2995 { 2996 portid_t rxp; 2997 portid_t txp; 2998 queueid_t rxq; 2999 queueid_t nb_q; 3000 streamid_t sm_id; 3001 3002 nb_q = nb_rxq; 3003 if (nb_q > nb_txq) 3004 nb_q = nb_txq; 3005 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3006 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3007 cur_fwd_config.nb_fwd_streams = 3008 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 3009 3010 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3011 cur_fwd_config.nb_fwd_lcores = 3012 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3013 3014 /* reinitialize forwarding streams */ 3015 init_fwd_streams(); 3016 3017 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3018 rxp = 0; rxq = 0; 3019 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 3020 struct fwd_stream *fs; 3021 3022 fs = fwd_streams[sm_id]; 3023 txp = fwd_topology_tx_port_get(rxp); 3024 fs->rx_port = fwd_ports_ids[rxp]; 3025 fs->rx_queue = rxq; 3026 fs->tx_port = fwd_ports_ids[txp]; 3027 fs->tx_queue = rxq; 3028 fs->peer_addr = fs->tx_port; 3029 fs->retry_enabled = retry_enabled; 3030 rxp++; 3031 if (rxp < nb_fwd_ports) 3032 continue; 3033 rxp = 0; 3034 rxq++; 3035 } 3036 } 3037 3038 static uint16_t 3039 get_fwd_port_total_tc_num(void) 3040 { 3041 struct rte_eth_dcb_info dcb_info; 3042 uint16_t total_tc_num = 0; 3043 unsigned int i; 3044 3045 for (i = 0; i < nb_fwd_ports; i++) { 3046 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 3047 total_tc_num += dcb_info.nb_tcs; 3048 } 3049 3050 return total_tc_num; 3051 } 3052 3053 /** 3054 * For the DCB forwarding test, each core is assigned on each traffic class. 3055 * 3056 * Each core is assigned a multi-stream, each stream being composed of 3057 * a RX queue to poll on a RX port for input messages, associated with 3058 * a TX queue of a TX port where to send forwarded packets. All RX and 3059 * TX queues are mapping to the same traffic class. 3060 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 3061 * the same core 3062 */ 3063 static void 3064 dcb_fwd_config_setup(void) 3065 { 3066 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3067 portid_t txp, rxp = 0; 3068 queueid_t txq, rxq = 0; 3069 lcoreid_t lc_id; 3070 uint16_t nb_rx_queue, nb_tx_queue; 3071 uint16_t i, j, k, sm_id = 0; 3072 uint16_t total_tc_num; 3073 struct rte_port *port; 3074 uint8_t tc = 0; 3075 portid_t pid; 3076 int ret; 3077 3078 /* 3079 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 3080 * or RTE_PORT_STOPPED. 3081 * 3082 * Re-configure ports to get updated mapping between tc and queue in 3083 * case the queue number of the port is changed. Skip for started ports 3084 * since modifying queue number and calling dev_configure need to stop 3085 * ports first. 3086 */ 3087 for (pid = 0; pid < nb_fwd_ports; pid++) { 3088 if (port_is_started(pid) == 1) 3089 continue; 3090 3091 port = &ports[pid]; 3092 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 3093 &port->dev_conf); 3094 if (ret < 0) { 3095 fprintf(stderr, 3096 "Failed to re-configure port %d, ret = %d.\n", 3097 pid, ret); 3098 return; 3099 } 3100 } 3101 3102 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3103 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3104 cur_fwd_config.nb_fwd_streams = 3105 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3106 total_tc_num = get_fwd_port_total_tc_num(); 3107 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 3108 cur_fwd_config.nb_fwd_lcores = total_tc_num; 3109 3110 /* reinitialize forwarding streams */ 3111 init_fwd_streams(); 3112 sm_id = 0; 3113 txp = 1; 3114 /* get the dcb info on the first RX and TX ports */ 3115 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3116 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3117 3118 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3119 fwd_lcores[lc_id]->stream_nb = 0; 3120 fwd_lcores[lc_id]->stream_idx = sm_id; 3121 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 3122 /* if the nb_queue is zero, means this tc is 3123 * not enabled on the POOL 3124 */ 3125 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3126 break; 3127 k = fwd_lcores[lc_id]->stream_nb + 3128 fwd_lcores[lc_id]->stream_idx; 3129 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3130 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3131 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3132 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3133 for (j = 0; j < nb_rx_queue; j++) { 3134 struct fwd_stream *fs; 3135 3136 fs = fwd_streams[k + j]; 3137 fs->rx_port = fwd_ports_ids[rxp]; 3138 fs->rx_queue = rxq + j; 3139 fs->tx_port = fwd_ports_ids[txp]; 3140 fs->tx_queue = txq + j % nb_tx_queue; 3141 fs->peer_addr = fs->tx_port; 3142 fs->retry_enabled = retry_enabled; 3143 } 3144 fwd_lcores[lc_id]->stream_nb += 3145 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3146 } 3147 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3148 3149 tc++; 3150 if (tc < rxp_dcb_info.nb_tcs) 3151 continue; 3152 /* Restart from TC 0 on next RX port */ 3153 tc = 0; 3154 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3155 rxp = (portid_t) 3156 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3157 else 3158 rxp++; 3159 if (rxp >= nb_fwd_ports) 3160 return; 3161 /* get the dcb information on next RX and TX ports */ 3162 if ((rxp & 0x1) == 0) 3163 txp = (portid_t) (rxp + 1); 3164 else 3165 txp = (portid_t) (rxp - 1); 3166 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3167 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3168 } 3169 } 3170 3171 static void 3172 icmp_echo_config_setup(void) 3173 { 3174 portid_t rxp; 3175 queueid_t rxq; 3176 lcoreid_t lc_id; 3177 uint16_t sm_id; 3178 3179 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3180 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3181 (nb_txq * nb_fwd_ports); 3182 else 3183 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3184 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3185 cur_fwd_config.nb_fwd_streams = 3186 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3187 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3188 cur_fwd_config.nb_fwd_lcores = 3189 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3190 if (verbose_level > 0) { 3191 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 3192 __FUNCTION__, 3193 cur_fwd_config.nb_fwd_lcores, 3194 cur_fwd_config.nb_fwd_ports, 3195 cur_fwd_config.nb_fwd_streams); 3196 } 3197 3198 /* reinitialize forwarding streams */ 3199 init_fwd_streams(); 3200 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3201 rxp = 0; rxq = 0; 3202 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3203 if (verbose_level > 0) 3204 printf(" core=%d: \n", lc_id); 3205 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3206 struct fwd_stream *fs; 3207 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3208 fs->rx_port = fwd_ports_ids[rxp]; 3209 fs->rx_queue = rxq; 3210 fs->tx_port = fs->rx_port; 3211 fs->tx_queue = rxq; 3212 fs->peer_addr = fs->tx_port; 3213 fs->retry_enabled = retry_enabled; 3214 if (verbose_level > 0) 3215 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3216 sm_id, fs->rx_port, fs->rx_queue, 3217 fs->tx_queue); 3218 rxq = (queueid_t) (rxq + 1); 3219 if (rxq == nb_rxq) { 3220 rxq = 0; 3221 rxp = (portid_t) (rxp + 1); 3222 } 3223 } 3224 } 3225 } 3226 3227 void 3228 fwd_config_setup(void) 3229 { 3230 struct rte_port *port; 3231 portid_t pt_id; 3232 unsigned int i; 3233 3234 cur_fwd_config.fwd_eng = cur_fwd_eng; 3235 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3236 icmp_echo_config_setup(); 3237 return; 3238 } 3239 3240 if ((nb_rxq > 1) && (nb_txq > 1)){ 3241 if (dcb_config) { 3242 for (i = 0; i < nb_fwd_ports; i++) { 3243 pt_id = fwd_ports_ids[i]; 3244 port = &ports[pt_id]; 3245 if (!port->dcb_flag) { 3246 fprintf(stderr, 3247 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 3248 return; 3249 } 3250 } 3251 if (nb_fwd_lcores == 1) { 3252 fprintf(stderr, 3253 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 3254 return; 3255 } 3256 3257 dcb_fwd_config_setup(); 3258 } else 3259 rss_fwd_config_setup(); 3260 } 3261 else 3262 simple_fwd_config_setup(); 3263 } 3264 3265 static const char * 3266 mp_alloc_to_str(uint8_t mode) 3267 { 3268 switch (mode) { 3269 case MP_ALLOC_NATIVE: 3270 return "native"; 3271 case MP_ALLOC_ANON: 3272 return "anon"; 3273 case MP_ALLOC_XMEM: 3274 return "xmem"; 3275 case MP_ALLOC_XMEM_HUGE: 3276 return "xmemhuge"; 3277 case MP_ALLOC_XBUF: 3278 return "xbuf"; 3279 default: 3280 return "invalid"; 3281 } 3282 } 3283 3284 void 3285 pkt_fwd_config_display(struct fwd_config *cfg) 3286 { 3287 struct fwd_stream *fs; 3288 lcoreid_t lc_id; 3289 streamid_t sm_id; 3290 3291 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3292 "NUMA support %s, MP allocation mode: %s\n", 3293 cfg->fwd_eng->fwd_mode_name, 3294 retry_enabled == 0 ? "" : " with retry", 3295 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3296 numa_support == 1 ? "enabled" : "disabled", 3297 mp_alloc_to_str(mp_alloc_type)); 3298 3299 if (retry_enabled) 3300 printf("TX retry num: %u, delay between TX retries: %uus\n", 3301 burst_tx_retry_num, burst_tx_delay_time); 3302 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3303 printf("Logical Core %u (socket %u) forwards packets on " 3304 "%d streams:", 3305 fwd_lcores_cpuids[lc_id], 3306 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3307 fwd_lcores[lc_id]->stream_nb); 3308 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3309 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3310 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3311 "P=%d/Q=%d (socket %u) ", 3312 fs->rx_port, fs->rx_queue, 3313 ports[fs->rx_port].socket_id, 3314 fs->tx_port, fs->tx_queue, 3315 ports[fs->tx_port].socket_id); 3316 print_ethaddr("peer=", 3317 &peer_eth_addrs[fs->peer_addr]); 3318 } 3319 printf("\n"); 3320 } 3321 printf("\n"); 3322 } 3323 3324 void 3325 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3326 { 3327 struct rte_ether_addr new_peer_addr; 3328 if (!rte_eth_dev_is_valid_port(port_id)) { 3329 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 3330 return; 3331 } 3332 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3333 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 3334 peer_addr); 3335 return; 3336 } 3337 peer_eth_addrs[port_id] = new_peer_addr; 3338 } 3339 3340 int 3341 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3342 { 3343 unsigned int i; 3344 unsigned int lcore_cpuid; 3345 int record_now; 3346 3347 record_now = 0; 3348 again: 3349 for (i = 0; i < nb_lc; i++) { 3350 lcore_cpuid = lcorelist[i]; 3351 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3352 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 3353 return -1; 3354 } 3355 if (lcore_cpuid == rte_get_main_lcore()) { 3356 fprintf(stderr, 3357 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 3358 lcore_cpuid); 3359 return -1; 3360 } 3361 if (record_now) 3362 fwd_lcores_cpuids[i] = lcore_cpuid; 3363 } 3364 if (record_now == 0) { 3365 record_now = 1; 3366 goto again; 3367 } 3368 nb_cfg_lcores = (lcoreid_t) nb_lc; 3369 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3370 printf("previous number of forwarding cores %u - changed to " 3371 "number of configured cores %u\n", 3372 (unsigned int) nb_fwd_lcores, nb_lc); 3373 nb_fwd_lcores = (lcoreid_t) nb_lc; 3374 } 3375 3376 return 0; 3377 } 3378 3379 int 3380 set_fwd_lcores_mask(uint64_t lcoremask) 3381 { 3382 unsigned int lcorelist[64]; 3383 unsigned int nb_lc; 3384 unsigned int i; 3385 3386 if (lcoremask == 0) { 3387 fprintf(stderr, "Invalid NULL mask of cores\n"); 3388 return -1; 3389 } 3390 nb_lc = 0; 3391 for (i = 0; i < 64; i++) { 3392 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3393 continue; 3394 lcorelist[nb_lc++] = i; 3395 } 3396 return set_fwd_lcores_list(lcorelist, nb_lc); 3397 } 3398 3399 void 3400 set_fwd_lcores_number(uint16_t nb_lc) 3401 { 3402 if (test_done == 0) { 3403 fprintf(stderr, "Please stop forwarding first\n"); 3404 return; 3405 } 3406 if (nb_lc > nb_cfg_lcores) { 3407 fprintf(stderr, 3408 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 3409 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3410 return; 3411 } 3412 nb_fwd_lcores = (lcoreid_t) nb_lc; 3413 printf("Number of forwarding cores set to %u\n", 3414 (unsigned int) nb_fwd_lcores); 3415 } 3416 3417 void 3418 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3419 { 3420 unsigned int i; 3421 portid_t port_id; 3422 int record_now; 3423 3424 record_now = 0; 3425 again: 3426 for (i = 0; i < nb_pt; i++) { 3427 port_id = (portid_t) portlist[i]; 3428 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3429 return; 3430 if (record_now) 3431 fwd_ports_ids[i] = port_id; 3432 } 3433 if (record_now == 0) { 3434 record_now = 1; 3435 goto again; 3436 } 3437 nb_cfg_ports = (portid_t) nb_pt; 3438 if (nb_fwd_ports != (portid_t) nb_pt) { 3439 printf("previous number of forwarding ports %u - changed to " 3440 "number of configured ports %u\n", 3441 (unsigned int) nb_fwd_ports, nb_pt); 3442 nb_fwd_ports = (portid_t) nb_pt; 3443 } 3444 } 3445 3446 /** 3447 * Parse the user input and obtain the list of forwarding ports 3448 * 3449 * @param[in] list 3450 * String containing the user input. User can specify 3451 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3452 * For example, if the user wants to use all the available 3453 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3454 * If the user wants to use only the ports 1,2 then the input 3455 * is 1,2. 3456 * valid characters are '-' and ',' 3457 * @param[out] values 3458 * This array will be filled with a list of port IDs 3459 * based on the user input 3460 * Note that duplicate entries are discarded and only the first 3461 * count entries in this array are port IDs and all the rest 3462 * will contain default values 3463 * @param[in] maxsize 3464 * This parameter denotes 2 things 3465 * 1) Number of elements in the values array 3466 * 2) Maximum value of each element in the values array 3467 * @return 3468 * On success, returns total count of parsed port IDs 3469 * On failure, returns 0 3470 */ 3471 static unsigned int 3472 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3473 { 3474 unsigned int count = 0; 3475 char *end = NULL; 3476 int min, max; 3477 int value, i; 3478 unsigned int marked[maxsize]; 3479 3480 if (list == NULL || values == NULL) 3481 return 0; 3482 3483 for (i = 0; i < (int)maxsize; i++) 3484 marked[i] = 0; 3485 3486 min = INT_MAX; 3487 3488 do { 3489 /*Remove the blank spaces if any*/ 3490 while (isblank(*list)) 3491 list++; 3492 if (*list == '\0') 3493 break; 3494 errno = 0; 3495 value = strtol(list, &end, 10); 3496 if (errno || end == NULL) 3497 return 0; 3498 if (value < 0 || value >= (int)maxsize) 3499 return 0; 3500 while (isblank(*end)) 3501 end++; 3502 if (*end == '-' && min == INT_MAX) { 3503 min = value; 3504 } else if ((*end == ',') || (*end == '\0')) { 3505 max = value; 3506 if (min == INT_MAX) 3507 min = value; 3508 for (i = min; i <= max; i++) { 3509 if (count < maxsize) { 3510 if (marked[i]) 3511 continue; 3512 values[count] = i; 3513 marked[i] = 1; 3514 count++; 3515 } 3516 } 3517 min = INT_MAX; 3518 } else 3519 return 0; 3520 list = end + 1; 3521 } while (*end != '\0'); 3522 3523 return count; 3524 } 3525 3526 void 3527 parse_fwd_portlist(const char *portlist) 3528 { 3529 unsigned int portcount; 3530 unsigned int portindex[RTE_MAX_ETHPORTS]; 3531 unsigned int i, valid_port_count = 0; 3532 3533 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3534 if (!portcount) 3535 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3536 3537 /* 3538 * Here we verify the validity of the ports 3539 * and thereby calculate the total number of 3540 * valid ports 3541 */ 3542 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3543 if (rte_eth_dev_is_valid_port(portindex[i])) { 3544 portindex[valid_port_count] = portindex[i]; 3545 valid_port_count++; 3546 } 3547 } 3548 3549 set_fwd_ports_list(portindex, valid_port_count); 3550 } 3551 3552 void 3553 set_fwd_ports_mask(uint64_t portmask) 3554 { 3555 unsigned int portlist[64]; 3556 unsigned int nb_pt; 3557 unsigned int i; 3558 3559 if (portmask == 0) { 3560 fprintf(stderr, "Invalid NULL mask of ports\n"); 3561 return; 3562 } 3563 nb_pt = 0; 3564 RTE_ETH_FOREACH_DEV(i) { 3565 if (! ((uint64_t)(1ULL << i) & portmask)) 3566 continue; 3567 portlist[nb_pt++] = i; 3568 } 3569 set_fwd_ports_list(portlist, nb_pt); 3570 } 3571 3572 void 3573 set_fwd_ports_number(uint16_t nb_pt) 3574 { 3575 if (nb_pt > nb_cfg_ports) { 3576 fprintf(stderr, 3577 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 3578 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3579 return; 3580 } 3581 nb_fwd_ports = (portid_t) nb_pt; 3582 printf("Number of forwarding ports set to %u\n", 3583 (unsigned int) nb_fwd_ports); 3584 } 3585 3586 int 3587 port_is_forwarding(portid_t port_id) 3588 { 3589 unsigned int i; 3590 3591 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3592 return -1; 3593 3594 for (i = 0; i < nb_fwd_ports; i++) { 3595 if (fwd_ports_ids[i] == port_id) 3596 return 1; 3597 } 3598 3599 return 0; 3600 } 3601 3602 void 3603 set_nb_pkt_per_burst(uint16_t nb) 3604 { 3605 if (nb > MAX_PKT_BURST) { 3606 fprintf(stderr, 3607 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 3608 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3609 return; 3610 } 3611 nb_pkt_per_burst = nb; 3612 printf("Number of packets per burst set to %u\n", 3613 (unsigned int) nb_pkt_per_burst); 3614 } 3615 3616 static const char * 3617 tx_split_get_name(enum tx_pkt_split split) 3618 { 3619 uint32_t i; 3620 3621 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3622 if (tx_split_name[i].split == split) 3623 return tx_split_name[i].name; 3624 } 3625 return NULL; 3626 } 3627 3628 void 3629 set_tx_pkt_split(const char *name) 3630 { 3631 uint32_t i; 3632 3633 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3634 if (strcmp(tx_split_name[i].name, name) == 0) { 3635 tx_pkt_split = tx_split_name[i].split; 3636 return; 3637 } 3638 } 3639 fprintf(stderr, "unknown value: \"%s\"\n", name); 3640 } 3641 3642 int 3643 parse_fec_mode(const char *name, uint32_t *fec_capa) 3644 { 3645 uint8_t i; 3646 3647 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3648 if (strcmp(fec_mode_name[i].name, name) == 0) { 3649 *fec_capa = 3650 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3651 return 0; 3652 } 3653 } 3654 return -1; 3655 } 3656 3657 void 3658 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3659 { 3660 unsigned int i, j; 3661 3662 printf("FEC capabilities:\n"); 3663 3664 for (i = 0; i < num; i++) { 3665 printf("%s : ", 3666 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3667 3668 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 3669 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3670 speed_fec_capa[i].capa) 3671 printf("%s ", fec_mode_name[j].name); 3672 } 3673 printf("\n"); 3674 } 3675 } 3676 3677 void 3678 show_rx_pkt_offsets(void) 3679 { 3680 uint32_t i, n; 3681 3682 n = rx_pkt_nb_offs; 3683 printf("Number of offsets: %u\n", n); 3684 if (n) { 3685 printf("Segment offsets: "); 3686 for (i = 0; i != n - 1; i++) 3687 printf("%hu,", rx_pkt_seg_offsets[i]); 3688 printf("%hu\n", rx_pkt_seg_lengths[i]); 3689 } 3690 } 3691 3692 void 3693 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 3694 { 3695 unsigned int i; 3696 3697 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 3698 printf("nb segments per RX packets=%u >= " 3699 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 3700 return; 3701 } 3702 3703 /* 3704 * No extra check here, the segment length will be checked by PMD 3705 * in the extended queue setup. 3706 */ 3707 for (i = 0; i < nb_offs; i++) { 3708 if (seg_offsets[i] >= UINT16_MAX) { 3709 printf("offset[%u]=%u > UINT16_MAX - give up\n", 3710 i, seg_offsets[i]); 3711 return; 3712 } 3713 } 3714 3715 for (i = 0; i < nb_offs; i++) 3716 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 3717 3718 rx_pkt_nb_offs = (uint8_t) nb_offs; 3719 } 3720 3721 void 3722 show_rx_pkt_segments(void) 3723 { 3724 uint32_t i, n; 3725 3726 n = rx_pkt_nb_segs; 3727 printf("Number of segments: %u\n", n); 3728 if (n) { 3729 printf("Segment sizes: "); 3730 for (i = 0; i != n - 1; i++) 3731 printf("%hu,", rx_pkt_seg_lengths[i]); 3732 printf("%hu\n", rx_pkt_seg_lengths[i]); 3733 } 3734 } 3735 3736 void 3737 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3738 { 3739 unsigned int i; 3740 3741 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 3742 printf("nb segments per RX packets=%u >= " 3743 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 3744 return; 3745 } 3746 3747 /* 3748 * No extra check here, the segment length will be checked by PMD 3749 * in the extended queue setup. 3750 */ 3751 for (i = 0; i < nb_segs; i++) { 3752 if (seg_lengths[i] >= UINT16_MAX) { 3753 printf("length[%u]=%u > UINT16_MAX - give up\n", 3754 i, seg_lengths[i]); 3755 return; 3756 } 3757 } 3758 3759 for (i = 0; i < nb_segs; i++) 3760 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3761 3762 rx_pkt_nb_segs = (uint8_t) nb_segs; 3763 } 3764 3765 void 3766 show_tx_pkt_segments(void) 3767 { 3768 uint32_t i, n; 3769 const char *split; 3770 3771 n = tx_pkt_nb_segs; 3772 split = tx_split_get_name(tx_pkt_split); 3773 3774 printf("Number of segments: %u\n", n); 3775 printf("Segment sizes: "); 3776 for (i = 0; i != n - 1; i++) 3777 printf("%hu,", tx_pkt_seg_lengths[i]); 3778 printf("%hu\n", tx_pkt_seg_lengths[i]); 3779 printf("Split packet: %s\n", split); 3780 } 3781 3782 static bool 3783 nb_segs_is_invalid(unsigned int nb_segs) 3784 { 3785 uint16_t ring_size; 3786 uint16_t queue_id; 3787 uint16_t port_id; 3788 int ret; 3789 3790 RTE_ETH_FOREACH_DEV(port_id) { 3791 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 3792 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 3793 if (ret) { 3794 /* Port may not be initialized yet, can't say 3795 * the port is invalid in this stage. 3796 */ 3797 continue; 3798 } 3799 if (ring_size < nb_segs) { 3800 printf("nb segments per TX packets=%u >= TX " 3801 "queue(%u) ring_size=%u - txpkts ignored\n", 3802 nb_segs, queue_id, ring_size); 3803 return true; 3804 } 3805 } 3806 } 3807 3808 return false; 3809 } 3810 3811 void 3812 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3813 { 3814 uint16_t tx_pkt_len; 3815 unsigned int i; 3816 3817 /* 3818 * For single segment settings failed check is ignored. 3819 * It is a very basic capability to send the single segment 3820 * packets, suppose it is always supported. 3821 */ 3822 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 3823 fprintf(stderr, 3824 "Tx segment size(%u) is not supported - txpkts ignored\n", 3825 nb_segs); 3826 return; 3827 } 3828 3829 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 3830 fprintf(stderr, 3831 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 3832 nb_segs, RTE_MAX_SEGS_PER_PKT); 3833 return; 3834 } 3835 3836 /* 3837 * Check that each segment length is greater or equal than 3838 * the mbuf data size. 3839 * Check also that the total packet length is greater or equal than the 3840 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 3841 * 20 + 8). 3842 */ 3843 tx_pkt_len = 0; 3844 for (i = 0; i < nb_segs; i++) { 3845 if (seg_lengths[i] > mbuf_data_size[0]) { 3846 fprintf(stderr, 3847 "length[%u]=%u > mbuf_data_size=%u - give up\n", 3848 i, seg_lengths[i], mbuf_data_size[0]); 3849 return; 3850 } 3851 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 3852 } 3853 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 3854 fprintf(stderr, "total packet length=%u < %d - give up\n", 3855 (unsigned) tx_pkt_len, 3856 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 3857 return; 3858 } 3859 3860 for (i = 0; i < nb_segs; i++) 3861 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3862 3863 tx_pkt_length = tx_pkt_len; 3864 tx_pkt_nb_segs = (uint8_t) nb_segs; 3865 } 3866 3867 void 3868 show_tx_pkt_times(void) 3869 { 3870 printf("Interburst gap: %u\n", tx_pkt_times_inter); 3871 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 3872 } 3873 3874 void 3875 set_tx_pkt_times(unsigned int *tx_times) 3876 { 3877 tx_pkt_times_inter = tx_times[0]; 3878 tx_pkt_times_intra = tx_times[1]; 3879 } 3880 3881 void 3882 setup_gro(const char *onoff, portid_t port_id) 3883 { 3884 if (!rte_eth_dev_is_valid_port(port_id)) { 3885 fprintf(stderr, "invalid port id %u\n", port_id); 3886 return; 3887 } 3888 if (test_done == 0) { 3889 fprintf(stderr, 3890 "Before enable/disable GRO, please stop forwarding first\n"); 3891 return; 3892 } 3893 if (strcmp(onoff, "on") == 0) { 3894 if (gro_ports[port_id].enable != 0) { 3895 fprintf(stderr, 3896 "Port %u has enabled GRO. Please disable GRO first\n", 3897 port_id); 3898 return; 3899 } 3900 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3901 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 3902 gro_ports[port_id].param.max_flow_num = 3903 GRO_DEFAULT_FLOW_NUM; 3904 gro_ports[port_id].param.max_item_per_flow = 3905 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 3906 } 3907 gro_ports[port_id].enable = 1; 3908 } else { 3909 if (gro_ports[port_id].enable == 0) { 3910 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 3911 return; 3912 } 3913 gro_ports[port_id].enable = 0; 3914 } 3915 } 3916 3917 void 3918 setup_gro_flush_cycles(uint8_t cycles) 3919 { 3920 if (test_done == 0) { 3921 fprintf(stderr, 3922 "Before change flush interval for GRO, please stop forwarding first.\n"); 3923 return; 3924 } 3925 3926 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 3927 GRO_DEFAULT_FLUSH_CYCLES) { 3928 fprintf(stderr, 3929 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 3930 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 3931 cycles = GRO_DEFAULT_FLUSH_CYCLES; 3932 } 3933 3934 gro_flush_cycles = cycles; 3935 } 3936 3937 void 3938 show_gro(portid_t port_id) 3939 { 3940 struct rte_gro_param *param; 3941 uint32_t max_pkts_num; 3942 3943 param = &gro_ports[port_id].param; 3944 3945 if (!rte_eth_dev_is_valid_port(port_id)) { 3946 fprintf(stderr, "Invalid port id %u.\n", port_id); 3947 return; 3948 } 3949 if (gro_ports[port_id].enable) { 3950 printf("GRO type: TCP/IPv4\n"); 3951 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3952 max_pkts_num = param->max_flow_num * 3953 param->max_item_per_flow; 3954 } else 3955 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 3956 printf("Max number of packets to perform GRO: %u\n", 3957 max_pkts_num); 3958 printf("Flushing cycles: %u\n", gro_flush_cycles); 3959 } else 3960 printf("Port %u doesn't enable GRO.\n", port_id); 3961 } 3962 3963 void 3964 setup_gso(const char *mode, portid_t port_id) 3965 { 3966 if (!rte_eth_dev_is_valid_port(port_id)) { 3967 fprintf(stderr, "invalid port id %u\n", port_id); 3968 return; 3969 } 3970 if (strcmp(mode, "on") == 0) { 3971 if (test_done == 0) { 3972 fprintf(stderr, 3973 "before enabling GSO, please stop forwarding first\n"); 3974 return; 3975 } 3976 gso_ports[port_id].enable = 1; 3977 } else if (strcmp(mode, "off") == 0) { 3978 if (test_done == 0) { 3979 fprintf(stderr, 3980 "before disabling GSO, please stop forwarding first\n"); 3981 return; 3982 } 3983 gso_ports[port_id].enable = 0; 3984 } 3985 } 3986 3987 char* 3988 list_pkt_forwarding_modes(void) 3989 { 3990 static char fwd_modes[128] = ""; 3991 const char *separator = "|"; 3992 struct fwd_engine *fwd_eng; 3993 unsigned i = 0; 3994 3995 if (strlen (fwd_modes) == 0) { 3996 while ((fwd_eng = fwd_engines[i++]) != NULL) { 3997 strncat(fwd_modes, fwd_eng->fwd_mode_name, 3998 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 3999 strncat(fwd_modes, separator, 4000 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4001 } 4002 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4003 } 4004 4005 return fwd_modes; 4006 } 4007 4008 char* 4009 list_pkt_forwarding_retry_modes(void) 4010 { 4011 static char fwd_modes[128] = ""; 4012 const char *separator = "|"; 4013 struct fwd_engine *fwd_eng; 4014 unsigned i = 0; 4015 4016 if (strlen(fwd_modes) == 0) { 4017 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4018 if (fwd_eng == &rx_only_engine) 4019 continue; 4020 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4021 sizeof(fwd_modes) - 4022 strlen(fwd_modes) - 1); 4023 strncat(fwd_modes, separator, 4024 sizeof(fwd_modes) - 4025 strlen(fwd_modes) - 1); 4026 } 4027 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4028 } 4029 4030 return fwd_modes; 4031 } 4032 4033 void 4034 set_pkt_forwarding_mode(const char *fwd_mode_name) 4035 { 4036 struct fwd_engine *fwd_eng; 4037 unsigned i; 4038 4039 i = 0; 4040 while ((fwd_eng = fwd_engines[i]) != NULL) { 4041 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 4042 printf("Set %s packet forwarding mode%s\n", 4043 fwd_mode_name, 4044 retry_enabled == 0 ? "" : " with retry"); 4045 cur_fwd_eng = fwd_eng; 4046 return; 4047 } 4048 i++; 4049 } 4050 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 4051 } 4052 4053 void 4054 add_rx_dump_callbacks(portid_t portid) 4055 { 4056 struct rte_eth_dev_info dev_info; 4057 uint16_t queue; 4058 int ret; 4059 4060 if (port_id_is_invalid(portid, ENABLED_WARN)) 4061 return; 4062 4063 ret = eth_dev_info_get_print_err(portid, &dev_info); 4064 if (ret != 0) 4065 return; 4066 4067 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4068 if (!ports[portid].rx_dump_cb[queue]) 4069 ports[portid].rx_dump_cb[queue] = 4070 rte_eth_add_rx_callback(portid, queue, 4071 dump_rx_pkts, NULL); 4072 } 4073 4074 void 4075 add_tx_dump_callbacks(portid_t portid) 4076 { 4077 struct rte_eth_dev_info dev_info; 4078 uint16_t queue; 4079 int ret; 4080 4081 if (port_id_is_invalid(portid, ENABLED_WARN)) 4082 return; 4083 4084 ret = eth_dev_info_get_print_err(portid, &dev_info); 4085 if (ret != 0) 4086 return; 4087 4088 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4089 if (!ports[portid].tx_dump_cb[queue]) 4090 ports[portid].tx_dump_cb[queue] = 4091 rte_eth_add_tx_callback(portid, queue, 4092 dump_tx_pkts, NULL); 4093 } 4094 4095 void 4096 remove_rx_dump_callbacks(portid_t portid) 4097 { 4098 struct rte_eth_dev_info dev_info; 4099 uint16_t queue; 4100 int ret; 4101 4102 if (port_id_is_invalid(portid, ENABLED_WARN)) 4103 return; 4104 4105 ret = eth_dev_info_get_print_err(portid, &dev_info); 4106 if (ret != 0) 4107 return; 4108 4109 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4110 if (ports[portid].rx_dump_cb[queue]) { 4111 rte_eth_remove_rx_callback(portid, queue, 4112 ports[portid].rx_dump_cb[queue]); 4113 ports[portid].rx_dump_cb[queue] = NULL; 4114 } 4115 } 4116 4117 void 4118 remove_tx_dump_callbacks(portid_t portid) 4119 { 4120 struct rte_eth_dev_info dev_info; 4121 uint16_t queue; 4122 int ret; 4123 4124 if (port_id_is_invalid(portid, ENABLED_WARN)) 4125 return; 4126 4127 ret = eth_dev_info_get_print_err(portid, &dev_info); 4128 if (ret != 0) 4129 return; 4130 4131 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4132 if (ports[portid].tx_dump_cb[queue]) { 4133 rte_eth_remove_tx_callback(portid, queue, 4134 ports[portid].tx_dump_cb[queue]); 4135 ports[portid].tx_dump_cb[queue] = NULL; 4136 } 4137 } 4138 4139 void 4140 configure_rxtx_dump_callbacks(uint16_t verbose) 4141 { 4142 portid_t portid; 4143 4144 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4145 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4146 return; 4147 #endif 4148 4149 RTE_ETH_FOREACH_DEV(portid) 4150 { 4151 if (verbose == 1 || verbose > 2) 4152 add_rx_dump_callbacks(portid); 4153 else 4154 remove_rx_dump_callbacks(portid); 4155 if (verbose >= 2) 4156 add_tx_dump_callbacks(portid); 4157 else 4158 remove_tx_dump_callbacks(portid); 4159 } 4160 } 4161 4162 void 4163 set_verbose_level(uint16_t vb_level) 4164 { 4165 printf("Change verbose level from %u to %u\n", 4166 (unsigned int) verbose_level, (unsigned int) vb_level); 4167 verbose_level = vb_level; 4168 configure_rxtx_dump_callbacks(verbose_level); 4169 } 4170 4171 void 4172 vlan_extend_set(portid_t port_id, int on) 4173 { 4174 int diag; 4175 int vlan_offload; 4176 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4177 4178 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4179 return; 4180 4181 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4182 4183 if (on) { 4184 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 4185 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 4186 } else { 4187 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 4188 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 4189 } 4190 4191 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4192 if (diag < 0) { 4193 fprintf(stderr, 4194 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 4195 port_id, on, diag); 4196 return; 4197 } 4198 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4199 } 4200 4201 void 4202 rx_vlan_strip_set(portid_t port_id, int on) 4203 { 4204 int diag; 4205 int vlan_offload; 4206 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4207 4208 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4209 return; 4210 4211 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4212 4213 if (on) { 4214 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 4215 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 4216 } else { 4217 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 4218 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 4219 } 4220 4221 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4222 if (diag < 0) { 4223 fprintf(stderr, 4224 "%s(port_pi=%d, on=%d) failed diag=%d\n", 4225 __func__, port_id, on, diag); 4226 return; 4227 } 4228 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4229 } 4230 4231 void 4232 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 4233 { 4234 int diag; 4235 4236 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4237 return; 4238 4239 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 4240 if (diag < 0) 4241 fprintf(stderr, 4242 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 4243 __func__, port_id, queue_id, on, diag); 4244 } 4245 4246 void 4247 rx_vlan_filter_set(portid_t port_id, int on) 4248 { 4249 int diag; 4250 int vlan_offload; 4251 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4252 4253 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4254 return; 4255 4256 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4257 4258 if (on) { 4259 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 4260 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 4261 } else { 4262 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 4263 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 4264 } 4265 4266 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4267 if (diag < 0) { 4268 fprintf(stderr, 4269 "%s(port_pi=%d, on=%d) failed diag=%d\n", 4270 __func__, port_id, on, diag); 4271 return; 4272 } 4273 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4274 } 4275 4276 void 4277 rx_vlan_qinq_strip_set(portid_t port_id, int on) 4278 { 4279 int diag; 4280 int vlan_offload; 4281 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4282 4283 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4284 return; 4285 4286 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4287 4288 if (on) { 4289 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 4290 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 4291 } else { 4292 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 4293 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 4294 } 4295 4296 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4297 if (diag < 0) { 4298 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 4299 __func__, port_id, on, diag); 4300 return; 4301 } 4302 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4303 } 4304 4305 int 4306 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 4307 { 4308 int diag; 4309 4310 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4311 return 1; 4312 if (vlan_id_is_invalid(vlan_id)) 4313 return 1; 4314 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 4315 if (diag == 0) 4316 return 0; 4317 fprintf(stderr, 4318 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 4319 port_id, vlan_id, on, diag); 4320 return -1; 4321 } 4322 4323 void 4324 rx_vlan_all_filter_set(portid_t port_id, int on) 4325 { 4326 uint16_t vlan_id; 4327 4328 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4329 return; 4330 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 4331 if (rx_vft_set(port_id, vlan_id, on)) 4332 break; 4333 } 4334 } 4335 4336 void 4337 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 4338 { 4339 int diag; 4340 4341 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4342 return; 4343 4344 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 4345 if (diag == 0) 4346 return; 4347 4348 fprintf(stderr, 4349 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 4350 port_id, vlan_type, tp_id, diag); 4351 } 4352 4353 void 4354 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 4355 { 4356 struct rte_eth_dev_info dev_info; 4357 int ret; 4358 4359 if (vlan_id_is_invalid(vlan_id)) 4360 return; 4361 4362 if (ports[port_id].dev_conf.txmode.offloads & 4363 DEV_TX_OFFLOAD_QINQ_INSERT) { 4364 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 4365 return; 4366 } 4367 4368 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4369 if (ret != 0) 4370 return; 4371 4372 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 4373 fprintf(stderr, 4374 "Error: vlan insert is not supported by port %d\n", 4375 port_id); 4376 return; 4377 } 4378 4379 tx_vlan_reset(port_id); 4380 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 4381 ports[port_id].tx_vlan_id = vlan_id; 4382 } 4383 4384 void 4385 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 4386 { 4387 struct rte_eth_dev_info dev_info; 4388 int ret; 4389 4390 if (vlan_id_is_invalid(vlan_id)) 4391 return; 4392 if (vlan_id_is_invalid(vlan_id_outer)) 4393 return; 4394 4395 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4396 if (ret != 0) 4397 return; 4398 4399 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 4400 fprintf(stderr, 4401 "Error: qinq insert not supported by port %d\n", 4402 port_id); 4403 return; 4404 } 4405 4406 tx_vlan_reset(port_id); 4407 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 4408 DEV_TX_OFFLOAD_QINQ_INSERT); 4409 ports[port_id].tx_vlan_id = vlan_id; 4410 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 4411 } 4412 4413 void 4414 tx_vlan_reset(portid_t port_id) 4415 { 4416 ports[port_id].dev_conf.txmode.offloads &= 4417 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 4418 DEV_TX_OFFLOAD_QINQ_INSERT); 4419 ports[port_id].tx_vlan_id = 0; 4420 ports[port_id].tx_vlan_id_outer = 0; 4421 } 4422 4423 void 4424 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 4425 { 4426 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4427 return; 4428 4429 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 4430 } 4431 4432 void 4433 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 4434 { 4435 int ret; 4436 4437 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4438 return; 4439 4440 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 4441 return; 4442 4443 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 4444 fprintf(stderr, "map_value not in required range 0..%d\n", 4445 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 4446 return; 4447 } 4448 4449 if (!is_rx) { /* tx */ 4450 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 4451 map_value); 4452 if (ret) { 4453 fprintf(stderr, 4454 "failed to set tx queue stats mapping.\n"); 4455 return; 4456 } 4457 } else { /* rx */ 4458 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 4459 map_value); 4460 if (ret) { 4461 fprintf(stderr, 4462 "failed to set rx queue stats mapping.\n"); 4463 return; 4464 } 4465 } 4466 } 4467 4468 void 4469 set_xstats_hide_zero(uint8_t on_off) 4470 { 4471 xstats_hide_zero = on_off; 4472 } 4473 4474 void 4475 set_record_core_cycles(uint8_t on_off) 4476 { 4477 record_core_cycles = on_off; 4478 } 4479 4480 void 4481 set_record_burst_stats(uint8_t on_off) 4482 { 4483 record_burst_stats = on_off; 4484 } 4485 4486 static char* 4487 flowtype_to_str(uint16_t flow_type) 4488 { 4489 struct flow_type_info { 4490 char str[32]; 4491 uint16_t ftype; 4492 }; 4493 4494 uint8_t i; 4495 static struct flow_type_info flowtype_str_table[] = { 4496 {"raw", RTE_ETH_FLOW_RAW}, 4497 {"ipv4", RTE_ETH_FLOW_IPV4}, 4498 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4499 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4500 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4501 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4502 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4503 {"ipv6", RTE_ETH_FLOW_IPV6}, 4504 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4505 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4506 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4507 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4508 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4509 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4510 {"port", RTE_ETH_FLOW_PORT}, 4511 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4512 {"geneve", RTE_ETH_FLOW_GENEVE}, 4513 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4514 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4515 }; 4516 4517 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4518 if (flowtype_str_table[i].ftype == flow_type) 4519 return flowtype_str_table[i].str; 4520 } 4521 4522 return NULL; 4523 } 4524 4525 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 4526 4527 static inline void 4528 print_fdir_mask(struct rte_eth_fdir_masks *mask) 4529 { 4530 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 4531 4532 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4533 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 4534 " tunnel_id: 0x%08x", 4535 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 4536 rte_be_to_cpu_32(mask->tunnel_id_mask)); 4537 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 4538 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 4539 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 4540 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 4541 4542 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 4543 rte_be_to_cpu_16(mask->src_port_mask), 4544 rte_be_to_cpu_16(mask->dst_port_mask)); 4545 4546 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4547 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 4548 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 4549 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 4550 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 4551 4552 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4553 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 4554 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 4555 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 4556 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 4557 } 4558 4559 printf("\n"); 4560 } 4561 4562 static inline void 4563 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4564 { 4565 struct rte_eth_flex_payload_cfg *cfg; 4566 uint32_t i, j; 4567 4568 for (i = 0; i < flex_conf->nb_payloads; i++) { 4569 cfg = &flex_conf->flex_set[i]; 4570 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 4571 printf("\n RAW: "); 4572 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 4573 printf("\n L2_PAYLOAD: "); 4574 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 4575 printf("\n L3_PAYLOAD: "); 4576 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 4577 printf("\n L4_PAYLOAD: "); 4578 else 4579 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 4580 for (j = 0; j < num; j++) 4581 printf(" %-5u", cfg->src_offset[j]); 4582 } 4583 printf("\n"); 4584 } 4585 4586 static inline void 4587 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4588 { 4589 struct rte_eth_fdir_flex_mask *mask; 4590 uint32_t i, j; 4591 char *p; 4592 4593 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4594 mask = &flex_conf->flex_mask[i]; 4595 p = flowtype_to_str(mask->flow_type); 4596 printf("\n %s:\t", p ? p : "unknown"); 4597 for (j = 0; j < num; j++) 4598 printf(" %02x", mask->mask[j]); 4599 } 4600 printf("\n"); 4601 } 4602 4603 static inline void 4604 print_fdir_flow_type(uint32_t flow_types_mask) 4605 { 4606 int i; 4607 char *p; 4608 4609 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4610 if (!(flow_types_mask & (1 << i))) 4611 continue; 4612 p = flowtype_to_str(i); 4613 if (p) 4614 printf(" %s", p); 4615 else 4616 printf(" unknown"); 4617 } 4618 printf("\n"); 4619 } 4620 4621 static int 4622 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4623 struct rte_eth_fdir_stats *fdir_stat) 4624 { 4625 int ret = -ENOTSUP; 4626 4627 #ifdef RTE_NET_I40E 4628 if (ret == -ENOTSUP) { 4629 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4630 if (!ret) 4631 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4632 } 4633 #endif 4634 #ifdef RTE_NET_IXGBE 4635 if (ret == -ENOTSUP) { 4636 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4637 if (!ret) 4638 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4639 } 4640 #endif 4641 switch (ret) { 4642 case 0: 4643 break; 4644 case -ENOTSUP: 4645 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 4646 port_id); 4647 break; 4648 default: 4649 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 4650 break; 4651 } 4652 return ret; 4653 } 4654 4655 void 4656 fdir_get_infos(portid_t port_id) 4657 { 4658 struct rte_eth_fdir_stats fdir_stat; 4659 struct rte_eth_fdir_info fdir_info; 4660 4661 static const char *fdir_stats_border = "########################"; 4662 4663 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4664 return; 4665 4666 memset(&fdir_info, 0, sizeof(fdir_info)); 4667 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4668 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4669 return; 4670 4671 printf("\n %s FDIR infos for port %-2d %s\n", 4672 fdir_stats_border, port_id, fdir_stats_border); 4673 printf(" MODE: "); 4674 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4675 printf(" PERFECT\n"); 4676 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4677 printf(" PERFECT-MAC-VLAN\n"); 4678 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4679 printf(" PERFECT-TUNNEL\n"); 4680 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4681 printf(" SIGNATURE\n"); 4682 else 4683 printf(" DISABLE\n"); 4684 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4685 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 4686 printf(" SUPPORTED FLOW TYPE: "); 4687 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 4688 } 4689 printf(" FLEX PAYLOAD INFO:\n"); 4690 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 4691 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 4692 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 4693 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 4694 fdir_info.flex_payload_unit, 4695 fdir_info.max_flex_payload_segment_num, 4696 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 4697 printf(" MASK: "); 4698 print_fdir_mask(&fdir_info.mask); 4699 if (fdir_info.flex_conf.nb_payloads > 0) { 4700 printf(" FLEX PAYLOAD SRC OFFSET:"); 4701 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4702 } 4703 if (fdir_info.flex_conf.nb_flexmasks > 0) { 4704 printf(" FLEX MASK CFG:"); 4705 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4706 } 4707 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 4708 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 4709 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 4710 fdir_info.guarant_spc, fdir_info.best_spc); 4711 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 4712 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 4713 " add: %-10"PRIu64" remove: %"PRIu64"\n" 4714 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 4715 fdir_stat.collision, fdir_stat.free, 4716 fdir_stat.maxhash, fdir_stat.maxlen, 4717 fdir_stat.add, fdir_stat.remove, 4718 fdir_stat.f_add, fdir_stat.f_remove); 4719 printf(" %s############################%s\n", 4720 fdir_stats_border, fdir_stats_border); 4721 } 4722 4723 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 4724 4725 void 4726 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 4727 { 4728 struct rte_port *port; 4729 struct rte_eth_fdir_flex_conf *flex_conf; 4730 int i, idx = 0; 4731 4732 port = &ports[port_id]; 4733 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4734 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 4735 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 4736 idx = i; 4737 break; 4738 } 4739 } 4740 if (i >= RTE_ETH_FLOW_MAX) { 4741 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 4742 idx = flex_conf->nb_flexmasks; 4743 flex_conf->nb_flexmasks++; 4744 } else { 4745 fprintf(stderr, 4746 "The flex mask table is full. Can not set flex mask for flow_type(%u).", 4747 cfg->flow_type); 4748 return; 4749 } 4750 } 4751 rte_memcpy(&flex_conf->flex_mask[idx], 4752 cfg, 4753 sizeof(struct rte_eth_fdir_flex_mask)); 4754 } 4755 4756 void 4757 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 4758 { 4759 struct rte_port *port; 4760 struct rte_eth_fdir_flex_conf *flex_conf; 4761 int i, idx = 0; 4762 4763 port = &ports[port_id]; 4764 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4765 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 4766 if (cfg->type == flex_conf->flex_set[i].type) { 4767 idx = i; 4768 break; 4769 } 4770 } 4771 if (i >= RTE_ETH_PAYLOAD_MAX) { 4772 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 4773 idx = flex_conf->nb_payloads; 4774 flex_conf->nb_payloads++; 4775 } else { 4776 fprintf(stderr, 4777 "The flex payload table is full. Can not set flex payload for type(%u).", 4778 cfg->type); 4779 return; 4780 } 4781 } 4782 rte_memcpy(&flex_conf->flex_set[idx], 4783 cfg, 4784 sizeof(struct rte_eth_flex_payload_cfg)); 4785 4786 } 4787 4788 void 4789 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 4790 { 4791 #ifdef RTE_NET_IXGBE 4792 int diag; 4793 4794 if (is_rx) 4795 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 4796 else 4797 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 4798 4799 if (diag == 0) 4800 return; 4801 fprintf(stderr, 4802 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 4803 is_rx ? "rx" : "tx", port_id, diag); 4804 return; 4805 #endif 4806 fprintf(stderr, "VF %s setting not supported for port %d\n", 4807 is_rx ? "Rx" : "Tx", port_id); 4808 RTE_SET_USED(vf); 4809 RTE_SET_USED(on); 4810 } 4811 4812 int 4813 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 4814 { 4815 int diag; 4816 struct rte_eth_link link; 4817 int ret; 4818 4819 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4820 return 1; 4821 ret = eth_link_get_nowait_print_err(port_id, &link); 4822 if (ret < 0) 4823 return 1; 4824 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN && 4825 rate > link.link_speed) { 4826 fprintf(stderr, 4827 "Invalid rate value:%u bigger than link speed: %u\n", 4828 rate, link.link_speed); 4829 return 1; 4830 } 4831 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 4832 if (diag == 0) 4833 return diag; 4834 fprintf(stderr, 4835 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 4836 port_id, diag); 4837 return diag; 4838 } 4839 4840 int 4841 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 4842 { 4843 int diag = -ENOTSUP; 4844 4845 RTE_SET_USED(vf); 4846 RTE_SET_USED(rate); 4847 RTE_SET_USED(q_msk); 4848 4849 #ifdef RTE_NET_IXGBE 4850 if (diag == -ENOTSUP) 4851 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 4852 q_msk); 4853 #endif 4854 #ifdef RTE_NET_BNXT 4855 if (diag == -ENOTSUP) 4856 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 4857 #endif 4858 if (diag == 0) 4859 return diag; 4860 4861 fprintf(stderr, 4862 "%s for port_id=%d failed diag=%d\n", 4863 __func__, port_id, diag); 4864 return diag; 4865 } 4866 4867 /* 4868 * Functions to manage the set of filtered Multicast MAC addresses. 4869 * 4870 * A pool of filtered multicast MAC addresses is associated with each port. 4871 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 4872 * The address of the pool and the number of valid multicast MAC addresses 4873 * recorded in the pool are stored in the fields "mc_addr_pool" and 4874 * "mc_addr_nb" of the "rte_port" data structure. 4875 * 4876 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 4877 * to be supplied a contiguous array of multicast MAC addresses. 4878 * To comply with this constraint, the set of multicast addresses recorded 4879 * into the pool are systematically compacted at the beginning of the pool. 4880 * Hence, when a multicast address is removed from the pool, all following 4881 * addresses, if any, are copied back to keep the set contiguous. 4882 */ 4883 #define MCAST_POOL_INC 32 4884 4885 static int 4886 mcast_addr_pool_extend(struct rte_port *port) 4887 { 4888 struct rte_ether_addr *mc_pool; 4889 size_t mc_pool_size; 4890 4891 /* 4892 * If a free entry is available at the end of the pool, just 4893 * increment the number of recorded multicast addresses. 4894 */ 4895 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 4896 port->mc_addr_nb++; 4897 return 0; 4898 } 4899 4900 /* 4901 * [re]allocate a pool with MCAST_POOL_INC more entries. 4902 * The previous test guarantees that port->mc_addr_nb is a multiple 4903 * of MCAST_POOL_INC. 4904 */ 4905 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 4906 MCAST_POOL_INC); 4907 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 4908 mc_pool_size); 4909 if (mc_pool == NULL) { 4910 fprintf(stderr, 4911 "allocation of pool of %u multicast addresses failed\n", 4912 port->mc_addr_nb + MCAST_POOL_INC); 4913 return -ENOMEM; 4914 } 4915 4916 port->mc_addr_pool = mc_pool; 4917 port->mc_addr_nb++; 4918 return 0; 4919 4920 } 4921 4922 static void 4923 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 4924 { 4925 if (mcast_addr_pool_extend(port) != 0) 4926 return; 4927 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 4928 } 4929 4930 static void 4931 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 4932 { 4933 port->mc_addr_nb--; 4934 if (addr_idx == port->mc_addr_nb) { 4935 /* No need to recompact the set of multicast addressses. */ 4936 if (port->mc_addr_nb == 0) { 4937 /* free the pool of multicast addresses. */ 4938 free(port->mc_addr_pool); 4939 port->mc_addr_pool = NULL; 4940 } 4941 return; 4942 } 4943 memmove(&port->mc_addr_pool[addr_idx], 4944 &port->mc_addr_pool[addr_idx + 1], 4945 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 4946 } 4947 4948 static int 4949 eth_port_multicast_addr_list_set(portid_t port_id) 4950 { 4951 struct rte_port *port; 4952 int diag; 4953 4954 port = &ports[port_id]; 4955 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 4956 port->mc_addr_nb); 4957 if (diag < 0) 4958 fprintf(stderr, 4959 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 4960 port_id, port->mc_addr_nb, diag); 4961 4962 return diag; 4963 } 4964 4965 void 4966 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 4967 { 4968 struct rte_port *port; 4969 uint32_t i; 4970 4971 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4972 return; 4973 4974 port = &ports[port_id]; 4975 4976 /* 4977 * Check that the added multicast MAC address is not already recorded 4978 * in the pool of multicast addresses. 4979 */ 4980 for (i = 0; i < port->mc_addr_nb; i++) { 4981 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 4982 fprintf(stderr, 4983 "multicast address already filtered by port\n"); 4984 return; 4985 } 4986 } 4987 4988 mcast_addr_pool_append(port, mc_addr); 4989 if (eth_port_multicast_addr_list_set(port_id) < 0) 4990 /* Rollback on failure, remove the address from the pool */ 4991 mcast_addr_pool_remove(port, i); 4992 } 4993 4994 void 4995 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 4996 { 4997 struct rte_port *port; 4998 uint32_t i; 4999 5000 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5001 return; 5002 5003 port = &ports[port_id]; 5004 5005 /* 5006 * Search the pool of multicast MAC addresses for the removed address. 5007 */ 5008 for (i = 0; i < port->mc_addr_nb; i++) { 5009 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 5010 break; 5011 } 5012 if (i == port->mc_addr_nb) { 5013 fprintf(stderr, "multicast address not filtered by port %d\n", 5014 port_id); 5015 return; 5016 } 5017 5018 mcast_addr_pool_remove(port, i); 5019 if (eth_port_multicast_addr_list_set(port_id) < 0) 5020 /* Rollback on failure, add the address back into the pool */ 5021 mcast_addr_pool_append(port, mc_addr); 5022 } 5023 5024 void 5025 port_dcb_info_display(portid_t port_id) 5026 { 5027 struct rte_eth_dcb_info dcb_info; 5028 uint16_t i; 5029 int ret; 5030 static const char *border = "================"; 5031 5032 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5033 return; 5034 5035 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 5036 if (ret) { 5037 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 5038 port_id); 5039 return; 5040 } 5041 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 5042 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 5043 printf("\n TC : "); 5044 for (i = 0; i < dcb_info.nb_tcs; i++) 5045 printf("\t%4d", i); 5046 printf("\n Priority : "); 5047 for (i = 0; i < dcb_info.nb_tcs; i++) 5048 printf("\t%4d", dcb_info.prio_tc[i]); 5049 printf("\n BW percent :"); 5050 for (i = 0; i < dcb_info.nb_tcs; i++) 5051 printf("\t%4d%%", dcb_info.tc_bws[i]); 5052 printf("\n RXQ base : "); 5053 for (i = 0; i < dcb_info.nb_tcs; i++) 5054 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 5055 printf("\n RXQ number :"); 5056 for (i = 0; i < dcb_info.nb_tcs; i++) 5057 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 5058 printf("\n TXQ base : "); 5059 for (i = 0; i < dcb_info.nb_tcs; i++) 5060 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 5061 printf("\n TXQ number :"); 5062 for (i = 0; i < dcb_info.nb_tcs; i++) 5063 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 5064 printf("\n"); 5065 } 5066 5067 uint8_t * 5068 open_file(const char *file_path, uint32_t *size) 5069 { 5070 int fd = open(file_path, O_RDONLY); 5071 off_t pkg_size; 5072 uint8_t *buf = NULL; 5073 int ret = 0; 5074 struct stat st_buf; 5075 5076 if (size) 5077 *size = 0; 5078 5079 if (fd == -1) { 5080 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5081 return buf; 5082 } 5083 5084 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 5085 close(fd); 5086 fprintf(stderr, "%s: File operations failed\n", __func__); 5087 return buf; 5088 } 5089 5090 pkg_size = st_buf.st_size; 5091 if (pkg_size < 0) { 5092 close(fd); 5093 fprintf(stderr, "%s: File operations failed\n", __func__); 5094 return buf; 5095 } 5096 5097 buf = (uint8_t *)malloc(pkg_size); 5098 if (!buf) { 5099 close(fd); 5100 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 5101 return buf; 5102 } 5103 5104 ret = read(fd, buf, pkg_size); 5105 if (ret < 0) { 5106 close(fd); 5107 fprintf(stderr, "%s: File read operation failed\n", __func__); 5108 close_file(buf); 5109 return NULL; 5110 } 5111 5112 if (size) 5113 *size = pkg_size; 5114 5115 close(fd); 5116 5117 return buf; 5118 } 5119 5120 int 5121 save_file(const char *file_path, uint8_t *buf, uint32_t size) 5122 { 5123 FILE *fh = fopen(file_path, "wb"); 5124 5125 if (fh == NULL) { 5126 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5127 return -1; 5128 } 5129 5130 if (fwrite(buf, 1, size, fh) != size) { 5131 fclose(fh); 5132 fprintf(stderr, "%s: File write operation failed\n", __func__); 5133 return -1; 5134 } 5135 5136 fclose(fh); 5137 5138 return 0; 5139 } 5140 5141 int 5142 close_file(uint8_t *buf) 5143 { 5144 if (buf) { 5145 free((void *)buf); 5146 return 0; 5147 } 5148 5149 return -1; 5150 } 5151 5152 void 5153 port_queue_region_info_display(portid_t port_id, void *buf) 5154 { 5155 #ifdef RTE_NET_I40E 5156 uint16_t i, j; 5157 struct rte_pmd_i40e_queue_regions *info = 5158 (struct rte_pmd_i40e_queue_regions *)buf; 5159 static const char *queue_region_info_stats_border = "-------"; 5160 5161 if (!info->queue_region_number) 5162 printf("there is no region has been set before"); 5163 5164 printf("\n %s All queue region info for port=%2d %s", 5165 queue_region_info_stats_border, port_id, 5166 queue_region_info_stats_border); 5167 printf("\n queue_region_number: %-14u \n", 5168 info->queue_region_number); 5169 5170 for (i = 0; i < info->queue_region_number; i++) { 5171 printf("\n region_id: %-14u queue_number: %-14u " 5172 "queue_start_index: %-14u \n", 5173 info->region[i].region_id, 5174 info->region[i].queue_num, 5175 info->region[i].queue_start_index); 5176 5177 printf(" user_priority_num is %-14u :", 5178 info->region[i].user_priority_num); 5179 for (j = 0; j < info->region[i].user_priority_num; j++) 5180 printf(" %-14u ", info->region[i].user_priority[j]); 5181 5182 printf("\n flowtype_num is %-14u :", 5183 info->region[i].flowtype_num); 5184 for (j = 0; j < info->region[i].flowtype_num; j++) 5185 printf(" %-14u ", info->region[i].hw_flowtype[j]); 5186 } 5187 #else 5188 RTE_SET_USED(port_id); 5189 RTE_SET_USED(buf); 5190 #endif 5191 5192 printf("\n\n"); 5193 } 5194 5195 void 5196 show_macs(portid_t port_id) 5197 { 5198 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5199 struct rte_eth_dev_info dev_info; 5200 struct rte_ether_addr *addr; 5201 uint32_t i, num_macs = 0; 5202 struct rte_eth_dev *dev; 5203 5204 dev = &rte_eth_devices[port_id]; 5205 5206 if (eth_dev_info_get_print_err(port_id, &dev_info)) 5207 return; 5208 5209 for (i = 0; i < dev_info.max_mac_addrs; i++) { 5210 addr = &dev->data->mac_addrs[i]; 5211 5212 /* skip zero address */ 5213 if (rte_is_zero_ether_addr(addr)) 5214 continue; 5215 5216 num_macs++; 5217 } 5218 5219 printf("Number of MAC address added: %d\n", num_macs); 5220 5221 for (i = 0; i < dev_info.max_mac_addrs; i++) { 5222 addr = &dev->data->mac_addrs[i]; 5223 5224 /* skip zero address */ 5225 if (rte_is_zero_ether_addr(addr)) 5226 continue; 5227 5228 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5229 printf(" %s\n", buf); 5230 } 5231 } 5232 5233 void 5234 show_mcast_macs(portid_t port_id) 5235 { 5236 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5237 struct rte_ether_addr *addr; 5238 struct rte_port *port; 5239 uint32_t i; 5240 5241 port = &ports[port_id]; 5242 5243 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 5244 5245 for (i = 0; i < port->mc_addr_nb; i++) { 5246 addr = &port->mc_addr_pool[i]; 5247 5248 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5249 printf(" %s\n", buf); 5250 } 5251 } 5252