/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <time.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_mtr.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <rte_hexdump.h>

#include "testpmd.h"
#include "cmdline_mtr.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};
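/*
 * Map the RSS type names accepted on the command line (e.g.
 * "port config all rss <type>") to their ETH_RSS_* hash bits. The table
 * is NULL-terminated and "all" aggregates every protocol type listed
 * here.
 */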
const struct rss_type_info rss_type_table[] = {
	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
	{ "none", 0 },
	{ "eth", ETH_RSS_ETH },
	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
	{ "vlan", ETH_RSS_VLAN },
	{ "s-vlan", ETH_RSS_S_VLAN },
	{ "c-vlan", ETH_RSS_C_VLAN },
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
	{ "esp", ETH_RSS_ESP },
	{ "ah", ETH_RSS_AH },
	{ "l2tpv3", ETH_RSS_L2TPV3 },
	{ "pfcp", ETH_RSS_PFCP },
	{ "pppoe", ETH_RSS_PPPOE },
	{ "gtpu", ETH_RSS_GTPU },
	{ "ecpri", ETH_RSS_ECPRI },
	{ "mpls", ETH_RSS_MPLS },
	{ NULL, 0 },
};

static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{
		.mode = RTE_ETH_FEC_NOFEC,
		.name = "off",
	},
	{
		.mode = RTE_ETH_FEC_AUTO,
		.name = "auto",
	},
	{
		.mode = RTE_ETH_FEC_BASER,
		.name = "baser",
	},
	{
		.mode = RTE_ETH_FEC_RS,
		.name = "rs",
	},
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
								diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
	printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);
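	/*
	 * Rates below are computed over the interval since the previous
	 * call for this port: the prev_* arrays hold the last snapshot
	 * and CLOCK_TYPE_ID provides a monotonic time base.
	 */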
	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		printf("%s: Error: failed to reset stats (port %u): %s\n",
		       __func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		printf("%s: Error: failed to get stats (port %u): %s\n",
		       __func__, port_id, strerror(ret));
		return;
	}
	printf("\n NIC statistics for port %d cleared\n", port_id);
}
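/*
 * Show extended statistics: query the number of entries, then the
 * id/name lookup table, then the values themselves. Zero-valued
 * counters are skipped when xstats_hide_zero is set.
 */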
*info_border = "*********************"; 375 376 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 377 if (rc != 0) { 378 printf("Failed to retrieve information for port: %u, " 379 "RX queue: %hu\nerror desc: %s(%d)\n", 380 port_id, queue_id, strerror(-rc), rc); 381 return; 382 } 383 384 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 385 info_border, port_id, queue_id, info_border); 386 387 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 388 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 389 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 390 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 391 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 392 printf("\nRX drop packets: %s", 393 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 394 printf("\nRX deferred start: %s", 395 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 396 printf("\nRX scattered packets: %s", 397 (qinfo.scattered_rx != 0) ? "on" : "off"); 398 if (qinfo.rx_buf_size != 0) 399 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 400 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 401 402 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 403 printf("\nBurst mode: %s%s", 404 mode.info, 405 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 406 " (per queue)" : ""); 407 408 printf("\n"); 409 } 410 411 void 412 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 413 { 414 struct rte_eth_burst_mode mode; 415 struct rte_eth_txq_info qinfo; 416 int32_t rc; 417 static const char *info_border = "*********************"; 418 419 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 420 if (rc != 0) { 421 printf("Failed to retrieve information for port: %u, " 422 "TX queue: %hu\nerror desc: %s(%d)\n", 423 port_id, queue_id, strerror(-rc), rc); 424 return; 425 } 426 427 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 428 info_border, port_id, queue_id, info_border); 429 430 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 431 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 432 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 433 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 434 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 435 printf("\nTX deferred start: %s", 436 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 437 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 438 439 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 440 printf("\nBurst mode: %s%s", 441 mode.info, 442 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 
443 " (per queue)" : ""); 444 445 printf("\n"); 446 } 447 448 static int bus_match_all(const struct rte_bus *bus, const void *data) 449 { 450 RTE_SET_USED(bus); 451 RTE_SET_USED(data); 452 return 0; 453 } 454 455 static void 456 device_infos_display_speeds(uint32_t speed_capa) 457 { 458 printf("\n\tDevice speed capability:"); 459 if (speed_capa == ETH_LINK_SPEED_AUTONEG) 460 printf(" Autonegotiate (all speeds)"); 461 if (speed_capa & ETH_LINK_SPEED_FIXED) 462 printf(" Disable autonegotiate (fixed speed) "); 463 if (speed_capa & ETH_LINK_SPEED_10M_HD) 464 printf(" 10 Mbps half-duplex "); 465 if (speed_capa & ETH_LINK_SPEED_10M) 466 printf(" 10 Mbps full-duplex "); 467 if (speed_capa & ETH_LINK_SPEED_100M_HD) 468 printf(" 100 Mbps half-duplex "); 469 if (speed_capa & ETH_LINK_SPEED_100M) 470 printf(" 100 Mbps full-duplex "); 471 if (speed_capa & ETH_LINK_SPEED_1G) 472 printf(" 1 Gbps "); 473 if (speed_capa & ETH_LINK_SPEED_2_5G) 474 printf(" 2.5 Gbps "); 475 if (speed_capa & ETH_LINK_SPEED_5G) 476 printf(" 5 Gbps "); 477 if (speed_capa & ETH_LINK_SPEED_10G) 478 printf(" 10 Gbps "); 479 if (speed_capa & ETH_LINK_SPEED_20G) 480 printf(" 20 Gbps "); 481 if (speed_capa & ETH_LINK_SPEED_25G) 482 printf(" 25 Gbps "); 483 if (speed_capa & ETH_LINK_SPEED_40G) 484 printf(" 40 Gbps "); 485 if (speed_capa & ETH_LINK_SPEED_50G) 486 printf(" 50 Gbps "); 487 if (speed_capa & ETH_LINK_SPEED_56G) 488 printf(" 56 Gbps "); 489 if (speed_capa & ETH_LINK_SPEED_100G) 490 printf(" 100 Gbps "); 491 if (speed_capa & ETH_LINK_SPEED_200G) 492 printf(" 200 Gbps "); 493 } 494 495 void 496 device_infos_display(const char *identifier) 497 { 498 static const char *info_border = "*********************"; 499 struct rte_bus *start = NULL, *next; 500 struct rte_dev_iterator dev_iter; 501 char name[RTE_ETH_NAME_MAX_LEN]; 502 struct rte_ether_addr mac_addr; 503 struct rte_device *dev; 504 struct rte_devargs da; 505 portid_t port_id; 506 struct rte_eth_dev_info dev_info; 507 char devstr[128]; 508 509 memset(&da, 0, sizeof(da)); 510 if (!identifier) 511 goto skip_parse; 512 513 if (rte_devargs_parsef(&da, "%s", identifier)) { 514 printf("cannot parse identifier\n"); 515 return; 516 } 517 518 skip_parse: 519 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 520 521 start = next; 522 if (identifier && da.bus != next) 523 continue; 524 525 /* Skip buses that don't have iterate method */ 526 if (!next->dev_iterate) 527 continue; 528 529 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 530 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 531 532 if (!dev->driver) 533 continue; 534 /* Check for matching device if identifier is present */ 535 if (identifier && 536 strncmp(da.name, dev->name, strlen(dev->name))) 537 continue; 538 printf("\n%s Infos for device %s %s\n", 539 info_border, dev->name, info_border); 540 printf("Bus name: %s", dev->bus->name); 541 printf("\nDriver name: %s", dev->driver->name); 542 printf("\nDevargs: %s", 543 dev->devargs ? 
dev->devargs->args : ""); 544 printf("\nConnect to socket: %d", dev->numa_node); 545 printf("\n"); 546 547 /* List ports with matching device name */ 548 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 549 printf("\n\tPort id: %-2d", port_id); 550 if (eth_macaddr_get_print_err(port_id, 551 &mac_addr) == 0) 552 print_ethaddr("\n\tMAC address: ", 553 &mac_addr); 554 rte_eth_dev_get_name_by_port(port_id, name); 555 printf("\n\tDevice name: %s", name); 556 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 557 device_infos_display_speeds(dev_info.speed_capa); 558 printf("\n"); 559 } 560 } 561 }; 562 rte_devargs_reset(&da); 563 } 564 565 void 566 port_infos_display(portid_t port_id) 567 { 568 struct rte_port *port; 569 struct rte_ether_addr mac_addr; 570 struct rte_eth_link link; 571 struct rte_eth_dev_info dev_info; 572 int vlan_offload; 573 struct rte_mempool * mp; 574 static const char *info_border = "*********************"; 575 uint16_t mtu; 576 char name[RTE_ETH_NAME_MAX_LEN]; 577 int ret; 578 char fw_version[ETHDEV_FWVERS_LEN]; 579 580 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 581 print_valid_ports(); 582 return; 583 } 584 port = &ports[port_id]; 585 ret = eth_link_get_nowait_print_err(port_id, &link); 586 if (ret < 0) 587 return; 588 589 ret = eth_dev_info_get_print_err(port_id, &dev_info); 590 if (ret != 0) 591 return; 592 593 printf("\n%s Infos for port %-2d %s\n", 594 info_border, port_id, info_border); 595 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 596 print_ethaddr("MAC address: ", &mac_addr); 597 rte_eth_dev_get_name_by_port(port_id, name); 598 printf("\nDevice name: %s", name); 599 printf("\nDriver name: %s", dev_info.driver_name); 600 601 if (rte_eth_dev_fw_version_get(port_id, fw_version, 602 ETHDEV_FWVERS_LEN) == 0) 603 printf("\nFirmware-version: %s", fw_version); 604 else 605 printf("\nFirmware-version: %s", "not available"); 606 607 if (dev_info.device->devargs && dev_info.device->devargs->args) 608 printf("\nDevargs: %s", dev_info.device->devargs->args); 609 printf("\nConnect to socket: %u", port->socket_id); 610 611 if (port_numa[port_id] != NUMA_NO_CONFIG) { 612 mp = mbuf_pool_find(port_numa[port_id], 0); 613 if (mp) 614 printf("\nmemory allocation on the socket: %d", 615 port_numa[port_id]); 616 } else 617 printf("\nmemory allocation on the socket: %u",port->socket_id); 618 619 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 620 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 621 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 622 ("full-duplex") : ("half-duplex")); 623 printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ? 624 ("On") : ("Off")); 625 626 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 627 printf("MTU: %u\n", mtu); 628 629 printf("Promiscuous mode: %s\n", 630 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 631 printf("Allmulticast mode: %s\n", 632 rte_eth_allmulticast_get(port_id) ? 
"enabled" : "disabled"); 633 printf("Maximum number of MAC addresses: %u\n", 634 (unsigned int)(port->dev_info.max_mac_addrs)); 635 printf("Maximum number of MAC addresses of hash filtering: %u\n", 636 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 637 638 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 639 if (vlan_offload >= 0){ 640 printf("VLAN offload: \n"); 641 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 642 printf(" strip on, "); 643 else 644 printf(" strip off, "); 645 646 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 647 printf("filter on, "); 648 else 649 printf("filter off, "); 650 651 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 652 printf("extend on, "); 653 else 654 printf("extend off, "); 655 656 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 657 printf("qinq strip on\n"); 658 else 659 printf("qinq strip off\n"); 660 } 661 662 if (dev_info.hash_key_size > 0) 663 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 664 if (dev_info.reta_size > 0) 665 printf("Redirection table size: %u\n", dev_info.reta_size); 666 if (!dev_info.flow_type_rss_offloads) 667 printf("No RSS offload flow type is supported.\n"); 668 else { 669 uint16_t i; 670 char *p; 671 672 printf("Supported RSS offload flow types:\n"); 673 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 674 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 675 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 676 continue; 677 p = flowtype_to_str(i); 678 if (p) 679 printf(" %s\n", p); 680 else 681 printf(" user defined %d\n", i); 682 } 683 } 684 685 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 686 printf("Maximum configurable length of RX packet: %u\n", 687 dev_info.max_rx_pktlen); 688 printf("Maximum configurable size of LRO aggregated packet: %u\n", 689 dev_info.max_lro_pkt_size); 690 if (dev_info.max_vfs) 691 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 692 if (dev_info.max_vmdq_pools) 693 printf("Maximum number of VMDq pools: %u\n", 694 dev_info.max_vmdq_pools); 695 696 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 697 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 698 printf("Max possible number of RXDs per queue: %hu\n", 699 dev_info.rx_desc_lim.nb_max); 700 printf("Min possible number of RXDs per queue: %hu\n", 701 dev_info.rx_desc_lim.nb_min); 702 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 703 704 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 705 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 706 printf("Max possible number of TXDs per queue: %hu\n", 707 dev_info.tx_desc_lim.nb_max); 708 printf("Min possible number of TXDs per queue: %hu\n", 709 dev_info.tx_desc_lim.nb_min); 710 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 711 printf("Max segment number per packet: %hu\n", 712 dev_info.tx_desc_lim.nb_seg_max); 713 printf("Max segment number per MTU/TSO: %hu\n", 714 dev_info.tx_desc_lim.nb_mtu_seg_max); 715 716 /* Show switch info only if valid switch domain and port id is set */ 717 if (dev_info.switch_info.domain_id != 718 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 719 if (dev_info.switch_info.name) 720 printf("Switch name: %s\n", dev_info.switch_info.name); 721 722 printf("Switch domain Id: %u\n", 723 dev_info.switch_info.domain_id); 724 printf("Switch Port Id: %u\n", 725 dev_info.switch_info.port_id); 726 } 727 } 728 729 void 730 port_summary_header_display(void) 731 { 732 uint16_t port_number; 733 734 port_number = 
void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
	       "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n",
	       port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
	       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
	       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
	       dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
	       rte_eth_link_speed_to_str(link.link_speed));
}

void
port_eeprom_display(portid_t port_id)
{
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
	if (len_eeprom < 0) {
		switch (len_eeprom) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get EEPROM: %d\n", len_eeprom);
			break;
		}
		return;
	}

	char buf[len_eeprom];
	einfo.offset = 0;
	einfo.length = len_eeprom;
	einfo.data = buf;

	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get EEPROM: %d\n", ret);
			break;
		}
		return;
	}
	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
}

void
port_module_eeprom_display(portid_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get module EEPROM: %d\n", ret);
			break;
		}
		return;
	}

	char buf[minfo.eeprom_len];
	einfo.offset = 0;
	einfo.length = minfo.eeprom_len;
	einfo.data = buf;

	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get module EEPROM: %d\n", ret);
			break;
		}
		return;
	}

	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
}
device\n"); 876 break; 877 case -EIO: 878 printf("device is removed\n"); 879 break; 880 default: 881 printf("Unable to get module EEPROM: %d\n", ret); 882 break; 883 } 884 return; 885 } 886 887 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 888 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 889 } 890 891 int 892 port_id_is_invalid(portid_t port_id, enum print_warning warning) 893 { 894 uint16_t pid; 895 896 if (port_id == (portid_t)RTE_PORT_ALL) 897 return 0; 898 899 RTE_ETH_FOREACH_DEV(pid) 900 if (port_id == pid) 901 return 0; 902 903 if (warning == ENABLED_WARN) 904 printf("Invalid port %d\n", port_id); 905 906 return 1; 907 } 908 909 void print_valid_ports(void) 910 { 911 portid_t pid; 912 913 printf("The valid ports array is ["); 914 RTE_ETH_FOREACH_DEV(pid) { 915 printf(" %d", pid); 916 } 917 printf(" ]\n"); 918 } 919 920 static int 921 vlan_id_is_invalid(uint16_t vlan_id) 922 { 923 if (vlan_id < 4096) 924 return 0; 925 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 926 return 1; 927 } 928 929 static int 930 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 931 { 932 const struct rte_pci_device *pci_dev; 933 const struct rte_bus *bus; 934 uint64_t pci_len; 935 936 if (reg_off & 0x3) { 937 printf("Port register offset 0x%X not aligned on a 4-byte " 938 "boundary\n", 939 (unsigned)reg_off); 940 return 1; 941 } 942 943 if (!ports[port_id].dev_info.device) { 944 printf("Invalid device\n"); 945 return 0; 946 } 947 948 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 949 if (bus && !strcmp(bus->name, "pci")) { 950 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 951 } else { 952 printf("Not a PCI device\n"); 953 return 1; 954 } 955 956 pci_len = pci_dev->mem_resource[0].len; 957 if (reg_off >= pci_len) { 958 printf("Port %d: register offset %u (0x%X) out of port PCI " 959 "resource (length=%"PRIu64")\n", 960 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 961 return 1; 962 } 963 return 0; 964 } 965 966 static int 967 reg_bit_pos_is_invalid(uint8_t bit_pos) 968 { 969 if (bit_pos <= 31) 970 return 0; 971 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 972 return 1; 973 } 974 975 #define display_port_and_reg_off(port_id, reg_off) \ 976 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 977 978 static inline void 979 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 980 { 981 display_port_and_reg_off(port_id, (unsigned)reg_off); 982 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 983 } 984 985 void 986 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 987 { 988 uint32_t reg_v; 989 990 991 if (port_id_is_invalid(port_id, ENABLED_WARN)) 992 return; 993 if (port_reg_off_is_invalid(port_id, reg_off)) 994 return; 995 if (reg_bit_pos_is_invalid(bit_x)) 996 return; 997 reg_v = port_id_pci_reg_read(port_id, reg_off); 998 display_port_and_reg_off(port_id, (unsigned)reg_off); 999 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1000 } 1001 1002 void 1003 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1004 uint8_t bit1_pos, uint8_t bit2_pos) 1005 { 1006 uint32_t reg_v; 1007 uint8_t l_bit; 1008 uint8_t h_bit; 1009 1010 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1011 return; 1012 if (port_reg_off_is_invalid(port_id, reg_off)) 1013 return; 1014 if (reg_bit_pos_is_invalid(bit1_pos)) 1015 return; 1016 if (reg_bit_pos_is_invalid(bit2_pos)) 1017 return; 1018 if (bit1_pos > 
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}

	if (!ports[port_id].dev_info.device) {
		printf("Invalid device\n");
		return 0;
	}

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
	} else {
		printf("Not a PCI device\n");
		return 1;
	}

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
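/*
 * Set the port MTU and keep the JUMBO_FRAME Rx offload flag and the
 * configured max_rx_pkt_len consistent with the new value.
 */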
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;
	struct rte_port *rte_port = &ports[port_id];
	struct rte_eth_dev_info dev_info;
	uint16_t eth_overhead;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
		printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
		       mtu, dev_info.min_mtu, dev_info.max_mtu);
		return;
	}
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag)
		printf("Set MTU failed. diag=%d\n", diag);
	else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		/*
		 * Ether overhead in driver is equal to the difference of
		 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
		 * device supports jumbo frame.
		 */
		eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
		if (mtu > RTE_ETHER_MTU) {
			rte_port->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
			rte_port->dev_conf.rxmode.max_rx_pkt_len =
						mtu + eth_overhead;
		} else
			rte_port->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;
	}
}

/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type;
	switch (tunnel->type) {
	default:
		type = "unknown";
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	}

	return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
		       port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
		       port_id, tunnel_id);
	}
}
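/*
 * Create a flow tunnel object for the given type (only "vxlan" is
 * accepted) or reuse an existing one of the same type; a new entry gets
 * the highest existing id + 1 and is inserted at the list head.
 */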
void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else {
		printf("cannot offload \"%s\" tunnel type\n", ops->type);
		return;
	}
	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->tunnel.type == type)
			break;
	}
	if (!flt) {
		flt = calloc(1, sizeof(*flt));
		if (!flt) {
			printf("failed to allocate port flt object\n");
			return;
		}
		flt->tunnel.type = type;
		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
			  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
	}
	printf("port %d: flow tunnel #%u type %s\n",
	       port_id, flt->id, ops->type);
}

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)",
	       rte_strerror(err));
	return -err;
}

static void
rss_config_display(struct rte_flow_action_rss *rss_conf)
{
	uint8_t i;

	if (rss_conf == NULL) {
		printf("Invalid rule\n");
		return;
	}

	printf("RSS:\n"
	       " queues:");
	if (rss_conf->queue_num == 0)
		printf(" none");
	for (i = 0; i < rss_conf->queue_num; i++)
		printf(" %d", rss_conf->queue[i]);
	printf("\n");

	printf(" function: ");
	switch (rss_conf->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		printf("default\n");
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		printf("toeplitz\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		printf("simple_xor\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
		printf("symmetric_toeplitz\n");
		break;
	default:
		printf("Unknown function\n");
		return;
	}

	printf(" types:\n");
	if (rss_conf->types == 0) {
		printf(" none\n");
		return;
	}
	for (i = 0; rss_type_table[i].str; i++) {
		if ((rss_conf->types &
		    rss_type_table[i].rss_type) ==
		    rss_type_table[i].rss_type &&
		    rss_type_table[i].rss_type != 0)
			printf(" %s\n", rss_type_table[i].str);
	}
}
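/*
 * Look up an indirect action by id on the given port; warns and returns
 * NULL when the id is unknown.
 */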
error->message : "(no stated reason)", 1343 rte_strerror(err)); 1344 return -err; 1345 } 1346 1347 static void 1348 rss_config_display(struct rte_flow_action_rss *rss_conf) 1349 { 1350 uint8_t i; 1351 1352 if (rss_conf == NULL) { 1353 printf("Invalid rule\n"); 1354 return; 1355 } 1356 1357 printf("RSS:\n" 1358 " queues:"); 1359 if (rss_conf->queue_num == 0) 1360 printf(" none"); 1361 for (i = 0; i < rss_conf->queue_num; i++) 1362 printf(" %d", rss_conf->queue[i]); 1363 printf("\n"); 1364 1365 printf(" function: "); 1366 switch (rss_conf->func) { 1367 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1368 printf("default\n"); 1369 break; 1370 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1371 printf("toeplitz\n"); 1372 break; 1373 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1374 printf("simple_xor\n"); 1375 break; 1376 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1377 printf("symmetric_toeplitz\n"); 1378 break; 1379 default: 1380 printf("Unknown function\n"); 1381 return; 1382 } 1383 1384 printf(" types:\n"); 1385 if (rss_conf->types == 0) { 1386 printf(" none\n"); 1387 return; 1388 } 1389 for (i = 0; rss_type_table[i].str; i++) { 1390 if ((rss_conf->types & 1391 rss_type_table[i].rss_type) == 1392 rss_type_table[i].rss_type && 1393 rss_type_table[i].rss_type != 0) 1394 printf(" %s\n", rss_type_table[i].str); 1395 } 1396 } 1397 1398 static struct port_indirect_action * 1399 action_get_by_id(portid_t port_id, uint32_t id) 1400 { 1401 struct rte_port *port; 1402 struct port_indirect_action **ppia; 1403 struct port_indirect_action *pia = NULL; 1404 1405 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1406 port_id == (portid_t)RTE_PORT_ALL) 1407 return NULL; 1408 port = &ports[port_id]; 1409 ppia = &port->actions_list; 1410 while (*ppia) { 1411 if ((*ppia)->id == id) { 1412 pia = *ppia; 1413 break; 1414 } 1415 ppia = &(*ppia)->next; 1416 } 1417 if (!pia) 1418 printf("Failed to find indirect action #%u on port %u\n", 1419 id, port_id); 1420 return pia; 1421 } 1422 1423 static int 1424 action_alloc(portid_t port_id, uint32_t id, 1425 struct port_indirect_action **action) 1426 { 1427 struct rte_port *port; 1428 struct port_indirect_action **ppia; 1429 struct port_indirect_action *pia = NULL; 1430 1431 *action = NULL; 1432 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1433 port_id == (portid_t)RTE_PORT_ALL) 1434 return -EINVAL; 1435 port = &ports[port_id]; 1436 if (id == UINT32_MAX) { 1437 /* taking first available ID */ 1438 if (port->actions_list) { 1439 if (port->actions_list->id == UINT32_MAX - 1) { 1440 printf("Highest indirect action ID is already" 1441 " assigned, delete it first\n"); 1442 return -ENOMEM; 1443 } 1444 id = port->actions_list->id + 1; 1445 } else { 1446 id = 0; 1447 } 1448 } 1449 pia = calloc(1, sizeof(*pia)); 1450 if (!pia) { 1451 printf("Allocation of port %u indirect action failed\n", 1452 port_id); 1453 return -ENOMEM; 1454 } 1455 ppia = &port->actions_list; 1456 while (*ppia && (*ppia)->id > id) 1457 ppia = &(*ppia)->next; 1458 if (*ppia && (*ppia)->id == id) { 1459 printf("Indirect action #%u is already assigned," 1460 " delete it first\n", id); 1461 free(pia); 1462 return -EINVAL; 1463 } 1464 pia->next = *ppia; 1465 pia->id = id; 1466 *ppia = pia; 1467 *action = pia; 1468 return 0; 1469 } 1470 1471 /** Create indirect action */ 1472 int 1473 port_action_handle_create(portid_t port_id, uint32_t id, 1474 const struct rte_flow_indir_action_conf *conf, 1475 const struct rte_flow_action *action) 1476 { 1477 struct port_indirect_action *pia; 1478 int ret; 1479 struct rte_flow_error error; 1480 
static int
action_alloc(portid_t port_id, uint32_t id,
	     struct port_indirect_action **action)
{
	struct rte_port *port;
	struct port_indirect_action **ppia;
	struct port_indirect_action *pia = NULL;

	*action = NULL;
	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (port->actions_list) {
			if (port->actions_list->id == UINT32_MAX - 1) {
				printf("Highest indirect action ID is already"
				       " assigned, delete it first\n");
				return -ENOMEM;
			}
			id = port->actions_list->id + 1;
		} else {
			id = 0;
		}
	}
	pia = calloc(1, sizeof(*pia));
	if (!pia) {
		printf("Allocation of port %u indirect action failed\n",
		       port_id);
		return -ENOMEM;
	}
	ppia = &port->actions_list;
	while (*ppia && (*ppia)->id > id)
		ppia = &(*ppia)->next;
	if (*ppia && (*ppia)->id == id) {
		printf("Indirect action #%u is already assigned,"
		       " delete it first\n", id);
		free(pia);
		return -EINVAL;
	}
	pia->next = *ppia;
	pia->id = id;
	*ppia = pia;
	*action = pia;
	return 0;
}

/** Create indirect action */
int
port_action_handle_create(portid_t port_id, uint32_t id,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action)
{
	struct port_indirect_action *pia;
	int ret;
	struct rte_flow_error error;

	ret = action_alloc(port_id, id, &pia);
	if (ret)
		return ret;
	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
		struct rte_flow_action_age *age =
			(struct rte_flow_action_age *)(uintptr_t)(action->conf);

		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
		age->context = &pia->age_type;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
		struct rte_flow_action_conntrack *ct =
			(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);

		memcpy(ct, &conntrack_context, sizeof(*ct));
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
						    &error);
	if (!pia->handle) {
		uint32_t destroy_id = pia->id;
		port_action_handle_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	pia->type = action->type;
	printf("Indirect action #%u created\n", pia->id);
	return 0;
}

/** Destroy indirect action */
int
port_action_handle_destroy(portid_t port_id,
			   uint32_t n,
			   const uint32_t *actions)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_indirect_action *pia = *tmp;

			if (actions[i] != pia->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
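			/*
			 * An entry whose creation failed can have a NULL
			 * handle; it is unlinked and freed below without
			 * calling the PMD in that case.
			 */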
			if (pia->handle && rte_flow_action_handle_destroy(
					port_id, pia->handle, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			*tmp = pia->next;
			printf("Indirect action #%u destroyed\n", pia->id);
			free(pia);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Get indirect action by port + id */
struct rte_flow_action_handle *
port_action_handle_get_by_id(portid_t port_id, uint32_t id)
{
	struct port_indirect_action *pia = action_get_by_id(port_id, id);

	return (pia) ? pia->handle : NULL;
}

/** Update indirect action */
int
port_action_handle_update(portid_t port_id, uint32_t id,
			  const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_flow_action_handle *action_handle;
	struct port_indirect_action *pia;
	const void *update;

	action_handle = port_action_handle_get_by_id(port_id, id);
	if (!action_handle)
		return -EINVAL;
	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		update = action->conf;
		break;
	default:
		update = action;
		break;
	}
	if (rte_flow_action_handle_update(port_id, action_handle, update,
					  &error)) {
		return port_flow_complain(&error);
	}
	printf("Indirect action #%u updated\n", id);
	return 0;
}

int
port_action_handle_query(portid_t port_id, uint32_t id)
{
	struct rte_flow_error error;
	struct port_indirect_action *pia;
	uint64_t default_data;
	void *data = NULL;
	int ret = 0;

	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_RSS:
	case RTE_FLOW_ACTION_TYPE_AGE:
		data = &default_data;
		break;
	default:
		printf("Indirect action %u (type: %d) on port %u doesn't"
		       " support query\n", id, pia->type, port_id);
		return -1;
	}
	if (rte_flow_action_handle_query(port_id, pia->handle, data, &error))
		ret = port_flow_complain(&error);
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_RSS:
		if (!ret)
			printf("Shared RSS action:\n\trefs:%u\n",
			       *((uint32_t *)data));
		data = NULL;
		break;
	case RTE_FLOW_ACTION_TYPE_AGE:
		if (!ret) {
			struct rte_flow_query_age *resp = data;

			printf("AGE:\n"
			       " aged: %u\n"
			       " sec_since_last_hit_valid: %u\n"
			       " sec_since_last_hit: %" PRIu32 "\n",
			       resp->aged,
			       resp->sec_since_last_hit_valid,
			       resp->sec_since_last_hit);
		}
		data = NULL;
		break;
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		if (!ret) {
			struct rte_flow_action_conntrack *ct = data;

			printf("Conntrack Context:\n"
			       " Peer: %u, Flow dir: %s, Enable: %u\n"
			       " Live: %u, SACK: %u, CACK: %u\n"
			       " Packet dir: %s, Liberal: %u, State: %u\n"
			       " Factor: %u, Retrans: %u, TCP flags: %u\n"
			       " Last Seq: %u, Last ACK: %u\n"
			       " Last Win: %u, Last End: %u\n",
			       ct->peer_port,
			       ct->is_original_dir ? "Original" : "Reply",
			       ct->enable, ct->live_connection,
			       ct->selective_ack, ct->challenge_ack_passed,
			       ct->last_direction ? "Original" : "Reply",
"Original" : "Reply", 1662 ct->liberal_mode, ct->state, 1663 ct->max_ack_window, ct->retransmission_limit, 1664 ct->last_index, ct->last_seq, ct->last_ack, 1665 ct->last_window, ct->last_end); 1666 printf(" Original Dir:\n" 1667 " scale: %u, fin: %u, ack seen: %u\n" 1668 " unacked data: %u\n Sent end: %u," 1669 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1670 ct->original_dir.scale, 1671 ct->original_dir.close_initiated, 1672 ct->original_dir.last_ack_seen, 1673 ct->original_dir.data_unacked, 1674 ct->original_dir.sent_end, 1675 ct->original_dir.reply_end, 1676 ct->original_dir.max_win, 1677 ct->original_dir.max_ack); 1678 printf(" Reply Dir:\n" 1679 " scale: %u, fin: %u, ack seen: %u\n" 1680 " unacked data: %u\n Sent end: %u," 1681 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1682 ct->reply_dir.scale, 1683 ct->reply_dir.close_initiated, 1684 ct->reply_dir.last_ack_seen, 1685 ct->reply_dir.data_unacked, 1686 ct->reply_dir.sent_end, ct->reply_dir.reply_end, 1687 ct->reply_dir.max_win, ct->reply_dir.max_ack); 1688 } 1689 data = NULL; 1690 break; 1691 default: 1692 printf("Indirect action %u (type: %d) on port %u doesn't" 1693 " support query\n", id, pia->type, port_id); 1694 ret = -1; 1695 } 1696 return ret; 1697 } 1698 1699 static struct port_flow_tunnel * 1700 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1701 const struct rte_flow_item *pattern, 1702 const struct rte_flow_action *actions, 1703 const struct tunnel_ops *tunnel_ops) 1704 { 1705 int ret; 1706 struct rte_port *port; 1707 struct port_flow_tunnel *pft; 1708 struct rte_flow_error error; 1709 1710 port = &ports[port_id]; 1711 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 1712 if (!pft) { 1713 printf("failed to locate port flow tunnel #%u\n", 1714 tunnel_ops->id); 1715 return NULL; 1716 } 1717 if (tunnel_ops->actions) { 1718 uint32_t num_actions; 1719 const struct rte_flow_action *aptr; 1720 1721 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 1722 &pft->pmd_actions, 1723 &pft->num_pmd_actions, 1724 &error); 1725 if (ret) { 1726 port_flow_complain(&error); 1727 return NULL; 1728 } 1729 for (aptr = actions, num_actions = 1; 1730 aptr->type != RTE_FLOW_ACTION_TYPE_END; 1731 aptr++, num_actions++); 1732 pft->actions = malloc( 1733 (num_actions + pft->num_pmd_actions) * 1734 sizeof(actions[0])); 1735 if (!pft->actions) { 1736 rte_flow_tunnel_action_decap_release( 1737 port_id, pft->actions, 1738 pft->num_pmd_actions, &error); 1739 return NULL; 1740 } 1741 rte_memcpy(pft->actions, pft->pmd_actions, 1742 pft->num_pmd_actions * sizeof(actions[0])); 1743 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 1744 num_actions * sizeof(actions[0])); 1745 } 1746 if (tunnel_ops->items) { 1747 uint32_t num_items; 1748 const struct rte_flow_item *iptr; 1749 1750 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 1751 &pft->pmd_items, 1752 &pft->num_pmd_items, 1753 &error); 1754 if (ret) { 1755 port_flow_complain(&error); 1756 return NULL; 1757 } 1758 for (iptr = pattern, num_items = 1; 1759 iptr->type != RTE_FLOW_ITEM_TYPE_END; 1760 iptr++, num_items++); 1761 pft->items = malloc((num_items + pft->num_pmd_items) * 1762 sizeof(pattern[0])); 1763 if (!pft->items) { 1764 rte_flow_tunnel_item_release( 1765 port_id, pft->pmd_items, 1766 pft->num_pmd_items, &error); 1767 return NULL; 1768 } 1769 rte_memcpy(pft->items, pft->pmd_items, 1770 pft->num_pmd_items * sizeof(pattern[0])); 1771 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 1772 num_items * sizeof(pattern[0])); 1773 } 1774 1775 return pft; 1776 } 1777 1778 
static void
port_flow_tunnel_offload_cmd_release(portid_t port_id,
				     const struct tunnel_ops *tunnel_ops,
				     struct port_flow_tunnel *pft)
{
	struct rte_flow_error error;

	if (tunnel_ops->actions) {
		free(pft->actions);
		rte_flow_tunnel_action_decap_release(
			port_id, pft->pmd_actions,
			pft->num_pmd_actions, &error);
		pft->actions = NULL;
		pft->pmd_actions = NULL;
	}
	if (tunnel_ops->items) {
		free(pft->items);
		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
					     pft->num_pmd_items,
					     &error);
		pft->items = NULL;
		pft->pmd_items = NULL;
	}
}

/** Add port meter policy */
int
port_meter_policy_add(portid_t port_id, uint32_t policy_id,
		      const struct rte_flow_action *actions)
{
	struct rte_mtr_error error;
	const struct rte_flow_action *act = actions;
	const struct rte_flow_action *start;
	struct rte_mtr_meter_policy_params policy;
	uint32_t i = 0, act_n;
	int ret;
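	/*
	 * "actions" packs RTE_COLORS END-terminated action lists back to
	 * back, one list per color; advance past each END terminator.
	 */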
	for (i = 0; i < RTE_COLORS; i++) {
		for (act_n = 0, start = act;
		     act->type != RTE_FLOW_ACTION_TYPE_END; act++)
			act_n++;
		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
			policy.actions[i] = start;
		else
			policy.actions[i] = NULL;
		act++;
	}
	ret = rte_mtr_meter_policy_add(port_id,
				       policy_id,
				       &policy, &error);
	if (ret)
		print_mtr_err_msg(&error);
	return ret;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions,
		   const struct tunnel_ops *tunnel_ops)
{
	struct rte_flow_error error;
	struct port_flow_tunnel *pft = NULL;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (tunnel_ops->enabled) {
		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
							actions, tunnel_ops);
		if (!pft)
			return -ENOENT;
		if (pft->items)
			pattern = pft->items;
		if (pft->actions)
			actions = pft->actions;
	}
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	if (tunnel_ops->enabled)
		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
	printf("Flow rule validated\n");
	return 0;
}

/** Return age action structure if exists, otherwise NULL. */
static struct rte_flow_action_age *
age_action_get(const struct rte_flow_action *actions)
{
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_AGE:
			return (struct rte_flow_action_age *)
				(uintptr_t)actions->conf;
		default:
			break;
		}
	}
	return NULL;
}

/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions,
		 const struct tunnel_ops *tunnel_ops)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id = 0;
	struct rte_flow_error error;
	struct port_flow_tunnel *pft = NULL;
	struct rte_flow_action_age *age = age_action_get(actions);

	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first\n");
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	}
	if (tunnel_ops->enabled) {
		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
							actions, tunnel_ops);
		if (!pft)
			return -ENOENT;
		if (pft->items)
			pattern = pft->items;
		if (pft->actions)
			actions = pft->actions;
	}
	pf = port_flow_new(attr, pattern, actions, &error);
	if (!pf)
		return port_flow_complain(&error);
	if (age) {
		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
		age->context = &pf->age_type;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow) {
		free(pf);
		return port_flow_complain(&error);
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	if (tunnel_ops->enabled)
		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
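	/*
	 * The pointer-to-pointer walk below lets matching entries be
	 * unlinked from any position in the singly linked list.
	 */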
*/ 2001 memset(&error, 0x44, sizeof(error)); 2002 if (rte_flow_flush(port_id, &error)) { 2003 ret = port_flow_complain(&error); 2004 } 2005 2006 while (port->flow_list) { 2007 struct port_flow *pf = port->flow_list->next; 2008 2009 free(port->flow_list); 2010 port->flow_list = pf; 2011 } 2012 return ret; 2013 } 2014 2015 /** Dump flow rules. */ 2016 int 2017 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 2018 const char *file_name) 2019 { 2020 int ret = 0; 2021 FILE *file = stdout; 2022 struct rte_flow_error error; 2023 struct rte_port *port; 2024 struct port_flow *pflow; 2025 struct rte_flow *tmp_flow = NULL; 2026 bool found = false; 2027 2028 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2029 port_id == (portid_t)RTE_PORT_ALL) 2030 return -EINVAL; 2031 2032 if (!dump_all) { 2033 port = &ports[port_id]; 2034 pflow = port->flow_list; 2035 while (pflow) { 2036 if (rule_id != pflow->id) { 2037 pflow = pflow->next; 2038 } else { 2039 tmp_flow = pflow->flow; 2040 if (tmp_flow) 2041 found = true; 2042 break; 2043 } 2044 } 2045 if (!found) { 2046 printf("Failed to dump flow %d\n", rule_id); 2047 return -EINVAL; 2048 } 2049 } 2050 2051 if (file_name && strlen(file_name)) { 2052 file = fopen(file_name, "w"); 2053 if (!file) { 2054 printf("Failed to create file %s: %s\n", file_name, 2055 strerror(errno)); 2056 return -errno; 2057 } 2058 } 2059 2060 if (!dump_all) 2061 ret = rte_flow_dev_dump(port_id, tmp_flow, file, &error); 2062 else 2063 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 2064 if (ret) { 2065 port_flow_complain(&error); 2066 printf("Failed to dump flow: %s\n", strerror(-ret)); 2067 } else 2068 printf("Flow dump finished\n"); 2069 if (file_name && strlen(file_name)) 2070 fclose(file); 2071 return ret; 2072 } 2073 2074 /** Query a flow rule. */ 2075 int 2076 port_flow_query(portid_t port_id, uint32_t rule, 2077 const struct rte_flow_action *action) 2078 { 2079 struct rte_flow_error error; 2080 struct rte_port *port; 2081 struct port_flow *pf; 2082 const char *name; 2083 union { 2084 struct rte_flow_query_count count; 2085 struct rte_flow_action_rss rss_conf; 2086 struct rte_flow_query_age age; 2087 } query; 2088 int ret; 2089 2090 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2091 port_id == (portid_t)RTE_PORT_ALL) 2092 return -EINVAL; 2093 port = &ports[port_id]; 2094 for (pf = port->flow_list; pf; pf = pf->next) 2095 if (pf->id == rule) 2096 break; 2097 if (!pf) { 2098 printf("Flow rule #%u not found\n", rule); 2099 return -ENOENT; 2100 } 2101 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2102 &name, sizeof(name), 2103 (void *)(uintptr_t)action->type, &error); 2104 if (ret < 0) 2105 return port_flow_complain(&error); 2106 switch (action->type) { 2107 case RTE_FLOW_ACTION_TYPE_COUNT: 2108 case RTE_FLOW_ACTION_TYPE_RSS: 2109 case RTE_FLOW_ACTION_TYPE_AGE: 2110 break; 2111 default: 2112 printf("Cannot query action type %d (%s)\n", 2113 action->type, name); 2114 return -ENOTSUP; 2115 } 2116 /* Poisoning to make sure PMDs update it in case of error.
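 * The query buffer below is a union: only the member matching
 * action->type (count, rss_conf or age) is written by the PMD, which
 * is why the whole union is zeroed before the call.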
*/ 2117 memset(&error, 0x55, sizeof(error)); 2118 memset(&query, 0, sizeof(query)); 2119 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2120 return port_flow_complain(&error); 2121 switch (action->type) { 2122 case RTE_FLOW_ACTION_TYPE_COUNT: 2123 printf("%s:\n" 2124 " hits_set: %u\n" 2125 " bytes_set: %u\n" 2126 " hits: %" PRIu64 "\n" 2127 " bytes: %" PRIu64 "\n", 2128 name, 2129 query.count.hits_set, 2130 query.count.bytes_set, 2131 query.count.hits, 2132 query.count.bytes); 2133 break; 2134 case RTE_FLOW_ACTION_TYPE_RSS: 2135 rss_config_display(&query.rss_conf); 2136 break; 2137 case RTE_FLOW_ACTION_TYPE_AGE: 2138 printf("%s:\n" 2139 " aged: %u\n" 2140 " sec_since_last_hit_valid: %u\n" 2141 " sec_since_last_hit: %" PRIu32 "\n", 2142 name, 2143 query.age.aged, 2144 query.age.sec_since_last_hit_valid, 2145 query.age.sec_since_last_hit); 2146 break; 2147 default: 2148 printf("Cannot display result for action type %d (%s)\n", 2149 action->type, name); 2150 break; 2151 } 2152 return 0; 2153 } 2154 2155 /** List aged flows and optionally destroy them. */ 2156 void 2157 port_flow_aged(portid_t port_id, uint8_t destroy) 2158 { 2159 void **contexts; 2160 int nb_context, total = 0, idx; 2161 struct rte_flow_error error; 2162 enum age_action_context_type *type; 2163 union { 2164 struct port_flow *pf; 2165 struct port_indirect_action *pia; 2166 } ctx; 2167 2168 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2169 port_id == (portid_t)RTE_PORT_ALL) 2170 return; 2171 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2172 if (total < 0) { 2173 port_flow_complain(&error); 2174 return; 2175 } 2176 printf("Port %u total aged flows: %d\n", port_id, total); 2177 if (total == 0) 2178 return; 2179 contexts = malloc(sizeof(void *) * total); 2180 if (contexts == NULL) { 2181 printf("Cannot allocate contexts for aged flows\n"); 2182 return; 2183 } 2184 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 2185 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2186 if (nb_context != total) { 2187 printf("Port:%d get aged flows count(%d) != total(%d)\n", 2188 port_id, nb_context, total); 2189 free(contexts); 2190 return; 2191 } 2192 total = 0; 2193 for (idx = 0; idx < nb_context; idx++) { 2194 if (!contexts[idx]) { 2195 printf("Error: got NULL context on port %u\n", port_id); 2196 continue; 2197 } 2198 type = (enum age_action_context_type *)contexts[idx]; 2199 switch (*type) { 2200 case ACTION_AGE_CONTEXT_TYPE_FLOW: 2201 ctx.pf = container_of(type, struct port_flow, age_type); 2202 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 2203 "\t%c%c%c\t\n", 2204 "Flow", 2205 ctx.pf->id, 2206 ctx.pf->rule.attr->group, 2207 ctx.pf->rule.attr->priority, 2208 ctx.pf->rule.attr->ingress ? 'i' : '-', 2209 ctx.pf->rule.attr->egress ? 'e' : '-', 2210 ctx.pf->rule.attr->transfer ? 't' : '-'); 2211 if (destroy && !port_flow_destroy(port_id, 1, 2212 &ctx.pf->id)) 2213 total++; 2214 break; 2215 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 2216 ctx.pia = container_of(type, 2217 struct port_indirect_action, age_type); 2218 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 2219 ctx.pia->id); 2220 break; 2221 default: 2222 printf("Error: invalid context type on port %u\n", port_id); 2223 break; 2224 } 2225 } 2226 printf("\n%d flows destroyed\n", total); 2227 free(contexts); 2228 } 2229 2230 /** List flow rules.
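 *
 * Rules are listed sorted by group, then priority, then rule ID;
 * hypothetical output:
 *
 *   ID      Group   Prio    Attr    Rule
 *   0       0       0       i--     ETH IPV4 => QUEUE
 *   2       0       1       i--     ETH IPV6 => DROP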
*/ 2231 void 2232 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2233 { 2234 struct rte_port *port; 2235 struct port_flow *pf; 2236 struct port_flow *list = NULL; 2237 uint32_t i; 2238 2239 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2240 port_id == (portid_t)RTE_PORT_ALL) 2241 return; 2242 port = &ports[port_id]; 2243 if (!port->flow_list) 2244 return; 2245 /* Sort flows by group, priority and ID. */ 2246 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2247 struct port_flow **tmp; 2248 const struct rte_flow_attr *curr = pf->rule.attr; 2249 2250 if (n) { 2251 /* Filter out unwanted groups. */ 2252 for (i = 0; i != n; ++i) 2253 if (curr->group == group[i]) 2254 break; 2255 if (i == n) 2256 continue; 2257 } 2258 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2259 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2260 2261 if (curr->group > comp->group || 2262 (curr->group == comp->group && 2263 curr->priority > comp->priority) || 2264 (curr->group == comp->group && 2265 curr->priority == comp->priority && 2266 pf->id > (*tmp)->id)) 2267 continue; 2268 break; 2269 } 2270 pf->tmp = *tmp; 2271 *tmp = pf; 2272 } 2273 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2274 for (pf = list; pf != NULL; pf = pf->tmp) { 2275 const struct rte_flow_item *item = pf->rule.pattern; 2276 const struct rte_flow_action *action = pf->rule.actions; 2277 const char *name; 2278 2279 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2280 pf->id, 2281 pf->rule.attr->group, 2282 pf->rule.attr->priority, 2283 pf->rule.attr->ingress ? 'i' : '-', 2284 pf->rule.attr->egress ? 'e' : '-', 2285 pf->rule.attr->transfer ? 't' : '-'); 2286 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2287 if ((uint32_t)item->type > INT_MAX) 2288 name = "PMD_INTERNAL"; 2289 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2290 &name, sizeof(name), 2291 (void *)(uintptr_t)item->type, 2292 NULL) <= 0) 2293 name = "[UNKNOWN]"; 2294 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2295 printf("%s ", name); 2296 ++item; 2297 } 2298 printf("=>"); 2299 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2300 if ((uint32_t)action->type > INT_MAX) 2301 name = "PMD_INTERNAL"; 2302 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2303 &name, sizeof(name), 2304 (void *)(uintptr_t)action->type, 2305 NULL) <= 0) 2306 name = "[UNKNOWN]"; 2307 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2308 printf(" %s", name); 2309 ++action; 2310 } 2311 printf("\n"); 2312 } 2313 } 2314 2315 /** Restrict ingress traffic to the defined flow rules. */ 2316 int 2317 port_flow_isolate(portid_t port_id, int set) 2318 { 2319 struct rte_flow_error error; 2320 2321 /* Poisoning to make sure PMDs update it in case of error. */ 2322 memset(&error, 0x66, sizeof(error)); 2323 if (rte_flow_isolate(port_id, set, &error)) 2324 return port_flow_complain(&error); 2325 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2326 port_id, 2327 set ? "now restricted" : "not restricted anymore"); 2328 return 0; 2329 } 2330 2331 /* 2332 * RX/TX ring descriptors display functions. 
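 * The helpers below validate a request in two steps: the queue id
 * against the configured queue count, and the descriptor id against
 * the ring size reported by the PMD (or a fallback value, see
 * get_rx_ring_size()/get_tx_ring_size()); the descriptor is then read
 * from the ring's memzone and printed as raw dwords.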
2333 */ 2334 int 2335 rx_queue_id_is_invalid(queueid_t rxq_id) 2336 { 2337 if (rxq_id < nb_rxq) 2338 return 0; 2339 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 2340 return 1; 2341 } 2342 2343 int 2344 tx_queue_id_is_invalid(queueid_t txq_id) 2345 { 2346 if (txq_id < nb_txq) 2347 return 0; 2348 printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq); 2349 return 1; 2350 } 2351 2352 static int 2353 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2354 { 2355 struct rte_port *port = &ports[port_id]; 2356 struct rte_eth_rxq_info rx_qinfo; 2357 int ret; 2358 2359 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2360 if (ret == 0) { 2361 *ring_size = rx_qinfo.nb_desc; 2362 return ret; 2363 } 2364 2365 if (ret != -ENOTSUP) 2366 return ret; 2367 /* 2368 * If rte_eth_rx_queue_info_get() is not supported by this PMD, the 2369 * ring size stored in testpmd is used for the validity check. When 2370 * the rxq was configured by rte_eth_rx_queue_setup() with nb_rx_desc 2371 * set to 0, a default value provided by the PMD is used for this 2372 * rxq. If that default value is also 0, 2373 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE is used instead. 2374 */ 2375 if (port->nb_rx_desc[rxq_id]) 2376 *ring_size = port->nb_rx_desc[rxq_id]; 2377 else if (port->dev_info.default_rxportconf.ring_size) 2378 *ring_size = port->dev_info.default_rxportconf.ring_size; 2379 else 2380 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2381 return 0; 2382 } 2383 2384 static int 2385 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2386 { 2387 struct rte_port *port = &ports[port_id]; 2388 struct rte_eth_txq_info tx_qinfo; 2389 int ret; 2390 2391 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2392 if (ret == 0) { 2393 *ring_size = tx_qinfo.nb_desc; 2394 return ret; 2395 } 2396 2397 if (ret != -ENOTSUP) 2398 return ret; 2399 /* 2400 * If rte_eth_tx_queue_info_get() is not supported by this PMD, the 2401 * ring size stored in testpmd is used for the validity check. When 2402 * the txq was configured by rte_eth_tx_queue_setup() with nb_tx_desc 2403 * set to 0, a default value provided by the PMD is used for this 2404 * txq. If that default value is also 0, 2405 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE is used instead.
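 *
 * In short, the fallback order on the TX side is (the RX side above
 * mirrors it):
 *
 *   port->nb_tx_desc[txq_id]                user-configured ring size
 *   dev_info.default_txportconf.ring_size   PMD-provided default
 *   RTE_ETH_DEV_FALLBACK_TX_RINGSIZE        ethdev fallback constant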
2406 */ 2407 if (port->nb_tx_desc[txq_id]) 2408 *ring_size = port->nb_tx_desc[txq_id]; 2409 else if (port->dev_info.default_txportconf.ring_size) 2410 *ring_size = port->dev_info.default_txportconf.ring_size; 2411 else 2412 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2413 return 0; 2414 } 2415 2416 static int 2417 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2418 { 2419 uint16_t ring_size; 2420 int ret; 2421 2422 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2423 if (ret) 2424 return 1; 2425 2426 if (rxdesc_id < ring_size) 2427 return 0; 2428 2429 printf("Invalid RX descriptor %u (must be < ring_size=%u)\n", 2430 rxdesc_id, ring_size); 2431 return 1; 2432 } 2433 2434 static int 2435 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2436 { 2437 uint16_t ring_size; 2438 int ret; 2439 2440 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2441 if (ret) 2442 return 1; 2443 2444 if (txdesc_id < ring_size) 2445 return 0; 2446 2447 printf("Invalid TX descriptor %u (must be < ring_size=%u)\n", 2448 txdesc_id, ring_size); 2449 return 1; 2450 } 2451 2452 static const struct rte_memzone * 2453 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2454 { 2455 char mz_name[RTE_MEMZONE_NAMESIZE]; 2456 const struct rte_memzone *mz; 2457 2458 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2459 port_id, q_id, ring_name); 2460 mz = rte_memzone_lookup(mz_name); 2461 if (mz == NULL) 2462 printf("%s ring memory zone of (port %d, queue %d) not " 2463 "found (zone name = %s)\n", 2464 ring_name, port_id, q_id, mz_name); 2465 return mz; 2466 } 2467 2468 union igb_ring_dword { 2469 uint64_t dword; 2470 struct { 2471 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2472 uint32_t lo; 2473 uint32_t hi; 2474 #else 2475 uint32_t hi; 2476 uint32_t lo; 2477 #endif 2478 } words; 2479 }; 2480 2481 struct igb_ring_desc_32_bytes { 2482 union igb_ring_dword lo_dword; 2483 union igb_ring_dword hi_dword; 2484 union igb_ring_dword resv1; 2485 union igb_ring_dword resv2; 2486 }; 2487 2488 struct igb_ring_desc_16_bytes { 2489 union igb_ring_dword lo_dword; 2490 union igb_ring_dword hi_dword; 2491 }; 2492 2493 static void 2494 ring_rxd_display_dword(union igb_ring_dword dword) 2495 { 2496 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2497 (unsigned)dword.words.hi); 2498 } 2499 2500 static void 2501 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2502 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2503 portid_t port_id, 2504 #else 2505 __rte_unused portid_t port_id, 2506 #endif 2507 uint16_t desc_id) 2508 { 2509 struct igb_ring_desc_16_bytes *ring = 2510 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2511 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2512 int ret; 2513 struct rte_eth_dev_info dev_info; 2514 2515 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2516 if (ret != 0) 2517 return; 2518 2519 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2520 /* 32 bytes RX descriptor, i40e only */ 2521 struct igb_ring_desc_32_bytes *ring = 2522 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2523 ring[desc_id].lo_dword.dword = 2524 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2525 ring_rxd_display_dword(ring[desc_id].lo_dword); 2526 ring[desc_id].hi_dword.dword = 2527 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2528 ring_rxd_display_dword(ring[desc_id].hi_dword); 2529 ring[desc_id].resv1.dword = 2530 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2531 ring_rxd_display_dword(ring[desc_id].resv1); 2532 ring[desc_id].resv2.dword =
2533 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2534 ring_rxd_display_dword(ring[desc_id].resv2); 2535 2536 return; 2537 } 2538 #endif 2539 /* 16 bytes RX descriptor */ 2540 ring[desc_id].lo_dword.dword = 2541 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2542 ring_rxd_display_dword(ring[desc_id].lo_dword); 2543 ring[desc_id].hi_dword.dword = 2544 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2545 ring_rxd_display_dword(ring[desc_id].hi_dword); 2546 } 2547 2548 static void 2549 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2550 { 2551 struct igb_ring_desc_16_bytes *ring; 2552 struct igb_ring_desc_16_bytes txd; 2553 2554 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2555 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2556 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2557 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2558 (unsigned)txd.lo_dword.words.lo, 2559 (unsigned)txd.lo_dword.words.hi, 2560 (unsigned)txd.hi_dword.words.lo, 2561 (unsigned)txd.hi_dword.words.hi); 2562 } 2563 2564 void 2565 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2566 { 2567 const struct rte_memzone *rx_mz; 2568 2569 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2570 return; 2571 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2572 if (rx_mz == NULL) 2573 return; 2574 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2575 } 2576 2577 void 2578 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2579 { 2580 const struct rte_memzone *tx_mz; 2581 2582 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2583 return; 2584 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2585 if (tx_mz == NULL) 2586 return; 2587 ring_tx_descriptor_display(tx_mz, txd_id); 2588 } 2589 2590 void 2591 fwd_lcores_config_display(void) 2592 { 2593 lcoreid_t lc_id; 2594 2595 printf("List of forwarding lcores:"); 2596 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2597 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2598 printf("\n"); 2599 } 2600 void 2601 rxtx_config_display(void) 2602 { 2603 portid_t pid; 2604 queueid_t qid; 2605 2606 printf(" %s packet forwarding%s packets/burst=%d\n", 2607 cur_fwd_eng->fwd_mode_name, 2608 retry_enabled == 0 ? 
"" : " with retry", 2609 nb_pkt_per_burst); 2610 2611 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2612 printf(" packet len=%u - nb packet segments=%d\n", 2613 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2614 2615 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2616 nb_fwd_lcores, nb_fwd_ports); 2617 2618 RTE_ETH_FOREACH_DEV(pid) { 2619 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2620 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2621 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2622 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2623 struct rte_eth_rxq_info rx_qinfo; 2624 struct rte_eth_txq_info tx_qinfo; 2625 uint16_t rx_free_thresh_tmp; 2626 uint16_t tx_free_thresh_tmp; 2627 uint16_t tx_rs_thresh_tmp; 2628 uint16_t nb_rx_desc_tmp; 2629 uint16_t nb_tx_desc_tmp; 2630 uint64_t offloads_tmp; 2631 uint8_t pthresh_tmp; 2632 uint8_t hthresh_tmp; 2633 uint8_t wthresh_tmp; 2634 int32_t rc; 2635 2636 /* per port config */ 2637 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2638 (unsigned int)pid, nb_rxq, nb_txq); 2639 2640 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2641 ports[pid].dev_conf.rxmode.offloads, 2642 ports[pid].dev_conf.txmode.offloads); 2643 2644 /* per rx queue config only for first queue to be less verbose */ 2645 for (qid = 0; qid < 1; qid++) { 2646 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2647 if (rc) { 2648 nb_rx_desc_tmp = nb_rx_desc[qid]; 2649 rx_free_thresh_tmp = 2650 rx_conf[qid].rx_free_thresh; 2651 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2652 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2653 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2654 offloads_tmp = rx_conf[qid].offloads; 2655 } else { 2656 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2657 rx_free_thresh_tmp = 2658 rx_qinfo.conf.rx_free_thresh; 2659 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2660 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2661 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2662 offloads_tmp = rx_qinfo.conf.offloads; 2663 } 2664 2665 printf(" RX queue: %d\n", qid); 2666 printf(" RX desc=%d - RX free threshold=%d\n", 2667 nb_rx_desc_tmp, rx_free_thresh_tmp); 2668 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2669 " wthresh=%d\n", 2670 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2671 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); 2672 } 2673 2674 /* per tx queue config only for first queue to be less verbose */ 2675 for (qid = 0; qid < 1; qid++) { 2676 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2677 if (rc) { 2678 nb_tx_desc_tmp = nb_tx_desc[qid]; 2679 tx_free_thresh_tmp = 2680 tx_conf[qid].tx_free_thresh; 2681 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2682 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2683 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2684 offloads_tmp = tx_conf[qid].offloads; 2685 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2686 } else { 2687 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2688 tx_free_thresh_tmp = 2689 tx_qinfo.conf.tx_free_thresh; 2690 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2691 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2692 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2693 offloads_tmp = tx_qinfo.conf.offloads; 2694 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2695 } 2696 2697 printf(" TX queue: %d\n", qid); 2698 printf(" TX desc=%d - TX free threshold=%d\n", 2699 nb_tx_desc_tmp, tx_free_thresh_tmp); 2700 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2701 " wthresh=%d\n", 2702 pthresh_tmp, 
hthresh_tmp, wthresh_tmp); 2703 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2704 offloads_tmp, tx_rs_thresh_tmp); 2705 } 2706 } 2707 } 2708 2709 void 2710 port_rss_reta_info(portid_t port_id, 2711 struct rte_eth_rss_reta_entry64 *reta_conf, 2712 uint16_t nb_entries) 2713 { 2714 uint16_t i, idx, shift; 2715 int ret; 2716 2717 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2718 return; 2719 2720 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2721 if (ret != 0) { 2722 printf("Failed to get RSS RETA info, return code = %d\n", ret); 2723 return; 2724 } 2725 2726 for (i = 0; i < nb_entries; i++) { 2727 idx = i / RTE_RETA_GROUP_SIZE; 2728 shift = i % RTE_RETA_GROUP_SIZE; 2729 if (!(reta_conf[idx].mask & (1ULL << shift))) 2730 continue; 2731 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2732 i, reta_conf[idx].reta[shift]); 2733 } 2734 } 2735 2736 /* 2737 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 2738 * key of the port. 2739 */ 2740 void 2741 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2742 { 2743 struct rte_eth_rss_conf rss_conf = {0}; 2744 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2745 uint64_t rss_hf; 2746 uint8_t i; 2747 int diag; 2748 struct rte_eth_dev_info dev_info; 2749 uint8_t hash_key_size; 2750 int ret; 2751 2752 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2753 return; 2754 2755 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2756 if (ret != 0) 2757 return; 2758 2759 if (dev_info.hash_key_size > 0 && 2760 dev_info.hash_key_size <= sizeof(rss_key)) 2761 hash_key_size = dev_info.hash_key_size; 2762 else { 2763 printf("dev_info did not provide a valid hash key size\n"); 2764 return; 2765 } 2766 2767 /* Get RSS hash key if asked to display it */ 2768 rss_conf.rss_key = (show_rss_key) ?
rss_key : NULL; 2769 rss_conf.rss_key_len = hash_key_size; 2770 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2771 if (diag != 0) { 2772 switch (diag) { 2773 case -ENODEV: 2774 printf("port index %d invalid\n", port_id); 2775 break; 2776 case -ENOTSUP: 2777 printf("operation not supported by device\n"); 2778 break; 2779 default: 2780 printf("operation failed - diag=%d\n", diag); 2781 break; 2782 } 2783 return; 2784 } 2785 rss_hf = rss_conf.rss_hf; 2786 if (rss_hf == 0) { 2787 printf("RSS disabled\n"); 2788 return; 2789 } 2790 printf("RSS functions:\n "); 2791 for (i = 0; rss_type_table[i].str; i++) { 2792 if (rss_hf & rss_type_table[i].rss_type) 2793 printf("%s ", rss_type_table[i].str); 2794 } 2795 printf("\n"); 2796 if (!show_rss_key) 2797 return; 2798 printf("RSS key:\n"); 2799 for (i = 0; i < hash_key_size; i++) 2800 printf("%02X", rss_key[i]); 2801 printf("\n"); 2802 } 2803 2804 void 2805 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2806 uint8_t hash_key_len) 2807 { 2808 struct rte_eth_rss_conf rss_conf; 2809 int diag; 2810 unsigned int i; 2811 2812 rss_conf.rss_key = NULL; 2813 rss_conf.rss_key_len = hash_key_len; 2814 rss_conf.rss_hf = 0; 2815 for (i = 0; rss_type_table[i].str; i++) { 2816 if (!strcmp(rss_type_table[i].str, rss_type)) 2817 rss_conf.rss_hf = rss_type_table[i].rss_type; 2818 } 2819 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2820 if (diag == 0) { 2821 rss_conf.rss_key = hash_key; 2822 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2823 } 2824 if (diag == 0) 2825 return; 2826 2827 switch (diag) { 2828 case -ENODEV: 2829 printf("port index %d invalid\n", port_id); 2830 break; 2831 case -ENOTSUP: 2832 printf("operation not supported by device\n"); 2833 break; 2834 default: 2835 printf("operation failed - diag=%d\n", diag); 2836 break; 2837 } 2838 } 2839 2840 /* 2841 * Setup forwarding configuration for each logical core. 2842 */ 2843 static void 2844 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2845 { 2846 streamid_t nb_fs_per_lcore; 2847 streamid_t nb_fs; 2848 streamid_t sm_id; 2849 lcoreid_t nb_extra; 2850 lcoreid_t nb_fc; 2851 lcoreid_t nb_lc; 2852 lcoreid_t lc_id; 2853 2854 nb_fs = cfg->nb_fwd_streams; 2855 nb_fc = cfg->nb_fwd_lcores; 2856 if (nb_fs <= nb_fc) { 2857 nb_fs_per_lcore = 1; 2858 nb_extra = 0; 2859 } else { 2860 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2861 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2862 } 2863 2864 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2865 sm_id = 0; 2866 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2867 fwd_lcores[lc_id]->stream_idx = sm_id; 2868 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2869 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2870 } 2871 2872 /* 2873 * Assign extra remaining streams, if any. 2874 */ 2875 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2876 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2877 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2878 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2879 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2880 } 2881 } 2882 2883 static portid_t 2884 fwd_topology_tx_port_get(portid_t rxp) 2885 { 2886 static int warning_once = 1; 2887 2888 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2889 2890 switch (port_topology) { 2891 default: 2892 case PORT_TOPOLOGY_PAIRED: 2893 if ((rxp & 0x1) == 0) { 2894 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2895 return rxp + 1; 2896 if (warning_once) { 2897 printf("\nWarning! 
port-topology=paired" 2898 " and an odd number of forwarding" 2899 " ports; the last port will pair" 2900 " with itself.\n\n"); 2901 warning_once = 0; 2902 } 2903 return rxp; 2904 } 2905 return rxp - 1; 2906 case PORT_TOPOLOGY_CHAINED: 2907 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2908 case PORT_TOPOLOGY_LOOP: 2909 return rxp; 2910 } 2911 } 2912 2913 static void 2914 simple_fwd_config_setup(void) 2915 { 2916 portid_t i; 2917 2918 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2919 cur_fwd_config.nb_fwd_streams = 2920 (streamid_t) cur_fwd_config.nb_fwd_ports; 2921 2922 /* reinitialize forwarding streams */ 2923 init_fwd_streams(); 2924 2925 /* 2926 * In the simple forwarding test, the number of forwarding cores 2927 * must be lower than or equal to the number of forwarding ports. 2928 */ 2929 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2930 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2931 cur_fwd_config.nb_fwd_lcores = 2932 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2933 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2934 2935 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2936 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2937 fwd_streams[i]->rx_queue = 0; 2938 fwd_streams[i]->tx_port = 2939 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2940 fwd_streams[i]->tx_queue = 0; 2941 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2942 fwd_streams[i]->retry_enabled = retry_enabled; 2943 } 2944 } 2945 2946 /** 2947 * For the RSS forwarding test, all streams are distributed over the lcores. 2948 * Each stream is composed of an RX queue to poll on an RX port for input 2949 * packets, associated with a TX queue of a TX port to send forwarded packets. 2950 */ 2951 static void 2952 rss_fwd_config_setup(void) 2953 { 2954 portid_t rxp; 2955 portid_t txp; 2956 queueid_t rxq; 2957 queueid_t nb_q; 2958 streamid_t sm_id; 2959 2960 nb_q = nb_rxq; 2961 if (nb_q > nb_txq) 2962 nb_q = nb_txq; 2963 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2964 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2965 cur_fwd_config.nb_fwd_streams = 2966 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2967 2968 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2969 cur_fwd_config.nb_fwd_lcores = 2970 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2971 2972 /* reinitialize forwarding streams */ 2973 init_fwd_streams(); 2974 2975 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2976 rxp = 0; rxq = 0; 2977 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2978 struct fwd_stream *fs; 2979 2980 fs = fwd_streams[sm_id]; 2981 txp = fwd_topology_tx_port_get(rxp); 2982 fs->rx_port = fwd_ports_ids[rxp]; 2983 fs->rx_queue = rxq; 2984 fs->tx_port = fwd_ports_ids[txp]; 2985 fs->tx_queue = rxq; 2986 fs->peer_addr = fs->tx_port; 2987 fs->retry_enabled = retry_enabled; 2988 rxp++; 2989 if (rxp < nb_fwd_ports) 2990 continue; 2991 rxp = 0; 2992 rxq++; 2993 } 2994 } 2995 2996 /** 2997 * For the DCB forwarding test, each core is assigned to one traffic class. 2998 * 2999 * Each core is assigned multiple streams; each stream is composed of an 3000 * RX queue to poll on an RX port for input packets, associated with a 3001 * TX queue of a TX port to send forwarded packets. All RX and TX queues 3002 * of a stream map to the same traffic class.
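 *
 * For example (hypothetical sizing): with 4 TCs and 4 forwarding
 * cores, core 0 serves the TC 0 queues of the first port pair, core 1
 * serves TC 1, and so on, restarting from TC 0 on the next RX port.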
3003 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 3004 * the same core 3005 */ 3006 static void 3007 dcb_fwd_config_setup(void) 3008 { 3009 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3010 portid_t txp, rxp = 0; 3011 queueid_t txq, rxq = 0; 3012 lcoreid_t lc_id; 3013 uint16_t nb_rx_queue, nb_tx_queue; 3014 uint16_t i, j, k, sm_id = 0; 3015 uint8_t tc = 0; 3016 3017 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3018 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3019 cur_fwd_config.nb_fwd_streams = 3020 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3021 3022 /* reinitialize forwarding streams */ 3023 init_fwd_streams(); 3024 sm_id = 0; 3025 txp = 1; 3026 /* get the dcb info on the first RX and TX ports */ 3027 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3028 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3029 3030 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3031 fwd_lcores[lc_id]->stream_nb = 0; 3032 fwd_lcores[lc_id]->stream_idx = sm_id; 3033 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 3034 /* if the nb_queue is zero, means this tc is 3035 * not enabled on the POOL 3036 */ 3037 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3038 break; 3039 k = fwd_lcores[lc_id]->stream_nb + 3040 fwd_lcores[lc_id]->stream_idx; 3041 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3042 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3043 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3044 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3045 for (j = 0; j < nb_rx_queue; j++) { 3046 struct fwd_stream *fs; 3047 3048 fs = fwd_streams[k + j]; 3049 fs->rx_port = fwd_ports_ids[rxp]; 3050 fs->rx_queue = rxq + j; 3051 fs->tx_port = fwd_ports_ids[txp]; 3052 fs->tx_queue = txq + j % nb_tx_queue; 3053 fs->peer_addr = fs->tx_port; 3054 fs->retry_enabled = retry_enabled; 3055 } 3056 fwd_lcores[lc_id]->stream_nb += 3057 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3058 } 3059 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3060 3061 tc++; 3062 if (tc < rxp_dcb_info.nb_tcs) 3063 continue; 3064 /* Restart from TC 0 on next RX port */ 3065 tc = 0; 3066 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3067 rxp = (portid_t) 3068 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3069 else 3070 rxp++; 3071 if (rxp >= nb_fwd_ports) 3072 return; 3073 /* get the dcb information on next RX and TX ports */ 3074 if ((rxp & 0x1) == 0) 3075 txp = (portid_t) (rxp + 1); 3076 else 3077 txp = (portid_t) (rxp - 1); 3078 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3079 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3080 } 3081 } 3082 3083 static void 3084 icmp_echo_config_setup(void) 3085 { 3086 portid_t rxp; 3087 queueid_t rxq; 3088 lcoreid_t lc_id; 3089 uint16_t sm_id; 3090 3091 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3092 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3093 (nb_txq * nb_fwd_ports); 3094 else 3095 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3096 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3097 cur_fwd_config.nb_fwd_streams = 3098 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3099 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3100 cur_fwd_config.nb_fwd_lcores = 3101 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3102 if (verbose_level > 0) { 3103 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 3104 __FUNCTION__, 3105 cur_fwd_config.nb_fwd_lcores, 3106 cur_fwd_config.nb_fwd_ports, 3107 
cur_fwd_config.nb_fwd_streams); 3108 } 3109 3110 /* reinitialize forwarding streams */ 3111 init_fwd_streams(); 3112 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3113 rxp = 0; rxq = 0; 3114 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3115 if (verbose_level > 0) 3116 printf(" core=%d: \n", lc_id); 3117 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3118 struct fwd_stream *fs; 3119 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3120 fs->rx_port = fwd_ports_ids[rxp]; 3121 fs->rx_queue = rxq; 3122 fs->tx_port = fs->rx_port; 3123 fs->tx_queue = rxq; 3124 fs->peer_addr = fs->tx_port; 3125 fs->retry_enabled = retry_enabled; 3126 if (verbose_level > 0) 3127 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3128 sm_id, fs->rx_port, fs->rx_queue, 3129 fs->tx_queue); 3130 rxq = (queueid_t) (rxq + 1); 3131 if (rxq == nb_rxq) { 3132 rxq = 0; 3133 rxp = (portid_t) (rxp + 1); 3134 } 3135 } 3136 } 3137 } 3138 3139 void 3140 fwd_config_setup(void) 3141 { 3142 cur_fwd_config.fwd_eng = cur_fwd_eng; 3143 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3144 icmp_echo_config_setup(); 3145 return; 3146 } 3147 3148 if ((nb_rxq > 1) && (nb_txq > 1)){ 3149 if (dcb_config) 3150 dcb_fwd_config_setup(); 3151 else 3152 rss_fwd_config_setup(); 3153 } 3154 else 3155 simple_fwd_config_setup(); 3156 } 3157 3158 static const char * 3159 mp_alloc_to_str(uint8_t mode) 3160 { 3161 switch (mode) { 3162 case MP_ALLOC_NATIVE: 3163 return "native"; 3164 case MP_ALLOC_ANON: 3165 return "anon"; 3166 case MP_ALLOC_XMEM: 3167 return "xmem"; 3168 case MP_ALLOC_XMEM_HUGE: 3169 return "xmemhuge"; 3170 case MP_ALLOC_XBUF: 3171 return "xbuf"; 3172 default: 3173 return "invalid"; 3174 } 3175 } 3176 3177 void 3178 pkt_fwd_config_display(struct fwd_config *cfg) 3179 { 3180 struct fwd_stream *fs; 3181 lcoreid_t lc_id; 3182 streamid_t sm_id; 3183 3184 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3185 "NUMA support %s, MP allocation mode: %s\n", 3186 cfg->fwd_eng->fwd_mode_name, 3187 retry_enabled == 0 ? "" : " with retry", 3188 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3189 numa_support == 1 ? 
"enabled" : "disabled", 3190 mp_alloc_to_str(mp_alloc_type)); 3191 3192 if (retry_enabled) 3193 printf("TX retry num: %u, delay between TX retries: %uus\n", 3194 burst_tx_retry_num, burst_tx_delay_time); 3195 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3196 printf("Logical Core %u (socket %u) forwards packets on " 3197 "%d streams:", 3198 fwd_lcores_cpuids[lc_id], 3199 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3200 fwd_lcores[lc_id]->stream_nb); 3201 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3202 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3203 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3204 "P=%d/Q=%d (socket %u) ", 3205 fs->rx_port, fs->rx_queue, 3206 ports[fs->rx_port].socket_id, 3207 fs->tx_port, fs->tx_queue, 3208 ports[fs->tx_port].socket_id); 3209 print_ethaddr("peer=", 3210 &peer_eth_addrs[fs->peer_addr]); 3211 } 3212 printf("\n"); 3213 } 3214 printf("\n"); 3215 } 3216 3217 void 3218 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3219 { 3220 struct rte_ether_addr new_peer_addr; 3221 if (!rte_eth_dev_is_valid_port(port_id)) { 3222 printf("Error: Invalid port number %i\n", port_id); 3223 return; 3224 } 3225 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3226 printf("Error: Invalid ethernet address: %s\n", peer_addr); 3227 return; 3228 } 3229 peer_eth_addrs[port_id] = new_peer_addr; 3230 } 3231 3232 int 3233 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3234 { 3235 unsigned int i; 3236 unsigned int lcore_cpuid; 3237 int record_now; 3238 3239 record_now = 0; 3240 again: 3241 for (i = 0; i < nb_lc; i++) { 3242 lcore_cpuid = lcorelist[i]; 3243 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3244 printf("lcore %u not enabled\n", lcore_cpuid); 3245 return -1; 3246 } 3247 if (lcore_cpuid == rte_get_main_lcore()) { 3248 printf("lcore %u cannot be masked on for running " 3249 "packet forwarding, which is the main lcore " 3250 "and reserved for command line parsing only\n", 3251 lcore_cpuid); 3252 return -1; 3253 } 3254 if (record_now) 3255 fwd_lcores_cpuids[i] = lcore_cpuid; 3256 } 3257 if (record_now == 0) { 3258 record_now = 1; 3259 goto again; 3260 } 3261 nb_cfg_lcores = (lcoreid_t) nb_lc; 3262 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3263 printf("previous number of forwarding cores %u - changed to " 3264 "number of configured cores %u\n", 3265 (unsigned int) nb_fwd_lcores, nb_lc); 3266 nb_fwd_lcores = (lcoreid_t) nb_lc; 3267 } 3268 3269 return 0; 3270 } 3271 3272 int 3273 set_fwd_lcores_mask(uint64_t lcoremask) 3274 { 3275 unsigned int lcorelist[64]; 3276 unsigned int nb_lc; 3277 unsigned int i; 3278 3279 if (lcoremask == 0) { 3280 printf("Invalid NULL mask of cores\n"); 3281 return -1; 3282 } 3283 nb_lc = 0; 3284 for (i = 0; i < 64; i++) { 3285 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3286 continue; 3287 lcorelist[nb_lc++] = i; 3288 } 3289 return set_fwd_lcores_list(lcorelist, nb_lc); 3290 } 3291 3292 void 3293 set_fwd_lcores_number(uint16_t nb_lc) 3294 { 3295 if (test_done == 0) { 3296 printf("Please stop forwarding first\n"); 3297 return; 3298 } 3299 if (nb_lc > nb_cfg_lcores) { 3300 printf("nb fwd cores %u > %u (max. 
number of configured " 3301 "lcores) - ignored\n", 3302 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3303 return; 3304 } 3305 nb_fwd_lcores = (lcoreid_t) nb_lc; 3306 printf("Number of forwarding cores set to %u\n", 3307 (unsigned int) nb_fwd_lcores); 3308 } 3309 3310 void 3311 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3312 { 3313 unsigned int i; 3314 portid_t port_id; 3315 int record_now; 3316 3317 record_now = 0; 3318 again: 3319 for (i = 0; i < nb_pt; i++) { 3320 port_id = (portid_t) portlist[i]; 3321 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3322 return; 3323 if (record_now) 3324 fwd_ports_ids[i] = port_id; 3325 } 3326 if (record_now == 0) { 3327 record_now = 1; 3328 goto again; 3329 } 3330 nb_cfg_ports = (portid_t) nb_pt; 3331 if (nb_fwd_ports != (portid_t) nb_pt) { 3332 printf("previous number of forwarding ports %u - changed to " 3333 "number of configured ports %u\n", 3334 (unsigned int) nb_fwd_ports, nb_pt); 3335 nb_fwd_ports = (portid_t) nb_pt; 3336 } 3337 } 3338 3339 /** 3340 * Parse the user input and obtain the list of forwarding ports 3341 * 3342 * @param[in] list 3343 * String containing the user input. User can specify 3344 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3345 * For example, if the user wants to use all the available 3346 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3347 * If the user wants to use only the ports 1,2 then the input 3348 * is 1,2. 3349 * valid characters are '-' and ',' 3350 * @param[out] values 3351 * This array will be filled with a list of port IDs 3352 * based on the user input 3353 * Note that duplicate entries are discarded and only the first 3354 * count entries in this array are port IDs and all the rest 3355 * will contain default values 3356 * @param[in] maxsize 3357 * This parameter denotes 2 things 3358 * 1) Number of elements in the values array 3359 * 2) Maximum value of each element in the values array 3360 * @return 3361 * On success, returns total count of parsed port IDs 3362 * On failure, returns 0 3363 */ 3364 static unsigned int 3365 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3366 { 3367 unsigned int count = 0; 3368 char *end = NULL; 3369 int min, max; 3370 int value, i; 3371 unsigned int marked[maxsize]; 3372 3373 if (list == NULL || values == NULL) 3374 return 0; 3375 3376 for (i = 0; i < (int)maxsize; i++) 3377 marked[i] = 0; 3378 3379 min = INT_MAX; 3380 3381 do { 3382 /*Remove the blank spaces if any*/ 3383 while (isblank(*list)) 3384 list++; 3385 if (*list == '\0') 3386 break; 3387 errno = 0; 3388 value = strtol(list, &end, 10); 3389 if (errno || end == NULL) 3390 return 0; 3391 if (value < 0 || value >= (int)maxsize) 3392 return 0; 3393 while (isblank(*end)) 3394 end++; 3395 if (*end == '-' && min == INT_MAX) { 3396 min = value; 3397 } else if ((*end == ',') || (*end == '\0')) { 3398 max = value; 3399 if (min == INT_MAX) 3400 min = value; 3401 for (i = min; i <= max; i++) { 3402 if (count < maxsize) { 3403 if (marked[i]) 3404 continue; 3405 values[count] = i; 3406 marked[i] = 1; 3407 count++; 3408 } 3409 } 3410 min = INT_MAX; 3411 } else 3412 return 0; 3413 list = end + 1; 3414 } while (*end != '\0'); 3415 3416 return count; 3417 } 3418 3419 void 3420 parse_fwd_portlist(const char *portlist) 3421 { 3422 unsigned int portcount; 3423 unsigned int portindex[RTE_MAX_ETHPORTS]; 3424 unsigned int i, valid_port_count = 0; 3425 3426 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3427 if (!portcount) 3428 
rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3429 3430 /* 3431 * Here we verify the validity of the ports 3432 * and thereby calculate the total number of 3433 * valid ports 3434 */ 3435 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3436 if (rte_eth_dev_is_valid_port(portindex[i])) { 3437 portindex[valid_port_count] = portindex[i]; 3438 valid_port_count++; 3439 } 3440 } 3441 3442 set_fwd_ports_list(portindex, valid_port_count); 3443 } 3444 3445 void 3446 set_fwd_ports_mask(uint64_t portmask) 3447 { 3448 unsigned int portlist[64]; 3449 unsigned int nb_pt; 3450 unsigned int i; 3451 3452 if (portmask == 0) { 3453 printf("Invalid NULL mask of ports\n"); 3454 return; 3455 } 3456 nb_pt = 0; 3457 RTE_ETH_FOREACH_DEV(i) { 3458 if (! ((uint64_t)(1ULL << i) & portmask)) 3459 continue; 3460 portlist[nb_pt++] = i; 3461 } 3462 set_fwd_ports_list(portlist, nb_pt); 3463 } 3464 3465 void 3466 set_fwd_ports_number(uint16_t nb_pt) 3467 { 3468 if (nb_pt > nb_cfg_ports) { 3469 printf("nb fwd ports %u > %u (number of configured " 3470 "ports) - ignored\n", 3471 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3472 return; 3473 } 3474 nb_fwd_ports = (portid_t) nb_pt; 3475 printf("Number of forwarding ports set to %u\n", 3476 (unsigned int) nb_fwd_ports); 3477 } 3478 3479 int 3480 port_is_forwarding(portid_t port_id) 3481 { 3482 unsigned int i; 3483 3484 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3485 return -1; 3486 3487 for (i = 0; i < nb_fwd_ports; i++) { 3488 if (fwd_ports_ids[i] == port_id) 3489 return 1; 3490 } 3491 3492 return 0; 3493 } 3494 3495 void 3496 set_nb_pkt_per_burst(uint16_t nb) 3497 { 3498 if (nb > MAX_PKT_BURST) { 3499 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 3500 " ignored\n", 3501 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3502 return; 3503 } 3504 nb_pkt_per_burst = nb; 3505 printf("Number of packets per burst set to %u\n", 3506 (unsigned int) nb_pkt_per_burst); 3507 } 3508 3509 static const char * 3510 tx_split_get_name(enum tx_pkt_split split) 3511 { 3512 uint32_t i; 3513 3514 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3515 if (tx_split_name[i].split == split) 3516 return tx_split_name[i].name; 3517 } 3518 return NULL; 3519 } 3520 3521 void 3522 set_tx_pkt_split(const char *name) 3523 { 3524 uint32_t i; 3525 3526 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3527 if (strcmp(tx_split_name[i].name, name) == 0) { 3528 tx_pkt_split = tx_split_name[i].split; 3529 return; 3530 } 3531 } 3532 printf("unknown value: \"%s\"\n", name); 3533 } 3534 3535 int 3536 parse_fec_mode(const char *name, uint32_t *mode) 3537 { 3538 uint8_t i; 3539 3540 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3541 if (strcmp(fec_mode_name[i].name, name) == 0) { 3542 *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3543 return 0; 3544 } 3545 } 3546 return -1; 3547 } 3548 3549 void 3550 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3551 { 3552 unsigned int i, j; 3553 3554 printf("FEC capabilities:\n"); 3555 3556 for (i = 0; i < num; i++) { 3557 printf("%s : ", 3558 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3559 3560 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 3561 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3562 speed_fec_capa[i].capa) 3563 printf("%s ", fec_mode_name[j].name); 3564 } 3565 printf("\n"); 3566 } 3567 } 3568 3569 void 3570 show_rx_pkt_offsets(void) 3571 { 3572 uint32_t i, n; 3573 3574 n = rx_pkt_nb_offs; 3575 printf("Number of offsets: %u\n", n); 3576 if (n) { 3577 printf("Segment offsets: "); 3578 for 
(i = 0; i != n - 1; i++) 3579 printf("%hu,", rx_pkt_seg_offsets[i]); 3580 printf("%hu\n", rx_pkt_seg_offsets[i]); 3581 } 3582 } 3583 3584 void 3585 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 3586 { 3587 unsigned int i; 3588 3589 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 3590 printf("nb segments per RX packet=%u >= " 3591 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 3592 return; 3593 } 3594 3595 /* 3596 * No extra check here, the segment length will be checked by PMD 3597 * in the extended queue setup. 3598 */ 3599 for (i = 0; i < nb_offs; i++) { 3600 if (seg_offsets[i] >= UINT16_MAX) { 3601 printf("offset[%u]=%u >= UINT16_MAX - give up\n", 3602 i, seg_offsets[i]); 3603 return; 3604 } 3605 } 3606 3607 for (i = 0; i < nb_offs; i++) 3608 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 3609 3610 rx_pkt_nb_offs = (uint8_t) nb_offs; 3611 } 3612 3613 void 3614 show_rx_pkt_segments(void) 3615 { 3616 uint32_t i, n; 3617 3618 n = rx_pkt_nb_segs; 3619 printf("Number of segments: %u\n", n); 3620 if (n) { 3621 printf("Segment sizes: "); 3622 for (i = 0; i != n - 1; i++) 3623 printf("%hu,", rx_pkt_seg_lengths[i]); 3624 printf("%hu\n", rx_pkt_seg_lengths[i]); 3625 } 3626 } 3627 3628 void 3629 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3630 { 3631 unsigned int i; 3632 3633 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 3634 printf("nb segments per RX packet=%u >= " 3635 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 3636 return; 3637 } 3638 3639 /* 3640 * No extra check here, the segment length will be checked by PMD 3641 * in the extended queue setup. 3642 */ 3643 for (i = 0; i < nb_segs; i++) { 3644 if (seg_lengths[i] >= UINT16_MAX) { 3645 printf("length[%u]=%u >= UINT16_MAX - give up\n", 3646 i, seg_lengths[i]); 3647 return; 3648 } 3649 } 3650 3651 for (i = 0; i < nb_segs; i++) 3652 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3653 3654 rx_pkt_nb_segs = (uint8_t) nb_segs; 3655 } 3656 3657 void 3658 show_tx_pkt_segments(void) 3659 { 3660 uint32_t i, n; 3661 const char *split; 3662 3663 n = tx_pkt_nb_segs; 3664 split = tx_split_get_name(tx_pkt_split); 3665 3666 printf("Number of segments: %u\n", n); 3667 printf("Segment sizes: "); 3668 for (i = 0; i != n - 1; i++) 3669 printf("%hu,", tx_pkt_seg_lengths[i]); 3670 printf("%hu\n", tx_pkt_seg_lengths[i]); 3671 printf("Split packet: %s\n", split); 3672 } 3673 3674 static bool 3675 nb_segs_is_invalid(unsigned int nb_segs) 3676 { 3677 uint16_t ring_size; 3678 uint16_t queue_id; 3679 uint16_t port_id; 3680 int ret; 3681 3682 RTE_ETH_FOREACH_DEV(port_id) { 3683 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 3684 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 3685 3686 if (ret) 3687 return true; 3688 3689 if (ring_size < nb_segs) { 3690 printf("nb segments per TX packet=%u > " 3691 "TX queue(%u) ring_size=%u - ignored\n", 3692 nb_segs, queue_id, ring_size); 3693 return true; 3694 } 3695 } 3696 } 3697 3698 return false; 3699 } 3700 3701 void 3702 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3703 { 3704 uint16_t tx_pkt_len; 3705 unsigned int i; 3706 3707 if (nb_segs_is_invalid(nb_segs)) 3708 return; 3709 3710 /* 3711 * Check that each segment length does not exceed the 3712 * mbuf data size. 3713 * Check also that the total packet length is greater than or equal to 3714 * the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 3715 * 20 + 8).
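 * With the 14-byte Ethernet header this amounts to 14 + 20 + 8 = 42
 * bytes, the size of the smallest UDP/IPv4 packet (FCS not counted).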
3716 */ 3717 tx_pkt_len = 0; 3718 for (i = 0; i < nb_segs; i++) { 3719 if (seg_lengths[i] > mbuf_data_size[0]) { 3720 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 3721 i, seg_lengths[i], mbuf_data_size[0]); 3722 return; 3723 } 3724 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 3725 } 3726 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 3727 printf("total packet length=%u < %d - give up\n", 3728 (unsigned) tx_pkt_len, 3729 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 3730 return; 3731 } 3732 3733 for (i = 0; i < nb_segs; i++) 3734 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3735 3736 tx_pkt_length = tx_pkt_len; 3737 tx_pkt_nb_segs = (uint8_t) nb_segs; 3738 } 3739 3740 void 3741 show_tx_pkt_times(void) 3742 { 3743 printf("Interburst gap: %u\n", tx_pkt_times_inter); 3744 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 3745 } 3746 3747 void 3748 set_tx_pkt_times(unsigned int *tx_times) 3749 { 3750 tx_pkt_times_inter = tx_times[0]; 3751 tx_pkt_times_intra = tx_times[1]; 3752 } 3753 3754 void 3755 setup_gro(const char *onoff, portid_t port_id) 3756 { 3757 if (!rte_eth_dev_is_valid_port(port_id)) { 3758 printf("invalid port id %u\n", port_id); 3759 return; 3760 } 3761 if (test_done == 0) { 3762 printf("Before enabling/disabling GRO," 3763 " please stop forwarding first\n"); 3764 return; 3765 } 3766 if (strcmp(onoff, "on") == 0) { 3767 if (gro_ports[port_id].enable != 0) { 3768 printf("GRO is already enabled on port %u. Please" 3769 " disable it first\n", port_id); 3770 return; 3771 } 3772 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3773 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 3774 gro_ports[port_id].param.max_flow_num = 3775 GRO_DEFAULT_FLOW_NUM; 3776 gro_ports[port_id].param.max_item_per_flow = 3777 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 3778 } 3779 gro_ports[port_id].enable = 1; 3780 } else { 3781 if (gro_ports[port_id].enable == 0) { 3782 printf("GRO is already disabled on port %u\n", port_id); 3783 return; 3784 } 3785 gro_ports[port_id].enable = 0; 3786 } 3787 } 3788 3789 void 3790 setup_gro_flush_cycles(uint8_t cycles) 3791 { 3792 if (test_done == 0) { 3793 printf("Before changing the flush interval for GRO," 3794 " please stop forwarding first.\n"); 3795 return; 3796 } 3797 3798 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 3799 GRO_DEFAULT_FLUSH_CYCLES) { 3800 printf("The flushing cycle must be in the range" 3801 " of 1 to %u. 
Reverting to the default" 3802 " value %u.\n", 3803 GRO_MAX_FLUSH_CYCLES, 3804 GRO_DEFAULT_FLUSH_CYCLES); 3805 cycles = GRO_DEFAULT_FLUSH_CYCLES; 3806 } 3807 3808 gro_flush_cycles = cycles; 3809 } 3810 3811 void 3812 show_gro(portid_t port_id) 3813 { 3814 struct rte_gro_param *param; 3815 uint32_t max_pkts_num; 3816 3817 if (!rte_eth_dev_is_valid_port(port_id)) { 3818 printf("Invalid port id %u.\n", port_id); 3819 return; 3820 } 3821 param = &gro_ports[port_id].param; 3822 3823 if (gro_ports[port_id].enable) { 3824 printf("GRO type: TCP/IPv4\n"); 3825 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3826 max_pkts_num = param->max_flow_num * 3827 param->max_item_per_flow; 3828 } else 3829 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 3830 printf("Max number of packets to perform GRO: %u\n", 3831 max_pkts_num); 3832 printf("Flushing cycles: %u\n", gro_flush_cycles); 3833 } else 3834 printf("GRO is not enabled on port %u.\n", port_id); 3835 } 3836 3837 void 3838 setup_gso(const char *mode, portid_t port_id) 3839 { 3840 if (!rte_eth_dev_is_valid_port(port_id)) { 3841 printf("invalid port id %u\n", port_id); 3842 return; 3843 } 3844 if (strcmp(mode, "on") == 0) { 3845 if (test_done == 0) { 3846 printf("before enabling GSO," 3847 " please stop forwarding first\n"); 3848 return; 3849 } 3850 gso_ports[port_id].enable = 1; 3851 } else if (strcmp(mode, "off") == 0) { 3852 if (test_done == 0) { 3853 printf("before disabling GSO," 3854 " please stop forwarding first\n"); 3855 return; 3856 } 3857 gso_ports[port_id].enable = 0; 3858 } 3859 } 3860 3861 char * 3862 list_pkt_forwarding_modes(void) 3863 { 3864 static char fwd_modes[128] = ""; 3865 const char *separator = "|"; 3866 struct fwd_engine *fwd_eng; 3867 unsigned int i = 0; 3868 3869 if (strlen(fwd_modes) == 0) { 3870 while ((fwd_eng = fwd_engines[i++]) != NULL) { 3871 strncat(fwd_modes, fwd_eng->fwd_mode_name, 3872 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 3873 strncat(fwd_modes, separator, 3874 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 3875 } 3876 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 3877 } 3878 3879 return fwd_modes; 3880 } 3881 3882 char * 3883 list_pkt_forwarding_retry_modes(void) 3884 { 3885 static char fwd_modes[128] = ""; 3886 const char *separator = "|"; 3887 struct fwd_engine *fwd_eng; 3888 unsigned int i = 0; 3889 3890 if (strlen(fwd_modes) == 0) { 3891 while ((fwd_eng = fwd_engines[i++]) != NULL) { 3892 if (fwd_eng == &rx_only_engine) 3893 continue; 3894 strncat(fwd_modes, fwd_eng->fwd_mode_name, 3895 sizeof(fwd_modes) - 3896 strlen(fwd_modes) - 1); 3897 strncat(fwd_modes, separator, 3898 sizeof(fwd_modes) - 3899 strlen(fwd_modes) - 1); 3900 } 3901 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 3902 } 3903 3904 return fwd_modes; 3905 } 3906 3907 void 3908 set_pkt_forwarding_mode(const char *fwd_mode_name) 3909 { 3910 struct fwd_engine *fwd_eng; 3911 unsigned int i; 3912 3913 i = 0; 3914 while ((fwd_eng = fwd_engines[i]) != NULL) { 3915 if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 3916 printf("Set %s packet forwarding mode%s\n", 3917 fwd_mode_name, 3918 retry_enabled == 0 ? 
"" : " with retry"); 3919 cur_fwd_eng = fwd_eng; 3920 return; 3921 } 3922 i++; 3923 } 3924 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 3925 } 3926 3927 void 3928 add_rx_dump_callbacks(portid_t portid) 3929 { 3930 struct rte_eth_dev_info dev_info; 3931 uint16_t queue; 3932 int ret; 3933 3934 if (port_id_is_invalid(portid, ENABLED_WARN)) 3935 return; 3936 3937 ret = eth_dev_info_get_print_err(portid, &dev_info); 3938 if (ret != 0) 3939 return; 3940 3941 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 3942 if (!ports[portid].rx_dump_cb[queue]) 3943 ports[portid].rx_dump_cb[queue] = 3944 rte_eth_add_rx_callback(portid, queue, 3945 dump_rx_pkts, NULL); 3946 } 3947 3948 void 3949 add_tx_dump_callbacks(portid_t portid) 3950 { 3951 struct rte_eth_dev_info dev_info; 3952 uint16_t queue; 3953 int ret; 3954 3955 if (port_id_is_invalid(portid, ENABLED_WARN)) 3956 return; 3957 3958 ret = eth_dev_info_get_print_err(portid, &dev_info); 3959 if (ret != 0) 3960 return; 3961 3962 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 3963 if (!ports[portid].tx_dump_cb[queue]) 3964 ports[portid].tx_dump_cb[queue] = 3965 rte_eth_add_tx_callback(portid, queue, 3966 dump_tx_pkts, NULL); 3967 } 3968 3969 void 3970 remove_rx_dump_callbacks(portid_t portid) 3971 { 3972 struct rte_eth_dev_info dev_info; 3973 uint16_t queue; 3974 int ret; 3975 3976 if (port_id_is_invalid(portid, ENABLED_WARN)) 3977 return; 3978 3979 ret = eth_dev_info_get_print_err(portid, &dev_info); 3980 if (ret != 0) 3981 return; 3982 3983 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 3984 if (ports[portid].rx_dump_cb[queue]) { 3985 rte_eth_remove_rx_callback(portid, queue, 3986 ports[portid].rx_dump_cb[queue]); 3987 ports[portid].rx_dump_cb[queue] = NULL; 3988 } 3989 } 3990 3991 void 3992 remove_tx_dump_callbacks(portid_t portid) 3993 { 3994 struct rte_eth_dev_info dev_info; 3995 uint16_t queue; 3996 int ret; 3997 3998 if (port_id_is_invalid(portid, ENABLED_WARN)) 3999 return; 4000 4001 ret = eth_dev_info_get_print_err(portid, &dev_info); 4002 if (ret != 0) 4003 return; 4004 4005 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4006 if (ports[portid].tx_dump_cb[queue]) { 4007 rte_eth_remove_tx_callback(portid, queue, 4008 ports[portid].tx_dump_cb[queue]); 4009 ports[portid].tx_dump_cb[queue] = NULL; 4010 } 4011 } 4012 4013 void 4014 configure_rxtx_dump_callbacks(uint16_t verbose) 4015 { 4016 portid_t portid; 4017 4018 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4019 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4020 return; 4021 #endif 4022 4023 RTE_ETH_FOREACH_DEV(portid) 4024 { 4025 if (verbose == 1 || verbose > 2) 4026 add_rx_dump_callbacks(portid); 4027 else 4028 remove_rx_dump_callbacks(portid); 4029 if (verbose >= 2) 4030 add_tx_dump_callbacks(portid); 4031 else 4032 remove_tx_dump_callbacks(portid); 4033 } 4034 } 4035 4036 void 4037 set_verbose_level(uint16_t vb_level) 4038 { 4039 printf("Change verbose level from %u to %u\n", 4040 (unsigned int) verbose_level, (unsigned int) vb_level); 4041 verbose_level = vb_level; 4042 configure_rxtx_dump_callbacks(verbose_level); 4043 } 4044 4045 void 4046 vlan_extend_set(portid_t port_id, int on) 4047 { 4048 int diag; 4049 int vlan_offload; 4050 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4051 4052 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4053 return; 4054 4055 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4056 4057 if (on) { 4058 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 4059 port_rx_offloads |= 
void
vlan_extend_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	} else {
		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("%s(port_id=%d, on=%d) failed diag=%d\n",
		       __func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("%s(port_id=%d, on=%d) failed diag=%d\n",
		       __func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		printf("%s(port_id=%d, queue_id=%d, on=%d) failed diag=%d\n",
		       __func__, port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	} else {
		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("%s(port_id=%d, on=%d) failed diag=%d\n",
		       __func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_qinq_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
	} else {
		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("%s(port_id=%d, on=%d) failed diag=%d\n",
		       __func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
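/*
 * The four setters above share one read-modify-write pattern: fetch
 * the current VLAN offload bits, flip one flag, and write the set
 * back. A condensed sketch of just that pattern (hypothetical
 * "vlan_offload_update" helper; unlike the real setters it does not
 * mirror the result into the cached rxmode offloads, and it assumes a
 * valid port id):
 */
static __rte_unused int
vlan_offload_update(portid_t port_id, int offload_flag, int on)
{
	int vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= offload_flag;
	else
		vlan_offload &= ~offload_flag;

	return rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
}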
int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed diag=%d\n",
	       port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
	if (diag == 0)
		return;

	printf("%s(port_id=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
	       __func__, port_id, vlan_type, tp_id, diag);
}

void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;

	if (ports[port_id].dev_conf.txmode.offloads &
	    DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Error: cannot set a single VLAN while QinQ insertion is enabled.\n");
		return;
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
		printf("Error: VLAN insertion is not supported by port %d\n",
		       port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;
	if (vlan_id_is_invalid(vlan_id_outer))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
		printf("Error: QinQ insertion is not supported by port %d\n",
		       port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
						    DEV_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = vlan_id;
	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}

void
tx_vlan_reset(portid_t port_id)
{
	ports[port_id].dev_conf.txmode.offloads &=
				~(DEV_TX_OFFLOAD_VLAN_INSERT |
				  DEV_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = 0;
	ports[port_id].tx_vlan_id_outer = 0;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}
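/*
 * Usage sketch (hypothetical values, for illustration only): single
 * VLAN insertion sets DEV_TX_OFFLOAD_VLAN_INSERT, QinQ additionally
 * sets DEV_TX_OFFLOAD_QINQ_INSERT, and tx_vlan_reset() clears both,
 * which is why tx_vlan_set() refuses to run while QinQ is enabled.
 */
static __rte_unused void
tx_vlan_usage_example(portid_t port_id)
{
	tx_vlan_set(port_id, 100);	/* insert a single tag, TCI 100 */
	tx_vlan_reset(port_id);		/* clear both insertion offloads */
	tx_qinq_set(port_id, 100, 200);	/* inner TCI 100, outer TCI 200 */
}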
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) :
		    (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* tx */
		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
							     map_value);
		if (ret) {
			printf("failed to set tx queue stats mapping.\n");
			return;
		}
	} else { /* rx */
		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
							     map_value);
		if (ret) {
			printf("failed to set rx queue stats mapping.\n");
			return;
		}
	}
}

void
set_xstats_hide_zero(uint8_t on_off)
{
	xstats_hide_zero = on_off;
}

void
set_record_core_cycles(uint8_t on_off)
{
	record_core_cycles = on_off;
}

void
set_record_burst_stats(uint8_t on_off)
{
	record_burst_stats = on_off;
}
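/*
 * Usage sketch (hypothetical port/queue/counter values): map Rx queue 0
 * of port 0 to per-queue stats counter 5 and Tx queue 1 to counter 6.
 * set_qmap() rejects counter indices >= RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */
static __rte_unused void
qmap_usage_example(void)
{
	set_qmap(0, 1, 0, 5);	/* is_rx=1: Rx queue 0 -> counter 5 */
	set_qmap(0, 0, 1, 6);	/* is_rx=0: Tx queue 1 -> counter 6 */
}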
static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
		       " tunnel_id: 0x%08x",
		       mask->mac_addr_byte_mask, mask->tunnel_type_mask,
		       rte_be_to_cpu_32(mask->tunnel_id_mask));
	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
		       rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
		       rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
		       rte_be_to_cpu_16(mask->src_port_mask),
		       rte_be_to_cpu_16(mask->dst_port_mask));

		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
	}

	printf("\n");
}

static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n    RAW:  ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n    L2_PAYLOAD:  ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n    L3_PAYLOAD:  ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n    L4_PAYLOAD:  ");
		else
			printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
		for (j = 0; j < num; j++)
			printf("  %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}

static char *
flowtype_to_str(uint16_t flow_type)
{
	struct flow_type_info {
		char str[32];
		uint16_t ftype;
	};

	uint8_t i;
	static struct flow_type_info flowtype_str_table[] = {
		{"raw", RTE_ETH_FLOW_RAW},
		{"ipv4", RTE_ETH_FLOW_IPV4},
		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
		{"ipv6", RTE_ETH_FLOW_IPV6},
		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
		{"port", RTE_ETH_FLOW_PORT},
		{"vxlan", RTE_ETH_FLOW_VXLAN},
		{"geneve", RTE_ETH_FLOW_GENEVE},
		{"nvgre", RTE_ETH_FLOW_NVGRE},
		{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
	};

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}

#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)

static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n    %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}

static inline void
print_fdir_flow_type(uint32_t flow_types_mask)
{
	int i;
	char *p;

	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
		if (!(flow_types_mask & (1 << i)))
			continue;
		p = flowtype_to_str(i);
		if (p)
			printf(" %s", p);
		else
			printf(" unknown");
	}
	printf("\n");
}

static int
get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
	      struct rte_eth_fdir_stats *fdir_stat)
{
	int ret = -ENOTSUP;

#ifdef RTE_NET_I40E
	if (ret == -ENOTSUP) {
		ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
		if (!ret)
			ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
	}
#endif
#ifdef RTE_NET_IXGBE
	if (ret == -ENOTSUP) {
		ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
		if (!ret)
			ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
	}
#endif
	switch (ret) {
	case 0:
		break;
	case -ENOTSUP:
		printf("\n FDIR is not supported on port %-2d\n",
		       port_id);
		break;
	default:
		printf("programming error: (%s)\n", strerror(-ret));
		break;
	}
	return ret;
}
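/*
 * The probe-until-supported pattern used by get_fdir_info() above
 * generalizes to any list of PMD-specific queries: start from -ENOTSUP
 * and let each candidate overwrite it. A minimal sketch with a
 * hypothetical callback type (illustration only):
 */
typedef int (*fdir_query_t)(portid_t port_id);

static __rte_unused int
probe_first_supported(portid_t port_id, fdir_query_t *queries, int nb_queries)
{
	int i;
	int ret = -ENOTSUP;

	/* Stop at the first query the underlying PMD actually supports. */
	for (i = 0; i < nb_queries && ret == -ENOTSUP; i++)
		ret = queries[i](port_id);
	return ret;
}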
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	memset(&fdir_info, 0, sizeof(fdir_info));
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
		return;

	printf("\n  %s FDIR infos for port %-2d %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf("  MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf("  PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf("  PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf("  PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf("  SIGNATURE\n");
	else
		printf("  DISABLE\n");
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf("  SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf("  FLEX PAYLOAD INFO:\n");
	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
	       fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
	       fdir_info.flex_payload_unit,
	       fdir_info.max_flex_payload_segment_num,
	       fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf("  MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf("  FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf,
					fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf("  FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf,
				     fdir_info.max_flexpayload);
	}
	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
	       "  add:           %-10"PRIu64"  remove:        %"PRIu64"\n"
	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf("  %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}

#endif /* RTE_NET_I40E || RTE_NET_IXGBE */

void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Cannot set flex"
			       " mask for flow_type(%u).\n", cfg->flow_type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_mask[idx],
		   cfg,
		   sizeof(struct rte_eth_fdir_flex_mask));
}
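/*
 * fdir_set_flex_mask() above uses a common find-or-append idiom:
 * search the table for an entry keyed by flow_type, otherwise claim
 * the next free slot. A condensed sketch of the index selection alone
 * (hypothetical helper; the caller bumps nb_flexmasks when appending,
 * and -1 signals a full table):
 */
static __rte_unused int
flex_mask_slot(struct rte_eth_fdir_flex_conf *flex_conf, uint16_t flow_type)
{
	uint16_t i;

	for (i = 0; i < flex_conf->nb_flexmasks; i++)
		if (flex_conf->flex_mask[i].flow_type == flow_type)
			return i;	/* overwrite the existing entry */
	if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask))
		return flex_conf->nb_flexmasks;	/* append a new entry */
	return -1;	/* table full */
}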
Can not set" 4642 " flex payload for type(%u).", cfg->type); 4643 return; 4644 } 4645 } 4646 rte_memcpy(&flex_conf->flex_set[idx], 4647 cfg, 4648 sizeof(struct rte_eth_flex_payload_cfg)); 4649 4650 } 4651 4652 void 4653 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 4654 { 4655 #ifdef RTE_NET_IXGBE 4656 int diag; 4657 4658 if (is_rx) 4659 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 4660 else 4661 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 4662 4663 if (diag == 0) 4664 return; 4665 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 4666 is_rx ? "rx" : "tx", port_id, diag); 4667 return; 4668 #endif 4669 printf("VF %s setting not supported for port %d\n", 4670 is_rx ? "Rx" : "Tx", port_id); 4671 RTE_SET_USED(vf); 4672 RTE_SET_USED(on); 4673 } 4674 4675 int 4676 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 4677 { 4678 int diag; 4679 struct rte_eth_link link; 4680 int ret; 4681 4682 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4683 return 1; 4684 ret = eth_link_get_nowait_print_err(port_id, &link); 4685 if (ret < 0) 4686 return 1; 4687 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN && 4688 rate > link.link_speed) { 4689 printf("Invalid rate value:%u bigger than link speed: %u\n", 4690 rate, link.link_speed); 4691 return 1; 4692 } 4693 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 4694 if (diag == 0) 4695 return diag; 4696 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 4697 port_id, diag); 4698 return diag; 4699 } 4700 4701 int 4702 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 4703 { 4704 int diag = -ENOTSUP; 4705 4706 RTE_SET_USED(vf); 4707 RTE_SET_USED(rate); 4708 RTE_SET_USED(q_msk); 4709 4710 #ifdef RTE_NET_IXGBE 4711 if (diag == -ENOTSUP) 4712 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 4713 q_msk); 4714 #endif 4715 #ifdef RTE_NET_BNXT 4716 if (diag == -ENOTSUP) 4717 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 4718 #endif 4719 if (diag == 0) 4720 return diag; 4721 4722 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 4723 port_id, diag); 4724 return diag; 4725 } 4726 4727 /* 4728 * Functions to manage the set of filtered Multicast MAC addresses. 4729 * 4730 * A pool of filtered multicast MAC addresses is associated with each port. 4731 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 4732 * The address of the pool and the number of valid multicast MAC addresses 4733 * recorded in the pool are stored in the fields "mc_addr_pool" and 4734 * "mc_addr_nb" of the "rte_port" data structure. 4735 * 4736 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 4737 * to be supplied a contiguous array of multicast MAC addresses. 4738 * To comply with this constraint, the set of multicast addresses recorded 4739 * into the pool are systematically compacted at the beginning of the pool. 4740 * Hence, when a multicast address is removed from the pool, all following 4741 * addresses, if any, are copied back to keep the set contiguous. 4742 */ 4743 #define MCAST_POOL_INC 32 4744 4745 static int 4746 mcast_addr_pool_extend(struct rte_port *port) 4747 { 4748 struct rte_ether_addr *mc_pool; 4749 size_t mc_pool_size; 4750 4751 /* 4752 * If a free entry is available at the end of the pool, just 4753 * increment the number of recorded multicast addresses. 
static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct rte_ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
							MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
						    mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}

static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* Free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}

static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
		       port_id, port->mc_addr_nb, diag);

	return diag;
}

void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure: remove the address from the pool. */
		mcast_addr_pool_remove(port, i);
}
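/*
 * Usage sketch (hypothetical address, for illustration): add the
 * all-hosts multicast MAC and rely on mcast_addr_add()'s built-in
 * rollback if the PMD rejects the resulting address list.
 */
static __rte_unused void
mcast_addr_add_example(portid_t port_id)
{
	struct rte_ether_addr mc = {
		.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }
	};

	mcast_addr_add(port_id, &mc);
}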
void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure: add the address back into the pool. */
		mcast_addr_pool_append(port, mc_addr);
}

void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get DCB info on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n  %s DCB info for port %-2d  %s\n", border, port_id, border);
	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n  TC :        ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n  Priority :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n  BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n  RXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n  RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n  TXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n  TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		printf("%s: Failed to malloc memory\n", __func__);
		return buf;
	}

	/* Treat a short read of a regular file as a failure too. */
	ret = read(fd, buf, pkg_size);
	if (ret != pkg_size) {
		close(fd);
		printf("%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}
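/*
 * Round-trip sketch (hypothetical file path, for illustration):
 * open_file() returns a malloc'd buffer that must be released with
 * close_file(); save_file() writes such a buffer back out.
 */
static __rte_unused int
file_roundtrip_example(void)
{
	uint32_t size;
	uint8_t *buf = open_file("/tmp/example.pkg", &size);

	if (buf == NULL)
		return -1;
	if (save_file("/tmp/example.pkg.copy", buf, size) != 0) {
		close_file(buf);
		return -1;
	}
	return close_file(buf);
}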
int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}

void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_NET_I40E
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("no queue region has been set on this port\n");

	printf("\n	%s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n	queue_region_number: %-14u\n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n	region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u\n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf("  user_priority_num is	%-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n	flowtype_num is	%-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}

void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	struct rte_ether_addr *addr;
	uint32_t i, num_macs = 0;
	struct rte_eth_dev *dev;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	dev = &rte_eth_devices[port_id];

	if (eth_dev_info_get_print_err(port_id, &dev_info))
		return;

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %d\n", num_macs);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}

void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	printf("Number of multicast MAC addresses added: %d\n",
	       port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}