1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_NET_IXGBE 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_NET_I40E 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_NET_BNXT 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <rte_hexdump.h> 53 54 #include "testpmd.h" 55 56 #define ETHDEV_FWVERS_LEN 32 57 58 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 59 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 60 #else 61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 62 #endif 63 64 #define NS_PER_SEC 1E9 65 66 static char *flowtype_to_str(uint16_t flow_type); 67 68 static const struct { 69 enum tx_pkt_split split; 70 const char *name; 71 } tx_split_name[] = { 72 { 73 .split = TX_PKT_SPLIT_OFF, 74 .name = "off", 75 }, 76 { 77 .split = TX_PKT_SPLIT_ON, 78 .name = "on", 79 }, 80 { 81 .split = TX_PKT_SPLIT_RND, 82 .name = "rand", 83 }, 84 }; 85 86 const struct rss_type_info rss_type_table[] = { 87 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP | 88 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD | 89 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP | 90 ETH_RSS_GTPU | ETH_RSS_ECPRI}, 91 { "none", 0 }, 92 { "eth", ETH_RSS_ETH }, 93 { "l2-src-only", ETH_RSS_L2_SRC_ONLY }, 94 { "l2-dst-only", ETH_RSS_L2_DST_ONLY }, 95 { "vlan", ETH_RSS_VLAN }, 96 { "s-vlan", ETH_RSS_S_VLAN }, 97 { "c-vlan", ETH_RSS_C_VLAN }, 98 { "ipv4", ETH_RSS_IPV4 }, 99 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 100 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 101 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 102 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 103 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 104 { "ipv6", ETH_RSS_IPV6 }, 105 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 106 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 107 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 108 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 109 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 110 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 111 { "ipv6-ex", ETH_RSS_IPV6_EX }, 112 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 113 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 114 { "port", ETH_RSS_PORT }, 115 { "vxlan", ETH_RSS_VXLAN }, 116 { "geneve", ETH_RSS_GENEVE }, 117 { "nvgre", ETH_RSS_NVGRE }, 118 { "ip", ETH_RSS_IP }, 119 { "udp", ETH_RSS_UDP }, 120 { "tcp", ETH_RSS_TCP }, 121 { "sctp", ETH_RSS_SCTP }, 122 { "tunnel", ETH_RSS_TUNNEL }, 123 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 124 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 125 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 126 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 127 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 128 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 129 { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, 130 { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, 131 { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, 132 { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, 133 { "esp", ETH_RSS_ESP }, 134 { "ah", ETH_RSS_AH }, 135 { "l2tpv3", ETH_RSS_L2TPV3 }, 136 { "pfcp", ETH_RSS_PFCP }, 137 { "pppoe", ETH_RSS_PPPOE }, 138 { "gtpu", ETH_RSS_GTPU }, 139 { "ecpri", ETH_RSS_ECPRI }, 140 { NULL, 0 }, 141 }; 142 143 static const struct { 144 enum rte_eth_fec_mode mode; 145 const char *name; 146 } fec_mode_name[] = { 147 { 148 .mode = RTE_ETH_FEC_NOFEC, 149 .name = "off", 150 }, 151 { 152 .mode = RTE_ETH_FEC_AUTO, 153 .name = "auto", 154 }, 155 { 156 .mode = RTE_ETH_FEC_BASER, 157 .name = "baser", 158 }, 159 { 160 .mode = RTE_ETH_FEC_RS, 161 .name = "rs", 162 }, 163 }; 164 165 static void 166 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 167 { 168 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 169 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 170 printf("%s%s", name, buf); 171 } 172 173 void 174 nic_stats_display(portid_t port_id) 175 { 176 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 177 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 178 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 179 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 180 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 181 struct timespec cur_time; 182 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 183 diff_ns; 184 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 185 struct rte_eth_stats stats; 186 187 static const char *nic_stats_border = "########################"; 188 189 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 190 print_valid_ports(); 191 return; 192 } 193 rte_eth_stats_get(port_id, &stats); 194 printf("\n %s NIC statistics for port %-2d %s\n", 195 nic_stats_border, port_id, nic_stats_border); 196 197 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 198 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 199 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 200 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 201 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 202 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 203 204 diff_ns = 0; 205 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 206 uint64_t ns; 207 208 ns = cur_time.tv_sec * NS_PER_SEC; 209 ns += cur_time.tv_nsec; 210 211 if (prev_ns[port_id] != 0) 212 diff_ns = ns - prev_ns[port_id]; 213 prev_ns[port_id] = ns; 214 } 215 216 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 217 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 218 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 219 (stats.opackets - prev_pkts_tx[port_id]) : 0; 220 prev_pkts_rx[port_id] = stats.ipackets; 221 prev_pkts_tx[port_id] = stats.opackets; 222 mpps_rx = diff_ns > 0 ? 223 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 224 mpps_tx = diff_ns > 0 ? 225 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 226 227 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 228 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 229 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 230 (stats.obytes - prev_bytes_tx[port_id]) : 0; 231 prev_bytes_rx[port_id] = stats.ibytes; 232 prev_bytes_tx[port_id] = stats.obytes; 233 mbps_rx = diff_ns > 0 ? 234 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 235 mbps_tx = diff_ns > 0 ? 236 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 237 238 printf("\n Throughput (since last show)\n"); 239 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 240 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 241 mpps_tx, mbps_tx * 8); 242 243 printf(" %s############################%s\n", 244 nic_stats_border, nic_stats_border); 245 } 246 247 void 248 nic_stats_clear(portid_t port_id) 249 { 250 int ret; 251 252 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 253 print_valid_ports(); 254 return; 255 } 256 257 ret = rte_eth_stats_reset(port_id); 258 if (ret != 0) { 259 printf("%s: Error: failed to reset stats (port %u): %s", 260 __func__, port_id, strerror(-ret)); 261 return; 262 } 263 264 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 265 if (ret != 0) { 266 if (ret < 0) 267 ret = -ret; 268 printf("%s: Error: failed to get stats (port %u): %s", 269 __func__, port_id, strerror(ret)); 270 return; 271 } 272 printf("\n NIC statistics for port %d cleared\n", port_id); 273 } 274 275 void 276 nic_xstats_display(portid_t port_id) 277 { 278 struct rte_eth_xstat *xstats; 279 int cnt_xstats, idx_xstat; 280 struct rte_eth_xstat_name *xstats_names; 281 282 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 283 print_valid_ports(); 284 return; 285 } 286 printf("###### NIC extended statistics for port %-2d\n", port_id); 287 if (!rte_eth_dev_is_valid_port(port_id)) { 288 printf("Error: Invalid port number %i\n", port_id); 289 return; 290 } 291 292 /* Get count */ 293 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 294 if (cnt_xstats < 0) { 295 printf("Error: Cannot get count of xstats\n"); 296 return; 297 } 298 299 /* Get id-name lookup table */ 300 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 301 if (xstats_names == NULL) { 302 printf("Cannot allocate memory for xstats lookup\n"); 303 return; 304 } 305 if (cnt_xstats != rte_eth_xstats_get_names( 306 port_id, xstats_names, cnt_xstats)) { 307 printf("Error: Cannot get xstats lookup\n"); 308 free(xstats_names); 309 return; 310 } 311 312 /* Get stats themselves */ 313 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 314 if (xstats == NULL) { 315 printf("Cannot allocate memory for xstats\n"); 316 free(xstats_names); 317 return; 318 } 319 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 320 printf("Error: Unable to get xstats\n"); 321 free(xstats_names); 322 free(xstats); 323 return; 324 } 325 326 /* Display xstats */ 327 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 328 if (xstats_hide_zero && !xstats[idx_xstat].value) 329 continue; 330 printf("%s: %"PRIu64"\n", 331 xstats_names[idx_xstat].name, 332 xstats[idx_xstat].value); 333 } 334 free(xstats_names); 335 free(xstats); 336 } 337 338 void 339 nic_xstats_clear(portid_t port_id) 340 { 341 int ret; 342 343 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 344 print_valid_ports(); 345 return; 346 } 347 348 ret = rte_eth_xstats_reset(port_id); 349 if (ret != 0) { 350 printf("%s: Error: failed to reset xstats (port %u): %s", 351 __func__, port_id, strerror(-ret)); 352 return; 353 } 354 355 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 356 if (ret != 0) { 357 if (ret < 0) 358 ret = -ret; 359 printf("%s: Error: failed to get stats (port %u): %s", 360 __func__, port_id, strerror(ret)); 361 return; 362 } 363 } 364 365 void 366 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 367 { 368 struct rte_eth_burst_mode mode; 369 struct rte_eth_rxq_info qinfo; 370 int32_t rc; 371 static const char *info_border = "*********************"; 372 373 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 374 if (rc != 0) { 375 printf("Failed to retrieve information for port: %u, " 376 "RX queue: %hu\nerror desc: %s(%d)\n", 377 port_id, queue_id, strerror(-rc), rc); 378 return; 379 } 380 381 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 382 info_border, port_id, queue_id, info_border); 383 384 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 385 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 386 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 387 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 388 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 389 printf("\nRX drop packets: %s", 390 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 391 printf("\nRX deferred start: %s", 392 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 393 printf("\nRX scattered packets: %s", 394 (qinfo.scattered_rx != 0) ? "on" : "off"); 395 if (qinfo.rx_buf_size != 0) 396 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 397 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 398 399 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 400 printf("\nBurst mode: %s%s", 401 mode.info, 402 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 403 " (per queue)" : ""); 404 405 printf("\n"); 406 } 407 408 void 409 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 410 { 411 struct rte_eth_burst_mode mode; 412 struct rte_eth_txq_info qinfo; 413 int32_t rc; 414 static const char *info_border = "*********************"; 415 416 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 417 if (rc != 0) { 418 printf("Failed to retrieve information for port: %u, " 419 "TX queue: %hu\nerror desc: %s(%d)\n", 420 port_id, queue_id, strerror(-rc), rc); 421 return; 422 } 423 424 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 425 info_border, port_id, queue_id, info_border); 426 427 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 428 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 429 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 430 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 431 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 432 printf("\nTX deferred start: %s", 433 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 434 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 435 436 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 437 printf("\nBurst mode: %s%s", 438 mode.info, 439 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 440 " (per queue)" : ""); 441 442 printf("\n"); 443 } 444 445 static int bus_match_all(const struct rte_bus *bus, const void *data) 446 { 447 RTE_SET_USED(bus); 448 RTE_SET_USED(data); 449 return 0; 450 } 451 452 static void 453 device_infos_display_speeds(uint32_t speed_capa) 454 { 455 printf("\n\tDevice speed capability:"); 456 if (speed_capa == ETH_LINK_SPEED_AUTONEG) 457 printf(" Autonegotiate (all speeds)"); 458 if (speed_capa & ETH_LINK_SPEED_FIXED) 459 printf(" Disable autonegotiate (fixed speed) "); 460 if (speed_capa & ETH_LINK_SPEED_10M_HD) 461 printf(" 10 Mbps half-duplex "); 462 if (speed_capa & ETH_LINK_SPEED_10M) 463 printf(" 10 Mbps full-duplex "); 464 if (speed_capa & ETH_LINK_SPEED_100M_HD) 465 printf(" 100 Mbps half-duplex "); 466 if (speed_capa & ETH_LINK_SPEED_100M) 467 printf(" 100 Mbps full-duplex "); 468 if (speed_capa & ETH_LINK_SPEED_1G) 469 printf(" 1 Gbps "); 470 if (speed_capa & ETH_LINK_SPEED_2_5G) 471 printf(" 2.5 Gbps "); 472 if (speed_capa & ETH_LINK_SPEED_5G) 473 printf(" 5 Gbps "); 474 if (speed_capa & ETH_LINK_SPEED_10G) 475 printf(" 10 Gbps "); 476 if (speed_capa & ETH_LINK_SPEED_20G) 477 printf(" 20 Gbps "); 478 if (speed_capa & ETH_LINK_SPEED_25G) 479 printf(" 25 Gbps "); 480 if (speed_capa & ETH_LINK_SPEED_40G) 481 printf(" 40 Gbps "); 482 if (speed_capa & ETH_LINK_SPEED_50G) 483 printf(" 50 Gbps "); 484 if (speed_capa & ETH_LINK_SPEED_56G) 485 printf(" 56 Gbps "); 486 if (speed_capa & ETH_LINK_SPEED_100G) 487 printf(" 100 Gbps "); 488 if (speed_capa & ETH_LINK_SPEED_200G) 489 printf(" 200 Gbps "); 490 } 491 492 void 493 device_infos_display(const char *identifier) 494 { 495 static const char *info_border = "*********************"; 496 struct rte_bus *start = NULL, *next; 497 struct rte_dev_iterator dev_iter; 498 char name[RTE_ETH_NAME_MAX_LEN]; 499 struct rte_ether_addr mac_addr; 500 struct rte_device *dev; 501 struct rte_devargs da; 502 portid_t port_id; 503 struct rte_eth_dev_info dev_info; 504 char devstr[128]; 505 506 memset(&da, 0, sizeof(da)); 507 if (!identifier) 508 goto skip_parse; 509 510 if (rte_devargs_parsef(&da, "%s", identifier)) { 511 printf("cannot parse identifier\n"); 512 if (da.args) 513 free(da.args); 514 return; 515 } 516 517 skip_parse: 518 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 519 520 start = next; 521 if (identifier && da.bus != next) 522 continue; 523 524 /* Skip buses that don't have iterate method */ 525 if (!next->dev_iterate) 526 continue; 527 528 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 529 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 530 531 if (!dev->driver) 532 continue; 533 /* Check for matching device if identifier is present */ 534 if (identifier && 535 strncmp(da.name, dev->name, strlen(dev->name))) 536 continue; 537 printf("\n%s Infos for device %s %s\n", 538 info_border, dev->name, info_border); 539 printf("Bus name: %s", dev->bus->name); 540 printf("\nDriver name: %s", dev->driver->name); 541 printf("\nDevargs: %s", 542 dev->devargs ? dev->devargs->args : ""); 543 printf("\nConnect to socket: %d", dev->numa_node); 544 printf("\n"); 545 546 /* List ports with matching device name */ 547 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 548 printf("\n\tPort id: %-2d", port_id); 549 if (eth_macaddr_get_print_err(port_id, 550 &mac_addr) == 0) 551 print_ethaddr("\n\tMAC address: ", 552 &mac_addr); 553 rte_eth_dev_get_name_by_port(port_id, name); 554 printf("\n\tDevice name: %s", name); 555 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 556 device_infos_display_speeds(dev_info.speed_capa); 557 printf("\n"); 558 } 559 } 560 }; 561 } 562 563 void 564 port_infos_display(portid_t port_id) 565 { 566 struct rte_port *port; 567 struct rte_ether_addr mac_addr; 568 struct rte_eth_link link; 569 struct rte_eth_dev_info dev_info; 570 int vlan_offload; 571 struct rte_mempool * mp; 572 static const char *info_border = "*********************"; 573 uint16_t mtu; 574 char name[RTE_ETH_NAME_MAX_LEN]; 575 int ret; 576 char fw_version[ETHDEV_FWVERS_LEN]; 577 578 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 579 print_valid_ports(); 580 return; 581 } 582 port = &ports[port_id]; 583 ret = eth_link_get_nowait_print_err(port_id, &link); 584 if (ret < 0) 585 return; 586 587 ret = eth_dev_info_get_print_err(port_id, &dev_info); 588 if (ret != 0) 589 return; 590 591 printf("\n%s Infos for port %-2d %s\n", 592 info_border, port_id, info_border); 593 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 594 print_ethaddr("MAC address: ", &mac_addr); 595 rte_eth_dev_get_name_by_port(port_id, name); 596 printf("\nDevice name: %s", name); 597 printf("\nDriver name: %s", dev_info.driver_name); 598 599 if (rte_eth_dev_fw_version_get(port_id, fw_version, 600 ETHDEV_FWVERS_LEN) == 0) 601 printf("\nFirmware-version: %s", fw_version); 602 else 603 printf("\nFirmware-version: %s", "not available"); 604 605 if (dev_info.device->devargs && dev_info.device->devargs->args) 606 printf("\nDevargs: %s", dev_info.device->devargs->args); 607 printf("\nConnect to socket: %u", port->socket_id); 608 609 if (port_numa[port_id] != NUMA_NO_CONFIG) { 610 mp = mbuf_pool_find(port_numa[port_id], 0); 611 if (mp) 612 printf("\nmemory allocation on the socket: %d", 613 port_numa[port_id]); 614 } else 615 printf("\nmemory allocation on the socket: %u",port->socket_id); 616 617 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 618 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 619 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 620 ("full-duplex") : ("half-duplex")); 621 622 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 623 printf("MTU: %u\n", mtu); 624 625 printf("Promiscuous mode: %s\n", 626 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 627 printf("Allmulticast mode: %s\n", 628 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 629 printf("Maximum number of MAC addresses: %u\n", 630 (unsigned int)(port->dev_info.max_mac_addrs)); 631 printf("Maximum number of MAC addresses of hash filtering: %u\n", 632 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 633 634 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 635 if (vlan_offload >= 0){ 636 printf("VLAN offload: \n"); 637 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 638 printf(" strip on, "); 639 else 640 printf(" strip off, "); 641 642 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 643 printf("filter on, "); 644 else 645 printf("filter off, "); 646 647 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 648 printf("extend on, "); 649 else 650 printf("extend off, "); 651 652 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 653 printf("qinq strip on\n"); 654 else 655 printf("qinq strip off\n"); 656 } 657 658 if (dev_info.hash_key_size > 0) 659 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 660 if (dev_info.reta_size > 0) 661 printf("Redirection table size: %u\n", dev_info.reta_size); 662 if (!dev_info.flow_type_rss_offloads) 663 printf("No RSS offload flow type is supported.\n"); 664 else { 665 uint16_t i; 666 char *p; 667 668 printf("Supported RSS offload flow types:\n"); 669 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 670 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 671 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 672 continue; 673 p = flowtype_to_str(i); 674 if (p) 675 printf(" %s\n", p); 676 else 677 printf(" user defined %d\n", i); 678 } 679 } 680 681 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 682 printf("Maximum configurable length of RX packet: %u\n", 683 dev_info.max_rx_pktlen); 684 printf("Maximum configurable size of LRO aggregated packet: %u\n", 685 dev_info.max_lro_pkt_size); 686 if (dev_info.max_vfs) 687 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 688 if (dev_info.max_vmdq_pools) 689 printf("Maximum number of VMDq pools: %u\n", 690 dev_info.max_vmdq_pools); 691 692 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 693 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 694 printf("Max possible number of RXDs per queue: %hu\n", 695 dev_info.rx_desc_lim.nb_max); 696 printf("Min possible number of RXDs per queue: %hu\n", 697 dev_info.rx_desc_lim.nb_min); 698 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 699 700 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 701 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 702 printf("Max possible number of TXDs per queue: %hu\n", 703 dev_info.tx_desc_lim.nb_max); 704 printf("Min possible number of TXDs per queue: %hu\n", 705 dev_info.tx_desc_lim.nb_min); 706 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 707 printf("Max segment number per packet: %hu\n", 708 dev_info.tx_desc_lim.nb_seg_max); 709 printf("Max segment number per MTU/TSO: %hu\n", 710 dev_info.tx_desc_lim.nb_mtu_seg_max); 711 712 /* Show switch info only if valid switch domain and port id is set */ 713 if (dev_info.switch_info.domain_id != 714 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 715 if (dev_info.switch_info.name) 716 printf("Switch name: %s\n", dev_info.switch_info.name); 717 718 printf("Switch domain Id: %u\n", 719 dev_info.switch_info.domain_id); 720 printf("Switch Port Id: %u\n", 721 dev_info.switch_info.port_id); 722 } 723 } 724 725 void 726 port_summary_header_display(void) 727 { 728 uint16_t port_number; 729 730 port_number = rte_eth_dev_count_avail(); 731 printf("Number of available ports: %i\n", port_number); 732 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 733 "Driver", "Status", "Link"); 734 } 735 736 void 737 port_summary_display(portid_t port_id) 738 { 739 struct rte_ether_addr mac_addr; 740 struct rte_eth_link link; 741 struct rte_eth_dev_info dev_info; 742 char name[RTE_ETH_NAME_MAX_LEN]; 743 int ret; 744 745 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 746 print_valid_ports(); 747 return; 748 } 749 750 ret = eth_link_get_nowait_print_err(port_id, &link); 751 if (ret < 0) 752 return; 753 754 ret = eth_dev_info_get_print_err(port_id, &dev_info); 755 if (ret != 0) 756 return; 757 758 rte_eth_dev_get_name_by_port(port_id, name); 759 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 760 if (ret != 0) 761 return; 762 763 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n", 764 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 765 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 766 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 767 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 768 rte_eth_link_speed_to_str(link.link_speed)); 769 } 770 771 void 772 port_eeprom_display(portid_t port_id) 773 { 774 struct rte_dev_eeprom_info einfo; 775 int ret; 776 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 777 print_valid_ports(); 778 return; 779 } 780 781 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 782 if (len_eeprom < 0) { 783 switch (len_eeprom) { 784 case -ENODEV: 785 printf("port index %d invalid\n", port_id); 786 break; 787 case -ENOTSUP: 788 printf("operation not supported by device\n"); 789 break; 790 case -EIO: 791 printf("device is removed\n"); 792 break; 793 default: 794 printf("Unable to get EEPROM: %d\n", len_eeprom); 795 break; 796 } 797 return; 798 } 799 800 char buf[len_eeprom]; 801 einfo.offset = 0; 802 einfo.length = len_eeprom; 803 einfo.data = buf; 804 805 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 806 if (ret != 0) { 807 switch (ret) { 808 case -ENODEV: 809 printf("port index %d invalid\n", port_id); 810 break; 811 case -ENOTSUP: 812 printf("operation not supported by device\n"); 813 break; 814 case -EIO: 815 printf("device is removed\n"); 816 break; 817 default: 818 printf("Unable to get EEPROM: %d\n", ret); 819 break; 820 } 821 return; 822 } 823 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 824 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 825 } 826 827 void 828 port_module_eeprom_display(portid_t port_id) 829 { 830 struct rte_eth_dev_module_info minfo; 831 struct rte_dev_eeprom_info einfo; 832 int ret; 833 834 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 835 print_valid_ports(); 836 return; 837 } 838 839 840 ret = rte_eth_dev_get_module_info(port_id, &minfo); 841 if (ret != 0) { 842 switch (ret) { 843 case -ENODEV: 844 printf("port index %d invalid\n", port_id); 845 break; 846 case -ENOTSUP: 847 printf("operation not supported by device\n"); 848 break; 849 case -EIO: 850 printf("device is removed\n"); 851 break; 852 default: 853 printf("Unable to get module EEPROM: %d\n", ret); 854 break; 855 } 856 return; 857 } 858 859 char buf[minfo.eeprom_len]; 860 einfo.offset = 0; 861 einfo.length = minfo.eeprom_len; 862 einfo.data = buf; 863 864 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 865 if (ret != 0) { 866 switch (ret) { 867 case -ENODEV: 868 printf("port index %d invalid\n", port_id); 869 break; 870 case -ENOTSUP: 871 printf("operation not supported by device\n"); 872 break; 873 case -EIO: 874 printf("device is removed\n"); 875 break; 876 default: 877 printf("Unable to get module EEPROM: %d\n", ret); 878 break; 879 } 880 return; 881 } 882 883 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 884 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 885 } 886 887 void 888 port_offload_cap_display(portid_t port_id) 889 { 890 struct rte_eth_dev_info dev_info; 891 static const char *info_border = "************"; 892 int ret; 893 894 if (port_id_is_invalid(port_id, ENABLED_WARN)) 895 return; 896 897 ret = eth_dev_info_get_print_err(port_id, &dev_info); 898 if (ret != 0) 899 return; 900 901 printf("\n%s Port %d supported offload features: %s\n", 902 info_border, port_id, info_border); 903 904 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 905 printf("VLAN stripped: "); 906 if (ports[port_id].dev_conf.rxmode.offloads & 907 DEV_RX_OFFLOAD_VLAN_STRIP) 908 printf("on\n"); 909 else 910 printf("off\n"); 911 } 912 913 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 914 printf("Double VLANs stripped: "); 915 if (ports[port_id].dev_conf.rxmode.offloads & 916 DEV_RX_OFFLOAD_QINQ_STRIP) 917 printf("on\n"); 918 else 919 printf("off\n"); 920 } 921 922 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 923 printf("RX IPv4 checksum: "); 924 if (ports[port_id].dev_conf.rxmode.offloads & 925 DEV_RX_OFFLOAD_IPV4_CKSUM) 926 printf("on\n"); 927 else 928 printf("off\n"); 929 } 930 931 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 932 printf("RX UDP checksum: "); 933 if (ports[port_id].dev_conf.rxmode.offloads & 934 DEV_RX_OFFLOAD_UDP_CKSUM) 935 printf("on\n"); 936 else 937 printf("off\n"); 938 } 939 940 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 941 printf("RX TCP checksum: "); 942 if (ports[port_id].dev_conf.rxmode.offloads & 943 DEV_RX_OFFLOAD_TCP_CKSUM) 944 printf("on\n"); 945 else 946 printf("off\n"); 947 } 948 949 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 950 printf("RX SCTP checksum: "); 951 if (ports[port_id].dev_conf.rxmode.offloads & 952 DEV_RX_OFFLOAD_SCTP_CKSUM) 953 printf("on\n"); 954 else 955 printf("off\n"); 956 } 957 958 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 959 printf("RX Outer IPv4 checksum: "); 960 if (ports[port_id].dev_conf.rxmode.offloads & 961 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 962 printf("on\n"); 963 else 964 printf("off\n"); 965 } 966 967 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 968 printf("RX Outer UDP checksum: "); 969 if (ports[port_id].dev_conf.rxmode.offloads & 970 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 971 printf("on\n"); 972 else 973 printf("off\n"); 974 } 975 976 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 977 printf("Large receive offload: "); 978 if (ports[port_id].dev_conf.rxmode.offloads & 979 DEV_RX_OFFLOAD_TCP_LRO) 980 printf("on\n"); 981 else 982 printf("off\n"); 983 } 984 985 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 986 printf("HW timestamp: "); 987 if (ports[port_id].dev_conf.rxmode.offloads & 988 DEV_RX_OFFLOAD_TIMESTAMP) 989 printf("on\n"); 990 else 991 printf("off\n"); 992 } 993 994 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 995 printf("Rx Keep CRC: "); 996 if (ports[port_id].dev_conf.rxmode.offloads & 997 DEV_RX_OFFLOAD_KEEP_CRC) 998 printf("on\n"); 999 else 1000 printf("off\n"); 1001 } 1002 1003 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 1004 printf("RX offload security: "); 1005 if (ports[port_id].dev_conf.rxmode.offloads & 1006 DEV_RX_OFFLOAD_SECURITY) 1007 printf("on\n"); 1008 else 1009 printf("off\n"); 1010 } 1011 1012 if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 1013 printf("RX offload buffer split: "); 1014 if (ports[port_id].dev_conf.rxmode.offloads & 1015 RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) 1016 printf("on\n"); 1017 else 1018 printf("off\n"); 1019 } 1020 1021 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 1022 printf("VLAN insert: "); 1023 if (ports[port_id].dev_conf.txmode.offloads & 1024 DEV_TX_OFFLOAD_VLAN_INSERT) 1025 printf("on\n"); 1026 else 1027 printf("off\n"); 1028 } 1029 1030 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 1031 printf("Double VLANs insert: "); 1032 if (ports[port_id].dev_conf.txmode.offloads & 1033 DEV_TX_OFFLOAD_QINQ_INSERT) 1034 printf("on\n"); 1035 else 1036 printf("off\n"); 1037 } 1038 1039 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 1040 printf("TX IPv4 checksum: "); 1041 if (ports[port_id].dev_conf.txmode.offloads & 1042 DEV_TX_OFFLOAD_IPV4_CKSUM) 1043 printf("on\n"); 1044 else 1045 printf("off\n"); 1046 } 1047 1048 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 1049 printf("TX UDP checksum: "); 1050 if (ports[port_id].dev_conf.txmode.offloads & 1051 DEV_TX_OFFLOAD_UDP_CKSUM) 1052 printf("on\n"); 1053 else 1054 printf("off\n"); 1055 } 1056 1057 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 1058 printf("TX TCP checksum: "); 1059 if (ports[port_id].dev_conf.txmode.offloads & 1060 DEV_TX_OFFLOAD_TCP_CKSUM) 1061 printf("on\n"); 1062 else 1063 printf("off\n"); 1064 } 1065 1066 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 1067 printf("TX SCTP checksum: "); 1068 if (ports[port_id].dev_conf.txmode.offloads & 1069 DEV_TX_OFFLOAD_SCTP_CKSUM) 1070 printf("on\n"); 1071 else 1072 printf("off\n"); 1073 } 1074 1075 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 1076 printf("TX Outer IPv4 checksum: "); 1077 if (ports[port_id].dev_conf.txmode.offloads & 1078 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 1079 printf("on\n"); 1080 else 1081 printf("off\n"); 1082 } 1083 1084 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 1085 printf("TX TCP segmentation: "); 1086 if (ports[port_id].dev_conf.txmode.offloads & 1087 DEV_TX_OFFLOAD_TCP_TSO) 1088 printf("on\n"); 1089 else 1090 printf("off\n"); 1091 } 1092 1093 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 1094 printf("TX UDP segmentation: "); 1095 if (ports[port_id].dev_conf.txmode.offloads & 1096 DEV_TX_OFFLOAD_UDP_TSO) 1097 printf("on\n"); 1098 else 1099 printf("off\n"); 1100 } 1101 1102 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 1103 printf("TSO for VXLAN tunnel packet: "); 1104 if (ports[port_id].dev_conf.txmode.offloads & 1105 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 1106 printf("on\n"); 1107 else 1108 printf("off\n"); 1109 } 1110 1111 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 1112 printf("TSO for GRE tunnel packet: "); 1113 if (ports[port_id].dev_conf.txmode.offloads & 1114 DEV_TX_OFFLOAD_GRE_TNL_TSO) 1115 printf("on\n"); 1116 else 1117 printf("off\n"); 1118 } 1119 1120 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 1121 printf("TSO for IPIP tunnel packet: "); 1122 if (ports[port_id].dev_conf.txmode.offloads & 1123 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 1124 printf("on\n"); 1125 else 1126 printf("off\n"); 1127 } 1128 1129 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 1130 printf("TSO for GENEVE tunnel packet: "); 1131 if (ports[port_id].dev_conf.txmode.offloads & 1132 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 1133 printf("on\n"); 1134 else 1135 printf("off\n"); 1136 } 1137 1138 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 1139 printf("IP tunnel TSO: "); 1140 if (ports[port_id].dev_conf.txmode.offloads & 1141 DEV_TX_OFFLOAD_IP_TNL_TSO) 1142 printf("on\n"); 1143 else 1144 printf("off\n"); 1145 } 1146 1147 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 1148 printf("UDP tunnel TSO: "); 1149 if (ports[port_id].dev_conf.txmode.offloads & 1150 DEV_TX_OFFLOAD_UDP_TNL_TSO) 1151 printf("on\n"); 1152 else 1153 printf("off\n"); 1154 } 1155 1156 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 1157 printf("TX Outer UDP checksum: "); 1158 if (ports[port_id].dev_conf.txmode.offloads & 1159 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 1160 printf("on\n"); 1161 else 1162 printf("off\n"); 1163 } 1164 1165 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) { 1166 printf("Tx scheduling on timestamp: "); 1167 if (ports[port_id].dev_conf.txmode.offloads & 1168 DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) 1169 printf("on\n"); 1170 else 1171 printf("off\n"); 1172 } 1173 1174 } 1175 1176 int 1177 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1178 { 1179 uint16_t pid; 1180 1181 if (port_id == (portid_t)RTE_PORT_ALL) 1182 return 0; 1183 1184 RTE_ETH_FOREACH_DEV(pid) 1185 if (port_id == pid) 1186 return 0; 1187 1188 if (warning == ENABLED_WARN) 1189 printf("Invalid port %d\n", port_id); 1190 1191 return 1; 1192 } 1193 1194 void print_valid_ports(void) 1195 { 1196 portid_t pid; 1197 1198 printf("The valid ports array is ["); 1199 RTE_ETH_FOREACH_DEV(pid) { 1200 printf(" %d", pid); 1201 } 1202 printf(" ]\n"); 1203 } 1204 1205 static int 1206 vlan_id_is_invalid(uint16_t vlan_id) 1207 { 1208 if (vlan_id < 4096) 1209 return 0; 1210 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1211 return 1; 1212 } 1213 1214 static int 1215 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1216 { 1217 const struct rte_pci_device *pci_dev; 1218 const struct rte_bus *bus; 1219 uint64_t pci_len; 1220 1221 if (reg_off & 0x3) { 1222 printf("Port register offset 0x%X not aligned on a 4-byte " 1223 "boundary\n", 1224 (unsigned)reg_off); 1225 return 1; 1226 } 1227 1228 if (!ports[port_id].dev_info.device) { 1229 printf("Invalid device\n"); 1230 return 0; 1231 } 1232 1233 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1234 if (bus && !strcmp(bus->name, "pci")) { 1235 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1236 } else { 1237 printf("Not a PCI device\n"); 1238 return 1; 1239 } 1240 1241 pci_len = pci_dev->mem_resource[0].len; 1242 if (reg_off >= pci_len) { 1243 printf("Port %d: register offset %u (0x%X) out of port PCI " 1244 "resource (length=%"PRIu64")\n", 1245 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 1246 return 1; 1247 } 1248 return 0; 1249 } 1250 1251 static int 1252 reg_bit_pos_is_invalid(uint8_t bit_pos) 1253 { 1254 if (bit_pos <= 31) 1255 return 0; 1256 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 1257 return 1; 1258 } 1259 1260 #define display_port_and_reg_off(port_id, reg_off) \ 1261 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1262 1263 static inline void 1264 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1265 { 1266 display_port_and_reg_off(port_id, (unsigned)reg_off); 1267 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1268 } 1269 1270 void 1271 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1272 { 1273 uint32_t reg_v; 1274 1275 1276 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1277 return; 1278 if (port_reg_off_is_invalid(port_id, reg_off)) 1279 return; 1280 if (reg_bit_pos_is_invalid(bit_x)) 1281 return; 1282 reg_v = port_id_pci_reg_read(port_id, reg_off); 1283 display_port_and_reg_off(port_id, (unsigned)reg_off); 1284 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1285 } 1286 1287 void 1288 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1289 uint8_t bit1_pos, uint8_t bit2_pos) 1290 { 1291 uint32_t reg_v; 1292 uint8_t l_bit; 1293 uint8_t h_bit; 1294 1295 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1296 return; 1297 if (port_reg_off_is_invalid(port_id, reg_off)) 1298 return; 1299 if (reg_bit_pos_is_invalid(bit1_pos)) 1300 return; 1301 if (reg_bit_pos_is_invalid(bit2_pos)) 1302 return; 1303 if (bit1_pos > bit2_pos) 1304 l_bit = bit2_pos, h_bit = bit1_pos; 1305 else 1306 l_bit = bit1_pos, h_bit = bit2_pos; 1307 1308 reg_v = port_id_pci_reg_read(port_id, reg_off); 1309 reg_v >>= l_bit; 1310 if (h_bit < 31) 1311 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1312 display_port_and_reg_off(port_id, (unsigned)reg_off); 1313 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1314 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1315 } 1316 1317 void 1318 port_reg_display(portid_t port_id, uint32_t reg_off) 1319 { 1320 uint32_t reg_v; 1321 1322 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1323 return; 1324 if (port_reg_off_is_invalid(port_id, reg_off)) 1325 return; 1326 reg_v = port_id_pci_reg_read(port_id, reg_off); 1327 display_port_reg_value(port_id, reg_off, reg_v); 1328 } 1329 1330 void 1331 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1332 uint8_t bit_v) 1333 { 1334 uint32_t reg_v; 1335 1336 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1337 return; 1338 if (port_reg_off_is_invalid(port_id, reg_off)) 1339 return; 1340 if (reg_bit_pos_is_invalid(bit_pos)) 1341 return; 1342 if (bit_v > 1) { 1343 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1344 return; 1345 } 1346 reg_v = port_id_pci_reg_read(port_id, reg_off); 1347 if (bit_v == 0) 1348 reg_v &= ~(1 << bit_pos); 1349 else 1350 reg_v |= (1 << bit_pos); 1351 port_id_pci_reg_write(port_id, reg_off, reg_v); 1352 display_port_reg_value(port_id, reg_off, reg_v); 1353 } 1354 1355 void 1356 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1357 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1358 { 1359 uint32_t max_v; 1360 uint32_t reg_v; 1361 uint8_t l_bit; 1362 uint8_t h_bit; 1363 1364 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1365 return; 1366 if (port_reg_off_is_invalid(port_id, reg_off)) 1367 return; 1368 if (reg_bit_pos_is_invalid(bit1_pos)) 1369 return; 1370 if (reg_bit_pos_is_invalid(bit2_pos)) 1371 return; 1372 if (bit1_pos > bit2_pos) 1373 l_bit = bit2_pos, h_bit = bit1_pos; 1374 else 1375 l_bit = bit1_pos, h_bit = bit2_pos; 1376 1377 if ((h_bit - l_bit) < 31) 1378 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1379 else 1380 max_v = 0xFFFFFFFF; 1381 1382 if (value > max_v) { 1383 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1384 (unsigned)value, (unsigned)value, 1385 (unsigned)max_v, (unsigned)max_v); 1386 return; 1387 } 1388 reg_v = port_id_pci_reg_read(port_id, reg_off); 1389 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1390 reg_v |= (value << l_bit); /* Set changed bits */ 1391 port_id_pci_reg_write(port_id, reg_off, reg_v); 1392 display_port_reg_value(port_id, reg_off, reg_v); 1393 } 1394 1395 void 1396 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1397 { 1398 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1399 return; 1400 if (port_reg_off_is_invalid(port_id, reg_off)) 1401 return; 1402 port_id_pci_reg_write(port_id, reg_off, reg_v); 1403 display_port_reg_value(port_id, reg_off, reg_v); 1404 } 1405 1406 void 1407 port_mtu_set(portid_t port_id, uint16_t mtu) 1408 { 1409 int diag; 1410 struct rte_port *rte_port = &ports[port_id]; 1411 struct rte_eth_dev_info dev_info; 1412 uint16_t eth_overhead; 1413 int ret; 1414 1415 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1416 return; 1417 1418 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1419 if (ret != 0) 1420 return; 1421 1422 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1423 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1424 mtu, dev_info.min_mtu, dev_info.max_mtu); 1425 return; 1426 } 1427 diag = rte_eth_dev_set_mtu(port_id, mtu); 1428 if (diag) 1429 printf("Set MTU failed. diag=%d\n", diag); 1430 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1431 /* 1432 * Ether overhead in driver is equal to the difference of 1433 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the 1434 * device supports jumbo frame. 1435 */ 1436 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; 1437 if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) { 1438 rte_port->dev_conf.rxmode.offloads |= 1439 DEV_RX_OFFLOAD_JUMBO_FRAME; 1440 rte_port->dev_conf.rxmode.max_rx_pkt_len = 1441 mtu + eth_overhead; 1442 } else 1443 rte_port->dev_conf.rxmode.offloads &= 1444 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1445 } 1446 } 1447 1448 /* Generic flow management functions. */ 1449 1450 static struct port_flow_tunnel * 1451 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1452 { 1453 struct port_flow_tunnel *flow_tunnel; 1454 1455 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1456 if (flow_tunnel->id == port_tunnel_id) 1457 goto out; 1458 } 1459 flow_tunnel = NULL; 1460 1461 out: 1462 return flow_tunnel; 1463 } 1464 1465 const char * 1466 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1467 { 1468 const char *type; 1469 switch (tunnel->type) { 1470 default: 1471 type = "unknown"; 1472 break; 1473 case RTE_FLOW_ITEM_TYPE_VXLAN: 1474 type = "vxlan"; 1475 break; 1476 } 1477 1478 return type; 1479 } 1480 1481 struct port_flow_tunnel * 1482 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1483 { 1484 struct rte_port *port = &ports[port_id]; 1485 struct port_flow_tunnel *flow_tunnel; 1486 1487 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1488 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1489 goto out; 1490 } 1491 flow_tunnel = NULL; 1492 1493 out: 1494 return flow_tunnel; 1495 } 1496 1497 void port_flow_tunnel_list(portid_t port_id) 1498 { 1499 struct rte_port *port = &ports[port_id]; 1500 struct port_flow_tunnel *flt; 1501 1502 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1503 printf("port %u tunnel #%u type=%s", 1504 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1505 if (flt->tunnel.tun_id) 1506 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1507 printf("\n"); 1508 } 1509 } 1510 1511 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1512 { 1513 struct rte_port *port = &ports[port_id]; 1514 struct port_flow_tunnel *flt; 1515 1516 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1517 if (flt->id == tunnel_id) 1518 break; 1519 } 1520 if (flt) { 1521 LIST_REMOVE(flt, chain); 1522 free(flt); 1523 printf("port %u: flow tunnel #%u destroyed\n", 1524 port_id, tunnel_id); 1525 } 1526 } 1527 1528 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1529 { 1530 struct rte_port *port = &ports[port_id]; 1531 enum rte_flow_item_type type; 1532 struct port_flow_tunnel *flt; 1533 1534 if (!strcmp(ops->type, "vxlan")) 1535 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1536 else { 1537 printf("cannot offload \"%s\" tunnel type\n", ops->type); 1538 return; 1539 } 1540 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1541 if (flt->tunnel.type == type) 1542 break; 1543 } 1544 if (!flt) { 1545 flt = calloc(1, sizeof(*flt)); 1546 if (!flt) { 1547 printf("failed to allocate port flt object\n"); 1548 return; 1549 } 1550 flt->tunnel.type = type; 1551 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1552 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1553 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1554 } 1555 printf("port %d: flow tunnel #%u type %s\n", 1556 port_id, flt->id, ops->type); 1557 } 1558 1559 /** Generate a port_flow entry from attributes/pattern/actions. */ 1560 static struct port_flow * 1561 port_flow_new(const struct rte_flow_attr *attr, 1562 const struct rte_flow_item *pattern, 1563 const struct rte_flow_action *actions, 1564 struct rte_flow_error *error) 1565 { 1566 const struct rte_flow_conv_rule rule = { 1567 .attr_ro = attr, 1568 .pattern_ro = pattern, 1569 .actions_ro = actions, 1570 }; 1571 struct port_flow *pf; 1572 int ret; 1573 1574 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1575 if (ret < 0) 1576 return NULL; 1577 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1578 if (!pf) { 1579 rte_flow_error_set 1580 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1581 "calloc() failed"); 1582 return NULL; 1583 } 1584 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1585 error) >= 0) 1586 return pf; 1587 free(pf); 1588 return NULL; 1589 } 1590 1591 /** Print a message out of a flow error. */ 1592 static int 1593 port_flow_complain(struct rte_flow_error *error) 1594 { 1595 static const char *const errstrlist[] = { 1596 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1597 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1598 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1599 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1600 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1601 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1602 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1603 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1604 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1605 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1606 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1607 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1608 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1609 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1610 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1611 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1612 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1613 }; 1614 const char *errstr; 1615 char buf[32]; 1616 int err = rte_errno; 1617 1618 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1619 !errstrlist[error->type]) 1620 errstr = "unknown type"; 1621 else 1622 errstr = errstrlist[error->type]; 1623 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__, 1624 error->type, errstr, 1625 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1626 error->cause), buf) : "", 1627 error->message ? error->message : "(no stated reason)", 1628 rte_strerror(err)); 1629 return -err; 1630 } 1631 1632 static void 1633 rss_config_display(struct rte_flow_action_rss *rss_conf) 1634 { 1635 uint8_t i; 1636 1637 if (rss_conf == NULL) { 1638 printf("Invalid rule\n"); 1639 return; 1640 } 1641 1642 printf("RSS:\n" 1643 " queues:"); 1644 if (rss_conf->queue_num == 0) 1645 printf(" none"); 1646 for (i = 0; i < rss_conf->queue_num; i++) 1647 printf(" %d", rss_conf->queue[i]); 1648 printf("\n"); 1649 1650 printf(" function: "); 1651 switch (rss_conf->func) { 1652 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1653 printf("default\n"); 1654 break; 1655 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1656 printf("toeplitz\n"); 1657 break; 1658 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1659 printf("simple_xor\n"); 1660 break; 1661 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1662 printf("symmetric_toeplitz\n"); 1663 break; 1664 default: 1665 printf("Unknown function\n"); 1666 return; 1667 } 1668 1669 printf(" types:\n"); 1670 if (rss_conf->types == 0) { 1671 printf(" none\n"); 1672 return; 1673 } 1674 for (i = 0; rss_type_table[i].str; i++) { 1675 if ((rss_conf->types & 1676 rss_type_table[i].rss_type) == 1677 rss_type_table[i].rss_type && 1678 rss_type_table[i].rss_type != 0) 1679 printf(" %s\n", rss_type_table[i].str); 1680 } 1681 } 1682 1683 static struct port_shared_action * 1684 action_get_by_id(portid_t port_id, uint32_t id) 1685 { 1686 struct rte_port *port; 1687 struct port_shared_action **ppsa; 1688 struct port_shared_action *psa = NULL; 1689 1690 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1691 port_id == (portid_t)RTE_PORT_ALL) 1692 return NULL; 1693 port = &ports[port_id]; 1694 ppsa = &port->actions_list; 1695 while (*ppsa) { 1696 if ((*ppsa)->id == id) { 1697 psa = *ppsa; 1698 break; 1699 } 1700 ppsa = &(*ppsa)->next; 1701 } 1702 if (!psa) 1703 printf("Failed to find shared action #%u on port %u\n", 1704 id, port_id); 1705 return psa; 1706 } 1707 1708 static int 1709 action_alloc(portid_t port_id, uint32_t id, 1710 struct port_shared_action **action) 1711 { 1712 struct rte_port *port; 1713 struct port_shared_action **ppsa; 1714 struct port_shared_action *psa = NULL; 1715 1716 *action = NULL; 1717 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1718 port_id == (portid_t)RTE_PORT_ALL) 1719 return -EINVAL; 1720 port = &ports[port_id]; 1721 if (id == UINT32_MAX) { 1722 /* taking first available ID */ 1723 if (port->actions_list) { 1724 if (port->actions_list->id == UINT32_MAX - 1) { 1725 printf("Highest shared action ID is already" 1726 " assigned, delete it first\n"); 1727 return -ENOMEM; 1728 } 1729 id = port->actions_list->id + 1; 1730 } else { 1731 id = 0; 1732 } 1733 } 1734 psa = calloc(1, sizeof(*psa)); 1735 if (!psa) { 1736 printf("Allocation of port %u shared action failed\n", 1737 port_id); 1738 return -ENOMEM; 1739 } 1740 ppsa = &port->actions_list; 1741 while (*ppsa && (*ppsa)->id > id) 1742 ppsa = &(*ppsa)->next; 1743 if (*ppsa && (*ppsa)->id == id) { 1744 printf("Shared action #%u is already assigned," 1745 " delete it first\n", id); 1746 free(psa); 1747 return -EINVAL; 1748 } 1749 psa->next = *ppsa; 1750 psa->id = id; 1751 *ppsa = psa; 1752 *action = psa; 1753 return 0; 1754 } 1755 1756 /** Create shared action */ 1757 int 1758 port_shared_action_create(portid_t port_id, uint32_t id, 1759 const struct rte_flow_shared_action_conf *conf, 1760 const struct rte_flow_action *action) 1761 { 1762 struct port_shared_action *psa; 1763 int ret; 1764 struct rte_flow_error error; 1765 1766 ret = action_alloc(port_id, id, &psa); 1767 if (ret) 1768 return ret; 1769 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1770 struct rte_flow_action_age *age = 1771 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1772 1773 psa->age_type = ACTION_AGE_CONTEXT_TYPE_SHARED_ACTION; 1774 age->context = &psa->age_type; 1775 } 1776 /* Poisoning to make sure PMDs update it in case of error. */ 1777 memset(&error, 0x22, sizeof(error)); 1778 psa->action = rte_flow_shared_action_create(port_id, conf, action, 1779 &error); 1780 if (!psa->action) { 1781 uint32_t destroy_id = psa->id; 1782 port_shared_action_destroy(port_id, 1, &destroy_id); 1783 return port_flow_complain(&error); 1784 } 1785 psa->type = action->type; 1786 printf("Shared action #%u created\n", psa->id); 1787 return 0; 1788 } 1789 1790 /** Destroy shared action */ 1791 int 1792 port_shared_action_destroy(portid_t port_id, 1793 uint32_t n, 1794 const uint32_t *actions) 1795 { 1796 struct rte_port *port; 1797 struct port_shared_action **tmp; 1798 uint32_t c = 0; 1799 int ret = 0; 1800 1801 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1802 port_id == (portid_t)RTE_PORT_ALL) 1803 return -EINVAL; 1804 port = &ports[port_id]; 1805 tmp = &port->actions_list; 1806 while (*tmp) { 1807 uint32_t i; 1808 1809 for (i = 0; i != n; ++i) { 1810 struct rte_flow_error error; 1811 struct port_shared_action *psa = *tmp; 1812 1813 if (actions[i] != psa->id) 1814 continue; 1815 /* 1816 * Poisoning to make sure PMDs update it in case 1817 * of error. 1818 */ 1819 memset(&error, 0x33, sizeof(error)); 1820 1821 if (psa->action && rte_flow_shared_action_destroy( 1822 port_id, psa->action, &error)) { 1823 ret = port_flow_complain(&error); 1824 continue; 1825 } 1826 *tmp = psa->next; 1827 printf("Shared action #%u destroyed\n", psa->id); 1828 free(psa); 1829 break; 1830 } 1831 if (i == n) 1832 tmp = &(*tmp)->next; 1833 ++c; 1834 } 1835 return ret; 1836 } 1837 1838 1839 /** Get shared action by port + id */ 1840 struct rte_flow_shared_action * 1841 port_shared_action_get_by_id(portid_t port_id, uint32_t id) 1842 { 1843 1844 struct port_shared_action *psa = action_get_by_id(port_id, id); 1845 1846 return (psa) ? psa->action : NULL; 1847 } 1848 1849 /** Update shared action */ 1850 int 1851 port_shared_action_update(portid_t port_id, uint32_t id, 1852 const struct rte_flow_action *action) 1853 { 1854 struct rte_flow_error error; 1855 struct rte_flow_shared_action *shared_action; 1856 1857 shared_action = port_shared_action_get_by_id(port_id, id); 1858 if (!shared_action) 1859 return -EINVAL; 1860 if (rte_flow_shared_action_update(port_id, shared_action, action, 1861 &error)) { 1862 return port_flow_complain(&error); 1863 } 1864 printf("Shared action #%u updated\n", id); 1865 return 0; 1866 } 1867 1868 int 1869 port_shared_action_query(portid_t port_id, uint32_t id) 1870 { 1871 struct rte_flow_error error; 1872 struct port_shared_action *psa; 1873 uint64_t default_data; 1874 void *data = NULL; 1875 int ret = 0; 1876 1877 psa = action_get_by_id(port_id, id); 1878 if (!psa) 1879 return -EINVAL; 1880 switch (psa->type) { 1881 case RTE_FLOW_ACTION_TYPE_RSS: 1882 data = &default_data; 1883 break; 1884 default: 1885 printf("Shared action %u (type: %d) on port %u doesn't support" 1886 " query\n", id, psa->type, port_id); 1887 return -1; 1888 } 1889 if (rte_flow_shared_action_query(port_id, psa->action, data, &error)) 1890 ret = port_flow_complain(&error); 1891 switch (psa->type) { 1892 case RTE_FLOW_ACTION_TYPE_RSS: 1893 if (!ret) 1894 printf("Shared RSS action:\n\trefs:%u\n", 1895 *((uint32_t *)data)); 1896 data = NULL; 1897 break; 1898 default: 1899 printf("Shared action %u (type: %d) on port %u doesn't support" 1900 " query\n", id, psa->type, port_id); 1901 ret = -1; 1902 } 1903 return ret; 1904 } 1905 static struct port_flow_tunnel * 1906 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1907 const struct rte_flow_item *pattern, 1908 const struct rte_flow_action *actions, 1909 const struct tunnel_ops *tunnel_ops) 1910 { 1911 int ret; 1912 struct rte_port *port; 1913 struct port_flow_tunnel *pft; 1914 struct rte_flow_error error; 1915 1916 port = &ports[port_id]; 1917 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 1918 if (!pft) { 1919 printf("failed to locate port flow tunnel #%u\n", 1920 tunnel_ops->id); 1921 return NULL; 1922 } 1923 if (tunnel_ops->actions) { 1924 uint32_t num_actions; 1925 const struct rte_flow_action *aptr; 1926 1927 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 1928 &pft->pmd_actions, 1929 &pft->num_pmd_actions, 1930 &error); 1931 if (ret) { 1932 port_flow_complain(&error); 1933 return NULL; 1934 } 1935 for (aptr = actions, num_actions = 1; 1936 aptr->type != RTE_FLOW_ACTION_TYPE_END; 1937 aptr++, num_actions++); 1938 pft->actions = malloc( 1939 (num_actions + pft->num_pmd_actions) * 1940 sizeof(actions[0])); 1941 if (!pft->actions) { 1942 rte_flow_tunnel_action_decap_release( 1943 port_id, pft->actions, 1944 pft->num_pmd_actions, &error); 1945 return NULL; 1946 } 1947 rte_memcpy(pft->actions, pft->pmd_actions, 1948 pft->num_pmd_actions * sizeof(actions[0])); 1949 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 1950 num_actions * sizeof(actions[0])); 1951 } 1952 if (tunnel_ops->items) { 1953 uint32_t num_items; 1954 const struct rte_flow_item *iptr; 1955 1956 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 1957 &pft->pmd_items, 1958 &pft->num_pmd_items, 1959 &error); 1960 if (ret) { 1961 port_flow_complain(&error); 1962 return NULL; 1963 } 1964 for (iptr = pattern, num_items = 1; 1965 iptr->type != RTE_FLOW_ITEM_TYPE_END; 1966 iptr++, num_items++); 1967 pft->items = malloc((num_items + pft->num_pmd_items) * 1968 sizeof(pattern[0])); 1969 if (!pft->items) { 1970 rte_flow_tunnel_item_release( 1971 port_id, pft->pmd_items, 1972 pft->num_pmd_items, &error); 1973 return NULL; 1974 } 1975 rte_memcpy(pft->items, pft->pmd_items, 1976 pft->num_pmd_items * sizeof(pattern[0])); 1977 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 1978 num_items * sizeof(pattern[0])); 1979 } 1980 1981 return pft; 1982 } 1983 1984 static void 1985 port_flow_tunnel_offload_cmd_release(portid_t port_id, 1986 const struct tunnel_ops *tunnel_ops, 1987 struct port_flow_tunnel *pft) 1988 { 1989 struct rte_flow_error error; 1990 1991 if (tunnel_ops->actions) { 1992 free(pft->actions); 1993 rte_flow_tunnel_action_decap_release( 1994 port_id, pft->pmd_actions, 1995 pft->num_pmd_actions, &error); 1996 pft->actions = NULL; 1997 pft->pmd_actions = NULL; 1998 } 1999 if (tunnel_ops->items) { 2000 free(pft->items); 2001 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2002 pft->num_pmd_items, 2003 &error); 2004 pft->items = NULL; 2005 pft->pmd_items = NULL; 2006 } 2007 } 2008 2009 /** Validate flow rule. */ 2010 int 2011 port_flow_validate(portid_t port_id, 2012 const struct rte_flow_attr *attr, 2013 const struct rte_flow_item *pattern, 2014 const struct rte_flow_action *actions, 2015 const struct tunnel_ops *tunnel_ops) 2016 { 2017 struct rte_flow_error error; 2018 struct port_flow_tunnel *pft = NULL; 2019 2020 /* Poisoning to make sure PMDs update it in case of error. */ 2021 memset(&error, 0x11, sizeof(error)); 2022 if (tunnel_ops->enabled) { 2023 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2024 actions, tunnel_ops); 2025 if (!pft) 2026 return -ENOENT; 2027 if (pft->items) 2028 pattern = pft->items; 2029 if (pft->actions) 2030 actions = pft->actions; 2031 } 2032 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 2033 return port_flow_complain(&error); 2034 if (tunnel_ops->enabled) 2035 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2036 printf("Flow rule validated\n"); 2037 return 0; 2038 } 2039 2040 /** Return age action structure if exists, otherwise NULL. */ 2041 static struct rte_flow_action_age * 2042 age_action_get(const struct rte_flow_action *actions) 2043 { 2044 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2045 switch (actions->type) { 2046 case RTE_FLOW_ACTION_TYPE_AGE: 2047 return (struct rte_flow_action_age *) 2048 (uintptr_t)actions->conf; 2049 default: 2050 break; 2051 } 2052 } 2053 return NULL; 2054 } 2055 2056 /** Create flow rule. */ 2057 int 2058 port_flow_create(portid_t port_id, 2059 const struct rte_flow_attr *attr, 2060 const struct rte_flow_item *pattern, 2061 const struct rte_flow_action *actions, 2062 const struct tunnel_ops *tunnel_ops) 2063 { 2064 struct rte_flow *flow; 2065 struct rte_port *port; 2066 struct port_flow *pf; 2067 uint32_t id = 0; 2068 struct rte_flow_error error; 2069 struct port_flow_tunnel *pft = NULL; 2070 struct rte_flow_action_age *age = age_action_get(actions); 2071 2072 port = &ports[port_id]; 2073 if (port->flow_list) { 2074 if (port->flow_list->id == UINT32_MAX) { 2075 printf("Highest rule ID is already assigned, delete" 2076 " it first"); 2077 return -ENOMEM; 2078 } 2079 id = port->flow_list->id + 1; 2080 } 2081 if (tunnel_ops->enabled) { 2082 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2083 actions, tunnel_ops); 2084 if (!pft) 2085 return -ENOENT; 2086 if (pft->items) 2087 pattern = pft->items; 2088 if (pft->actions) 2089 actions = pft->actions; 2090 } 2091 pf = port_flow_new(attr, pattern, actions, &error); 2092 if (!pf) 2093 return port_flow_complain(&error); 2094 if (age) { 2095 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2096 age->context = &pf->age_type; 2097 } 2098 /* Poisoning to make sure PMDs update it in case of error. */ 2099 memset(&error, 0x22, sizeof(error)); 2100 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2101 if (!flow) { 2102 free(pf); 2103 return port_flow_complain(&error); 2104 } 2105 pf->next = port->flow_list; 2106 pf->id = id; 2107 pf->flow = flow; 2108 port->flow_list = pf; 2109 if (tunnel_ops->enabled) 2110 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2111 printf("Flow rule #%u created\n", pf->id); 2112 return 0; 2113 } 2114 2115 /** Destroy a number of flow rules. */ 2116 int 2117 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2118 { 2119 struct rte_port *port; 2120 struct port_flow **tmp; 2121 uint32_t c = 0; 2122 int ret = 0; 2123 2124 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2125 port_id == (portid_t)RTE_PORT_ALL) 2126 return -EINVAL; 2127 port = &ports[port_id]; 2128 tmp = &port->flow_list; 2129 while (*tmp) { 2130 uint32_t i; 2131 2132 for (i = 0; i != n; ++i) { 2133 struct rte_flow_error error; 2134 struct port_flow *pf = *tmp; 2135 2136 if (rule[i] != pf->id) 2137 continue; 2138 /* 2139 * Poisoning to make sure PMDs update it in case 2140 * of error. 2141 */ 2142 memset(&error, 0x33, sizeof(error)); 2143 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2144 ret = port_flow_complain(&error); 2145 continue; 2146 } 2147 printf("Flow rule #%u destroyed\n", pf->id); 2148 *tmp = pf->next; 2149 free(pf); 2150 break; 2151 } 2152 if (i == n) 2153 tmp = &(*tmp)->next; 2154 ++c; 2155 } 2156 return ret; 2157 } 2158 2159 /** Remove all flow rules. */ 2160 int 2161 port_flow_flush(portid_t port_id) 2162 { 2163 struct rte_flow_error error; 2164 struct rte_port *port; 2165 int ret = 0; 2166 2167 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2168 port_id == (portid_t)RTE_PORT_ALL) 2169 return -EINVAL; 2170 2171 port = &ports[port_id]; 2172 2173 if (port->flow_list == NULL) 2174 return ret; 2175 2176 /* Poisoning to make sure PMDs update it in case of error. */ 2177 memset(&error, 0x44, sizeof(error)); 2178 if (rte_flow_flush(port_id, &error)) { 2179 port_flow_complain(&error); 2180 } 2181 2182 while (port->flow_list) { 2183 struct port_flow *pf = port->flow_list->next; 2184 2185 free(port->flow_list); 2186 port->flow_list = pf; 2187 } 2188 return ret; 2189 } 2190 2191 /** Dump all flow rules. */ 2192 int 2193 port_flow_dump(portid_t port_id, const char *file_name) 2194 { 2195 int ret = 0; 2196 FILE *file = stdout; 2197 struct rte_flow_error error; 2198 2199 if (file_name && strlen(file_name)) { 2200 file = fopen(file_name, "w"); 2201 if (!file) { 2202 printf("Failed to create file %s: %s\n", file_name, 2203 strerror(errno)); 2204 return -errno; 2205 } 2206 } 2207 ret = rte_flow_dev_dump(port_id, file, &error); 2208 if (ret) { 2209 port_flow_complain(&error); 2210 printf("Failed to dump flow: %s\n", strerror(-ret)); 2211 } else 2212 printf("Flow dump finished\n"); 2213 if (file_name && strlen(file_name)) 2214 fclose(file); 2215 return ret; 2216 } 2217 2218 /** Query a flow rule. */ 2219 int 2220 port_flow_query(portid_t port_id, uint32_t rule, 2221 const struct rte_flow_action *action) 2222 { 2223 struct rte_flow_error error; 2224 struct rte_port *port; 2225 struct port_flow *pf; 2226 const char *name; 2227 union { 2228 struct rte_flow_query_count count; 2229 struct rte_flow_action_rss rss_conf; 2230 struct rte_flow_query_age age; 2231 } query; 2232 int ret; 2233 2234 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2235 port_id == (portid_t)RTE_PORT_ALL) 2236 return -EINVAL; 2237 port = &ports[port_id]; 2238 for (pf = port->flow_list; pf; pf = pf->next) 2239 if (pf->id == rule) 2240 break; 2241 if (!pf) { 2242 printf("Flow rule #%u not found\n", rule); 2243 return -ENOENT; 2244 } 2245 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2246 &name, sizeof(name), 2247 (void *)(uintptr_t)action->type, &error); 2248 if (ret < 0) 2249 return port_flow_complain(&error); 2250 switch (action->type) { 2251 case RTE_FLOW_ACTION_TYPE_COUNT: 2252 case RTE_FLOW_ACTION_TYPE_RSS: 2253 case RTE_FLOW_ACTION_TYPE_AGE: 2254 break; 2255 default: 2256 printf("Cannot query action type %d (%s)\n", 2257 action->type, name); 2258 return -ENOTSUP; 2259 } 2260 /* Poisoning to make sure PMDs update it in case of error. */ 2261 memset(&error, 0x55, sizeof(error)); 2262 memset(&query, 0, sizeof(query)); 2263 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2264 return port_flow_complain(&error); 2265 switch (action->type) { 2266 case RTE_FLOW_ACTION_TYPE_COUNT: 2267 printf("%s:\n" 2268 " hits_set: %u\n" 2269 " bytes_set: %u\n" 2270 " hits: %" PRIu64 "\n" 2271 " bytes: %" PRIu64 "\n", 2272 name, 2273 query.count.hits_set, 2274 query.count.bytes_set, 2275 query.count.hits, 2276 query.count.bytes); 2277 break; 2278 case RTE_FLOW_ACTION_TYPE_RSS: 2279 rss_config_display(&query.rss_conf); 2280 break; 2281 case RTE_FLOW_ACTION_TYPE_AGE: 2282 printf("%s:\n" 2283 " aged: %u\n" 2284 " sec_since_last_hit_valid: %u\n" 2285 " sec_since_last_hit: %" PRIu32 "\n", 2286 name, 2287 query.age.aged, 2288 query.age.sec_since_last_hit_valid, 2289 query.age.sec_since_last_hit); 2290 break; 2291 default: 2292 printf("Cannot display result for action type %d (%s)\n", 2293 action->type, name); 2294 break; 2295 } 2296 return 0; 2297 } 2298 2299 /** List simply and destroy all aged flows. */ 2300 void 2301 port_flow_aged(portid_t port_id, uint8_t destroy) 2302 { 2303 void **contexts; 2304 int nb_context, total = 0, idx; 2305 struct rte_flow_error error; 2306 enum age_action_context_type *type; 2307 union { 2308 struct port_flow *pf; 2309 struct port_shared_action *psa; 2310 } ctx; 2311 2312 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2313 port_id == (portid_t)RTE_PORT_ALL) 2314 return; 2315 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2316 printf("Port %u total aged flows: %d\n", port_id, total); 2317 if (total < 0) { 2318 port_flow_complain(&error); 2319 return; 2320 } 2321 if (total == 0) 2322 return; 2323 contexts = malloc(sizeof(void *) * total); 2324 if (contexts == NULL) { 2325 printf("Cannot allocate contexts for aged flow\n"); 2326 return; 2327 } 2328 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 2329 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2330 if (nb_context != total) { 2331 printf("Port:%d get aged flows count(%d) != total(%d)\n", 2332 port_id, nb_context, total); 2333 free(contexts); 2334 return; 2335 } 2336 total = 0; 2337 for (idx = 0; idx < nb_context; idx++) { 2338 if (!contexts[idx]) { 2339 printf("Error: get Null context in port %u\n", port_id); 2340 continue; 2341 } 2342 type = (enum age_action_context_type *)contexts[idx]; 2343 switch (*type) { 2344 case ACTION_AGE_CONTEXT_TYPE_FLOW: 2345 ctx.pf = container_of(type, struct port_flow, age_type); 2346 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 2347 "\t%c%c%c\t\n", 2348 "Flow", 2349 ctx.pf->id, 2350 ctx.pf->rule.attr->group, 2351 ctx.pf->rule.attr->priority, 2352 ctx.pf->rule.attr->ingress ? 'i' : '-', 2353 ctx.pf->rule.attr->egress ? 'e' : '-', 2354 ctx.pf->rule.attr->transfer ? 't' : '-'); 2355 if (destroy && !port_flow_destroy(port_id, 1, 2356 &ctx.pf->id)) 2357 total++; 2358 break; 2359 case ACTION_AGE_CONTEXT_TYPE_SHARED_ACTION: 2360 ctx.psa = container_of(type, struct port_shared_action, 2361 age_type); 2362 printf("%-20s\t%" PRIu32 "\n", "Shared action", 2363 ctx.psa->id); 2364 break; 2365 default: 2366 printf("Error: invalid context type %u\n", port_id); 2367 break; 2368 } 2369 } 2370 printf("\n%d flows destroyed\n", total); 2371 free(contexts); 2372 } 2373 2374 /** List flow rules. */ 2375 void 2376 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2377 { 2378 struct rte_port *port; 2379 struct port_flow *pf; 2380 struct port_flow *list = NULL; 2381 uint32_t i; 2382 2383 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2384 port_id == (portid_t)RTE_PORT_ALL) 2385 return; 2386 port = &ports[port_id]; 2387 if (!port->flow_list) 2388 return; 2389 /* Sort flows by group, priority and ID. */ 2390 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2391 struct port_flow **tmp; 2392 const struct rte_flow_attr *curr = pf->rule.attr; 2393 2394 if (n) { 2395 /* Filter out unwanted groups. */ 2396 for (i = 0; i != n; ++i) 2397 if (curr->group == group[i]) 2398 break; 2399 if (i == n) 2400 continue; 2401 } 2402 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2403 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2404 2405 if (curr->group > comp->group || 2406 (curr->group == comp->group && 2407 curr->priority > comp->priority) || 2408 (curr->group == comp->group && 2409 curr->priority == comp->priority && 2410 pf->id > (*tmp)->id)) 2411 continue; 2412 break; 2413 } 2414 pf->tmp = *tmp; 2415 *tmp = pf; 2416 } 2417 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2418 for (pf = list; pf != NULL; pf = pf->tmp) { 2419 const struct rte_flow_item *item = pf->rule.pattern; 2420 const struct rte_flow_action *action = pf->rule.actions; 2421 const char *name; 2422 2423 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2424 pf->id, 2425 pf->rule.attr->group, 2426 pf->rule.attr->priority, 2427 pf->rule.attr->ingress ? 'i' : '-', 2428 pf->rule.attr->egress ? 'e' : '-', 2429 pf->rule.attr->transfer ? 't' : '-'); 2430 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2431 if ((uint32_t)item->type > INT_MAX) 2432 name = "PMD_INTERNAL"; 2433 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2434 &name, sizeof(name), 2435 (void *)(uintptr_t)item->type, 2436 NULL) <= 0) 2437 name = "[UNKNOWN]"; 2438 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2439 printf("%s ", name); 2440 ++item; 2441 } 2442 printf("=>"); 2443 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2444 if ((uint32_t)action->type > INT_MAX) 2445 name = "PMD_INTERNAL"; 2446 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2447 &name, sizeof(name), 2448 (void *)(uintptr_t)action->type, 2449 NULL) <= 0) 2450 name = "[UNKNOWN]"; 2451 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2452 printf(" %s", name); 2453 ++action; 2454 } 2455 printf("\n"); 2456 } 2457 } 2458 2459 /** Restrict ingress traffic to the defined flow rules. */ 2460 int 2461 port_flow_isolate(portid_t port_id, int set) 2462 { 2463 struct rte_flow_error error; 2464 2465 /* Poisoning to make sure PMDs update it in case of error. */ 2466 memset(&error, 0x66, sizeof(error)); 2467 if (rte_flow_isolate(port_id, set, &error)) 2468 return port_flow_complain(&error); 2469 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2470 port_id, 2471 set ? "now restricted" : "not restricted anymore"); 2472 return 0; 2473 } 2474 2475 /* 2476 * RX/TX ring descriptors display functions. 2477 */ 2478 int 2479 rx_queue_id_is_invalid(queueid_t rxq_id) 2480 { 2481 if (rxq_id < nb_rxq) 2482 return 0; 2483 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 2484 return 1; 2485 } 2486 2487 int 2488 tx_queue_id_is_invalid(queueid_t txq_id) 2489 { 2490 if (txq_id < nb_txq) 2491 return 0; 2492 printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq); 2493 return 1; 2494 } 2495 2496 static int 2497 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2498 { 2499 struct rte_port *port = &ports[port_id]; 2500 struct rte_eth_rxq_info rx_qinfo; 2501 int ret; 2502 2503 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2504 if (ret == 0) { 2505 *ring_size = rx_qinfo.nb_desc; 2506 return ret; 2507 } 2508 2509 if (ret != -ENOTSUP) 2510 return ret; 2511 /* 2512 * If the rte_eth_rx_queue_info_get is not support for this PMD, 2513 * ring_size stored in testpmd will be used for validity verification. 2514 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 2515 * being 0, it will use a default value provided by PMDs to setup this 2516 * rxq. If the default value is 0, it will use the 2517 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 2518 */ 2519 if (port->nb_rx_desc[rxq_id]) 2520 *ring_size = port->nb_rx_desc[rxq_id]; 2521 else if (port->dev_info.default_rxportconf.ring_size) 2522 *ring_size = port->dev_info.default_rxportconf.ring_size; 2523 else 2524 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2525 return 0; 2526 } 2527 2528 static int 2529 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2530 { 2531 struct rte_port *port = &ports[port_id]; 2532 struct rte_eth_txq_info tx_qinfo; 2533 int ret; 2534 2535 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2536 if (ret == 0) { 2537 *ring_size = tx_qinfo.nb_desc; 2538 return ret; 2539 } 2540 2541 if (ret != -ENOTSUP) 2542 return ret; 2543 /* 2544 * If the rte_eth_tx_queue_info_get is not support for this PMD, 2545 * ring_size stored in testpmd will be used for validity verification. 2546 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 2547 * being 0, it will use a default value provided by PMDs to setup this 2548 * txq. If the default value is 0, it will use the 2549 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 2550 */ 2551 if (port->nb_tx_desc[txq_id]) 2552 *ring_size = port->nb_tx_desc[txq_id]; 2553 else if (port->dev_info.default_txportconf.ring_size) 2554 *ring_size = port->dev_info.default_txportconf.ring_size; 2555 else 2556 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2557 return 0; 2558 } 2559 2560 static int 2561 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2562 { 2563 uint16_t ring_size; 2564 int ret; 2565 2566 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2567 if (ret) 2568 return 1; 2569 2570 if (rxdesc_id < ring_size) 2571 return 0; 2572 2573 printf("Invalid RX descriptor %u (must be < ring_size=%u)\n", 2574 rxdesc_id, ring_size); 2575 return 1; 2576 } 2577 2578 static int 2579 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2580 { 2581 uint16_t ring_size; 2582 int ret; 2583 2584 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2585 if (ret) 2586 return 1; 2587 2588 if (txdesc_id < ring_size) 2589 return 0; 2590 2591 printf("Invalid TX descriptor %u (must be < ring_size=%u)\n", 2592 txdesc_id, ring_size); 2593 return 1; 2594 } 2595 2596 static const struct rte_memzone * 2597 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2598 { 2599 char mz_name[RTE_MEMZONE_NAMESIZE]; 2600 const struct rte_memzone *mz; 2601 2602 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2603 port_id, q_id, ring_name); 2604 mz = rte_memzone_lookup(mz_name); 2605 if (mz == NULL) 2606 printf("%s ring memory zoneof (port %d, queue %d) not" 2607 "found (zone name = %s\n", 2608 ring_name, port_id, q_id, mz_name); 2609 return mz; 2610 } 2611 2612 union igb_ring_dword { 2613 uint64_t dword; 2614 struct { 2615 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2616 uint32_t lo; 2617 uint32_t hi; 2618 #else 2619 uint32_t hi; 2620 uint32_t lo; 2621 #endif 2622 } words; 2623 }; 2624 2625 struct igb_ring_desc_32_bytes { 2626 union igb_ring_dword lo_dword; 2627 union igb_ring_dword hi_dword; 2628 union igb_ring_dword resv1; 2629 union igb_ring_dword resv2; 2630 }; 2631 2632 struct igb_ring_desc_16_bytes { 2633 union igb_ring_dword lo_dword; 2634 union igb_ring_dword hi_dword; 2635 }; 2636 2637 static void 2638 ring_rxd_display_dword(union igb_ring_dword dword) 2639 { 2640 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2641 (unsigned)dword.words.hi); 2642 } 2643 2644 static void 2645 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2646 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2647 portid_t port_id, 2648 #else 2649 __rte_unused portid_t port_id, 2650 #endif 2651 uint16_t desc_id) 2652 { 2653 struct igb_ring_desc_16_bytes *ring = 2654 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2655 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2656 int ret; 2657 struct rte_eth_dev_info dev_info; 2658 2659 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2660 if (ret != 0) 2661 return; 2662 2663 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2664 /* 32 bytes RX descriptor, i40e only */ 2665 struct igb_ring_desc_32_bytes *ring = 2666 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2667 ring[desc_id].lo_dword.dword = 2668 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2669 ring_rxd_display_dword(ring[desc_id].lo_dword); 2670 ring[desc_id].hi_dword.dword = 2671 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2672 ring_rxd_display_dword(ring[desc_id].hi_dword); 2673 ring[desc_id].resv1.dword = 2674 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2675 ring_rxd_display_dword(ring[desc_id].resv1); 2676 ring[desc_id].resv2.dword = 2677 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2678 ring_rxd_display_dword(ring[desc_id].resv2); 2679 2680 return; 2681 } 2682 #endif 2683 /* 16 bytes RX descriptor */ 2684 ring[desc_id].lo_dword.dword = 2685 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2686 ring_rxd_display_dword(ring[desc_id].lo_dword); 2687 ring[desc_id].hi_dword.dword = 2688 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2689 ring_rxd_display_dword(ring[desc_id].hi_dword); 2690 } 2691 2692 static void 2693 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2694 { 2695 struct igb_ring_desc_16_bytes *ring; 2696 struct igb_ring_desc_16_bytes txd; 2697 2698 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2699 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2700 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2701 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2702 (unsigned)txd.lo_dword.words.lo, 2703 (unsigned)txd.lo_dword.words.hi, 2704 (unsigned)txd.hi_dword.words.lo, 2705 (unsigned)txd.hi_dword.words.hi); 2706 } 2707 2708 void 2709 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2710 { 2711 const struct rte_memzone *rx_mz; 2712 2713 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2714 return; 2715 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2716 if (rx_mz == NULL) 2717 return; 2718 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2719 } 2720 2721 void 2722 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2723 { 2724 const struct rte_memzone *tx_mz; 2725 2726 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2727 return; 2728 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2729 if (tx_mz == NULL) 2730 return; 2731 ring_tx_descriptor_display(tx_mz, txd_id); 2732 } 2733 2734 void 2735 fwd_lcores_config_display(void) 2736 { 2737 lcoreid_t lc_id; 2738 2739 printf("List of forwarding lcores:"); 2740 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2741 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2742 printf("\n"); 2743 } 2744 void 2745 rxtx_config_display(void) 2746 { 2747 portid_t pid; 2748 queueid_t qid; 2749 2750 printf(" %s packet forwarding%s packets/burst=%d\n", 2751 cur_fwd_eng->fwd_mode_name, 2752 retry_enabled == 0 ? "" : " with retry", 2753 nb_pkt_per_burst); 2754 2755 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2756 printf(" packet len=%u - nb packet segments=%d\n", 2757 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2758 2759 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2760 nb_fwd_lcores, nb_fwd_ports); 2761 2762 RTE_ETH_FOREACH_DEV(pid) { 2763 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2764 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2765 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2766 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2767 struct rte_eth_rxq_info rx_qinfo; 2768 struct rte_eth_txq_info tx_qinfo; 2769 uint16_t rx_free_thresh_tmp; 2770 uint16_t tx_free_thresh_tmp; 2771 uint16_t tx_rs_thresh_tmp; 2772 uint16_t nb_rx_desc_tmp; 2773 uint16_t nb_tx_desc_tmp; 2774 uint64_t offloads_tmp; 2775 uint8_t pthresh_tmp; 2776 uint8_t hthresh_tmp; 2777 uint8_t wthresh_tmp; 2778 int32_t rc; 2779 2780 /* per port config */ 2781 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2782 (unsigned int)pid, nb_rxq, nb_txq); 2783 2784 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2785 ports[pid].dev_conf.rxmode.offloads, 2786 ports[pid].dev_conf.txmode.offloads); 2787 2788 /* per rx queue config only for first queue to be less verbose */ 2789 for (qid = 0; qid < 1; qid++) { 2790 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2791 if (rc) { 2792 nb_rx_desc_tmp = nb_rx_desc[qid]; 2793 rx_free_thresh_tmp = 2794 rx_conf[qid].rx_free_thresh; 2795 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2796 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2797 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2798 offloads_tmp = rx_conf[qid].offloads; 2799 } else { 2800 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2801 rx_free_thresh_tmp = 2802 rx_qinfo.conf.rx_free_thresh; 2803 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2804 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2805 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2806 offloads_tmp = rx_qinfo.conf.offloads; 2807 } 2808 2809 printf(" RX queue: %d\n", qid); 2810 printf(" RX desc=%d - RX free threshold=%d\n", 2811 nb_rx_desc_tmp, rx_free_thresh_tmp); 2812 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2813 " wthresh=%d\n", 2814 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2815 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); 2816 } 2817 2818 /* per tx queue config only for first queue to be less verbose */ 2819 for (qid = 0; qid < 1; qid++) { 2820 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2821 if (rc) { 2822 nb_tx_desc_tmp = nb_tx_desc[qid]; 2823 tx_free_thresh_tmp = 2824 tx_conf[qid].tx_free_thresh; 2825 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2826 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2827 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2828 offloads_tmp = tx_conf[qid].offloads; 2829 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2830 } else { 2831 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2832 tx_free_thresh_tmp = 2833 tx_qinfo.conf.tx_free_thresh; 2834 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2835 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2836 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2837 offloads_tmp = tx_qinfo.conf.offloads; 2838 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2839 } 2840 2841 printf(" TX queue: %d\n", qid); 2842 printf(" TX desc=%d - TX free threshold=%d\n", 2843 nb_tx_desc_tmp, tx_free_thresh_tmp); 2844 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2845 " wthresh=%d\n", 2846 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2847 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2848 offloads_tmp, tx_rs_thresh_tmp); 2849 } 2850 } 2851 } 2852 2853 void 2854 port_rss_reta_info(portid_t port_id, 2855 struct rte_eth_rss_reta_entry64 *reta_conf, 2856 uint16_t nb_entries) 2857 { 2858 uint16_t i, idx, shift; 2859 int ret; 2860 2861 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2862 return; 2863 2864 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2865 if (ret != 0) { 2866 printf("Failed to get RSS RETA info, return code = %d\n", ret); 2867 return; 2868 } 2869 2870 for (i = 0; i < nb_entries; i++) { 2871 idx = i / RTE_RETA_GROUP_SIZE; 2872 shift = i % RTE_RETA_GROUP_SIZE; 2873 if (!(reta_conf[idx].mask & (1ULL << shift))) 2874 continue; 2875 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2876 i, reta_conf[idx].reta[shift]); 2877 } 2878 } 2879 2880 /* 2881 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 2882 * key of the port. 2883 */ 2884 void 2885 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2886 { 2887 struct rte_eth_rss_conf rss_conf = {0}; 2888 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2889 uint64_t rss_hf; 2890 uint8_t i; 2891 int diag; 2892 struct rte_eth_dev_info dev_info; 2893 uint8_t hash_key_size; 2894 int ret; 2895 2896 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2897 return; 2898 2899 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2900 if (ret != 0) 2901 return; 2902 2903 if (dev_info.hash_key_size > 0 && 2904 dev_info.hash_key_size <= sizeof(rss_key)) 2905 hash_key_size = dev_info.hash_key_size; 2906 else { 2907 printf("dev_info did not provide a valid hash key size\n"); 2908 return; 2909 } 2910 2911 /* Get RSS hash key if asked to display it */ 2912 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 2913 rss_conf.rss_key_len = hash_key_size; 2914 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2915 if (diag != 0) { 2916 switch (diag) { 2917 case -ENODEV: 2918 printf("port index %d invalid\n", port_id); 2919 break; 2920 case -ENOTSUP: 2921 printf("operation not supported by device\n"); 2922 break; 2923 default: 2924 printf("operation failed - diag=%d\n", diag); 2925 break; 2926 } 2927 return; 2928 } 2929 rss_hf = rss_conf.rss_hf; 2930 if (rss_hf == 0) { 2931 printf("RSS disabled\n"); 2932 return; 2933 } 2934 printf("RSS functions:\n "); 2935 for (i = 0; rss_type_table[i].str; i++) { 2936 if (rss_hf & rss_type_table[i].rss_type) 2937 printf("%s ", rss_type_table[i].str); 2938 } 2939 printf("\n"); 2940 if (!show_rss_key) 2941 return; 2942 printf("RSS key:\n"); 2943 for (i = 0; i < hash_key_size; i++) 2944 printf("%02X", rss_key[i]); 2945 printf("\n"); 2946 } 2947 2948 void 2949 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2950 uint hash_key_len) 2951 { 2952 struct rte_eth_rss_conf rss_conf; 2953 int diag; 2954 unsigned int i; 2955 2956 rss_conf.rss_key = NULL; 2957 rss_conf.rss_key_len = hash_key_len; 2958 rss_conf.rss_hf = 0; 2959 for (i = 0; rss_type_table[i].str; i++) { 2960 if (!strcmp(rss_type_table[i].str, rss_type)) 2961 rss_conf.rss_hf = rss_type_table[i].rss_type; 2962 } 2963 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2964 if (diag == 0) { 2965 rss_conf.rss_key = hash_key; 2966 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2967 } 2968 if (diag == 0) 2969 return; 2970 2971 switch (diag) { 2972 case -ENODEV: 2973 printf("port index %d invalid\n", port_id); 2974 break; 2975 case -ENOTSUP: 2976 printf("operation not supported by device\n"); 2977 break; 2978 default: 2979 printf("operation failed - diag=%d\n", diag); 2980 break; 2981 } 2982 } 2983 2984 /* 2985 * Setup forwarding configuration for each logical core. 2986 */ 2987 static void 2988 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2989 { 2990 streamid_t nb_fs_per_lcore; 2991 streamid_t nb_fs; 2992 streamid_t sm_id; 2993 lcoreid_t nb_extra; 2994 lcoreid_t nb_fc; 2995 lcoreid_t nb_lc; 2996 lcoreid_t lc_id; 2997 2998 nb_fs = cfg->nb_fwd_streams; 2999 nb_fc = cfg->nb_fwd_lcores; 3000 if (nb_fs <= nb_fc) { 3001 nb_fs_per_lcore = 1; 3002 nb_extra = 0; 3003 } else { 3004 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3005 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3006 } 3007 3008 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3009 sm_id = 0; 3010 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3011 fwd_lcores[lc_id]->stream_idx = sm_id; 3012 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3013 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3014 } 3015 3016 /* 3017 * Assign extra remaining streams, if any. 3018 */ 3019 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3020 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3021 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3022 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3023 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3024 } 3025 } 3026 3027 static portid_t 3028 fwd_topology_tx_port_get(portid_t rxp) 3029 { 3030 static int warning_once = 1; 3031 3032 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 3033 3034 switch (port_topology) { 3035 default: 3036 case PORT_TOPOLOGY_PAIRED: 3037 if ((rxp & 0x1) == 0) { 3038 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 3039 return rxp + 1; 3040 if (warning_once) { 3041 printf("\nWarning! port-topology=paired" 3042 " and odd forward ports number," 3043 " the last port will pair with" 3044 " itself.\n\n"); 3045 warning_once = 0; 3046 } 3047 return rxp; 3048 } 3049 return rxp - 1; 3050 case PORT_TOPOLOGY_CHAINED: 3051 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 3052 case PORT_TOPOLOGY_LOOP: 3053 return rxp; 3054 } 3055 } 3056 3057 static void 3058 simple_fwd_config_setup(void) 3059 { 3060 portid_t i; 3061 3062 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 3063 cur_fwd_config.nb_fwd_streams = 3064 (streamid_t) cur_fwd_config.nb_fwd_ports; 3065 3066 /* reinitialize forwarding streams */ 3067 init_fwd_streams(); 3068 3069 /* 3070 * In the simple forwarding test, the number of forwarding cores 3071 * must be lower or equal to the number of forwarding ports. 3072 */ 3073 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3074 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 3075 cur_fwd_config.nb_fwd_lcores = 3076 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 3077 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3078 3079 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 3080 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 3081 fwd_streams[i]->rx_queue = 0; 3082 fwd_streams[i]->tx_port = 3083 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 3084 fwd_streams[i]->tx_queue = 0; 3085 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 3086 fwd_streams[i]->retry_enabled = retry_enabled; 3087 } 3088 } 3089 3090 /** 3091 * For the RSS forwarding test all streams distributed over lcores. Each stream 3092 * being composed of a RX queue to poll on a RX port for input messages, 3093 * associated with a TX queue of a TX port where to send forwarded packets. 3094 */ 3095 static void 3096 rss_fwd_config_setup(void) 3097 { 3098 portid_t rxp; 3099 portid_t txp; 3100 queueid_t rxq; 3101 queueid_t nb_q; 3102 streamid_t sm_id; 3103 3104 nb_q = nb_rxq; 3105 if (nb_q > nb_txq) 3106 nb_q = nb_txq; 3107 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3108 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3109 cur_fwd_config.nb_fwd_streams = 3110 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 3111 3112 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3113 cur_fwd_config.nb_fwd_lcores = 3114 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3115 3116 /* reinitialize forwarding streams */ 3117 init_fwd_streams(); 3118 3119 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3120 rxp = 0; rxq = 0; 3121 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 3122 struct fwd_stream *fs; 3123 3124 fs = fwd_streams[sm_id]; 3125 txp = fwd_topology_tx_port_get(rxp); 3126 fs->rx_port = fwd_ports_ids[rxp]; 3127 fs->rx_queue = rxq; 3128 fs->tx_port = fwd_ports_ids[txp]; 3129 fs->tx_queue = rxq; 3130 fs->peer_addr = fs->tx_port; 3131 fs->retry_enabled = retry_enabled; 3132 rxp++; 3133 if (rxp < nb_fwd_ports) 3134 continue; 3135 rxp = 0; 3136 rxq++; 3137 } 3138 } 3139 3140 /** 3141 * For the DCB forwarding test, each core is assigned on each traffic class. 3142 * 3143 * Each core is assigned a multi-stream, each stream being composed of 3144 * a RX queue to poll on a RX port for input messages, associated with 3145 * a TX queue of a TX port where to send forwarded packets. All RX and 3146 * TX queues are mapping to the same traffic class. 3147 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 3148 * the same core 3149 */ 3150 static void 3151 dcb_fwd_config_setup(void) 3152 { 3153 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3154 portid_t txp, rxp = 0; 3155 queueid_t txq, rxq = 0; 3156 lcoreid_t lc_id; 3157 uint16_t nb_rx_queue, nb_tx_queue; 3158 uint16_t i, j, k, sm_id = 0; 3159 uint8_t tc = 0; 3160 3161 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3162 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3163 cur_fwd_config.nb_fwd_streams = 3164 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3165 3166 /* reinitialize forwarding streams */ 3167 init_fwd_streams(); 3168 sm_id = 0; 3169 txp = 1; 3170 /* get the dcb info on the first RX and TX ports */ 3171 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3172 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3173 3174 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3175 fwd_lcores[lc_id]->stream_nb = 0; 3176 fwd_lcores[lc_id]->stream_idx = sm_id; 3177 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 3178 /* if the nb_queue is zero, means this tc is 3179 * not enabled on the POOL 3180 */ 3181 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3182 break; 3183 k = fwd_lcores[lc_id]->stream_nb + 3184 fwd_lcores[lc_id]->stream_idx; 3185 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3186 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3187 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3188 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3189 for (j = 0; j < nb_rx_queue; j++) { 3190 struct fwd_stream *fs; 3191 3192 fs = fwd_streams[k + j]; 3193 fs->rx_port = fwd_ports_ids[rxp]; 3194 fs->rx_queue = rxq + j; 3195 fs->tx_port = fwd_ports_ids[txp]; 3196 fs->tx_queue = txq + j % nb_tx_queue; 3197 fs->peer_addr = fs->tx_port; 3198 fs->retry_enabled = retry_enabled; 3199 } 3200 fwd_lcores[lc_id]->stream_nb += 3201 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3202 } 3203 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3204 3205 tc++; 3206 if (tc < rxp_dcb_info.nb_tcs) 3207 continue; 3208 /* Restart from TC 0 on next RX port */ 3209 tc = 0; 3210 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3211 rxp = (portid_t) 3212 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3213 else 3214 rxp++; 3215 if (rxp >= nb_fwd_ports) 3216 return; 3217 /* get the dcb information on next RX and TX ports */ 3218 if ((rxp & 0x1) == 0) 3219 txp = (portid_t) (rxp + 1); 3220 else 3221 txp = (portid_t) (rxp - 1); 3222 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3223 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3224 } 3225 } 3226 3227 static void 3228 icmp_echo_config_setup(void) 3229 { 3230 portid_t rxp; 3231 queueid_t rxq; 3232 lcoreid_t lc_id; 3233 uint16_t sm_id; 3234 3235 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3236 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3237 (nb_txq * nb_fwd_ports); 3238 else 3239 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3240 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3241 cur_fwd_config.nb_fwd_streams = 3242 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3243 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3244 cur_fwd_config.nb_fwd_lcores = 3245 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3246 if (verbose_level > 0) { 3247 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 3248 __FUNCTION__, 3249 cur_fwd_config.nb_fwd_lcores, 3250 cur_fwd_config.nb_fwd_ports, 3251 cur_fwd_config.nb_fwd_streams); 3252 } 3253 3254 /* reinitialize forwarding streams */ 3255 init_fwd_streams(); 3256 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3257 rxp = 0; rxq = 0; 3258 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3259 if (verbose_level > 0) 3260 printf(" core=%d: \n", lc_id); 3261 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3262 struct fwd_stream *fs; 3263 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3264 fs->rx_port = fwd_ports_ids[rxp]; 3265 fs->rx_queue = rxq; 3266 fs->tx_port = fs->rx_port; 3267 fs->tx_queue = rxq; 3268 fs->peer_addr = fs->tx_port; 3269 fs->retry_enabled = retry_enabled; 3270 if (verbose_level > 0) 3271 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3272 sm_id, fs->rx_port, fs->rx_queue, 3273 fs->tx_queue); 3274 rxq = (queueid_t) (rxq + 1); 3275 if (rxq == nb_rxq) { 3276 rxq = 0; 3277 rxp = (portid_t) (rxp + 1); 3278 } 3279 } 3280 } 3281 } 3282 3283 void 3284 fwd_config_setup(void) 3285 { 3286 cur_fwd_config.fwd_eng = cur_fwd_eng; 3287 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3288 icmp_echo_config_setup(); 3289 return; 3290 } 3291 3292 if ((nb_rxq > 1) && (nb_txq > 1)){ 3293 if (dcb_config) 3294 dcb_fwd_config_setup(); 3295 else 3296 rss_fwd_config_setup(); 3297 } 3298 else 3299 simple_fwd_config_setup(); 3300 } 3301 3302 static const char * 3303 mp_alloc_to_str(uint8_t mode) 3304 { 3305 switch (mode) { 3306 case MP_ALLOC_NATIVE: 3307 return "native"; 3308 case MP_ALLOC_ANON: 3309 return "anon"; 3310 case MP_ALLOC_XMEM: 3311 return "xmem"; 3312 case MP_ALLOC_XMEM_HUGE: 3313 return "xmemhuge"; 3314 case MP_ALLOC_XBUF: 3315 return "xbuf"; 3316 default: 3317 return "invalid"; 3318 } 3319 } 3320 3321 void 3322 pkt_fwd_config_display(struct fwd_config *cfg) 3323 { 3324 struct fwd_stream *fs; 3325 lcoreid_t lc_id; 3326 streamid_t sm_id; 3327 3328 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3329 "NUMA support %s, MP allocation mode: %s\n", 3330 cfg->fwd_eng->fwd_mode_name, 3331 retry_enabled == 0 ? "" : " with retry", 3332 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3333 numa_support == 1 ? "enabled" : "disabled", 3334 mp_alloc_to_str(mp_alloc_type)); 3335 3336 if (retry_enabled) 3337 printf("TX retry num: %u, delay between TX retries: %uus\n", 3338 burst_tx_retry_num, burst_tx_delay_time); 3339 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3340 printf("Logical Core %u (socket %u) forwards packets on " 3341 "%d streams:", 3342 fwd_lcores_cpuids[lc_id], 3343 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3344 fwd_lcores[lc_id]->stream_nb); 3345 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3346 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3347 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3348 "P=%d/Q=%d (socket %u) ", 3349 fs->rx_port, fs->rx_queue, 3350 ports[fs->rx_port].socket_id, 3351 fs->tx_port, fs->tx_queue, 3352 ports[fs->tx_port].socket_id); 3353 print_ethaddr("peer=", 3354 &peer_eth_addrs[fs->peer_addr]); 3355 } 3356 printf("\n"); 3357 } 3358 printf("\n"); 3359 } 3360 3361 void 3362 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3363 { 3364 struct rte_ether_addr new_peer_addr; 3365 if (!rte_eth_dev_is_valid_port(port_id)) { 3366 printf("Error: Invalid port number %i\n", port_id); 3367 return; 3368 } 3369 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3370 printf("Error: Invalid ethernet address: %s\n", peer_addr); 3371 return; 3372 } 3373 peer_eth_addrs[port_id] = new_peer_addr; 3374 } 3375 3376 int 3377 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3378 { 3379 unsigned int i; 3380 unsigned int lcore_cpuid; 3381 int record_now; 3382 3383 record_now = 0; 3384 again: 3385 for (i = 0; i < nb_lc; i++) { 3386 lcore_cpuid = lcorelist[i]; 3387 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3388 printf("lcore %u not enabled\n", lcore_cpuid); 3389 return -1; 3390 } 3391 if (lcore_cpuid == rte_get_main_lcore()) { 3392 printf("lcore %u cannot be masked on for running " 3393 "packet forwarding, which is the main lcore " 3394 "and reserved for command line parsing only\n", 3395 lcore_cpuid); 3396 return -1; 3397 } 3398 if (record_now) 3399 fwd_lcores_cpuids[i] = lcore_cpuid; 3400 } 3401 if (record_now == 0) { 3402 record_now = 1; 3403 goto again; 3404 } 3405 nb_cfg_lcores = (lcoreid_t) nb_lc; 3406 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3407 printf("previous number of forwarding cores %u - changed to " 3408 "number of configured cores %u\n", 3409 (unsigned int) nb_fwd_lcores, nb_lc); 3410 nb_fwd_lcores = (lcoreid_t) nb_lc; 3411 } 3412 3413 return 0; 3414 } 3415 3416 int 3417 set_fwd_lcores_mask(uint64_t lcoremask) 3418 { 3419 unsigned int lcorelist[64]; 3420 unsigned int nb_lc; 3421 unsigned int i; 3422 3423 if (lcoremask == 0) { 3424 printf("Invalid NULL mask of cores\n"); 3425 return -1; 3426 } 3427 nb_lc = 0; 3428 for (i = 0; i < 64; i++) { 3429 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3430 continue; 3431 lcorelist[nb_lc++] = i; 3432 } 3433 return set_fwd_lcores_list(lcorelist, nb_lc); 3434 } 3435 3436 void 3437 set_fwd_lcores_number(uint16_t nb_lc) 3438 { 3439 if (test_done == 0) { 3440 printf("Please stop forwarding first\n"); 3441 return; 3442 } 3443 if (nb_lc > nb_cfg_lcores) { 3444 printf("nb fwd cores %u > %u (max. number of configured " 3445 "lcores) - ignored\n", 3446 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3447 return; 3448 } 3449 nb_fwd_lcores = (lcoreid_t) nb_lc; 3450 printf("Number of forwarding cores set to %u\n", 3451 (unsigned int) nb_fwd_lcores); 3452 } 3453 3454 void 3455 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3456 { 3457 unsigned int i; 3458 portid_t port_id; 3459 int record_now; 3460 3461 record_now = 0; 3462 again: 3463 for (i = 0; i < nb_pt; i++) { 3464 port_id = (portid_t) portlist[i]; 3465 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3466 return; 3467 if (record_now) 3468 fwd_ports_ids[i] = port_id; 3469 } 3470 if (record_now == 0) { 3471 record_now = 1; 3472 goto again; 3473 } 3474 nb_cfg_ports = (portid_t) nb_pt; 3475 if (nb_fwd_ports != (portid_t) nb_pt) { 3476 printf("previous number of forwarding ports %u - changed to " 3477 "number of configured ports %u\n", 3478 (unsigned int) nb_fwd_ports, nb_pt); 3479 nb_fwd_ports = (portid_t) nb_pt; 3480 } 3481 } 3482 3483 /** 3484 * Parse the user input and obtain the list of forwarding ports 3485 * 3486 * @param[in] list 3487 * String containing the user input. User can specify 3488 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3489 * For example, if the user wants to use all the available 3490 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3491 * If the user wants to use only the ports 1,2 then the input 3492 * is 1,2. 3493 * valid characters are '-' and ',' 3494 * @param[out] values 3495 * This array will be filled with a list of port IDs 3496 * based on the user input 3497 * Note that duplicate entries are discarded and only the first 3498 * count entries in this array are port IDs and all the rest 3499 * will contain default values 3500 * @param[in] maxsize 3501 * This parameter denotes 2 things 3502 * 1) Number of elements in the values array 3503 * 2) Maximum value of each element in the values array 3504 * @return 3505 * On success, returns total count of parsed port IDs 3506 * On failure, returns 0 3507 */ 3508 static unsigned int 3509 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3510 { 3511 unsigned int count = 0; 3512 char *end = NULL; 3513 int min, max; 3514 int value, i; 3515 unsigned int marked[maxsize]; 3516 3517 if (list == NULL || values == NULL) 3518 return 0; 3519 3520 for (i = 0; i < (int)maxsize; i++) 3521 marked[i] = 0; 3522 3523 min = INT_MAX; 3524 3525 do { 3526 /*Remove the blank spaces if any*/ 3527 while (isblank(*list)) 3528 list++; 3529 if (*list == '\0') 3530 break; 3531 errno = 0; 3532 value = strtol(list, &end, 10); 3533 if (errno || end == NULL) 3534 return 0; 3535 if (value < 0 || value >= (int)maxsize) 3536 return 0; 3537 while (isblank(*end)) 3538 end++; 3539 if (*end == '-' && min == INT_MAX) { 3540 min = value; 3541 } else if ((*end == ',') || (*end == '\0')) { 3542 max = value; 3543 if (min == INT_MAX) 3544 min = value; 3545 for (i = min; i <= max; i++) { 3546 if (count < maxsize) { 3547 if (marked[i]) 3548 continue; 3549 values[count] = i; 3550 marked[i] = 1; 3551 count++; 3552 } 3553 } 3554 min = INT_MAX; 3555 } else 3556 return 0; 3557 list = end + 1; 3558 } while (*end != '\0'); 3559 3560 return count; 3561 } 3562 3563 void 3564 parse_fwd_portlist(const char *portlist) 3565 { 3566 unsigned int portcount; 3567 unsigned int portindex[RTE_MAX_ETHPORTS]; 3568 unsigned int i, valid_port_count = 0; 3569 3570 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3571 if (!portcount) 3572 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3573 3574 /* 3575 * Here we verify the validity of the ports 3576 * and thereby calculate the total number of 3577 * valid ports 3578 */ 3579 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3580 if (rte_eth_dev_is_valid_port(portindex[i])) { 3581 portindex[valid_port_count] = portindex[i]; 3582 valid_port_count++; 3583 } 3584 } 3585 3586 set_fwd_ports_list(portindex, valid_port_count); 3587 } 3588 3589 void 3590 set_fwd_ports_mask(uint64_t portmask) 3591 { 3592 unsigned int portlist[64]; 3593 unsigned int nb_pt; 3594 unsigned int i; 3595 3596 if (portmask == 0) { 3597 printf("Invalid NULL mask of ports\n"); 3598 return; 3599 } 3600 nb_pt = 0; 3601 RTE_ETH_FOREACH_DEV(i) { 3602 if (! ((uint64_t)(1ULL << i) & portmask)) 3603 continue; 3604 portlist[nb_pt++] = i; 3605 } 3606 set_fwd_ports_list(portlist, nb_pt); 3607 } 3608 3609 void 3610 set_fwd_ports_number(uint16_t nb_pt) 3611 { 3612 if (nb_pt > nb_cfg_ports) { 3613 printf("nb fwd ports %u > %u (number of configured " 3614 "ports) - ignored\n", 3615 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3616 return; 3617 } 3618 nb_fwd_ports = (portid_t) nb_pt; 3619 printf("Number of forwarding ports set to %u\n", 3620 (unsigned int) nb_fwd_ports); 3621 } 3622 3623 int 3624 port_is_forwarding(portid_t port_id) 3625 { 3626 unsigned int i; 3627 3628 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3629 return -1; 3630 3631 for (i = 0; i < nb_fwd_ports; i++) { 3632 if (fwd_ports_ids[i] == port_id) 3633 return 1; 3634 } 3635 3636 return 0; 3637 } 3638 3639 void 3640 set_nb_pkt_per_burst(uint16_t nb) 3641 { 3642 if (nb > MAX_PKT_BURST) { 3643 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 3644 " ignored\n", 3645 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3646 return; 3647 } 3648 nb_pkt_per_burst = nb; 3649 printf("Number of packets per burst set to %u\n", 3650 (unsigned int) nb_pkt_per_burst); 3651 } 3652 3653 static const char * 3654 tx_split_get_name(enum tx_pkt_split split) 3655 { 3656 uint32_t i; 3657 3658 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3659 if (tx_split_name[i].split == split) 3660 return tx_split_name[i].name; 3661 } 3662 return NULL; 3663 } 3664 3665 void 3666 set_tx_pkt_split(const char *name) 3667 { 3668 uint32_t i; 3669 3670 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3671 if (strcmp(tx_split_name[i].name, name) == 0) { 3672 tx_pkt_split = tx_split_name[i].split; 3673 return; 3674 } 3675 } 3676 printf("unknown value: \"%s\"\n", name); 3677 } 3678 3679 int 3680 parse_fec_mode(const char *name, uint32_t *mode) 3681 { 3682 uint8_t i; 3683 3684 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3685 if (strcmp(fec_mode_name[i].name, name) == 0) { 3686 *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3687 return 0; 3688 } 3689 } 3690 return -1; 3691 } 3692 3693 void 3694 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3695 { 3696 unsigned int i, j; 3697 3698 printf("FEC capabilities:\n"); 3699 3700 for (i = 0; i < num; i++) { 3701 printf("%s : ", 3702 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3703 3704 for (j = RTE_ETH_FEC_AUTO; j < RTE_DIM(fec_mode_name); j++) { 3705 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3706 speed_fec_capa[i].capa) 3707 printf("%s ", fec_mode_name[j].name); 3708 } 3709 printf("\n"); 3710 } 3711 } 3712 3713 void 3714 show_rx_pkt_offsets(void) 3715 { 3716 uint32_t i, n; 3717 3718 n = rx_pkt_nb_offs; 3719 printf("Number of offsets: %u\n", n); 3720 if (n) { 3721 printf("Segment offsets: "); 3722 for (i = 0; i != n - 1; i++) 3723 printf("%hu,", rx_pkt_seg_offsets[i]); 3724 printf("%hu\n", rx_pkt_seg_lengths[i]); 3725 } 3726 } 3727 3728 void 3729 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 3730 { 3731 unsigned int i; 3732 3733 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 3734 printf("nb segments per RX packets=%u >= " 3735 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 3736 return; 3737 } 3738 3739 /* 3740 * No extra check here, the segment length will be checked by PMD 3741 * in the extended queue setup. 3742 */ 3743 for (i = 0; i < nb_offs; i++) { 3744 if (seg_offsets[i] >= UINT16_MAX) { 3745 printf("offset[%u]=%u > UINT16_MAX - give up\n", 3746 i, seg_offsets[i]); 3747 return; 3748 } 3749 } 3750 3751 for (i = 0; i < nb_offs; i++) 3752 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 3753 3754 rx_pkt_nb_offs = (uint8_t) nb_offs; 3755 } 3756 3757 void 3758 show_rx_pkt_segments(void) 3759 { 3760 uint32_t i, n; 3761 3762 n = rx_pkt_nb_segs; 3763 printf("Number of segments: %u\n", n); 3764 if (n) { 3765 printf("Segment sizes: "); 3766 for (i = 0; i != n - 1; i++) 3767 printf("%hu,", rx_pkt_seg_lengths[i]); 3768 printf("%hu\n", rx_pkt_seg_lengths[i]); 3769 } 3770 } 3771 3772 void 3773 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3774 { 3775 unsigned int i; 3776 3777 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 3778 printf("nb segments per RX packets=%u >= " 3779 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 3780 return; 3781 } 3782 3783 /* 3784 * No extra check here, the segment length will be checked by PMD 3785 * in the extended queue setup. 3786 */ 3787 for (i = 0; i < nb_segs; i++) { 3788 if (seg_lengths[i] >= UINT16_MAX) { 3789 printf("length[%u]=%u > UINT16_MAX - give up\n", 3790 i, seg_lengths[i]); 3791 return; 3792 } 3793 } 3794 3795 for (i = 0; i < nb_segs; i++) 3796 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3797 3798 rx_pkt_nb_segs = (uint8_t) nb_segs; 3799 } 3800 3801 void 3802 show_tx_pkt_segments(void) 3803 { 3804 uint32_t i, n; 3805 const char *split; 3806 3807 n = tx_pkt_nb_segs; 3808 split = tx_split_get_name(tx_pkt_split); 3809 3810 printf("Number of segments: %u\n", n); 3811 printf("Segment sizes: "); 3812 for (i = 0; i != n - 1; i++) 3813 printf("%hu,", tx_pkt_seg_lengths[i]); 3814 printf("%hu\n", tx_pkt_seg_lengths[i]); 3815 printf("Split packet: %s\n", split); 3816 } 3817 3818 static bool 3819 nb_segs_is_invalid(unsigned int nb_segs) 3820 { 3821 uint16_t ring_size; 3822 uint16_t queue_id; 3823 uint16_t port_id; 3824 int ret; 3825 3826 RTE_ETH_FOREACH_DEV(port_id) { 3827 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 3828 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 3829 3830 if (ret) 3831 return true; 3832 3833 if (ring_size < nb_segs) { 3834 printf("nb segments per TX packets=%u >= " 3835 "TX queue(%u) ring_size=%u - ignored\n", 3836 nb_segs, queue_id, ring_size); 3837 return true; 3838 } 3839 } 3840 } 3841 3842 return false; 3843 } 3844 3845 void 3846 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3847 { 3848 uint16_t tx_pkt_len; 3849 unsigned int i; 3850 3851 if (nb_segs_is_invalid(nb_segs)) 3852 return; 3853 3854 /* 3855 * Check that each segment length is greater or equal than 3856 * the mbuf data sise. 3857 * Check also that the total packet length is greater or equal than the 3858 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 3859 * 20 + 8). 3860 */ 3861 tx_pkt_len = 0; 3862 for (i = 0; i < nb_segs; i++) { 3863 if (seg_lengths[i] > mbuf_data_size[0]) { 3864 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 3865 i, seg_lengths[i], mbuf_data_size[0]); 3866 return; 3867 } 3868 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 3869 } 3870 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 3871 printf("total packet length=%u < %d - give up\n", 3872 (unsigned) tx_pkt_len, 3873 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 3874 return; 3875 } 3876 3877 for (i = 0; i < nb_segs; i++) 3878 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3879 3880 tx_pkt_length = tx_pkt_len; 3881 tx_pkt_nb_segs = (uint8_t) nb_segs; 3882 } 3883 3884 void 3885 show_tx_pkt_times(void) 3886 { 3887 printf("Interburst gap: %u\n", tx_pkt_times_inter); 3888 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 3889 } 3890 3891 void 3892 set_tx_pkt_times(unsigned int *tx_times) 3893 { 3894 tx_pkt_times_inter = tx_times[0]; 3895 tx_pkt_times_intra = tx_times[1]; 3896 } 3897 3898 void 3899 setup_gro(const char *onoff, portid_t port_id) 3900 { 3901 if (!rte_eth_dev_is_valid_port(port_id)) { 3902 printf("invalid port id %u\n", port_id); 3903 return; 3904 } 3905 if (test_done == 0) { 3906 printf("Before enable/disable GRO," 3907 " please stop forwarding first\n"); 3908 return; 3909 } 3910 if (strcmp(onoff, "on") == 0) { 3911 if (gro_ports[port_id].enable != 0) { 3912 printf("Port %u has enabled GRO. Please" 3913 " disable GRO first\n", port_id); 3914 return; 3915 } 3916 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3917 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 3918 gro_ports[port_id].param.max_flow_num = 3919 GRO_DEFAULT_FLOW_NUM; 3920 gro_ports[port_id].param.max_item_per_flow = 3921 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 3922 } 3923 gro_ports[port_id].enable = 1; 3924 } else { 3925 if (gro_ports[port_id].enable == 0) { 3926 printf("Port %u has disabled GRO\n", port_id); 3927 return; 3928 } 3929 gro_ports[port_id].enable = 0; 3930 } 3931 } 3932 3933 void 3934 setup_gro_flush_cycles(uint8_t cycles) 3935 { 3936 if (test_done == 0) { 3937 printf("Before change flush interval for GRO," 3938 " please stop forwarding first.\n"); 3939 return; 3940 } 3941 3942 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 3943 GRO_DEFAULT_FLUSH_CYCLES) { 3944 printf("The flushing cycle be in the range" 3945 " of 1 to %u. Revert to the default" 3946 " value %u.\n", 3947 GRO_MAX_FLUSH_CYCLES, 3948 GRO_DEFAULT_FLUSH_CYCLES); 3949 cycles = GRO_DEFAULT_FLUSH_CYCLES; 3950 } 3951 3952 gro_flush_cycles = cycles; 3953 } 3954 3955 void 3956 show_gro(portid_t port_id) 3957 { 3958 struct rte_gro_param *param; 3959 uint32_t max_pkts_num; 3960 3961 param = &gro_ports[port_id].param; 3962 3963 if (!rte_eth_dev_is_valid_port(port_id)) { 3964 printf("Invalid port id %u.\n", port_id); 3965 return; 3966 } 3967 if (gro_ports[port_id].enable) { 3968 printf("GRO type: TCP/IPv4\n"); 3969 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3970 max_pkts_num = param->max_flow_num * 3971 param->max_item_per_flow; 3972 } else 3973 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 3974 printf("Max number of packets to perform GRO: %u\n", 3975 max_pkts_num); 3976 printf("Flushing cycles: %u\n", gro_flush_cycles); 3977 } else 3978 printf("Port %u doesn't enable GRO.\n", port_id); 3979 } 3980 3981 void 3982 setup_gso(const char *mode, portid_t port_id) 3983 { 3984 if (!rte_eth_dev_is_valid_port(port_id)) { 3985 printf("invalid port id %u\n", port_id); 3986 return; 3987 } 3988 if (strcmp(mode, "on") == 0) { 3989 if (test_done == 0) { 3990 printf("before enabling GSO," 3991 " please stop forwarding first\n"); 3992 return; 3993 } 3994 gso_ports[port_id].enable = 1; 3995 } else if (strcmp(mode, "off") == 0) { 3996 if (test_done == 0) { 3997 printf("before disabling GSO," 3998 " please stop forwarding first\n"); 3999 return; 4000 } 4001 gso_ports[port_id].enable = 0; 4002 } 4003 } 4004 4005 char* 4006 list_pkt_forwarding_modes(void) 4007 { 4008 static char fwd_modes[128] = ""; 4009 const char *separator = "|"; 4010 struct fwd_engine *fwd_eng; 4011 unsigned i = 0; 4012 4013 if (strlen (fwd_modes) == 0) { 4014 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4015 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4016 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4017 strncat(fwd_modes, separator, 4018 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4019 } 4020 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4021 } 4022 4023 return fwd_modes; 4024 } 4025 4026 char* 4027 list_pkt_forwarding_retry_modes(void) 4028 { 4029 static char fwd_modes[128] = ""; 4030 const char *separator = "|"; 4031 struct fwd_engine *fwd_eng; 4032 unsigned i = 0; 4033 4034 if (strlen(fwd_modes) == 0) { 4035 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4036 if (fwd_eng == &rx_only_engine) 4037 continue; 4038 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4039 sizeof(fwd_modes) - 4040 strlen(fwd_modes) - 1); 4041 strncat(fwd_modes, separator, 4042 sizeof(fwd_modes) - 4043 strlen(fwd_modes) - 1); 4044 } 4045 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4046 } 4047 4048 return fwd_modes; 4049 } 4050 4051 void 4052 set_pkt_forwarding_mode(const char *fwd_mode_name) 4053 { 4054 struct fwd_engine *fwd_eng; 4055 unsigned i; 4056 4057 i = 0; 4058 while ((fwd_eng = fwd_engines[i]) != NULL) { 4059 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 4060 printf("Set %s packet forwarding mode%s\n", 4061 fwd_mode_name, 4062 retry_enabled == 0 ? "" : " with retry"); 4063 cur_fwd_eng = fwd_eng; 4064 return; 4065 } 4066 i++; 4067 } 4068 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 4069 } 4070 4071 void 4072 add_rx_dump_callbacks(portid_t portid) 4073 { 4074 struct rte_eth_dev_info dev_info; 4075 uint16_t queue; 4076 int ret; 4077 4078 if (port_id_is_invalid(portid, ENABLED_WARN)) 4079 return; 4080 4081 ret = eth_dev_info_get_print_err(portid, &dev_info); 4082 if (ret != 0) 4083 return; 4084 4085 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4086 if (!ports[portid].rx_dump_cb[queue]) 4087 ports[portid].rx_dump_cb[queue] = 4088 rte_eth_add_rx_callback(portid, queue, 4089 dump_rx_pkts, NULL); 4090 } 4091 4092 void 4093 add_tx_dump_callbacks(portid_t portid) 4094 { 4095 struct rte_eth_dev_info dev_info; 4096 uint16_t queue; 4097 int ret; 4098 4099 if (port_id_is_invalid(portid, ENABLED_WARN)) 4100 return; 4101 4102 ret = eth_dev_info_get_print_err(portid, &dev_info); 4103 if (ret != 0) 4104 return; 4105 4106 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4107 if (!ports[portid].tx_dump_cb[queue]) 4108 ports[portid].tx_dump_cb[queue] = 4109 rte_eth_add_tx_callback(portid, queue, 4110 dump_tx_pkts, NULL); 4111 } 4112 4113 void 4114 remove_rx_dump_callbacks(portid_t portid) 4115 { 4116 struct rte_eth_dev_info dev_info; 4117 uint16_t queue; 4118 int ret; 4119 4120 if (port_id_is_invalid(portid, ENABLED_WARN)) 4121 return; 4122 4123 ret = eth_dev_info_get_print_err(portid, &dev_info); 4124 if (ret != 0) 4125 return; 4126 4127 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4128 if (ports[portid].rx_dump_cb[queue]) { 4129 rte_eth_remove_rx_callback(portid, queue, 4130 ports[portid].rx_dump_cb[queue]); 4131 ports[portid].rx_dump_cb[queue] = NULL; 4132 } 4133 } 4134 4135 void 4136 remove_tx_dump_callbacks(portid_t portid) 4137 { 4138 struct rte_eth_dev_info dev_info; 4139 uint16_t queue; 4140 int ret; 4141 4142 if (port_id_is_invalid(portid, ENABLED_WARN)) 4143 return; 4144 4145 ret = eth_dev_info_get_print_err(portid, &dev_info); 4146 if (ret != 0) 4147 return; 4148 4149 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4150 if (ports[portid].tx_dump_cb[queue]) { 4151 rte_eth_remove_tx_callback(portid, queue, 4152 ports[portid].tx_dump_cb[queue]); 4153 ports[portid].tx_dump_cb[queue] = NULL; 4154 } 4155 } 4156 4157 void 4158 configure_rxtx_dump_callbacks(uint16_t verbose) 4159 { 4160 portid_t portid; 4161 4162 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4163 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4164 return; 4165 #endif 4166 4167 RTE_ETH_FOREACH_DEV(portid) 4168 { 4169 if (verbose == 1 || verbose > 2) 4170 add_rx_dump_callbacks(portid); 4171 else 4172 remove_rx_dump_callbacks(portid); 4173 if (verbose >= 2) 4174 add_tx_dump_callbacks(portid); 4175 else 4176 remove_tx_dump_callbacks(portid); 4177 } 4178 } 4179 4180 void 4181 set_verbose_level(uint16_t vb_level) 4182 { 4183 printf("Change verbose level from %u to %u\n", 4184 (unsigned int) verbose_level, (unsigned int) vb_level); 4185 verbose_level = vb_level; 4186 configure_rxtx_dump_callbacks(verbose_level); 4187 } 4188 4189 void 4190 vlan_extend_set(portid_t port_id, int on) 4191 { 4192 int diag; 4193 int vlan_offload; 4194 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4195 4196 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4197 return; 4198 4199 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4200 4201 if (on) { 4202 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 4203 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 4204 } else { 4205 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 4206 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 4207 } 4208 4209 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4210 if (diag < 0) { 4211 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 4212 "diag=%d\n", port_id, on, diag); 4213 return; 4214 } 4215 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4216 } 4217 4218 void 4219 rx_vlan_strip_set(portid_t port_id, int on) 4220 { 4221 int diag; 4222 int vlan_offload; 4223 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4224 4225 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4226 return; 4227 4228 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4229 4230 if (on) { 4231 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 4232 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 4233 } else { 4234 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 4235 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 4236 } 4237 4238 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4239 if (diag < 0) { 4240 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 4241 "diag=%d\n", port_id, on, diag); 4242 return; 4243 } 4244 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4245 } 4246 4247 void 4248 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 4249 { 4250 int diag; 4251 4252 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4253 return; 4254 4255 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 4256 if (diag < 0) 4257 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 4258 "diag=%d\n", port_id, queue_id, on, diag); 4259 } 4260 4261 void 4262 rx_vlan_filter_set(portid_t port_id, int on) 4263 { 4264 int diag; 4265 int vlan_offload; 4266 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4267 4268 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4269 return; 4270 4271 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4272 4273 if (on) { 4274 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 4275 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 4276 } else { 4277 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 4278 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 4279 } 4280 4281 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4282 if (diag < 0) { 4283 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 4284 "diag=%d\n", port_id, on, diag); 4285 return; 4286 } 4287 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4288 } 4289 4290 void 4291 rx_vlan_qinq_strip_set(portid_t port_id, int on) 4292 { 4293 int diag; 4294 int vlan_offload; 4295 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4296 4297 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4298 return; 4299 4300 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4301 4302 if (on) { 4303 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 4304 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 4305 } else { 4306 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 4307 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 4308 } 4309 4310 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4311 if (diag < 0) { 4312 printf("%s(port_pi=%d, on=%d) failed " 4313 "diag=%d\n", __func__, port_id, on, diag); 4314 return; 4315 } 4316 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4317 } 4318 4319 int 4320 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 4321 { 4322 int diag; 4323 4324 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4325 return 1; 4326 if (vlan_id_is_invalid(vlan_id)) 4327 return 1; 4328 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 4329 if (diag == 0) 4330 return 0; 4331 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 4332 "diag=%d\n", 4333 port_id, vlan_id, on, diag); 4334 return -1; 4335 } 4336 4337 void 4338 rx_vlan_all_filter_set(portid_t port_id, int on) 4339 { 4340 uint16_t vlan_id; 4341 4342 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4343 return; 4344 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 4345 if (rx_vft_set(port_id, vlan_id, on)) 4346 break; 4347 } 4348 } 4349 4350 void 4351 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 4352 { 4353 int diag; 4354 4355 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4356 return; 4357 4358 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 4359 if (diag == 0) 4360 return; 4361 4362 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 4363 "diag=%d\n", 4364 port_id, vlan_type, tp_id, diag); 4365 } 4366 4367 void 4368 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 4369 { 4370 struct rte_eth_dev_info dev_info; 4371 int ret; 4372 4373 if (vlan_id_is_invalid(vlan_id)) 4374 return; 4375 4376 if (ports[port_id].dev_conf.txmode.offloads & 4377 DEV_TX_OFFLOAD_QINQ_INSERT) { 4378 printf("Error, as QinQ has been enabled.\n"); 4379 return; 4380 } 4381 4382 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4383 if (ret != 0) 4384 return; 4385 4386 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 4387 printf("Error: vlan insert is not supported by port %d\n", 4388 port_id); 4389 return; 4390 } 4391 4392 tx_vlan_reset(port_id); 4393 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 4394 ports[port_id].tx_vlan_id = vlan_id; 4395 } 4396 4397 void 4398 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 4399 { 4400 struct rte_eth_dev_info dev_info; 4401 int ret; 4402 4403 if (vlan_id_is_invalid(vlan_id)) 4404 return; 4405 if (vlan_id_is_invalid(vlan_id_outer)) 4406 return; 4407 4408 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4409 if (ret != 0) 4410 return; 4411 4412 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 4413 printf("Error: qinq insert not supported by port %d\n", 4414 port_id); 4415 return; 4416 } 4417 4418 tx_vlan_reset(port_id); 4419 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 4420 DEV_TX_OFFLOAD_QINQ_INSERT); 4421 ports[port_id].tx_vlan_id = vlan_id; 4422 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 4423 } 4424 4425 void 4426 tx_vlan_reset(portid_t port_id) 4427 { 4428 ports[port_id].dev_conf.txmode.offloads &= 4429 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 4430 DEV_TX_OFFLOAD_QINQ_INSERT); 4431 ports[port_id].tx_vlan_id = 0; 4432 ports[port_id].tx_vlan_id_outer = 0; 4433 } 4434 4435 void 4436 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 4437 { 4438 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4439 return; 4440 4441 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 4442 } 4443 4444 void 4445 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 4446 { 4447 int ret; 4448 4449 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4450 return; 4451 4452 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 4453 return; 4454 4455 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 4456 printf("map_value not in required range 0..%d\n", 4457 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 4458 return; 4459 } 4460 4461 if (!is_rx) { /* tx */ 4462 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 4463 map_value); 4464 if (ret) { 4465 printf("failed to set tx queue stats mapping.\n"); 4466 return; 4467 } 4468 } else { /* rx */ 4469 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 4470 map_value); 4471 if (ret) { 4472 printf("failed to set rx queue stats mapping.\n"); 4473 return; 4474 } 4475 } 4476 } 4477 4478 void 4479 set_xstats_hide_zero(uint8_t on_off) 4480 { 4481 xstats_hide_zero = on_off; 4482 } 4483 4484 void 4485 set_record_core_cycles(uint8_t on_off) 4486 { 4487 record_core_cycles = on_off; 4488 } 4489 4490 void 4491 set_record_burst_stats(uint8_t on_off) 4492 { 4493 record_burst_stats = on_off; 4494 } 4495 4496 static inline void 4497 print_fdir_mask(struct rte_eth_fdir_masks *mask) 4498 { 4499 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 4500 4501 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4502 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 4503 " tunnel_id: 0x%08x", 4504 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 4505 rte_be_to_cpu_32(mask->tunnel_id_mask)); 4506 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 4507 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 4508 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 4509 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 4510 4511 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 4512 rte_be_to_cpu_16(mask->src_port_mask), 4513 rte_be_to_cpu_16(mask->dst_port_mask)); 4514 4515 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4516 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 4517 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 4518 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 4519 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 4520 4521 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4522 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 4523 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 4524 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 4525 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 4526 } 4527 4528 printf("\n"); 4529 } 4530 4531 static inline void 4532 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4533 { 4534 struct rte_eth_flex_payload_cfg *cfg; 4535 uint32_t i, j; 4536 4537 for (i = 0; i < flex_conf->nb_payloads; i++) { 4538 cfg = &flex_conf->flex_set[i]; 4539 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 4540 printf("\n RAW: "); 4541 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 4542 printf("\n L2_PAYLOAD: "); 4543 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 4544 printf("\n L3_PAYLOAD: "); 4545 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 4546 printf("\n L4_PAYLOAD: "); 4547 else 4548 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 4549 for (j = 0; j < num; j++) 4550 printf(" %-5u", cfg->src_offset[j]); 4551 } 4552 printf("\n"); 4553 } 4554 4555 static char * 4556 flowtype_to_str(uint16_t flow_type) 4557 { 4558 struct flow_type_info { 4559 char str[32]; 4560 uint16_t ftype; 4561 }; 4562 4563 uint8_t i; 4564 static struct flow_type_info flowtype_str_table[] = { 4565 {"raw", RTE_ETH_FLOW_RAW}, 4566 {"ipv4", RTE_ETH_FLOW_IPV4}, 4567 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4568 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4569 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4570 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4571 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4572 {"ipv6", RTE_ETH_FLOW_IPV6}, 4573 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4574 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4575 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4576 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4577 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4578 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4579 {"port", RTE_ETH_FLOW_PORT}, 4580 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4581 {"geneve", RTE_ETH_FLOW_GENEVE}, 4582 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4583 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4584 }; 4585 4586 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4587 if (flowtype_str_table[i].ftype == flow_type) 4588 return flowtype_str_table[i].str; 4589 } 4590 4591 return NULL; 4592 } 4593 4594 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 4595 4596 static inline void 4597 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4598 { 4599 struct rte_eth_fdir_flex_mask *mask; 4600 uint32_t i, j; 4601 char *p; 4602 4603 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4604 mask = &flex_conf->flex_mask[i]; 4605 p = flowtype_to_str(mask->flow_type); 4606 printf("\n %s:\t", p ? p : "unknown"); 4607 for (j = 0; j < num; j++) 4608 printf(" %02x", mask->mask[j]); 4609 } 4610 printf("\n"); 4611 } 4612 4613 static inline void 4614 print_fdir_flow_type(uint32_t flow_types_mask) 4615 { 4616 int i; 4617 char *p; 4618 4619 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4620 if (!(flow_types_mask & (1 << i))) 4621 continue; 4622 p = flowtype_to_str(i); 4623 if (p) 4624 printf(" %s", p); 4625 else 4626 printf(" unknown"); 4627 } 4628 printf("\n"); 4629 } 4630 4631 static int 4632 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4633 struct rte_eth_fdir_stats *fdir_stat) 4634 { 4635 int ret = -ENOTSUP; 4636 4637 #ifdef RTE_NET_I40E 4638 if (ret == -ENOTSUP) { 4639 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4640 if (!ret) 4641 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4642 } 4643 #endif 4644 #ifdef RTE_NET_IXGBE 4645 if (ret == -ENOTSUP) { 4646 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4647 if (!ret) 4648 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4649 } 4650 #endif 4651 switch (ret) { 4652 case 0: 4653 break; 4654 case -ENOTSUP: 4655 printf("\n FDIR is not supported on port %-2d\n", 4656 port_id); 4657 break; 4658 default: 4659 printf("programming error: (%s)\n", strerror(-ret)); 4660 break; 4661 } 4662 return ret; 4663 } 4664 4665 void 4666 fdir_get_infos(portid_t port_id) 4667 { 4668 struct rte_eth_fdir_stats fdir_stat; 4669 struct rte_eth_fdir_info fdir_info; 4670 4671 static const char *fdir_stats_border = "########################"; 4672 4673 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4674 return; 4675 4676 memset(&fdir_info, 0, sizeof(fdir_info)); 4677 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4678 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4679 return; 4680 4681 printf("\n %s FDIR infos for port %-2d %s\n", 4682 fdir_stats_border, port_id, fdir_stats_border); 4683 printf(" MODE: "); 4684 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4685 printf(" PERFECT\n"); 4686 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4687 printf(" PERFECT-MAC-VLAN\n"); 4688 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4689 printf(" PERFECT-TUNNEL\n"); 4690 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4691 printf(" SIGNATURE\n"); 4692 else 4693 printf(" DISABLE\n"); 4694 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4695 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 4696 printf(" SUPPORTED FLOW TYPE: "); 4697 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 4698 } 4699 printf(" FLEX PAYLOAD INFO:\n"); 4700 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 4701 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 4702 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 4703 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 4704 fdir_info.flex_payload_unit, 4705 fdir_info.max_flex_payload_segment_num, 4706 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 4707 printf(" MASK: "); 4708 print_fdir_mask(&fdir_info.mask); 4709 if (fdir_info.flex_conf.nb_payloads > 0) { 4710 printf(" FLEX PAYLOAD SRC OFFSET:"); 4711 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4712 } 4713 if (fdir_info.flex_conf.nb_flexmasks > 0) { 4714 printf(" FLEX MASK CFG:"); 4715 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4716 } 4717 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 4718 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 4719 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 4720 fdir_info.guarant_spc, fdir_info.best_spc); 4721 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 4722 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 4723 " add: %-10"PRIu64" remove: %"PRIu64"\n" 4724 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 4725 fdir_stat.collision, fdir_stat.free, 4726 fdir_stat.maxhash, fdir_stat.maxlen, 4727 fdir_stat.add, fdir_stat.remove, 4728 fdir_stat.f_add, fdir_stat.f_remove); 4729 printf(" %s############################%s\n", 4730 fdir_stats_border, fdir_stats_border); 4731 } 4732 4733 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 4734 4735 void 4736 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 4737 { 4738 struct rte_port *port; 4739 struct rte_eth_fdir_flex_conf *flex_conf; 4740 int i, idx = 0; 4741 4742 port = &ports[port_id]; 4743 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4744 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 4745 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 4746 idx = i; 4747 break; 4748 } 4749 } 4750 if (i >= RTE_ETH_FLOW_MAX) { 4751 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 4752 idx = flex_conf->nb_flexmasks; 4753 flex_conf->nb_flexmasks++; 4754 } else { 4755 printf("The flex mask table is full. Can not set flex" 4756 " mask for flow_type(%u).", cfg->flow_type); 4757 return; 4758 } 4759 } 4760 rte_memcpy(&flex_conf->flex_mask[idx], 4761 cfg, 4762 sizeof(struct rte_eth_fdir_flex_mask)); 4763 } 4764 4765 void 4766 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 4767 { 4768 struct rte_port *port; 4769 struct rte_eth_fdir_flex_conf *flex_conf; 4770 int i, idx = 0; 4771 4772 port = &ports[port_id]; 4773 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4774 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 4775 if (cfg->type == flex_conf->flex_set[i].type) { 4776 idx = i; 4777 break; 4778 } 4779 } 4780 if (i >= RTE_ETH_PAYLOAD_MAX) { 4781 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 4782 idx = flex_conf->nb_payloads; 4783 flex_conf->nb_payloads++; 4784 } else { 4785 printf("The flex payload table is full. Can not set" 4786 " flex payload for type(%u).", cfg->type); 4787 return; 4788 } 4789 } 4790 rte_memcpy(&flex_conf->flex_set[idx], 4791 cfg, 4792 sizeof(struct rte_eth_flex_payload_cfg)); 4793 4794 } 4795 4796 void 4797 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 4798 { 4799 #ifdef RTE_NET_IXGBE 4800 int diag; 4801 4802 if (is_rx) 4803 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 4804 else 4805 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 4806 4807 if (diag == 0) 4808 return; 4809 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 4810 is_rx ? "rx" : "tx", port_id, diag); 4811 return; 4812 #endif 4813 printf("VF %s setting not supported for port %d\n", 4814 is_rx ? "Rx" : "Tx", port_id); 4815 RTE_SET_USED(vf); 4816 RTE_SET_USED(on); 4817 } 4818 4819 int 4820 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 4821 { 4822 int diag; 4823 struct rte_eth_link link; 4824 int ret; 4825 4826 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4827 return 1; 4828 ret = eth_link_get_nowait_print_err(port_id, &link); 4829 if (ret < 0) 4830 return 1; 4831 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN && 4832 rate > link.link_speed) { 4833 printf("Invalid rate value:%u bigger than link speed: %u\n", 4834 rate, link.link_speed); 4835 return 1; 4836 } 4837 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 4838 if (diag == 0) 4839 return diag; 4840 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 4841 port_id, diag); 4842 return diag; 4843 } 4844 4845 int 4846 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 4847 { 4848 int diag = -ENOTSUP; 4849 4850 RTE_SET_USED(vf); 4851 RTE_SET_USED(rate); 4852 RTE_SET_USED(q_msk); 4853 4854 #ifdef RTE_NET_IXGBE 4855 if (diag == -ENOTSUP) 4856 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 4857 q_msk); 4858 #endif 4859 #ifdef RTE_NET_BNXT 4860 if (diag == -ENOTSUP) 4861 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 4862 #endif 4863 if (diag == 0) 4864 return diag; 4865 4866 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 4867 port_id, diag); 4868 return diag; 4869 } 4870 4871 /* 4872 * Functions to manage the set of filtered Multicast MAC addresses. 4873 * 4874 * A pool of filtered multicast MAC addresses is associated with each port. 4875 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 4876 * The address of the pool and the number of valid multicast MAC addresses 4877 * recorded in the pool are stored in the fields "mc_addr_pool" and 4878 * "mc_addr_nb" of the "rte_port" data structure. 4879 * 4880 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 4881 * to be supplied a contiguous array of multicast MAC addresses. 4882 * To comply with this constraint, the set of multicast addresses recorded 4883 * into the pool are systematically compacted at the beginning of the pool. 4884 * Hence, when a multicast address is removed from the pool, all following 4885 * addresses, if any, are copied back to keep the set contiguous. 4886 */ 4887 #define MCAST_POOL_INC 32 4888 4889 static int 4890 mcast_addr_pool_extend(struct rte_port *port) 4891 { 4892 struct rte_ether_addr *mc_pool; 4893 size_t mc_pool_size; 4894 4895 /* 4896 * If a free entry is available at the end of the pool, just 4897 * increment the number of recorded multicast addresses. 4898 */ 4899 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 4900 port->mc_addr_nb++; 4901 return 0; 4902 } 4903 4904 /* 4905 * [re]allocate a pool with MCAST_POOL_INC more entries. 4906 * The previous test guarantees that port->mc_addr_nb is a multiple 4907 * of MCAST_POOL_INC. 4908 */ 4909 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 4910 MCAST_POOL_INC); 4911 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 4912 mc_pool_size); 4913 if (mc_pool == NULL) { 4914 printf("allocation of pool of %u multicast addresses failed\n", 4915 port->mc_addr_nb + MCAST_POOL_INC); 4916 return -ENOMEM; 4917 } 4918 4919 port->mc_addr_pool = mc_pool; 4920 port->mc_addr_nb++; 4921 return 0; 4922 4923 } 4924 4925 static void 4926 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 4927 { 4928 if (mcast_addr_pool_extend(port) != 0) 4929 return; 4930 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 4931 } 4932 4933 static void 4934 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 4935 { 4936 port->mc_addr_nb--; 4937 if (addr_idx == port->mc_addr_nb) { 4938 /* No need to recompact the set of multicast addressses. */ 4939 if (port->mc_addr_nb == 0) { 4940 /* free the pool of multicast addresses. */ 4941 free(port->mc_addr_pool); 4942 port->mc_addr_pool = NULL; 4943 } 4944 return; 4945 } 4946 memmove(&port->mc_addr_pool[addr_idx], 4947 &port->mc_addr_pool[addr_idx + 1], 4948 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 4949 } 4950 4951 static int 4952 eth_port_multicast_addr_list_set(portid_t port_id) 4953 { 4954 struct rte_port *port; 4955 int diag; 4956 4957 port = &ports[port_id]; 4958 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 4959 port->mc_addr_nb); 4960 if (diag < 0) 4961 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 4962 port_id, port->mc_addr_nb, diag); 4963 4964 return diag; 4965 } 4966 4967 void 4968 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 4969 { 4970 struct rte_port *port; 4971 uint32_t i; 4972 4973 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4974 return; 4975 4976 port = &ports[port_id]; 4977 4978 /* 4979 * Check that the added multicast MAC address is not already recorded 4980 * in the pool of multicast addresses. 4981 */ 4982 for (i = 0; i < port->mc_addr_nb; i++) { 4983 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 4984 printf("multicast address already filtered by port\n"); 4985 return; 4986 } 4987 } 4988 4989 mcast_addr_pool_append(port, mc_addr); 4990 if (eth_port_multicast_addr_list_set(port_id) < 0) 4991 /* Rollback on failure, remove the address from the pool */ 4992 mcast_addr_pool_remove(port, i); 4993 } 4994 4995 void 4996 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 4997 { 4998 struct rte_port *port; 4999 uint32_t i; 5000 5001 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5002 return; 5003 5004 port = &ports[port_id]; 5005 5006 /* 5007 * Search the pool of multicast MAC addresses for the removed address. 5008 */ 5009 for (i = 0; i < port->mc_addr_nb; i++) { 5010 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 5011 break; 5012 } 5013 if (i == port->mc_addr_nb) { 5014 printf("multicast address not filtered by port %d\n", port_id); 5015 return; 5016 } 5017 5018 mcast_addr_pool_remove(port, i); 5019 if (eth_port_multicast_addr_list_set(port_id) < 0) 5020 /* Rollback on failure, add the address back into the pool */ 5021 mcast_addr_pool_append(port, mc_addr); 5022 } 5023 5024 void 5025 port_dcb_info_display(portid_t port_id) 5026 { 5027 struct rte_eth_dcb_info dcb_info; 5028 uint16_t i; 5029 int ret; 5030 static const char *border = "================"; 5031 5032 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5033 return; 5034 5035 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 5036 if (ret) { 5037 printf("\n Failed to get dcb infos on port %-2d\n", 5038 port_id); 5039 return; 5040 } 5041 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 5042 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 5043 printf("\n TC : "); 5044 for (i = 0; i < dcb_info.nb_tcs; i++) 5045 printf("\t%4d", i); 5046 printf("\n Priority : "); 5047 for (i = 0; i < dcb_info.nb_tcs; i++) 5048 printf("\t%4d", dcb_info.prio_tc[i]); 5049 printf("\n BW percent :"); 5050 for (i = 0; i < dcb_info.nb_tcs; i++) 5051 printf("\t%4d%%", dcb_info.tc_bws[i]); 5052 printf("\n RXQ base : "); 5053 for (i = 0; i < dcb_info.nb_tcs; i++) 5054 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 5055 printf("\n RXQ number :"); 5056 for (i = 0; i < dcb_info.nb_tcs; i++) 5057 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 5058 printf("\n TXQ base : "); 5059 for (i = 0; i < dcb_info.nb_tcs; i++) 5060 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 5061 printf("\n TXQ number :"); 5062 for (i = 0; i < dcb_info.nb_tcs; i++) 5063 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 5064 printf("\n"); 5065 } 5066 5067 uint8_t * 5068 open_file(const char *file_path, uint32_t *size) 5069 { 5070 int fd = open(file_path, O_RDONLY); 5071 off_t pkg_size; 5072 uint8_t *buf = NULL; 5073 int ret = 0; 5074 struct stat st_buf; 5075 5076 if (size) 5077 *size = 0; 5078 5079 if (fd == -1) { 5080 printf("%s: Failed to open %s\n", __func__, file_path); 5081 return buf; 5082 } 5083 5084 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 5085 close(fd); 5086 printf("%s: File operations failed\n", __func__); 5087 return buf; 5088 } 5089 5090 pkg_size = st_buf.st_size; 5091 if (pkg_size < 0) { 5092 close(fd); 5093 printf("%s: File operations failed\n", __func__); 5094 return buf; 5095 } 5096 5097 buf = (uint8_t *)malloc(pkg_size); 5098 if (!buf) { 5099 close(fd); 5100 printf("%s: Failed to malloc memory\n", __func__); 5101 return buf; 5102 } 5103 5104 ret = read(fd, buf, pkg_size); 5105 if (ret < 0) { 5106 close(fd); 5107 printf("%s: File read operation failed\n", __func__); 5108 close_file(buf); 5109 return NULL; 5110 } 5111 5112 if (size) 5113 *size = pkg_size; 5114 5115 close(fd); 5116 5117 return buf; 5118 } 5119 5120 int 5121 save_file(const char *file_path, uint8_t *buf, uint32_t size) 5122 { 5123 FILE *fh = fopen(file_path, "wb"); 5124 5125 if (fh == NULL) { 5126 printf("%s: Failed to open %s\n", __func__, file_path); 5127 return -1; 5128 } 5129 5130 if (fwrite(buf, 1, size, fh) != size) { 5131 fclose(fh); 5132 printf("%s: File write operation failed\n", __func__); 5133 return -1; 5134 } 5135 5136 fclose(fh); 5137 5138 return 0; 5139 } 5140 5141 int 5142 close_file(uint8_t *buf) 5143 { 5144 if (buf) { 5145 free((void *)buf); 5146 return 0; 5147 } 5148 5149 return -1; 5150 } 5151 5152 void 5153 port_queue_region_info_display(portid_t port_id, void *buf) 5154 { 5155 #ifdef RTE_NET_I40E 5156 uint16_t i, j; 5157 struct rte_pmd_i40e_queue_regions *info = 5158 (struct rte_pmd_i40e_queue_regions *)buf; 5159 static const char *queue_region_info_stats_border = "-------"; 5160 5161 if (!info->queue_region_number) 5162 printf("there is no region has been set before"); 5163 5164 printf("\n %s All queue region info for port=%2d %s", 5165 queue_region_info_stats_border, port_id, 5166 queue_region_info_stats_border); 5167 printf("\n queue_region_number: %-14u \n", 5168 info->queue_region_number); 5169 5170 for (i = 0; i < info->queue_region_number; i++) { 5171 printf("\n region_id: %-14u queue_number: %-14u " 5172 "queue_start_index: %-14u \n", 5173 info->region[i].region_id, 5174 info->region[i].queue_num, 5175 info->region[i].queue_start_index); 5176 5177 printf(" user_priority_num is %-14u :", 5178 info->region[i].user_priority_num); 5179 for (j = 0; j < info->region[i].user_priority_num; j++) 5180 printf(" %-14u ", info->region[i].user_priority[j]); 5181 5182 printf("\n flowtype_num is %-14u :", 5183 info->region[i].flowtype_num); 5184 for (j = 0; j < info->region[i].flowtype_num; j++) 5185 printf(" %-14u ", info->region[i].hw_flowtype[j]); 5186 } 5187 #else 5188 RTE_SET_USED(port_id); 5189 RTE_SET_USED(buf); 5190 #endif 5191 5192 printf("\n\n"); 5193 } 5194 5195 void 5196 show_macs(portid_t port_id) 5197 { 5198 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5199 struct rte_eth_dev_info dev_info; 5200 struct rte_ether_addr *addr; 5201 uint32_t i, num_macs = 0; 5202 struct rte_eth_dev *dev; 5203 5204 dev = &rte_eth_devices[port_id]; 5205 5206 rte_eth_dev_info_get(port_id, &dev_info); 5207 5208 for (i = 0; i < dev_info.max_mac_addrs; i++) { 5209 addr = &dev->data->mac_addrs[i]; 5210 5211 /* skip zero address */ 5212 if (rte_is_zero_ether_addr(addr)) 5213 continue; 5214 5215 num_macs++; 5216 } 5217 5218 printf("Number of MAC address added: %d\n", num_macs); 5219 5220 for (i = 0; i < dev_info.max_mac_addrs; i++) { 5221 addr = &dev->data->mac_addrs[i]; 5222 5223 /* skip zero address */ 5224 if (rte_is_zero_ether_addr(addr)) 5225 continue; 5226 5227 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5228 printf(" %s\n", buf); 5229 } 5230 } 5231 5232 void 5233 show_mcast_macs(portid_t port_id) 5234 { 5235 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5236 struct rte_ether_addr *addr; 5237 struct rte_port *port; 5238 uint32_t i; 5239 5240 port = &ports[port_id]; 5241 5242 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 5243 5244 for (i = 0; i < port->mc_addr_nb; i++) { 5245 addr = &port->mc_addr_pool[i]; 5246 5247 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5248 printf(" %s\n", buf); 5249 } 5250 } 5251