1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <rte_hexdump.h> 53 54 #include "testpmd.h" 55 56 #define ETHDEV_FWVERS_LEN 32 57 58 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 59 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 60 #else 61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 62 #endif 63 64 #define NS_PER_SEC 1E9 65 66 static char *flowtype_to_str(uint16_t flow_type); 67 68 static const struct { 69 enum tx_pkt_split split; 70 const char *name; 71 } tx_split_name[] = { 72 { 73 .split = TX_PKT_SPLIT_OFF, 74 .name = "off", 75 }, 76 { 77 .split = TX_PKT_SPLIT_ON, 78 .name = "on", 79 }, 80 { 81 .split = TX_PKT_SPLIT_RND, 82 .name = "rand", 83 }, 84 }; 85 86 const struct rss_type_info rss_type_table[] = { 87 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP | 88 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD | 89 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP | 90 ETH_RSS_GTPU}, 91 { "none", 0 }, 92 { "eth", ETH_RSS_ETH }, 93 { "l2-src-only", ETH_RSS_L2_SRC_ONLY }, 94 { "l2-dst-only", ETH_RSS_L2_DST_ONLY }, 95 { "vlan", ETH_RSS_VLAN }, 96 { "s-vlan", ETH_RSS_S_VLAN }, 97 { "c-vlan", ETH_RSS_C_VLAN }, 98 { "ipv4", ETH_RSS_IPV4 }, 99 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 100 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 101 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 102 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 103 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 104 { "ipv6", ETH_RSS_IPV6 }, 105 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 106 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 107 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 108 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 109 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 110 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 111 { "ipv6-ex", ETH_RSS_IPV6_EX }, 112 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 113 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 114 { "port", ETH_RSS_PORT }, 115 { "vxlan", ETH_RSS_VXLAN }, 116 { "geneve", ETH_RSS_GENEVE }, 117 { "nvgre", ETH_RSS_NVGRE }, 118 { "ip", ETH_RSS_IP }, 119 { "udp", ETH_RSS_UDP }, 120 { "tcp", ETH_RSS_TCP }, 121 { "sctp", ETH_RSS_SCTP }, 122 { "tunnel", ETH_RSS_TUNNEL }, 123 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 124 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 125 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 126 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 127 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 128 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 129 { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, 130 { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, 131 { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, 132 { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, 133 { "esp", ETH_RSS_ESP }, 134 { "ah", ETH_RSS_AH }, 135 { "l2tpv3", ETH_RSS_L2TPV3 }, 136 { "pfcp", ETH_RSS_PFCP }, 137 { "pppoe", ETH_RSS_PPPOE }, 138 { "gtpu", ETH_RSS_GTPU }, 139 { NULL, 0 }, 140 }; 141 142 static void 143 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 144 { 145 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 146 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 147 printf("%s%s", name, buf); 148 } 149 150 void 151 nic_stats_display(portid_t port_id) 152 { 153 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 154 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 155 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 156 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 157 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 158 struct timespec cur_time; 159 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 160 diff_ns; 161 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 162 struct rte_eth_stats stats; 163 struct rte_port *port = &ports[port_id]; 164 uint8_t i; 165 166 static const char *nic_stats_border = "########################"; 167 168 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 169 print_valid_ports(); 170 return; 171 } 172 rte_eth_stats_get(port_id, &stats); 173 printf("\n %s NIC statistics for port %-2d %s\n", 174 nic_stats_border, port_id, nic_stats_border); 175 176 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 177 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 178 "%-"PRIu64"\n", 179 stats.ipackets, stats.imissed, stats.ibytes); 180 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 181 printf(" RX-nombuf: %-10"PRIu64"\n", 182 stats.rx_nombuf); 183 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 184 "%-"PRIu64"\n", 185 stats.opackets, stats.oerrors, stats.obytes); 186 } 187 else { 188 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 189 " RX-bytes: %10"PRIu64"\n", 190 stats.ipackets, stats.ierrors, stats.ibytes); 191 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 192 printf(" RX-nombuf: %10"PRIu64"\n", 193 stats.rx_nombuf); 194 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 195 " TX-bytes: %10"PRIu64"\n", 196 stats.opackets, stats.oerrors, stats.obytes); 197 } 198 199 if (port->rx_queue_stats_mapping_enabled) { 200 printf("\n"); 201 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 202 printf(" Stats reg %2d RX-packets: %10"PRIu64 203 " RX-errors: %10"PRIu64 204 " RX-bytes: %10"PRIu64"\n", 205 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 206 } 207 } 208 if (port->tx_queue_stats_mapping_enabled) { 209 printf("\n"); 210 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 211 printf(" Stats reg %2d TX-packets: %10"PRIu64 212 " TX-bytes: %10"PRIu64"\n", 213 i, stats.q_opackets[i], stats.q_obytes[i]); 214 } 215 } 216 217 diff_ns = 0; 218 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 219 uint64_t ns; 220 221 ns = cur_time.tv_sec * NS_PER_SEC; 222 ns += cur_time.tv_nsec; 223 224 if (prev_ns[port_id] != 0) 225 diff_ns = ns - prev_ns[port_id]; 226 prev_ns[port_id] = ns; 227 } 228 229 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 230 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 231 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 232 (stats.opackets - prev_pkts_tx[port_id]) : 0; 233 prev_pkts_rx[port_id] = stats.ipackets; 234 prev_pkts_tx[port_id] = stats.opackets; 235 mpps_rx = diff_ns > 0 ? 236 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 237 mpps_tx = diff_ns > 0 ? 238 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 239 240 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 241 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 242 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 243 (stats.obytes - prev_bytes_tx[port_id]) : 0; 244 prev_bytes_rx[port_id] = stats.ibytes; 245 prev_bytes_tx[port_id] = stats.obytes; 246 mbps_rx = diff_ns > 0 ? 247 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 248 mbps_tx = diff_ns > 0 ? 249 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 250 251 printf("\n Throughput (since last show)\n"); 252 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 253 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 254 mpps_tx, mbps_tx * 8); 255 256 printf(" %s############################%s\n", 257 nic_stats_border, nic_stats_border); 258 } 259 260 void 261 nic_stats_clear(portid_t port_id) 262 { 263 int ret; 264 265 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 266 print_valid_ports(); 267 return; 268 } 269 270 ret = rte_eth_stats_reset(port_id); 271 if (ret != 0) { 272 printf("%s: Error: failed to reset stats (port %u): %s", 273 __func__, port_id, strerror(-ret)); 274 return; 275 } 276 277 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 278 if (ret != 0) { 279 if (ret < 0) 280 ret = -ret; 281 printf("%s: Error: failed to get stats (port %u): %s", 282 __func__, port_id, strerror(ret)); 283 return; 284 } 285 printf("\n NIC statistics for port %d cleared\n", port_id); 286 } 287 288 void 289 nic_xstats_display(portid_t port_id) 290 { 291 struct rte_eth_xstat *xstats; 292 int cnt_xstats, idx_xstat; 293 struct rte_eth_xstat_name *xstats_names; 294 295 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 296 print_valid_ports(); 297 return; 298 } 299 printf("###### NIC extended statistics for port %-2d\n", port_id); 300 if (!rte_eth_dev_is_valid_port(port_id)) { 301 printf("Error: Invalid port number %i\n", port_id); 302 return; 303 } 304 305 /* Get count */ 306 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 307 if (cnt_xstats < 0) { 308 printf("Error: Cannot get count of xstats\n"); 309 return; 310 } 311 312 /* Get id-name lookup table */ 313 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 314 if (xstats_names == NULL) { 315 printf("Cannot allocate memory for xstats lookup\n"); 316 return; 317 } 318 if (cnt_xstats != rte_eth_xstats_get_names( 319 port_id, xstats_names, cnt_xstats)) { 320 printf("Error: Cannot get xstats lookup\n"); 321 free(xstats_names); 322 return; 323 } 324 325 /* Get stats themselves */ 326 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 327 if (xstats == NULL) { 328 printf("Cannot allocate memory for xstats\n"); 329 free(xstats_names); 330 return; 331 } 332 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 333 printf("Error: Unable to get xstats\n"); 334 free(xstats_names); 335 free(xstats); 336 return; 337 } 338 339 /* Display xstats */ 340 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 341 if (xstats_hide_zero && !xstats[idx_xstat].value) 342 continue; 343 printf("%s: %"PRIu64"\n", 344 xstats_names[idx_xstat].name, 345 xstats[idx_xstat].value); 346 } 347 free(xstats_names); 348 free(xstats); 349 } 350 351 void 352 nic_xstats_clear(portid_t port_id) 353 { 354 int ret; 355 356 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 357 print_valid_ports(); 358 return; 359 } 360 361 ret = rte_eth_xstats_reset(port_id); 362 if (ret != 0) { 363 printf("%s: Error: failed to reset xstats (port %u): %s", 364 __func__, port_id, strerror(-ret)); 365 return; 366 } 367 368 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 369 if (ret != 0) { 370 if (ret < 0) 371 ret = -ret; 372 printf("%s: Error: failed to get stats (port %u): %s", 373 __func__, port_id, strerror(ret)); 374 return; 375 } 376 } 377 378 void 379 nic_stats_mapping_display(portid_t port_id) 380 { 381 struct rte_port *port = &ports[port_id]; 382 uint16_t i; 383 384 static const char *nic_stats_mapping_border = "########################"; 385 386 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 387 print_valid_ports(); 388 return; 389 } 390 391 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 392 printf("Port id %d - either does not support queue statistic mapping or" 393 " no queue statistic mapping set\n", port_id); 394 return; 395 } 396 397 printf("\n %s NIC statistics mapping for port %-2d %s\n", 398 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 399 400 if (port->rx_queue_stats_mapping_enabled) { 401 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 402 if (rx_queue_stats_mappings[i].port_id == port_id) { 403 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 404 rx_queue_stats_mappings[i].queue_id, 405 rx_queue_stats_mappings[i].stats_counter_id); 406 } 407 } 408 printf("\n"); 409 } 410 411 412 if (port->tx_queue_stats_mapping_enabled) { 413 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 414 if (tx_queue_stats_mappings[i].port_id == port_id) { 415 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 416 tx_queue_stats_mappings[i].queue_id, 417 tx_queue_stats_mappings[i].stats_counter_id); 418 } 419 } 420 } 421 422 printf(" %s####################################%s\n", 423 nic_stats_mapping_border, nic_stats_mapping_border); 424 } 425 426 void 427 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 428 { 429 struct rte_eth_burst_mode mode; 430 struct rte_eth_rxq_info qinfo; 431 int32_t rc; 432 static const char *info_border = "*********************"; 433 434 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 435 if (rc != 0) { 436 printf("Failed to retrieve information for port: %u, " 437 "RX queue: %hu\nerror desc: %s(%d)\n", 438 port_id, queue_id, strerror(-rc), rc); 439 return; 440 } 441 442 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 443 info_border, port_id, queue_id, info_border); 444 445 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 446 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 447 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 448 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 449 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 450 printf("\nRX drop packets: %s", 451 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 452 printf("\nRX deferred start: %s", 453 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 454 printf("\nRX scattered packets: %s", 455 (qinfo.scattered_rx != 0) ? "on" : "off"); 456 if (qinfo.rx_buf_size != 0) 457 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 458 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 459 460 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 461 printf("\nBurst mode: %s%s", 462 mode.info, 463 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 464 " (per queue)" : ""); 465 466 printf("\n"); 467 } 468 469 void 470 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 471 { 472 struct rte_eth_burst_mode mode; 473 struct rte_eth_txq_info qinfo; 474 int32_t rc; 475 static const char *info_border = "*********************"; 476 477 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 478 if (rc != 0) { 479 printf("Failed to retrieve information for port: %u, " 480 "TX queue: %hu\nerror desc: %s(%d)\n", 481 port_id, queue_id, strerror(-rc), rc); 482 return; 483 } 484 485 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 486 info_border, port_id, queue_id, info_border); 487 488 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 489 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 490 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 491 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 492 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 493 printf("\nTX deferred start: %s", 494 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 495 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 496 497 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 498 printf("\nBurst mode: %s%s", 499 mode.info, 500 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 501 " (per queue)" : ""); 502 503 printf("\n"); 504 } 505 506 static int bus_match_all(const struct rte_bus *bus, const void *data) 507 { 508 RTE_SET_USED(bus); 509 RTE_SET_USED(data); 510 return 0; 511 } 512 513 void 514 device_infos_display(const char *identifier) 515 { 516 static const char *info_border = "*********************"; 517 struct rte_bus *start = NULL, *next; 518 struct rte_dev_iterator dev_iter; 519 char name[RTE_ETH_NAME_MAX_LEN]; 520 struct rte_ether_addr mac_addr; 521 struct rte_device *dev; 522 struct rte_devargs da; 523 portid_t port_id; 524 char devstr[128]; 525 526 memset(&da, 0, sizeof(da)); 527 if (!identifier) 528 goto skip_parse; 529 530 if (rte_devargs_parsef(&da, "%s", identifier)) { 531 printf("cannot parse identifier\n"); 532 if (da.args) 533 free(da.args); 534 return; 535 } 536 537 skip_parse: 538 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 539 540 start = next; 541 if (identifier && da.bus != next) 542 continue; 543 544 /* Skip buses that don't have iterate method */ 545 if (!next->dev_iterate) 546 continue; 547 548 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 549 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 550 551 if (!dev->driver) 552 continue; 553 /* Check for matching device if identifier is present */ 554 if (identifier && 555 strncmp(da.name, dev->name, strlen(dev->name))) 556 continue; 557 printf("\n%s Infos for device %s %s\n", 558 info_border, dev->name, info_border); 559 printf("Bus name: %s", dev->bus->name); 560 printf("\nDriver name: %s", dev->driver->name); 561 printf("\nDevargs: %s", 562 dev->devargs ? dev->devargs->args : ""); 563 printf("\nConnect to socket: %d", dev->numa_node); 564 printf("\n"); 565 566 /* List ports with matching device name */ 567 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 568 printf("\n\tPort id: %-2d", port_id); 569 if (eth_macaddr_get_print_err(port_id, 570 &mac_addr) == 0) 571 print_ethaddr("\n\tMAC address: ", 572 &mac_addr); 573 rte_eth_dev_get_name_by_port(port_id, name); 574 printf("\n\tDevice name: %s", name); 575 printf("\n"); 576 } 577 } 578 }; 579 } 580 581 void 582 port_infos_display(portid_t port_id) 583 { 584 struct rte_port *port; 585 struct rte_ether_addr mac_addr; 586 struct rte_eth_link link; 587 struct rte_eth_dev_info dev_info; 588 int vlan_offload; 589 struct rte_mempool * mp; 590 static const char *info_border = "*********************"; 591 uint16_t mtu; 592 char name[RTE_ETH_NAME_MAX_LEN]; 593 int ret; 594 char fw_version[ETHDEV_FWVERS_LEN]; 595 596 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 597 print_valid_ports(); 598 return; 599 } 600 port = &ports[port_id]; 601 ret = eth_link_get_nowait_print_err(port_id, &link); 602 if (ret < 0) 603 return; 604 605 ret = eth_dev_info_get_print_err(port_id, &dev_info); 606 if (ret != 0) 607 return; 608 609 printf("\n%s Infos for port %-2d %s\n", 610 info_border, port_id, info_border); 611 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 612 print_ethaddr("MAC address: ", &mac_addr); 613 rte_eth_dev_get_name_by_port(port_id, name); 614 printf("\nDevice name: %s", name); 615 printf("\nDriver name: %s", dev_info.driver_name); 616 617 if (rte_eth_dev_fw_version_get(port_id, fw_version, 618 ETHDEV_FWVERS_LEN) == 0) 619 printf("\nFirmware-version: %s", fw_version); 620 else 621 printf("\nFirmware-version: %s", "not available"); 622 623 if (dev_info.device->devargs && dev_info.device->devargs->args) 624 printf("\nDevargs: %s", dev_info.device->devargs->args); 625 printf("\nConnect to socket: %u", port->socket_id); 626 627 if (port_numa[port_id] != NUMA_NO_CONFIG) { 628 mp = mbuf_pool_find(port_numa[port_id]); 629 if (mp) 630 printf("\nmemory allocation on the socket: %d", 631 port_numa[port_id]); 632 } else 633 printf("\nmemory allocation on the socket: %u",port->socket_id); 634 635 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 636 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 637 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 638 ("full-duplex") : ("half-duplex")); 639 640 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 641 printf("MTU: %u\n", mtu); 642 643 printf("Promiscuous mode: %s\n", 644 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 645 printf("Allmulticast mode: %s\n", 646 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 647 printf("Maximum number of MAC addresses: %u\n", 648 (unsigned int)(port->dev_info.max_mac_addrs)); 649 printf("Maximum number of MAC addresses of hash filtering: %u\n", 650 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 651 652 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 653 if (vlan_offload >= 0){ 654 printf("VLAN offload: \n"); 655 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 656 printf(" strip on, "); 657 else 658 printf(" strip off, "); 659 660 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 661 printf("filter on, "); 662 else 663 printf("filter off, "); 664 665 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 666 printf("extend on, "); 667 else 668 printf("extend off, "); 669 670 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 671 printf("qinq strip on\n"); 672 else 673 printf("qinq strip off\n"); 674 } 675 676 if (dev_info.hash_key_size > 0) 677 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 678 if (dev_info.reta_size > 0) 679 printf("Redirection table size: %u\n", dev_info.reta_size); 680 if (!dev_info.flow_type_rss_offloads) 681 printf("No RSS offload flow type is supported.\n"); 682 else { 683 uint16_t i; 684 char *p; 685 686 printf("Supported RSS offload flow types:\n"); 687 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 688 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 689 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 690 continue; 691 p = flowtype_to_str(i); 692 if (p) 693 printf(" %s\n", p); 694 else 695 printf(" user defined %d\n", i); 696 } 697 } 698 699 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 700 printf("Maximum configurable length of RX packet: %u\n", 701 dev_info.max_rx_pktlen); 702 printf("Maximum configurable size of LRO aggregated packet: %u\n", 703 dev_info.max_lro_pkt_size); 704 if (dev_info.max_vfs) 705 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 706 if (dev_info.max_vmdq_pools) 707 printf("Maximum number of VMDq pools: %u\n", 708 dev_info.max_vmdq_pools); 709 710 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 711 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 712 printf("Max possible number of RXDs per queue: %hu\n", 713 dev_info.rx_desc_lim.nb_max); 714 printf("Min possible number of RXDs per queue: %hu\n", 715 dev_info.rx_desc_lim.nb_min); 716 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 717 718 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 719 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 720 printf("Max possible number of TXDs per queue: %hu\n", 721 dev_info.tx_desc_lim.nb_max); 722 printf("Min possible number of TXDs per queue: %hu\n", 723 dev_info.tx_desc_lim.nb_min); 724 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 725 printf("Max segment number per packet: %hu\n", 726 dev_info.tx_desc_lim.nb_seg_max); 727 printf("Max segment number per MTU/TSO: %hu\n", 728 dev_info.tx_desc_lim.nb_mtu_seg_max); 729 730 /* Show switch info only if valid switch domain and port id is set */ 731 if (dev_info.switch_info.domain_id != 732 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 733 if (dev_info.switch_info.name) 734 printf("Switch name: %s\n", dev_info.switch_info.name); 735 736 printf("Switch domain Id: %u\n", 737 dev_info.switch_info.domain_id); 738 printf("Switch Port Id: %u\n", 739 dev_info.switch_info.port_id); 740 } 741 } 742 743 void 744 port_summary_header_display(void) 745 { 746 uint16_t port_number; 747 748 port_number = rte_eth_dev_count_avail(); 749 printf("Number of available ports: %i\n", port_number); 750 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 751 "Driver", "Status", "Link"); 752 } 753 754 void 755 port_summary_display(portid_t port_id) 756 { 757 struct rte_ether_addr mac_addr; 758 struct rte_eth_link link; 759 struct rte_eth_dev_info dev_info; 760 char name[RTE_ETH_NAME_MAX_LEN]; 761 int ret; 762 763 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 764 print_valid_ports(); 765 return; 766 } 767 768 ret = eth_link_get_nowait_print_err(port_id, &link); 769 if (ret < 0) 770 return; 771 772 ret = eth_dev_info_get_print_err(port_id, &dev_info); 773 if (ret != 0) 774 return; 775 776 rte_eth_dev_get_name_by_port(port_id, name); 777 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 778 if (ret != 0) 779 return; 780 781 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n", 782 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 783 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 784 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 785 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 786 rte_eth_link_speed_to_str(link.link_speed)); 787 } 788 789 void 790 port_eeprom_display(portid_t port_id) 791 { 792 struct rte_dev_eeprom_info einfo; 793 int ret; 794 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 795 print_valid_ports(); 796 return; 797 } 798 799 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 800 if (len_eeprom < 0) { 801 switch (len_eeprom) { 802 case -ENODEV: 803 printf("port index %d invalid\n", port_id); 804 break; 805 case -ENOTSUP: 806 printf("operation not supported by device\n"); 807 break; 808 case -EIO: 809 printf("device is removed\n"); 810 break; 811 default: 812 printf("Unable to get EEPROM: %d\n", len_eeprom); 813 break; 814 } 815 return; 816 } 817 818 char buf[len_eeprom]; 819 einfo.offset = 0; 820 einfo.length = len_eeprom; 821 einfo.data = buf; 822 823 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 824 if (ret != 0) { 825 switch (ret) { 826 case -ENODEV: 827 printf("port index %d invalid\n", port_id); 828 break; 829 case -ENOTSUP: 830 printf("operation not supported by device\n"); 831 break; 832 case -EIO: 833 printf("device is removed\n"); 834 break; 835 default: 836 printf("Unable to get EEPROM: %d\n", ret); 837 break; 838 } 839 return; 840 } 841 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 842 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 843 } 844 845 void 846 port_module_eeprom_display(portid_t port_id) 847 { 848 struct rte_eth_dev_module_info minfo; 849 struct rte_dev_eeprom_info einfo; 850 int ret; 851 852 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 853 print_valid_ports(); 854 return; 855 } 856 857 858 ret = rte_eth_dev_get_module_info(port_id, &minfo); 859 if (ret != 0) { 860 switch (ret) { 861 case -ENODEV: 862 printf("port index %d invalid\n", port_id); 863 break; 864 case -ENOTSUP: 865 printf("operation not supported by device\n"); 866 break; 867 case -EIO: 868 printf("device is removed\n"); 869 break; 870 default: 871 printf("Unable to get module EEPROM: %d\n", ret); 872 break; 873 } 874 return; 875 } 876 877 char buf[minfo.eeprom_len]; 878 einfo.offset = 0; 879 einfo.length = minfo.eeprom_len; 880 einfo.data = buf; 881 882 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 883 if (ret != 0) { 884 switch (ret) { 885 case -ENODEV: 886 printf("port index %d invalid\n", port_id); 887 break; 888 case -ENOTSUP: 889 printf("operation not supported by device\n"); 890 break; 891 case -EIO: 892 printf("device is removed\n"); 893 break; 894 default: 895 printf("Unable to get module EEPROM: %d\n", ret); 896 break; 897 } 898 return; 899 } 900 901 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 902 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 903 } 904 905 void 906 port_offload_cap_display(portid_t port_id) 907 { 908 struct rte_eth_dev_info dev_info; 909 static const char *info_border = "************"; 910 int ret; 911 912 if (port_id_is_invalid(port_id, ENABLED_WARN)) 913 return; 914 915 ret = eth_dev_info_get_print_err(port_id, &dev_info); 916 if (ret != 0) 917 return; 918 919 printf("\n%s Port %d supported offload features: %s\n", 920 info_border, port_id, info_border); 921 922 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 923 printf("VLAN stripped: "); 924 if (ports[port_id].dev_conf.rxmode.offloads & 925 DEV_RX_OFFLOAD_VLAN_STRIP) 926 printf("on\n"); 927 else 928 printf("off\n"); 929 } 930 931 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 932 printf("Double VLANs stripped: "); 933 if (ports[port_id].dev_conf.rxmode.offloads & 934 DEV_RX_OFFLOAD_QINQ_STRIP) 935 printf("on\n"); 936 else 937 printf("off\n"); 938 } 939 940 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 941 printf("RX IPv4 checksum: "); 942 if (ports[port_id].dev_conf.rxmode.offloads & 943 DEV_RX_OFFLOAD_IPV4_CKSUM) 944 printf("on\n"); 945 else 946 printf("off\n"); 947 } 948 949 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 950 printf("RX UDP checksum: "); 951 if (ports[port_id].dev_conf.rxmode.offloads & 952 DEV_RX_OFFLOAD_UDP_CKSUM) 953 printf("on\n"); 954 else 955 printf("off\n"); 956 } 957 958 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 959 printf("RX TCP checksum: "); 960 if (ports[port_id].dev_conf.rxmode.offloads & 961 DEV_RX_OFFLOAD_TCP_CKSUM) 962 printf("on\n"); 963 else 964 printf("off\n"); 965 } 966 967 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 968 printf("RX SCTP checksum: "); 969 if (ports[port_id].dev_conf.rxmode.offloads & 970 DEV_RX_OFFLOAD_SCTP_CKSUM) 971 printf("on\n"); 972 else 973 printf("off\n"); 974 } 975 976 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 977 printf("RX Outer IPv4 checksum: "); 978 if (ports[port_id].dev_conf.rxmode.offloads & 979 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 980 printf("on\n"); 981 else 982 printf("off\n"); 983 } 984 985 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 986 printf("RX Outer UDP checksum: "); 987 if (ports[port_id].dev_conf.rxmode.offloads & 988 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 989 printf("on\n"); 990 else 991 printf("off\n"); 992 } 993 994 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 995 printf("Large receive offload: "); 996 if (ports[port_id].dev_conf.rxmode.offloads & 997 DEV_RX_OFFLOAD_TCP_LRO) 998 printf("on\n"); 999 else 1000 printf("off\n"); 1001 } 1002 1003 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 1004 printf("HW timestamp: "); 1005 if (ports[port_id].dev_conf.rxmode.offloads & 1006 DEV_RX_OFFLOAD_TIMESTAMP) 1007 printf("on\n"); 1008 else 1009 printf("off\n"); 1010 } 1011 1012 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 1013 printf("Rx Keep CRC: "); 1014 if (ports[port_id].dev_conf.rxmode.offloads & 1015 DEV_RX_OFFLOAD_KEEP_CRC) 1016 printf("on\n"); 1017 else 1018 printf("off\n"); 1019 } 1020 1021 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 1022 printf("RX offload security: "); 1023 if (ports[port_id].dev_conf.rxmode.offloads & 1024 DEV_RX_OFFLOAD_SECURITY) 1025 printf("on\n"); 1026 else 1027 printf("off\n"); 1028 } 1029 1030 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 1031 printf("VLAN insert: "); 1032 if (ports[port_id].dev_conf.txmode.offloads & 1033 DEV_TX_OFFLOAD_VLAN_INSERT) 1034 printf("on\n"); 1035 else 1036 printf("off\n"); 1037 } 1038 1039 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 1040 printf("Double VLANs insert: "); 1041 if (ports[port_id].dev_conf.txmode.offloads & 1042 DEV_TX_OFFLOAD_QINQ_INSERT) 1043 printf("on\n"); 1044 else 1045 printf("off\n"); 1046 } 1047 1048 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 1049 printf("TX IPv4 checksum: "); 1050 if (ports[port_id].dev_conf.txmode.offloads & 1051 DEV_TX_OFFLOAD_IPV4_CKSUM) 1052 printf("on\n"); 1053 else 1054 printf("off\n"); 1055 } 1056 1057 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 1058 printf("TX UDP checksum: "); 1059 if (ports[port_id].dev_conf.txmode.offloads & 1060 DEV_TX_OFFLOAD_UDP_CKSUM) 1061 printf("on\n"); 1062 else 1063 printf("off\n"); 1064 } 1065 1066 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 1067 printf("TX TCP checksum: "); 1068 if (ports[port_id].dev_conf.txmode.offloads & 1069 DEV_TX_OFFLOAD_TCP_CKSUM) 1070 printf("on\n"); 1071 else 1072 printf("off\n"); 1073 } 1074 1075 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 1076 printf("TX SCTP checksum: "); 1077 if (ports[port_id].dev_conf.txmode.offloads & 1078 DEV_TX_OFFLOAD_SCTP_CKSUM) 1079 printf("on\n"); 1080 else 1081 printf("off\n"); 1082 } 1083 1084 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 1085 printf("TX Outer IPv4 checksum: "); 1086 if (ports[port_id].dev_conf.txmode.offloads & 1087 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 1088 printf("on\n"); 1089 else 1090 printf("off\n"); 1091 } 1092 1093 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 1094 printf("TX TCP segmentation: "); 1095 if (ports[port_id].dev_conf.txmode.offloads & 1096 DEV_TX_OFFLOAD_TCP_TSO) 1097 printf("on\n"); 1098 else 1099 printf("off\n"); 1100 } 1101 1102 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 1103 printf("TX UDP segmentation: "); 1104 if (ports[port_id].dev_conf.txmode.offloads & 1105 DEV_TX_OFFLOAD_UDP_TSO) 1106 printf("on\n"); 1107 else 1108 printf("off\n"); 1109 } 1110 1111 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 1112 printf("TSO for VXLAN tunnel packet: "); 1113 if (ports[port_id].dev_conf.txmode.offloads & 1114 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 1115 printf("on\n"); 1116 else 1117 printf("off\n"); 1118 } 1119 1120 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 1121 printf("TSO for GRE tunnel packet: "); 1122 if (ports[port_id].dev_conf.txmode.offloads & 1123 DEV_TX_OFFLOAD_GRE_TNL_TSO) 1124 printf("on\n"); 1125 else 1126 printf("off\n"); 1127 } 1128 1129 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 1130 printf("TSO for IPIP tunnel packet: "); 1131 if (ports[port_id].dev_conf.txmode.offloads & 1132 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 1133 printf("on\n"); 1134 else 1135 printf("off\n"); 1136 } 1137 1138 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 1139 printf("TSO for GENEVE tunnel packet: "); 1140 if (ports[port_id].dev_conf.txmode.offloads & 1141 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 1142 printf("on\n"); 1143 else 1144 printf("off\n"); 1145 } 1146 1147 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 1148 printf("IP tunnel TSO: "); 1149 if (ports[port_id].dev_conf.txmode.offloads & 1150 DEV_TX_OFFLOAD_IP_TNL_TSO) 1151 printf("on\n"); 1152 else 1153 printf("off\n"); 1154 } 1155 1156 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 1157 printf("UDP tunnel TSO: "); 1158 if (ports[port_id].dev_conf.txmode.offloads & 1159 DEV_TX_OFFLOAD_UDP_TNL_TSO) 1160 printf("on\n"); 1161 else 1162 printf("off\n"); 1163 } 1164 1165 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 1166 printf("TX Outer UDP checksum: "); 1167 if (ports[port_id].dev_conf.txmode.offloads & 1168 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 1169 printf("on\n"); 1170 else 1171 printf("off\n"); 1172 } 1173 1174 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) { 1175 printf("Tx scheduling on timestamp: "); 1176 if (ports[port_id].dev_conf.txmode.offloads & 1177 DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) 1178 printf("on\n"); 1179 else 1180 printf("off\n"); 1181 } 1182 1183 } 1184 1185 int 1186 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1187 { 1188 uint16_t pid; 1189 1190 if (port_id == (portid_t)RTE_PORT_ALL) 1191 return 0; 1192 1193 RTE_ETH_FOREACH_DEV(pid) 1194 if (port_id == pid) 1195 return 0; 1196 1197 if (warning == ENABLED_WARN) 1198 printf("Invalid port %d\n", port_id); 1199 1200 return 1; 1201 } 1202 1203 void print_valid_ports(void) 1204 { 1205 portid_t pid; 1206 1207 printf("The valid ports array is ["); 1208 RTE_ETH_FOREACH_DEV(pid) { 1209 printf(" %d", pid); 1210 } 1211 printf(" ]\n"); 1212 } 1213 1214 static int 1215 vlan_id_is_invalid(uint16_t vlan_id) 1216 { 1217 if (vlan_id < 4096) 1218 return 0; 1219 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1220 return 1; 1221 } 1222 1223 static int 1224 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1225 { 1226 const struct rte_pci_device *pci_dev; 1227 const struct rte_bus *bus; 1228 uint64_t pci_len; 1229 1230 if (reg_off & 0x3) { 1231 printf("Port register offset 0x%X not aligned on a 4-byte " 1232 "boundary\n", 1233 (unsigned)reg_off); 1234 return 1; 1235 } 1236 1237 if (!ports[port_id].dev_info.device) { 1238 printf("Invalid device\n"); 1239 return 0; 1240 } 1241 1242 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1243 if (bus && !strcmp(bus->name, "pci")) { 1244 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1245 } else { 1246 printf("Not a PCI device\n"); 1247 return 1; 1248 } 1249 1250 pci_len = pci_dev->mem_resource[0].len; 1251 if (reg_off >= pci_len) { 1252 printf("Port %d: register offset %u (0x%X) out of port PCI " 1253 "resource (length=%"PRIu64")\n", 1254 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 1255 return 1; 1256 } 1257 return 0; 1258 } 1259 1260 static int 1261 reg_bit_pos_is_invalid(uint8_t bit_pos) 1262 { 1263 if (bit_pos <= 31) 1264 return 0; 1265 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 1266 return 1; 1267 } 1268 1269 #define display_port_and_reg_off(port_id, reg_off) \ 1270 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1271 1272 static inline void 1273 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1274 { 1275 display_port_and_reg_off(port_id, (unsigned)reg_off); 1276 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1277 } 1278 1279 void 1280 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1281 { 1282 uint32_t reg_v; 1283 1284 1285 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1286 return; 1287 if (port_reg_off_is_invalid(port_id, reg_off)) 1288 return; 1289 if (reg_bit_pos_is_invalid(bit_x)) 1290 return; 1291 reg_v = port_id_pci_reg_read(port_id, reg_off); 1292 display_port_and_reg_off(port_id, (unsigned)reg_off); 1293 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1294 } 1295 1296 void 1297 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1298 uint8_t bit1_pos, uint8_t bit2_pos) 1299 { 1300 uint32_t reg_v; 1301 uint8_t l_bit; 1302 uint8_t h_bit; 1303 1304 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1305 return; 1306 if (port_reg_off_is_invalid(port_id, reg_off)) 1307 return; 1308 if (reg_bit_pos_is_invalid(bit1_pos)) 1309 return; 1310 if (reg_bit_pos_is_invalid(bit2_pos)) 1311 return; 1312 if (bit1_pos > bit2_pos) 1313 l_bit = bit2_pos, h_bit = bit1_pos; 1314 else 1315 l_bit = bit1_pos, h_bit = bit2_pos; 1316 1317 reg_v = port_id_pci_reg_read(port_id, reg_off); 1318 reg_v >>= l_bit; 1319 if (h_bit < 31) 1320 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1321 display_port_and_reg_off(port_id, (unsigned)reg_off); 1322 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1323 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1324 } 1325 1326 void 1327 port_reg_display(portid_t port_id, uint32_t reg_off) 1328 { 1329 uint32_t reg_v; 1330 1331 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1332 return; 1333 if (port_reg_off_is_invalid(port_id, reg_off)) 1334 return; 1335 reg_v = port_id_pci_reg_read(port_id, reg_off); 1336 display_port_reg_value(port_id, reg_off, reg_v); 1337 } 1338 1339 void 1340 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1341 uint8_t bit_v) 1342 { 1343 uint32_t reg_v; 1344 1345 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1346 return; 1347 if (port_reg_off_is_invalid(port_id, reg_off)) 1348 return; 1349 if (reg_bit_pos_is_invalid(bit_pos)) 1350 return; 1351 if (bit_v > 1) { 1352 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1353 return; 1354 } 1355 reg_v = port_id_pci_reg_read(port_id, reg_off); 1356 if (bit_v == 0) 1357 reg_v &= ~(1 << bit_pos); 1358 else 1359 reg_v |= (1 << bit_pos); 1360 port_id_pci_reg_write(port_id, reg_off, reg_v); 1361 display_port_reg_value(port_id, reg_off, reg_v); 1362 } 1363 1364 void 1365 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1366 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1367 { 1368 uint32_t max_v; 1369 uint32_t reg_v; 1370 uint8_t l_bit; 1371 uint8_t h_bit; 1372 1373 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1374 return; 1375 if (port_reg_off_is_invalid(port_id, reg_off)) 1376 return; 1377 if (reg_bit_pos_is_invalid(bit1_pos)) 1378 return; 1379 if (reg_bit_pos_is_invalid(bit2_pos)) 1380 return; 1381 if (bit1_pos > bit2_pos) 1382 l_bit = bit2_pos, h_bit = bit1_pos; 1383 else 1384 l_bit = bit1_pos, h_bit = bit2_pos; 1385 1386 if ((h_bit - l_bit) < 31) 1387 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1388 else 1389 max_v = 0xFFFFFFFF; 1390 1391 if (value > max_v) { 1392 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1393 (unsigned)value, (unsigned)value, 1394 (unsigned)max_v, (unsigned)max_v); 1395 return; 1396 } 1397 reg_v = port_id_pci_reg_read(port_id, reg_off); 1398 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1399 reg_v |= (value << l_bit); /* Set changed bits */ 1400 port_id_pci_reg_write(port_id, reg_off, reg_v); 1401 display_port_reg_value(port_id, reg_off, reg_v); 1402 } 1403 1404 void 1405 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1406 { 1407 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1408 return; 1409 if (port_reg_off_is_invalid(port_id, reg_off)) 1410 return; 1411 port_id_pci_reg_write(port_id, reg_off, reg_v); 1412 display_port_reg_value(port_id, reg_off, reg_v); 1413 } 1414 1415 void 1416 port_mtu_set(portid_t port_id, uint16_t mtu) 1417 { 1418 int diag; 1419 struct rte_port *rte_port = &ports[port_id]; 1420 struct rte_eth_dev_info dev_info; 1421 uint16_t eth_overhead; 1422 int ret; 1423 1424 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1425 return; 1426 1427 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1428 if (ret != 0) 1429 return; 1430 1431 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1432 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1433 mtu, dev_info.min_mtu, dev_info.max_mtu); 1434 return; 1435 } 1436 diag = rte_eth_dev_set_mtu(port_id, mtu); 1437 if (diag) 1438 printf("Set MTU failed. diag=%d\n", diag); 1439 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1440 /* 1441 * Ether overhead in driver is equal to the difference of 1442 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the 1443 * device supports jumbo frame. 1444 */ 1445 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; 1446 if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) { 1447 rte_port->dev_conf.rxmode.offloads |= 1448 DEV_RX_OFFLOAD_JUMBO_FRAME; 1449 rte_port->dev_conf.rxmode.max_rx_pkt_len = 1450 mtu + eth_overhead; 1451 } else 1452 rte_port->dev_conf.rxmode.offloads &= 1453 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1454 } 1455 } 1456 1457 /* Generic flow management functions. */ 1458 1459 /** Generate a port_flow entry from attributes/pattern/actions. */ 1460 static struct port_flow * 1461 port_flow_new(const struct rte_flow_attr *attr, 1462 const struct rte_flow_item *pattern, 1463 const struct rte_flow_action *actions, 1464 struct rte_flow_error *error) 1465 { 1466 const struct rte_flow_conv_rule rule = { 1467 .attr_ro = attr, 1468 .pattern_ro = pattern, 1469 .actions_ro = actions, 1470 }; 1471 struct port_flow *pf; 1472 int ret; 1473 1474 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1475 if (ret < 0) 1476 return NULL; 1477 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1478 if (!pf) { 1479 rte_flow_error_set 1480 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1481 "calloc() failed"); 1482 return NULL; 1483 } 1484 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1485 error) >= 0) 1486 return pf; 1487 free(pf); 1488 return NULL; 1489 } 1490 1491 /** Print a message out of a flow error. */ 1492 static int 1493 port_flow_complain(struct rte_flow_error *error) 1494 { 1495 static const char *const errstrlist[] = { 1496 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1497 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1498 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1499 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1500 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1501 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1502 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1503 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1504 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1505 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1506 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1507 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1508 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1509 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1510 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1511 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1512 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1513 }; 1514 const char *errstr; 1515 char buf[32]; 1516 int err = rte_errno; 1517 1518 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1519 !errstrlist[error->type]) 1520 errstr = "unknown type"; 1521 else 1522 errstr = errstrlist[error->type]; 1523 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__, 1524 error->type, errstr, 1525 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1526 error->cause), buf) : "", 1527 error->message ? error->message : "(no stated reason)", 1528 rte_strerror(err)); 1529 return -err; 1530 } 1531 1532 static void 1533 rss_config_display(struct rte_flow_action_rss *rss_conf) 1534 { 1535 uint8_t i; 1536 1537 if (rss_conf == NULL) { 1538 printf("Invalid rule\n"); 1539 return; 1540 } 1541 1542 printf("RSS:\n" 1543 " queues:"); 1544 if (rss_conf->queue_num == 0) 1545 printf(" none"); 1546 for (i = 0; i < rss_conf->queue_num; i++) 1547 printf(" %d", rss_conf->queue[i]); 1548 printf("\n"); 1549 1550 printf(" function: "); 1551 switch (rss_conf->func) { 1552 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1553 printf("default\n"); 1554 break; 1555 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1556 printf("toeplitz\n"); 1557 break; 1558 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1559 printf("simple_xor\n"); 1560 break; 1561 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1562 printf("symmetric_toeplitz\n"); 1563 break; 1564 default: 1565 printf("Unknown function\n"); 1566 return; 1567 } 1568 1569 printf(" types:\n"); 1570 if (rss_conf->types == 0) { 1571 printf(" none\n"); 1572 return; 1573 } 1574 for (i = 0; rss_type_table[i].str; i++) { 1575 if ((rss_conf->types & 1576 rss_type_table[i].rss_type) == 1577 rss_type_table[i].rss_type && 1578 rss_type_table[i].rss_type != 0) 1579 printf(" %s\n", rss_type_table[i].str); 1580 } 1581 } 1582 1583 /** Validate flow rule. */ 1584 int 1585 port_flow_validate(portid_t port_id, 1586 const struct rte_flow_attr *attr, 1587 const struct rte_flow_item *pattern, 1588 const struct rte_flow_action *actions) 1589 { 1590 struct rte_flow_error error; 1591 1592 /* Poisoning to make sure PMDs update it in case of error. */ 1593 memset(&error, 0x11, sizeof(error)); 1594 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1595 return port_flow_complain(&error); 1596 printf("Flow rule validated\n"); 1597 return 0; 1598 } 1599 1600 /** Update age action context by port_flow pointer. */ 1601 void 1602 update_age_action_context(const struct rte_flow_action *actions, 1603 struct port_flow *pf) 1604 { 1605 struct rte_flow_action_age *age = NULL; 1606 1607 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1608 switch (actions->type) { 1609 case RTE_FLOW_ACTION_TYPE_AGE: 1610 age = (struct rte_flow_action_age *) 1611 (uintptr_t)actions->conf; 1612 age->context = pf; 1613 return; 1614 default: 1615 break; 1616 } 1617 } 1618 } 1619 1620 /** Create flow rule. */ 1621 int 1622 port_flow_create(portid_t port_id, 1623 const struct rte_flow_attr *attr, 1624 const struct rte_flow_item *pattern, 1625 const struct rte_flow_action *actions) 1626 { 1627 struct rte_flow *flow; 1628 struct rte_port *port; 1629 struct port_flow *pf; 1630 uint32_t id = 0; 1631 struct rte_flow_error error; 1632 1633 port = &ports[port_id]; 1634 if (port->flow_list) { 1635 if (port->flow_list->id == UINT32_MAX) { 1636 printf("Highest rule ID is already assigned, delete" 1637 " it first"); 1638 return -ENOMEM; 1639 } 1640 id = port->flow_list->id + 1; 1641 } 1642 pf = port_flow_new(attr, pattern, actions, &error); 1643 if (!pf) 1644 return port_flow_complain(&error); 1645 update_age_action_context(actions, pf); 1646 /* Poisoning to make sure PMDs update it in case of error. */ 1647 memset(&error, 0x22, sizeof(error)); 1648 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1649 if (!flow) { 1650 free(pf); 1651 return port_flow_complain(&error); 1652 } 1653 pf->next = port->flow_list; 1654 pf->id = id; 1655 pf->flow = flow; 1656 port->flow_list = pf; 1657 printf("Flow rule #%u created\n", pf->id); 1658 return 0; 1659 } 1660 1661 /** Destroy a number of flow rules. */ 1662 int 1663 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1664 { 1665 struct rte_port *port; 1666 struct port_flow **tmp; 1667 uint32_t c = 0; 1668 int ret = 0; 1669 1670 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1671 port_id == (portid_t)RTE_PORT_ALL) 1672 return -EINVAL; 1673 port = &ports[port_id]; 1674 tmp = &port->flow_list; 1675 while (*tmp) { 1676 uint32_t i; 1677 1678 for (i = 0; i != n; ++i) { 1679 struct rte_flow_error error; 1680 struct port_flow *pf = *tmp; 1681 1682 if (rule[i] != pf->id) 1683 continue; 1684 /* 1685 * Poisoning to make sure PMDs update it in case 1686 * of error. 1687 */ 1688 memset(&error, 0x33, sizeof(error)); 1689 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1690 ret = port_flow_complain(&error); 1691 continue; 1692 } 1693 printf("Flow rule #%u destroyed\n", pf->id); 1694 *tmp = pf->next; 1695 free(pf); 1696 break; 1697 } 1698 if (i == n) 1699 tmp = &(*tmp)->next; 1700 ++c; 1701 } 1702 return ret; 1703 } 1704 1705 /** Remove all flow rules. */ 1706 int 1707 port_flow_flush(portid_t port_id) 1708 { 1709 struct rte_flow_error error; 1710 struct rte_port *port = &ports[port_id]; 1711 int ret = 0; 1712 1713 if (port->flow_list == NULL) 1714 return ret; 1715 1716 /* Poisoning to make sure PMDs update it in case of error. */ 1717 memset(&error, 0x44, sizeof(error)); 1718 if (rte_flow_flush(port_id, &error)) { 1719 ret = port_flow_complain(&error); 1720 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1721 port_id == (portid_t)RTE_PORT_ALL) 1722 return ret; 1723 } 1724 1725 while (port->flow_list) { 1726 struct port_flow *pf = port->flow_list->next; 1727 1728 free(port->flow_list); 1729 port->flow_list = pf; 1730 } 1731 return ret; 1732 } 1733 1734 /** Dump all flow rules. */ 1735 int 1736 port_flow_dump(portid_t port_id, const char *file_name) 1737 { 1738 int ret = 0; 1739 FILE *file = stdout; 1740 struct rte_flow_error error; 1741 1742 if (file_name && strlen(file_name)) { 1743 file = fopen(file_name, "w"); 1744 if (!file) { 1745 printf("Failed to create file %s: %s\n", file_name, 1746 strerror(errno)); 1747 return -errno; 1748 } 1749 } 1750 ret = rte_flow_dev_dump(port_id, file, &error); 1751 if (ret) { 1752 port_flow_complain(&error); 1753 printf("Failed to dump flow: %s\n", strerror(-ret)); 1754 } else 1755 printf("Flow dump finished\n"); 1756 if (file_name && strlen(file_name)) 1757 fclose(file); 1758 return ret; 1759 } 1760 1761 /** Query a flow rule. */ 1762 int 1763 port_flow_query(portid_t port_id, uint32_t rule, 1764 const struct rte_flow_action *action) 1765 { 1766 struct rte_flow_error error; 1767 struct rte_port *port; 1768 struct port_flow *pf; 1769 const char *name; 1770 union { 1771 struct rte_flow_query_count count; 1772 struct rte_flow_action_rss rss_conf; 1773 } query; 1774 int ret; 1775 1776 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1777 port_id == (portid_t)RTE_PORT_ALL) 1778 return -EINVAL; 1779 port = &ports[port_id]; 1780 for (pf = port->flow_list; pf; pf = pf->next) 1781 if (pf->id == rule) 1782 break; 1783 if (!pf) { 1784 printf("Flow rule #%u not found\n", rule); 1785 return -ENOENT; 1786 } 1787 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1788 &name, sizeof(name), 1789 (void *)(uintptr_t)action->type, &error); 1790 if (ret < 0) 1791 return port_flow_complain(&error); 1792 switch (action->type) { 1793 case RTE_FLOW_ACTION_TYPE_COUNT: 1794 case RTE_FLOW_ACTION_TYPE_RSS: 1795 break; 1796 default: 1797 printf("Cannot query action type %d (%s)\n", 1798 action->type, name); 1799 return -ENOTSUP; 1800 } 1801 /* Poisoning to make sure PMDs update it in case of error. */ 1802 memset(&error, 0x55, sizeof(error)); 1803 memset(&query, 0, sizeof(query)); 1804 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1805 return port_flow_complain(&error); 1806 switch (action->type) { 1807 case RTE_FLOW_ACTION_TYPE_COUNT: 1808 printf("%s:\n" 1809 " hits_set: %u\n" 1810 " bytes_set: %u\n" 1811 " hits: %" PRIu64 "\n" 1812 " bytes: %" PRIu64 "\n", 1813 name, 1814 query.count.hits_set, 1815 query.count.bytes_set, 1816 query.count.hits, 1817 query.count.bytes); 1818 break; 1819 case RTE_FLOW_ACTION_TYPE_RSS: 1820 rss_config_display(&query.rss_conf); 1821 break; 1822 default: 1823 printf("Cannot display result for action type %d (%s)\n", 1824 action->type, name); 1825 break; 1826 } 1827 return 0; 1828 } 1829 1830 /** List simply and destroy all aged flows. */ 1831 void 1832 port_flow_aged(portid_t port_id, uint8_t destroy) 1833 { 1834 void **contexts; 1835 int nb_context, total = 0, idx; 1836 struct rte_flow_error error; 1837 struct port_flow *pf; 1838 1839 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1840 port_id == (portid_t)RTE_PORT_ALL) 1841 return; 1842 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 1843 printf("Port %u total aged flows: %d\n", port_id, total); 1844 if (total < 0) { 1845 port_flow_complain(&error); 1846 return; 1847 } 1848 if (total == 0) 1849 return; 1850 contexts = malloc(sizeof(void *) * total); 1851 if (contexts == NULL) { 1852 printf("Cannot allocate contexts for aged flow\n"); 1853 return; 1854 } 1855 printf("ID\tGroup\tPrio\tAttr\n"); 1856 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 1857 if (nb_context != total) { 1858 printf("Port:%d get aged flows count(%d) != total(%d)\n", 1859 port_id, nb_context, total); 1860 free(contexts); 1861 return; 1862 } 1863 for (idx = 0; idx < nb_context; idx++) { 1864 pf = (struct port_flow *)contexts[idx]; 1865 if (!pf) { 1866 printf("Error: get Null context in port %u\n", port_id); 1867 continue; 1868 } 1869 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n", 1870 pf->id, 1871 pf->rule.attr->group, 1872 pf->rule.attr->priority, 1873 pf->rule.attr->ingress ? 'i' : '-', 1874 pf->rule.attr->egress ? 'e' : '-', 1875 pf->rule.attr->transfer ? 't' : '-'); 1876 } 1877 if (destroy) { 1878 int ret; 1879 uint32_t flow_id; 1880 1881 total = 0; 1882 printf("\n"); 1883 for (idx = 0; idx < nb_context; idx++) { 1884 pf = (struct port_flow *)contexts[idx]; 1885 if (!pf) 1886 continue; 1887 flow_id = pf->id; 1888 ret = port_flow_destroy(port_id, 1, &flow_id); 1889 if (!ret) 1890 total++; 1891 } 1892 printf("%d flows be destroyed\n", total); 1893 } 1894 free(contexts); 1895 } 1896 1897 /** List flow rules. */ 1898 void 1899 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1900 { 1901 struct rte_port *port; 1902 struct port_flow *pf; 1903 struct port_flow *list = NULL; 1904 uint32_t i; 1905 1906 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1907 port_id == (portid_t)RTE_PORT_ALL) 1908 return; 1909 port = &ports[port_id]; 1910 if (!port->flow_list) 1911 return; 1912 /* Sort flows by group, priority and ID. */ 1913 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1914 struct port_flow **tmp; 1915 const struct rte_flow_attr *curr = pf->rule.attr; 1916 1917 if (n) { 1918 /* Filter out unwanted groups. */ 1919 for (i = 0; i != n; ++i) 1920 if (curr->group == group[i]) 1921 break; 1922 if (i == n) 1923 continue; 1924 } 1925 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 1926 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 1927 1928 if (curr->group > comp->group || 1929 (curr->group == comp->group && 1930 curr->priority > comp->priority) || 1931 (curr->group == comp->group && 1932 curr->priority == comp->priority && 1933 pf->id > (*tmp)->id)) 1934 continue; 1935 break; 1936 } 1937 pf->tmp = *tmp; 1938 *tmp = pf; 1939 } 1940 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1941 for (pf = list; pf != NULL; pf = pf->tmp) { 1942 const struct rte_flow_item *item = pf->rule.pattern; 1943 const struct rte_flow_action *action = pf->rule.actions; 1944 const char *name; 1945 1946 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1947 pf->id, 1948 pf->rule.attr->group, 1949 pf->rule.attr->priority, 1950 pf->rule.attr->ingress ? 'i' : '-', 1951 pf->rule.attr->egress ? 'e' : '-', 1952 pf->rule.attr->transfer ? 't' : '-'); 1953 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1954 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 1955 &name, sizeof(name), 1956 (void *)(uintptr_t)item->type, 1957 NULL) <= 0) 1958 name = "[UNKNOWN]"; 1959 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1960 printf("%s ", name); 1961 ++item; 1962 } 1963 printf("=>"); 1964 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1965 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1966 &name, sizeof(name), 1967 (void *)(uintptr_t)action->type, 1968 NULL) <= 0) 1969 name = "[UNKNOWN]"; 1970 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1971 printf(" %s", name); 1972 ++action; 1973 } 1974 printf("\n"); 1975 } 1976 } 1977 1978 /** Restrict ingress traffic to the defined flow rules. */ 1979 int 1980 port_flow_isolate(portid_t port_id, int set) 1981 { 1982 struct rte_flow_error error; 1983 1984 /* Poisoning to make sure PMDs update it in case of error. */ 1985 memset(&error, 0x66, sizeof(error)); 1986 if (rte_flow_isolate(port_id, set, &error)) 1987 return port_flow_complain(&error); 1988 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1989 port_id, 1990 set ? "now restricted" : "not restricted anymore"); 1991 return 0; 1992 } 1993 1994 /* 1995 * RX/TX ring descriptors display functions. 1996 */ 1997 int 1998 rx_queue_id_is_invalid(queueid_t rxq_id) 1999 { 2000 if (rxq_id < nb_rxq) 2001 return 0; 2002 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 2003 return 1; 2004 } 2005 2006 int 2007 tx_queue_id_is_invalid(queueid_t txq_id) 2008 { 2009 if (txq_id < nb_txq) 2010 return 0; 2011 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 2012 return 1; 2013 } 2014 2015 static int 2016 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2017 { 2018 struct rte_port *port = &ports[port_id]; 2019 struct rte_eth_rxq_info rx_qinfo; 2020 int ret; 2021 2022 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2023 if (ret == 0) { 2024 *ring_size = rx_qinfo.nb_desc; 2025 return ret; 2026 } 2027 2028 if (ret != -ENOTSUP) 2029 return ret; 2030 /* 2031 * If the rte_eth_rx_queue_info_get is not support for this PMD, 2032 * ring_size stored in testpmd will be used for validity verification. 2033 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 2034 * being 0, it will use a default value provided by PMDs to setup this 2035 * rxq. If the default value is 0, it will use the 2036 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 2037 */ 2038 if (port->nb_rx_desc[rxq_id]) 2039 *ring_size = port->nb_rx_desc[rxq_id]; 2040 else if (port->dev_info.default_rxportconf.ring_size) 2041 *ring_size = port->dev_info.default_rxportconf.ring_size; 2042 else 2043 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2044 return 0; 2045 } 2046 2047 static int 2048 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2049 { 2050 struct rte_port *port = &ports[port_id]; 2051 struct rte_eth_txq_info tx_qinfo; 2052 int ret; 2053 2054 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2055 if (ret == 0) { 2056 *ring_size = tx_qinfo.nb_desc; 2057 return ret; 2058 } 2059 2060 if (ret != -ENOTSUP) 2061 return ret; 2062 /* 2063 * If the rte_eth_tx_queue_info_get is not support for this PMD, 2064 * ring_size stored in testpmd will be used for validity verification. 2065 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 2066 * being 0, it will use a default value provided by PMDs to setup this 2067 * txq. If the default value is 0, it will use the 2068 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 2069 */ 2070 if (port->nb_tx_desc[txq_id]) 2071 *ring_size = port->nb_tx_desc[txq_id]; 2072 else if (port->dev_info.default_txportconf.ring_size) 2073 *ring_size = port->dev_info.default_txportconf.ring_size; 2074 else 2075 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2076 return 0; 2077 } 2078 2079 static int 2080 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2081 { 2082 uint16_t ring_size; 2083 int ret; 2084 2085 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2086 if (ret) 2087 return 1; 2088 2089 if (rxdesc_id < ring_size) 2090 return 0; 2091 2092 printf("Invalid RX descriptor %u (must be < ring_size=%u)\n", 2093 rxdesc_id, ring_size); 2094 return 1; 2095 } 2096 2097 static int 2098 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2099 { 2100 uint16_t ring_size; 2101 int ret; 2102 2103 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2104 if (ret) 2105 return 1; 2106 2107 if (txdesc_id < ring_size) 2108 return 0; 2109 2110 printf("Invalid TX descriptor %u (must be < ring_size=%u)\n", 2111 txdesc_id, ring_size); 2112 return 1; 2113 } 2114 2115 static const struct rte_memzone * 2116 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2117 { 2118 char mz_name[RTE_MEMZONE_NAMESIZE]; 2119 const struct rte_memzone *mz; 2120 2121 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2122 port_id, q_id, ring_name); 2123 mz = rte_memzone_lookup(mz_name); 2124 if (mz == NULL) 2125 printf("%s ring memory zoneof (port %d, queue %d) not" 2126 "found (zone name = %s\n", 2127 ring_name, port_id, q_id, mz_name); 2128 return mz; 2129 } 2130 2131 union igb_ring_dword { 2132 uint64_t dword; 2133 struct { 2134 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2135 uint32_t lo; 2136 uint32_t hi; 2137 #else 2138 uint32_t hi; 2139 uint32_t lo; 2140 #endif 2141 } words; 2142 }; 2143 2144 struct igb_ring_desc_32_bytes { 2145 union igb_ring_dword lo_dword; 2146 union igb_ring_dword hi_dword; 2147 union igb_ring_dword resv1; 2148 union igb_ring_dword resv2; 2149 }; 2150 2151 struct igb_ring_desc_16_bytes { 2152 union igb_ring_dword lo_dword; 2153 union igb_ring_dword hi_dword; 2154 }; 2155 2156 static void 2157 ring_rxd_display_dword(union igb_ring_dword dword) 2158 { 2159 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2160 (unsigned)dword.words.hi); 2161 } 2162 2163 static void 2164 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2165 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2166 portid_t port_id, 2167 #else 2168 __rte_unused portid_t port_id, 2169 #endif 2170 uint16_t desc_id) 2171 { 2172 struct igb_ring_desc_16_bytes *ring = 2173 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2174 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2175 int ret; 2176 struct rte_eth_dev_info dev_info; 2177 2178 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2179 if (ret != 0) 2180 return; 2181 2182 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2183 /* 32 bytes RX descriptor, i40e only */ 2184 struct igb_ring_desc_32_bytes *ring = 2185 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2186 ring[desc_id].lo_dword.dword = 2187 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2188 ring_rxd_display_dword(ring[desc_id].lo_dword); 2189 ring[desc_id].hi_dword.dword = 2190 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2191 ring_rxd_display_dword(ring[desc_id].hi_dword); 2192 ring[desc_id].resv1.dword = 2193 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2194 ring_rxd_display_dword(ring[desc_id].resv1); 2195 ring[desc_id].resv2.dword = 2196 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2197 ring_rxd_display_dword(ring[desc_id].resv2); 2198 2199 return; 2200 } 2201 #endif 2202 /* 16 bytes RX descriptor */ 2203 ring[desc_id].lo_dword.dword = 2204 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2205 ring_rxd_display_dword(ring[desc_id].lo_dword); 2206 ring[desc_id].hi_dword.dword = 2207 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2208 ring_rxd_display_dword(ring[desc_id].hi_dword); 2209 } 2210 2211 static void 2212 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2213 { 2214 struct igb_ring_desc_16_bytes *ring; 2215 struct igb_ring_desc_16_bytes txd; 2216 2217 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2218 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2219 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2220 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2221 (unsigned)txd.lo_dword.words.lo, 2222 (unsigned)txd.lo_dword.words.hi, 2223 (unsigned)txd.hi_dword.words.lo, 2224 (unsigned)txd.hi_dword.words.hi); 2225 } 2226 2227 void 2228 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2229 { 2230 const struct rte_memzone *rx_mz; 2231 2232 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2233 return; 2234 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2235 if (rx_mz == NULL) 2236 return; 2237 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2238 } 2239 2240 void 2241 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2242 { 2243 const struct rte_memzone *tx_mz; 2244 2245 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2246 return; 2247 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2248 if (tx_mz == NULL) 2249 return; 2250 ring_tx_descriptor_display(tx_mz, txd_id); 2251 } 2252 2253 void 2254 fwd_lcores_config_display(void) 2255 { 2256 lcoreid_t lc_id; 2257 2258 printf("List of forwarding lcores:"); 2259 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2260 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2261 printf("\n"); 2262 } 2263 void 2264 rxtx_config_display(void) 2265 { 2266 portid_t pid; 2267 queueid_t qid; 2268 2269 printf(" %s packet forwarding%s packets/burst=%d\n", 2270 cur_fwd_eng->fwd_mode_name, 2271 retry_enabled == 0 ? "" : " with retry", 2272 nb_pkt_per_burst); 2273 2274 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2275 printf(" packet len=%u - nb packet segments=%d\n", 2276 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2277 2278 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2279 nb_fwd_lcores, nb_fwd_ports); 2280 2281 RTE_ETH_FOREACH_DEV(pid) { 2282 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2283 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2284 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2285 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2286 struct rte_eth_rxq_info rx_qinfo; 2287 struct rte_eth_txq_info tx_qinfo; 2288 uint16_t rx_free_thresh_tmp; 2289 uint16_t tx_free_thresh_tmp; 2290 uint16_t tx_rs_thresh_tmp; 2291 uint16_t nb_rx_desc_tmp; 2292 uint16_t nb_tx_desc_tmp; 2293 uint64_t offloads_tmp; 2294 uint8_t pthresh_tmp; 2295 uint8_t hthresh_tmp; 2296 uint8_t wthresh_tmp; 2297 int32_t rc; 2298 2299 /* per port config */ 2300 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2301 (unsigned int)pid, nb_rxq, nb_txq); 2302 2303 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2304 ports[pid].dev_conf.rxmode.offloads, 2305 ports[pid].dev_conf.txmode.offloads); 2306 2307 /* per rx queue config only for first queue to be less verbose */ 2308 for (qid = 0; qid < 1; qid++) { 2309 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2310 if (rc) { 2311 nb_rx_desc_tmp = nb_rx_desc[qid]; 2312 rx_free_thresh_tmp = 2313 rx_conf[qid].rx_free_thresh; 2314 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2315 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2316 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2317 offloads_tmp = rx_conf[qid].offloads; 2318 } else { 2319 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2320 rx_free_thresh_tmp = 2321 rx_qinfo.conf.rx_free_thresh; 2322 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2323 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2324 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2325 offloads_tmp = rx_qinfo.conf.offloads; 2326 } 2327 2328 printf(" RX queue: %d\n", qid); 2329 printf(" RX desc=%d - RX free threshold=%d\n", 2330 nb_rx_desc_tmp, rx_free_thresh_tmp); 2331 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2332 " wthresh=%d\n", 2333 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2334 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); 2335 } 2336 2337 /* per tx queue config only for first queue to be less verbose */ 2338 for (qid = 0; qid < 1; qid++) { 2339 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2340 if (rc) { 2341 nb_tx_desc_tmp = nb_tx_desc[qid]; 2342 tx_free_thresh_tmp = 2343 tx_conf[qid].tx_free_thresh; 2344 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2345 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2346 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2347 offloads_tmp = tx_conf[qid].offloads; 2348 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2349 } else { 2350 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2351 tx_free_thresh_tmp = 2352 tx_qinfo.conf.tx_free_thresh; 2353 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2354 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2355 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2356 offloads_tmp = tx_qinfo.conf.offloads; 2357 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2358 } 2359 2360 printf(" TX queue: %d\n", qid); 2361 printf(" TX desc=%d - TX free threshold=%d\n", 2362 nb_tx_desc_tmp, tx_free_thresh_tmp); 2363 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2364 " wthresh=%d\n", 2365 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2366 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2367 offloads_tmp, tx_rs_thresh_tmp); 2368 } 2369 } 2370 } 2371 2372 void 2373 port_rss_reta_info(portid_t port_id, 2374 struct rte_eth_rss_reta_entry64 *reta_conf, 2375 uint16_t nb_entries) 2376 { 2377 uint16_t i, idx, shift; 2378 int ret; 2379 2380 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2381 return; 2382 2383 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2384 if (ret != 0) { 2385 printf("Failed to get RSS RETA info, return code = %d\n", ret); 2386 return; 2387 } 2388 2389 for (i = 0; i < nb_entries; i++) { 2390 idx = i / RTE_RETA_GROUP_SIZE; 2391 shift = i % RTE_RETA_GROUP_SIZE; 2392 if (!(reta_conf[idx].mask & (1ULL << shift))) 2393 continue; 2394 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2395 i, reta_conf[idx].reta[shift]); 2396 } 2397 } 2398 2399 /* 2400 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 2401 * key of the port. 2402 */ 2403 void 2404 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2405 { 2406 struct rte_eth_rss_conf rss_conf = {0}; 2407 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2408 uint64_t rss_hf; 2409 uint8_t i; 2410 int diag; 2411 struct rte_eth_dev_info dev_info; 2412 uint8_t hash_key_size; 2413 int ret; 2414 2415 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2416 return; 2417 2418 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2419 if (ret != 0) 2420 return; 2421 2422 if (dev_info.hash_key_size > 0 && 2423 dev_info.hash_key_size <= sizeof(rss_key)) 2424 hash_key_size = dev_info.hash_key_size; 2425 else { 2426 printf("dev_info did not provide a valid hash key size\n"); 2427 return; 2428 } 2429 2430 /* Get RSS hash key if asked to display it */ 2431 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 2432 rss_conf.rss_key_len = hash_key_size; 2433 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2434 if (diag != 0) { 2435 switch (diag) { 2436 case -ENODEV: 2437 printf("port index %d invalid\n", port_id); 2438 break; 2439 case -ENOTSUP: 2440 printf("operation not supported by device\n"); 2441 break; 2442 default: 2443 printf("operation failed - diag=%d\n", diag); 2444 break; 2445 } 2446 return; 2447 } 2448 rss_hf = rss_conf.rss_hf; 2449 if (rss_hf == 0) { 2450 printf("RSS disabled\n"); 2451 return; 2452 } 2453 printf("RSS functions:\n "); 2454 for (i = 0; rss_type_table[i].str; i++) { 2455 if (rss_hf & rss_type_table[i].rss_type) 2456 printf("%s ", rss_type_table[i].str); 2457 } 2458 printf("\n"); 2459 if (!show_rss_key) 2460 return; 2461 printf("RSS key:\n"); 2462 for (i = 0; i < hash_key_size; i++) 2463 printf("%02X", rss_key[i]); 2464 printf("\n"); 2465 } 2466 2467 void 2468 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2469 uint hash_key_len) 2470 { 2471 struct rte_eth_rss_conf rss_conf; 2472 int diag; 2473 unsigned int i; 2474 2475 rss_conf.rss_key = NULL; 2476 rss_conf.rss_key_len = hash_key_len; 2477 rss_conf.rss_hf = 0; 2478 for (i = 0; rss_type_table[i].str; i++) { 2479 if (!strcmp(rss_type_table[i].str, rss_type)) 2480 rss_conf.rss_hf = rss_type_table[i].rss_type; 2481 } 2482 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2483 if (diag == 0) { 2484 rss_conf.rss_key = hash_key; 2485 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2486 } 2487 if (diag == 0) 2488 return; 2489 2490 switch (diag) { 2491 case -ENODEV: 2492 printf("port index %d invalid\n", port_id); 2493 break; 2494 case -ENOTSUP: 2495 printf("operation not supported by device\n"); 2496 break; 2497 default: 2498 printf("operation failed - diag=%d\n", diag); 2499 break; 2500 } 2501 } 2502 2503 /* 2504 * Setup forwarding configuration for each logical core. 2505 */ 2506 static void 2507 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2508 { 2509 streamid_t nb_fs_per_lcore; 2510 streamid_t nb_fs; 2511 streamid_t sm_id; 2512 lcoreid_t nb_extra; 2513 lcoreid_t nb_fc; 2514 lcoreid_t nb_lc; 2515 lcoreid_t lc_id; 2516 2517 nb_fs = cfg->nb_fwd_streams; 2518 nb_fc = cfg->nb_fwd_lcores; 2519 if (nb_fs <= nb_fc) { 2520 nb_fs_per_lcore = 1; 2521 nb_extra = 0; 2522 } else { 2523 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2524 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2525 } 2526 2527 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2528 sm_id = 0; 2529 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2530 fwd_lcores[lc_id]->stream_idx = sm_id; 2531 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2532 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2533 } 2534 2535 /* 2536 * Assign extra remaining streams, if any. 2537 */ 2538 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2539 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2540 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2541 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2542 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2543 } 2544 } 2545 2546 static portid_t 2547 fwd_topology_tx_port_get(portid_t rxp) 2548 { 2549 static int warning_once = 1; 2550 2551 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2552 2553 switch (port_topology) { 2554 default: 2555 case PORT_TOPOLOGY_PAIRED: 2556 if ((rxp & 0x1) == 0) { 2557 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2558 return rxp + 1; 2559 if (warning_once) { 2560 printf("\nWarning! port-topology=paired" 2561 " and odd forward ports number," 2562 " the last port will pair with" 2563 " itself.\n\n"); 2564 warning_once = 0; 2565 } 2566 return rxp; 2567 } 2568 return rxp - 1; 2569 case PORT_TOPOLOGY_CHAINED: 2570 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2571 case PORT_TOPOLOGY_LOOP: 2572 return rxp; 2573 } 2574 } 2575 2576 static void 2577 simple_fwd_config_setup(void) 2578 { 2579 portid_t i; 2580 2581 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2582 cur_fwd_config.nb_fwd_streams = 2583 (streamid_t) cur_fwd_config.nb_fwd_ports; 2584 2585 /* reinitialize forwarding streams */ 2586 init_fwd_streams(); 2587 2588 /* 2589 * In the simple forwarding test, the number of forwarding cores 2590 * must be lower or equal to the number of forwarding ports. 2591 */ 2592 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2593 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2594 cur_fwd_config.nb_fwd_lcores = 2595 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2596 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2597 2598 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2599 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2600 fwd_streams[i]->rx_queue = 0; 2601 fwd_streams[i]->tx_port = 2602 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2603 fwd_streams[i]->tx_queue = 0; 2604 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2605 fwd_streams[i]->retry_enabled = retry_enabled; 2606 } 2607 } 2608 2609 /** 2610 * For the RSS forwarding test all streams distributed over lcores. Each stream 2611 * being composed of a RX queue to poll on a RX port for input messages, 2612 * associated with a TX queue of a TX port where to send forwarded packets. 2613 */ 2614 static void 2615 rss_fwd_config_setup(void) 2616 { 2617 portid_t rxp; 2618 portid_t txp; 2619 queueid_t rxq; 2620 queueid_t nb_q; 2621 streamid_t sm_id; 2622 2623 nb_q = nb_rxq; 2624 if (nb_q > nb_txq) 2625 nb_q = nb_txq; 2626 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2627 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2628 cur_fwd_config.nb_fwd_streams = 2629 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2630 2631 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2632 cur_fwd_config.nb_fwd_lcores = 2633 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2634 2635 /* reinitialize forwarding streams */ 2636 init_fwd_streams(); 2637 2638 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2639 rxp = 0; rxq = 0; 2640 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2641 struct fwd_stream *fs; 2642 2643 fs = fwd_streams[sm_id]; 2644 txp = fwd_topology_tx_port_get(rxp); 2645 fs->rx_port = fwd_ports_ids[rxp]; 2646 fs->rx_queue = rxq; 2647 fs->tx_port = fwd_ports_ids[txp]; 2648 fs->tx_queue = rxq; 2649 fs->peer_addr = fs->tx_port; 2650 fs->retry_enabled = retry_enabled; 2651 rxp++; 2652 if (rxp < nb_fwd_ports) 2653 continue; 2654 rxp = 0; 2655 rxq++; 2656 } 2657 } 2658 2659 /** 2660 * For the DCB forwarding test, each core is assigned on each traffic class. 2661 * 2662 * Each core is assigned a multi-stream, each stream being composed of 2663 * a RX queue to poll on a RX port for input messages, associated with 2664 * a TX queue of a TX port where to send forwarded packets. All RX and 2665 * TX queues are mapping to the same traffic class. 2666 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2667 * the same core 2668 */ 2669 static void 2670 dcb_fwd_config_setup(void) 2671 { 2672 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2673 portid_t txp, rxp = 0; 2674 queueid_t txq, rxq = 0; 2675 lcoreid_t lc_id; 2676 uint16_t nb_rx_queue, nb_tx_queue; 2677 uint16_t i, j, k, sm_id = 0; 2678 uint8_t tc = 0; 2679 2680 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2681 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2682 cur_fwd_config.nb_fwd_streams = 2683 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2684 2685 /* reinitialize forwarding streams */ 2686 init_fwd_streams(); 2687 sm_id = 0; 2688 txp = 1; 2689 /* get the dcb info on the first RX and TX ports */ 2690 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2691 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2692 2693 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2694 fwd_lcores[lc_id]->stream_nb = 0; 2695 fwd_lcores[lc_id]->stream_idx = sm_id; 2696 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2697 /* if the nb_queue is zero, means this tc is 2698 * not enabled on the POOL 2699 */ 2700 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2701 break; 2702 k = fwd_lcores[lc_id]->stream_nb + 2703 fwd_lcores[lc_id]->stream_idx; 2704 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2705 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2706 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2707 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2708 for (j = 0; j < nb_rx_queue; j++) { 2709 struct fwd_stream *fs; 2710 2711 fs = fwd_streams[k + j]; 2712 fs->rx_port = fwd_ports_ids[rxp]; 2713 fs->rx_queue = rxq + j; 2714 fs->tx_port = fwd_ports_ids[txp]; 2715 fs->tx_queue = txq + j % nb_tx_queue; 2716 fs->peer_addr = fs->tx_port; 2717 fs->retry_enabled = retry_enabled; 2718 } 2719 fwd_lcores[lc_id]->stream_nb += 2720 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2721 } 2722 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2723 2724 tc++; 2725 if (tc < rxp_dcb_info.nb_tcs) 2726 continue; 2727 /* Restart from TC 0 on next RX port */ 2728 tc = 0; 2729 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2730 rxp = (portid_t) 2731 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2732 else 2733 rxp++; 2734 if (rxp >= nb_fwd_ports) 2735 return; 2736 /* get the dcb information on next RX and TX ports */ 2737 if ((rxp & 0x1) == 0) 2738 txp = (portid_t) (rxp + 1); 2739 else 2740 txp = (portid_t) (rxp - 1); 2741 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2742 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2743 } 2744 } 2745 2746 static void 2747 icmp_echo_config_setup(void) 2748 { 2749 portid_t rxp; 2750 queueid_t rxq; 2751 lcoreid_t lc_id; 2752 uint16_t sm_id; 2753 2754 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2755 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2756 (nb_txq * nb_fwd_ports); 2757 else 2758 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2759 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2760 cur_fwd_config.nb_fwd_streams = 2761 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2762 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2763 cur_fwd_config.nb_fwd_lcores = 2764 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2765 if (verbose_level > 0) { 2766 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2767 __FUNCTION__, 2768 cur_fwd_config.nb_fwd_lcores, 2769 cur_fwd_config.nb_fwd_ports, 2770 cur_fwd_config.nb_fwd_streams); 2771 } 2772 2773 /* reinitialize forwarding streams */ 2774 init_fwd_streams(); 2775 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2776 rxp = 0; rxq = 0; 2777 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2778 if (verbose_level > 0) 2779 printf(" core=%d: \n", lc_id); 2780 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2781 struct fwd_stream *fs; 2782 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2783 fs->rx_port = fwd_ports_ids[rxp]; 2784 fs->rx_queue = rxq; 2785 fs->tx_port = fs->rx_port; 2786 fs->tx_queue = rxq; 2787 fs->peer_addr = fs->tx_port; 2788 fs->retry_enabled = retry_enabled; 2789 if (verbose_level > 0) 2790 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2791 sm_id, fs->rx_port, fs->rx_queue, 2792 fs->tx_queue); 2793 rxq = (queueid_t) (rxq + 1); 2794 if (rxq == nb_rxq) { 2795 rxq = 0; 2796 rxp = (portid_t) (rxp + 1); 2797 } 2798 } 2799 } 2800 } 2801 2802 void 2803 fwd_config_setup(void) 2804 { 2805 cur_fwd_config.fwd_eng = cur_fwd_eng; 2806 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2807 icmp_echo_config_setup(); 2808 return; 2809 } 2810 2811 if ((nb_rxq > 1) && (nb_txq > 1)){ 2812 if (dcb_config) 2813 dcb_fwd_config_setup(); 2814 else 2815 rss_fwd_config_setup(); 2816 } 2817 else 2818 simple_fwd_config_setup(); 2819 } 2820 2821 static const char * 2822 mp_alloc_to_str(uint8_t mode) 2823 { 2824 switch (mode) { 2825 case MP_ALLOC_NATIVE: 2826 return "native"; 2827 case MP_ALLOC_ANON: 2828 return "anon"; 2829 case MP_ALLOC_XMEM: 2830 return "xmem"; 2831 case MP_ALLOC_XMEM_HUGE: 2832 return "xmemhuge"; 2833 case MP_ALLOC_XBUF: 2834 return "xbuf"; 2835 default: 2836 return "invalid"; 2837 } 2838 } 2839 2840 void 2841 pkt_fwd_config_display(struct fwd_config *cfg) 2842 { 2843 struct fwd_stream *fs; 2844 lcoreid_t lc_id; 2845 streamid_t sm_id; 2846 2847 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2848 "NUMA support %s, MP allocation mode: %s\n", 2849 cfg->fwd_eng->fwd_mode_name, 2850 retry_enabled == 0 ? "" : " with retry", 2851 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2852 numa_support == 1 ? "enabled" : "disabled", 2853 mp_alloc_to_str(mp_alloc_type)); 2854 2855 if (retry_enabled) 2856 printf("TX retry num: %u, delay between TX retries: %uus\n", 2857 burst_tx_retry_num, burst_tx_delay_time); 2858 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2859 printf("Logical Core %u (socket %u) forwards packets on " 2860 "%d streams:", 2861 fwd_lcores_cpuids[lc_id], 2862 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2863 fwd_lcores[lc_id]->stream_nb); 2864 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2865 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2866 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2867 "P=%d/Q=%d (socket %u) ", 2868 fs->rx_port, fs->rx_queue, 2869 ports[fs->rx_port].socket_id, 2870 fs->tx_port, fs->tx_queue, 2871 ports[fs->tx_port].socket_id); 2872 print_ethaddr("peer=", 2873 &peer_eth_addrs[fs->peer_addr]); 2874 } 2875 printf("\n"); 2876 } 2877 printf("\n"); 2878 } 2879 2880 void 2881 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2882 { 2883 struct rte_ether_addr new_peer_addr; 2884 if (!rte_eth_dev_is_valid_port(port_id)) { 2885 printf("Error: Invalid port number %i\n", port_id); 2886 return; 2887 } 2888 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 2889 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2890 return; 2891 } 2892 peer_eth_addrs[port_id] = new_peer_addr; 2893 } 2894 2895 int 2896 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2897 { 2898 unsigned int i; 2899 unsigned int lcore_cpuid; 2900 int record_now; 2901 2902 record_now = 0; 2903 again: 2904 for (i = 0; i < nb_lc; i++) { 2905 lcore_cpuid = lcorelist[i]; 2906 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2907 printf("lcore %u not enabled\n", lcore_cpuid); 2908 return -1; 2909 } 2910 if (lcore_cpuid == rte_get_master_lcore()) { 2911 printf("lcore %u cannot be masked on for running " 2912 "packet forwarding, which is the master lcore " 2913 "and reserved for command line parsing only\n", 2914 lcore_cpuid); 2915 return -1; 2916 } 2917 if (record_now) 2918 fwd_lcores_cpuids[i] = lcore_cpuid; 2919 } 2920 if (record_now == 0) { 2921 record_now = 1; 2922 goto again; 2923 } 2924 nb_cfg_lcores = (lcoreid_t) nb_lc; 2925 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2926 printf("previous number of forwarding cores %u - changed to " 2927 "number of configured cores %u\n", 2928 (unsigned int) nb_fwd_lcores, nb_lc); 2929 nb_fwd_lcores = (lcoreid_t) nb_lc; 2930 } 2931 2932 return 0; 2933 } 2934 2935 int 2936 set_fwd_lcores_mask(uint64_t lcoremask) 2937 { 2938 unsigned int lcorelist[64]; 2939 unsigned int nb_lc; 2940 unsigned int i; 2941 2942 if (lcoremask == 0) { 2943 printf("Invalid NULL mask of cores\n"); 2944 return -1; 2945 } 2946 nb_lc = 0; 2947 for (i = 0; i < 64; i++) { 2948 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2949 continue; 2950 lcorelist[nb_lc++] = i; 2951 } 2952 return set_fwd_lcores_list(lcorelist, nb_lc); 2953 } 2954 2955 void 2956 set_fwd_lcores_number(uint16_t nb_lc) 2957 { 2958 if (nb_lc > nb_cfg_lcores) { 2959 printf("nb fwd cores %u > %u (max. number of configured " 2960 "lcores) - ignored\n", 2961 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2962 return; 2963 } 2964 nb_fwd_lcores = (lcoreid_t) nb_lc; 2965 printf("Number of forwarding cores set to %u\n", 2966 (unsigned int) nb_fwd_lcores); 2967 } 2968 2969 void 2970 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2971 { 2972 unsigned int i; 2973 portid_t port_id; 2974 int record_now; 2975 2976 record_now = 0; 2977 again: 2978 for (i = 0; i < nb_pt; i++) { 2979 port_id = (portid_t) portlist[i]; 2980 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2981 return; 2982 if (record_now) 2983 fwd_ports_ids[i] = port_id; 2984 } 2985 if (record_now == 0) { 2986 record_now = 1; 2987 goto again; 2988 } 2989 nb_cfg_ports = (portid_t) nb_pt; 2990 if (nb_fwd_ports != (portid_t) nb_pt) { 2991 printf("previous number of forwarding ports %u - changed to " 2992 "number of configured ports %u\n", 2993 (unsigned int) nb_fwd_ports, nb_pt); 2994 nb_fwd_ports = (portid_t) nb_pt; 2995 } 2996 } 2997 2998 /** 2999 * Parse the user input and obtain the list of forwarding ports 3000 * 3001 * @param[in] list 3002 * String containing the user input. User can specify 3003 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3004 * For example, if the user wants to use all the available 3005 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3006 * If the user wants to use only the ports 1,2 then the input 3007 * is 1,2. 3008 * valid characters are '-' and ',' 3009 * @param[out] values 3010 * This array will be filled with a list of port IDs 3011 * based on the user input 3012 * Note that duplicate entries are discarded and only the first 3013 * count entries in this array are port IDs and all the rest 3014 * will contain default values 3015 * @param[in] maxsize 3016 * This parameter denotes 2 things 3017 * 1) Number of elements in the values array 3018 * 2) Maximum value of each element in the values array 3019 * @return 3020 * On success, returns total count of parsed port IDs 3021 * On failure, returns 0 3022 */ 3023 static unsigned int 3024 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3025 { 3026 unsigned int count = 0; 3027 char *end = NULL; 3028 int min, max; 3029 int value, i; 3030 unsigned int marked[maxsize]; 3031 3032 if (list == NULL || values == NULL) 3033 return 0; 3034 3035 for (i = 0; i < (int)maxsize; i++) 3036 marked[i] = 0; 3037 3038 min = INT_MAX; 3039 3040 do { 3041 /*Remove the blank spaces if any*/ 3042 while (isblank(*list)) 3043 list++; 3044 if (*list == '\0') 3045 break; 3046 errno = 0; 3047 value = strtol(list, &end, 10); 3048 if (errno || end == NULL) 3049 return 0; 3050 if (value < 0 || value >= (int)maxsize) 3051 return 0; 3052 while (isblank(*end)) 3053 end++; 3054 if (*end == '-' && min == INT_MAX) { 3055 min = value; 3056 } else if ((*end == ',') || (*end == '\0')) { 3057 max = value; 3058 if (min == INT_MAX) 3059 min = value; 3060 for (i = min; i <= max; i++) { 3061 if (count < maxsize) { 3062 if (marked[i]) 3063 continue; 3064 values[count] = i; 3065 marked[i] = 1; 3066 count++; 3067 } 3068 } 3069 min = INT_MAX; 3070 } else 3071 return 0; 3072 list = end + 1; 3073 } while (*end != '\0'); 3074 3075 return count; 3076 } 3077 3078 void 3079 parse_fwd_portlist(const char *portlist) 3080 { 3081 unsigned int portcount; 3082 unsigned int portindex[RTE_MAX_ETHPORTS]; 3083 unsigned int i, valid_port_count = 0; 3084 3085 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3086 if (!portcount) 3087 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3088 3089 /* 3090 * Here we verify the validity of the ports 3091 * and thereby calculate the total number of 3092 * valid ports 3093 */ 3094 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3095 if (rte_eth_dev_is_valid_port(portindex[i])) { 3096 portindex[valid_port_count] = portindex[i]; 3097 valid_port_count++; 3098 } 3099 } 3100 3101 set_fwd_ports_list(portindex, valid_port_count); 3102 } 3103 3104 void 3105 set_fwd_ports_mask(uint64_t portmask) 3106 { 3107 unsigned int portlist[64]; 3108 unsigned int nb_pt; 3109 unsigned int i; 3110 3111 if (portmask == 0) { 3112 printf("Invalid NULL mask of ports\n"); 3113 return; 3114 } 3115 nb_pt = 0; 3116 RTE_ETH_FOREACH_DEV(i) { 3117 if (! ((uint64_t)(1ULL << i) & portmask)) 3118 continue; 3119 portlist[nb_pt++] = i; 3120 } 3121 set_fwd_ports_list(portlist, nb_pt); 3122 } 3123 3124 void 3125 set_fwd_ports_number(uint16_t nb_pt) 3126 { 3127 if (nb_pt > nb_cfg_ports) { 3128 printf("nb fwd ports %u > %u (number of configured " 3129 "ports) - ignored\n", 3130 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3131 return; 3132 } 3133 nb_fwd_ports = (portid_t) nb_pt; 3134 printf("Number of forwarding ports set to %u\n", 3135 (unsigned int) nb_fwd_ports); 3136 } 3137 3138 int 3139 port_is_forwarding(portid_t port_id) 3140 { 3141 unsigned int i; 3142 3143 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3144 return -1; 3145 3146 for (i = 0; i < nb_fwd_ports; i++) { 3147 if (fwd_ports_ids[i] == port_id) 3148 return 1; 3149 } 3150 3151 return 0; 3152 } 3153 3154 void 3155 set_nb_pkt_per_burst(uint16_t nb) 3156 { 3157 if (nb > MAX_PKT_BURST) { 3158 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 3159 " ignored\n", 3160 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3161 return; 3162 } 3163 nb_pkt_per_burst = nb; 3164 printf("Number of packets per burst set to %u\n", 3165 (unsigned int) nb_pkt_per_burst); 3166 } 3167 3168 static const char * 3169 tx_split_get_name(enum tx_pkt_split split) 3170 { 3171 uint32_t i; 3172 3173 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3174 if (tx_split_name[i].split == split) 3175 return tx_split_name[i].name; 3176 } 3177 return NULL; 3178 } 3179 3180 void 3181 set_tx_pkt_split(const char *name) 3182 { 3183 uint32_t i; 3184 3185 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3186 if (strcmp(tx_split_name[i].name, name) == 0) { 3187 tx_pkt_split = tx_split_name[i].split; 3188 return; 3189 } 3190 } 3191 printf("unknown value: \"%s\"\n", name); 3192 } 3193 3194 void 3195 show_tx_pkt_segments(void) 3196 { 3197 uint32_t i, n; 3198 const char *split; 3199 3200 n = tx_pkt_nb_segs; 3201 split = tx_split_get_name(tx_pkt_split); 3202 3203 printf("Number of segments: %u\n", n); 3204 printf("Segment sizes: "); 3205 for (i = 0; i != n - 1; i++) 3206 printf("%hu,", tx_pkt_seg_lengths[i]); 3207 printf("%hu\n", tx_pkt_seg_lengths[i]); 3208 printf("Split packet: %s\n", split); 3209 } 3210 3211 static bool 3212 nb_segs_is_invalid(unsigned int nb_segs) 3213 { 3214 uint16_t ring_size; 3215 uint16_t queue_id; 3216 uint16_t port_id; 3217 int ret; 3218 3219 RTE_ETH_FOREACH_DEV(port_id) { 3220 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 3221 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 3222 3223 if (ret) 3224 return true; 3225 3226 if (ring_size < nb_segs) { 3227 printf("nb segments per TX packets=%u >= " 3228 "TX queue(%u) ring_size=%u - ignored\n", 3229 nb_segs, queue_id, ring_size); 3230 return true; 3231 } 3232 } 3233 } 3234 3235 return false; 3236 } 3237 3238 void 3239 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 3240 { 3241 uint16_t tx_pkt_len; 3242 unsigned i; 3243 3244 if (nb_segs_is_invalid(nb_segs)) 3245 return; 3246 3247 /* 3248 * Check that each segment length is greater or equal than 3249 * the mbuf data sise. 3250 * Check also that the total packet length is greater or equal than the 3251 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 3252 * 20 + 8). 3253 */ 3254 tx_pkt_len = 0; 3255 for (i = 0; i < nb_segs; i++) { 3256 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 3257 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 3258 i, seg_lengths[i], (unsigned) mbuf_data_size); 3259 return; 3260 } 3261 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 3262 } 3263 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 3264 printf("total packet length=%u < %d - give up\n", 3265 (unsigned) tx_pkt_len, 3266 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 3267 return; 3268 } 3269 3270 for (i = 0; i < nb_segs; i++) 3271 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3272 3273 tx_pkt_length = tx_pkt_len; 3274 tx_pkt_nb_segs = (uint8_t) nb_segs; 3275 } 3276 3277 void 3278 show_tx_pkt_times(void) 3279 { 3280 printf("Interburst gap: %u\n", tx_pkt_times_inter); 3281 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 3282 } 3283 3284 void 3285 set_tx_pkt_times(unsigned int *tx_times) 3286 { 3287 uint16_t port_id; 3288 int offload_found = 0; 3289 int offset; 3290 int flag; 3291 3292 static const struct rte_mbuf_dynfield desc_offs = { 3293 .name = RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, 3294 .size = sizeof(uint64_t), 3295 .align = __alignof__(uint64_t), 3296 }; 3297 static const struct rte_mbuf_dynflag desc_flag = { 3298 .name = RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, 3299 }; 3300 3301 RTE_ETH_FOREACH_DEV(port_id) { 3302 struct rte_eth_dev_info dev_info = { 0 }; 3303 int ret; 3304 3305 ret = rte_eth_dev_info_get(port_id, &dev_info); 3306 if (ret == 0 && dev_info.tx_offload_capa & 3307 DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) { 3308 offload_found = 1; 3309 break; 3310 } 3311 } 3312 if (!offload_found) { 3313 printf("No device supporting Tx timestamp scheduling found, " 3314 "dynamic flag and field not registered\n"); 3315 return; 3316 } 3317 offset = rte_mbuf_dynfield_register(&desc_offs); 3318 if (offset < 0 && rte_errno != EEXIST) 3319 printf("Dynamic timestamp field registration error: %d", 3320 rte_errno); 3321 flag = rte_mbuf_dynflag_register(&desc_flag); 3322 if (flag < 0 && rte_errno != EEXIST) 3323 printf("Dynamic timestamp flag registration error: %d", 3324 rte_errno); 3325 tx_pkt_times_inter = tx_times[0]; 3326 tx_pkt_times_intra = tx_times[1]; 3327 } 3328 3329 void 3330 setup_gro(const char *onoff, portid_t port_id) 3331 { 3332 if (!rte_eth_dev_is_valid_port(port_id)) { 3333 printf("invalid port id %u\n", port_id); 3334 return; 3335 } 3336 if (test_done == 0) { 3337 printf("Before enable/disable GRO," 3338 " please stop forwarding first\n"); 3339 return; 3340 } 3341 if (strcmp(onoff, "on") == 0) { 3342 if (gro_ports[port_id].enable != 0) { 3343 printf("Port %u has enabled GRO. Please" 3344 " disable GRO first\n", port_id); 3345 return; 3346 } 3347 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3348 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 3349 gro_ports[port_id].param.max_flow_num = 3350 GRO_DEFAULT_FLOW_NUM; 3351 gro_ports[port_id].param.max_item_per_flow = 3352 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 3353 } 3354 gro_ports[port_id].enable = 1; 3355 } else { 3356 if (gro_ports[port_id].enable == 0) { 3357 printf("Port %u has disabled GRO\n", port_id); 3358 return; 3359 } 3360 gro_ports[port_id].enable = 0; 3361 } 3362 } 3363 3364 void 3365 setup_gro_flush_cycles(uint8_t cycles) 3366 { 3367 if (test_done == 0) { 3368 printf("Before change flush interval for GRO," 3369 " please stop forwarding first.\n"); 3370 return; 3371 } 3372 3373 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 3374 GRO_DEFAULT_FLUSH_CYCLES) { 3375 printf("The flushing cycle be in the range" 3376 " of 1 to %u. Revert to the default" 3377 " value %u.\n", 3378 GRO_MAX_FLUSH_CYCLES, 3379 GRO_DEFAULT_FLUSH_CYCLES); 3380 cycles = GRO_DEFAULT_FLUSH_CYCLES; 3381 } 3382 3383 gro_flush_cycles = cycles; 3384 } 3385 3386 void 3387 show_gro(portid_t port_id) 3388 { 3389 struct rte_gro_param *param; 3390 uint32_t max_pkts_num; 3391 3392 param = &gro_ports[port_id].param; 3393 3394 if (!rte_eth_dev_is_valid_port(port_id)) { 3395 printf("Invalid port id %u.\n", port_id); 3396 return; 3397 } 3398 if (gro_ports[port_id].enable) { 3399 printf("GRO type: TCP/IPv4\n"); 3400 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3401 max_pkts_num = param->max_flow_num * 3402 param->max_item_per_flow; 3403 } else 3404 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 3405 printf("Max number of packets to perform GRO: %u\n", 3406 max_pkts_num); 3407 printf("Flushing cycles: %u\n", gro_flush_cycles); 3408 } else 3409 printf("Port %u doesn't enable GRO.\n", port_id); 3410 } 3411 3412 void 3413 setup_gso(const char *mode, portid_t port_id) 3414 { 3415 if (!rte_eth_dev_is_valid_port(port_id)) { 3416 printf("invalid port id %u\n", port_id); 3417 return; 3418 } 3419 if (strcmp(mode, "on") == 0) { 3420 if (test_done == 0) { 3421 printf("before enabling GSO," 3422 " please stop forwarding first\n"); 3423 return; 3424 } 3425 gso_ports[port_id].enable = 1; 3426 } else if (strcmp(mode, "off") == 0) { 3427 if (test_done == 0) { 3428 printf("before disabling GSO," 3429 " please stop forwarding first\n"); 3430 return; 3431 } 3432 gso_ports[port_id].enable = 0; 3433 } 3434 } 3435 3436 char* 3437 list_pkt_forwarding_modes(void) 3438 { 3439 static char fwd_modes[128] = ""; 3440 const char *separator = "|"; 3441 struct fwd_engine *fwd_eng; 3442 unsigned i = 0; 3443 3444 if (strlen (fwd_modes) == 0) { 3445 while ((fwd_eng = fwd_engines[i++]) != NULL) { 3446 strncat(fwd_modes, fwd_eng->fwd_mode_name, 3447 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 3448 strncat(fwd_modes, separator, 3449 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 3450 } 3451 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 3452 } 3453 3454 return fwd_modes; 3455 } 3456 3457 char* 3458 list_pkt_forwarding_retry_modes(void) 3459 { 3460 static char fwd_modes[128] = ""; 3461 const char *separator = "|"; 3462 struct fwd_engine *fwd_eng; 3463 unsigned i = 0; 3464 3465 if (strlen(fwd_modes) == 0) { 3466 while ((fwd_eng = fwd_engines[i++]) != NULL) { 3467 if (fwd_eng == &rx_only_engine) 3468 continue; 3469 strncat(fwd_modes, fwd_eng->fwd_mode_name, 3470 sizeof(fwd_modes) - 3471 strlen(fwd_modes) - 1); 3472 strncat(fwd_modes, separator, 3473 sizeof(fwd_modes) - 3474 strlen(fwd_modes) - 1); 3475 } 3476 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 3477 } 3478 3479 return fwd_modes; 3480 } 3481 3482 void 3483 set_pkt_forwarding_mode(const char *fwd_mode_name) 3484 { 3485 struct fwd_engine *fwd_eng; 3486 unsigned i; 3487 3488 i = 0; 3489 while ((fwd_eng = fwd_engines[i]) != NULL) { 3490 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 3491 printf("Set %s packet forwarding mode%s\n", 3492 fwd_mode_name, 3493 retry_enabled == 0 ? "" : " with retry"); 3494 cur_fwd_eng = fwd_eng; 3495 return; 3496 } 3497 i++; 3498 } 3499 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 3500 } 3501 3502 void 3503 add_rx_dump_callbacks(portid_t portid) 3504 { 3505 struct rte_eth_dev_info dev_info; 3506 uint16_t queue; 3507 int ret; 3508 3509 if (port_id_is_invalid(portid, ENABLED_WARN)) 3510 return; 3511 3512 ret = eth_dev_info_get_print_err(portid, &dev_info); 3513 if (ret != 0) 3514 return; 3515 3516 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 3517 if (!ports[portid].rx_dump_cb[queue]) 3518 ports[portid].rx_dump_cb[queue] = 3519 rte_eth_add_rx_callback(portid, queue, 3520 dump_rx_pkts, NULL); 3521 } 3522 3523 void 3524 add_tx_dump_callbacks(portid_t portid) 3525 { 3526 struct rte_eth_dev_info dev_info; 3527 uint16_t queue; 3528 int ret; 3529 3530 if (port_id_is_invalid(portid, ENABLED_WARN)) 3531 return; 3532 3533 ret = eth_dev_info_get_print_err(portid, &dev_info); 3534 if (ret != 0) 3535 return; 3536 3537 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 3538 if (!ports[portid].tx_dump_cb[queue]) 3539 ports[portid].tx_dump_cb[queue] = 3540 rte_eth_add_tx_callback(portid, queue, 3541 dump_tx_pkts, NULL); 3542 } 3543 3544 void 3545 remove_rx_dump_callbacks(portid_t portid) 3546 { 3547 struct rte_eth_dev_info dev_info; 3548 uint16_t queue; 3549 int ret; 3550 3551 if (port_id_is_invalid(portid, ENABLED_WARN)) 3552 return; 3553 3554 ret = eth_dev_info_get_print_err(portid, &dev_info); 3555 if (ret != 0) 3556 return; 3557 3558 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 3559 if (ports[portid].rx_dump_cb[queue]) { 3560 rte_eth_remove_rx_callback(portid, queue, 3561 ports[portid].rx_dump_cb[queue]); 3562 ports[portid].rx_dump_cb[queue] = NULL; 3563 } 3564 } 3565 3566 void 3567 remove_tx_dump_callbacks(portid_t portid) 3568 { 3569 struct rte_eth_dev_info dev_info; 3570 uint16_t queue; 3571 int ret; 3572 3573 if (port_id_is_invalid(portid, ENABLED_WARN)) 3574 return; 3575 3576 ret = eth_dev_info_get_print_err(portid, &dev_info); 3577 if (ret != 0) 3578 return; 3579 3580 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 3581 if (ports[portid].tx_dump_cb[queue]) { 3582 rte_eth_remove_tx_callback(portid, queue, 3583 ports[portid].tx_dump_cb[queue]); 3584 ports[portid].tx_dump_cb[queue] = NULL; 3585 } 3586 } 3587 3588 void 3589 configure_rxtx_dump_callbacks(uint16_t verbose) 3590 { 3591 portid_t portid; 3592 3593 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3594 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 3595 return; 3596 #endif 3597 3598 RTE_ETH_FOREACH_DEV(portid) 3599 { 3600 if (verbose == 1 || verbose > 2) 3601 add_rx_dump_callbacks(portid); 3602 else 3603 remove_rx_dump_callbacks(portid); 3604 if (verbose >= 2) 3605 add_tx_dump_callbacks(portid); 3606 else 3607 remove_tx_dump_callbacks(portid); 3608 } 3609 } 3610 3611 void 3612 set_verbose_level(uint16_t vb_level) 3613 { 3614 printf("Change verbose level from %u to %u\n", 3615 (unsigned int) verbose_level, (unsigned int) vb_level); 3616 verbose_level = vb_level; 3617 configure_rxtx_dump_callbacks(verbose_level); 3618 } 3619 3620 void 3621 vlan_extend_set(portid_t port_id, int on) 3622 { 3623 int diag; 3624 int vlan_offload; 3625 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3626 3627 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3628 return; 3629 3630 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3631 3632 if (on) { 3633 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 3634 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3635 } else { 3636 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 3637 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3638 } 3639 3640 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3641 if (diag < 0) { 3642 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 3643 "diag=%d\n", port_id, on, diag); 3644 return; 3645 } 3646 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3647 } 3648 3649 void 3650 rx_vlan_strip_set(portid_t port_id, int on) 3651 { 3652 int diag; 3653 int vlan_offload; 3654 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3655 3656 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3657 return; 3658 3659 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3660 3661 if (on) { 3662 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 3663 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3664 } else { 3665 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 3666 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3667 } 3668 3669 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3670 if (diag < 0) { 3671 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 3672 "diag=%d\n", port_id, on, diag); 3673 return; 3674 } 3675 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3676 } 3677 3678 void 3679 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 3680 { 3681 int diag; 3682 3683 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3684 return; 3685 3686 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 3687 if (diag < 0) 3688 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 3689 "diag=%d\n", port_id, queue_id, on, diag); 3690 } 3691 3692 void 3693 rx_vlan_filter_set(portid_t port_id, int on) 3694 { 3695 int diag; 3696 int vlan_offload; 3697 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3698 3699 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3700 return; 3701 3702 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3703 3704 if (on) { 3705 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 3706 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3707 } else { 3708 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 3709 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3710 } 3711 3712 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3713 if (diag < 0) { 3714 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 3715 "diag=%d\n", port_id, on, diag); 3716 return; 3717 } 3718 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3719 } 3720 3721 void 3722 rx_vlan_qinq_strip_set(portid_t port_id, int on) 3723 { 3724 int diag; 3725 int vlan_offload; 3726 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3727 3728 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3729 return; 3730 3731 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3732 3733 if (on) { 3734 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 3735 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3736 } else { 3737 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 3738 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3739 } 3740 3741 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3742 if (diag < 0) { 3743 printf("%s(port_pi=%d, on=%d) failed " 3744 "diag=%d\n", __func__, port_id, on, diag); 3745 return; 3746 } 3747 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3748 } 3749 3750 int 3751 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 3752 { 3753 int diag; 3754 3755 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3756 return 1; 3757 if (vlan_id_is_invalid(vlan_id)) 3758 return 1; 3759 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 3760 if (diag == 0) 3761 return 0; 3762 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 3763 "diag=%d\n", 3764 port_id, vlan_id, on, diag); 3765 return -1; 3766 } 3767 3768 void 3769 rx_vlan_all_filter_set(portid_t port_id, int on) 3770 { 3771 uint16_t vlan_id; 3772 3773 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3774 return; 3775 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 3776 if (rx_vft_set(port_id, vlan_id, on)) 3777 break; 3778 } 3779 } 3780 3781 void 3782 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 3783 { 3784 int diag; 3785 3786 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3787 return; 3788 3789 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 3790 if (diag == 0) 3791 return; 3792 3793 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 3794 "diag=%d\n", 3795 port_id, vlan_type, tp_id, diag); 3796 } 3797 3798 void 3799 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3800 { 3801 struct rte_eth_dev_info dev_info; 3802 int ret; 3803 3804 if (vlan_id_is_invalid(vlan_id)) 3805 return; 3806 3807 if (ports[port_id].dev_conf.txmode.offloads & 3808 DEV_TX_OFFLOAD_QINQ_INSERT) { 3809 printf("Error, as QinQ has been enabled.\n"); 3810 return; 3811 } 3812 3813 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3814 if (ret != 0) 3815 return; 3816 3817 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3818 printf("Error: vlan insert is not supported by port %d\n", 3819 port_id); 3820 return; 3821 } 3822 3823 tx_vlan_reset(port_id); 3824 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3825 ports[port_id].tx_vlan_id = vlan_id; 3826 } 3827 3828 void 3829 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3830 { 3831 struct rte_eth_dev_info dev_info; 3832 int ret; 3833 3834 if (vlan_id_is_invalid(vlan_id)) 3835 return; 3836 if (vlan_id_is_invalid(vlan_id_outer)) 3837 return; 3838 3839 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3840 if (ret != 0) 3841 return; 3842 3843 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3844 printf("Error: qinq insert not supported by port %d\n", 3845 port_id); 3846 return; 3847 } 3848 3849 tx_vlan_reset(port_id); 3850 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 3851 DEV_TX_OFFLOAD_QINQ_INSERT); 3852 ports[port_id].tx_vlan_id = vlan_id; 3853 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3854 } 3855 3856 void 3857 tx_vlan_reset(portid_t port_id) 3858 { 3859 ports[port_id].dev_conf.txmode.offloads &= 3860 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3861 DEV_TX_OFFLOAD_QINQ_INSERT); 3862 ports[port_id].tx_vlan_id = 0; 3863 ports[port_id].tx_vlan_id_outer = 0; 3864 } 3865 3866 void 3867 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3868 { 3869 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3870 return; 3871 3872 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3873 } 3874 3875 void 3876 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3877 { 3878 uint16_t i; 3879 uint8_t existing_mapping_found = 0; 3880 3881 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3882 return; 3883 3884 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3885 return; 3886 3887 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3888 printf("map_value not in required range 0..%d\n", 3889 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3890 return; 3891 } 3892 3893 if (!is_rx) { /*then tx*/ 3894 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3895 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3896 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3897 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3898 existing_mapping_found = 1; 3899 break; 3900 } 3901 } 3902 if (!existing_mapping_found) { /* A new additional mapping... */ 3903 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3904 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3905 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3906 nb_tx_queue_stats_mappings++; 3907 } 3908 } 3909 else { /*rx*/ 3910 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3911 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3912 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3913 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3914 existing_mapping_found = 1; 3915 break; 3916 } 3917 } 3918 if (!existing_mapping_found) { /* A new additional mapping... */ 3919 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3920 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3921 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3922 nb_rx_queue_stats_mappings++; 3923 } 3924 } 3925 } 3926 3927 void 3928 set_xstats_hide_zero(uint8_t on_off) 3929 { 3930 xstats_hide_zero = on_off; 3931 } 3932 3933 void 3934 set_record_core_cycles(uint8_t on_off) 3935 { 3936 record_core_cycles = on_off; 3937 } 3938 3939 void 3940 set_record_burst_stats(uint8_t on_off) 3941 { 3942 record_burst_stats = on_off; 3943 } 3944 3945 static inline void 3946 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3947 { 3948 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3949 3950 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3951 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3952 " tunnel_id: 0x%08x", 3953 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3954 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3955 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3956 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3957 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3958 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3959 3960 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3961 rte_be_to_cpu_16(mask->src_port_mask), 3962 rte_be_to_cpu_16(mask->dst_port_mask)); 3963 3964 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3965 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3966 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3967 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3968 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3969 3970 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3971 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3972 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3973 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3974 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3975 } 3976 3977 printf("\n"); 3978 } 3979 3980 static inline void 3981 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3982 { 3983 struct rte_eth_flex_payload_cfg *cfg; 3984 uint32_t i, j; 3985 3986 for (i = 0; i < flex_conf->nb_payloads; i++) { 3987 cfg = &flex_conf->flex_set[i]; 3988 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3989 printf("\n RAW: "); 3990 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3991 printf("\n L2_PAYLOAD: "); 3992 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3993 printf("\n L3_PAYLOAD: "); 3994 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3995 printf("\n L4_PAYLOAD: "); 3996 else 3997 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3998 for (j = 0; j < num; j++) 3999 printf(" %-5u", cfg->src_offset[j]); 4000 } 4001 printf("\n"); 4002 } 4003 4004 static char * 4005 flowtype_to_str(uint16_t flow_type) 4006 { 4007 struct flow_type_info { 4008 char str[32]; 4009 uint16_t ftype; 4010 }; 4011 4012 uint8_t i; 4013 static struct flow_type_info flowtype_str_table[] = { 4014 {"raw", RTE_ETH_FLOW_RAW}, 4015 {"ipv4", RTE_ETH_FLOW_IPV4}, 4016 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4017 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4018 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4019 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4020 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4021 {"ipv6", RTE_ETH_FLOW_IPV6}, 4022 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4023 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4024 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4025 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4026 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4027 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4028 {"port", RTE_ETH_FLOW_PORT}, 4029 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4030 {"geneve", RTE_ETH_FLOW_GENEVE}, 4031 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4032 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4033 }; 4034 4035 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4036 if (flowtype_str_table[i].ftype == flow_type) 4037 return flowtype_str_table[i].str; 4038 } 4039 4040 return NULL; 4041 } 4042 4043 static inline void 4044 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4045 { 4046 struct rte_eth_fdir_flex_mask *mask; 4047 uint32_t i, j; 4048 char *p; 4049 4050 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4051 mask = &flex_conf->flex_mask[i]; 4052 p = flowtype_to_str(mask->flow_type); 4053 printf("\n %s:\t", p ? p : "unknown"); 4054 for (j = 0; j < num; j++) 4055 printf(" %02x", mask->mask[j]); 4056 } 4057 printf("\n"); 4058 } 4059 4060 static inline void 4061 print_fdir_flow_type(uint32_t flow_types_mask) 4062 { 4063 int i; 4064 char *p; 4065 4066 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4067 if (!(flow_types_mask & (1 << i))) 4068 continue; 4069 p = flowtype_to_str(i); 4070 if (p) 4071 printf(" %s", p); 4072 else 4073 printf(" unknown"); 4074 } 4075 printf("\n"); 4076 } 4077 4078 static int 4079 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4080 struct rte_eth_fdir_stats *fdir_stat) 4081 { 4082 int ret; 4083 4084 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 4085 if (!ret) { 4086 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 4087 RTE_ETH_FILTER_INFO, fdir_info); 4088 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 4089 RTE_ETH_FILTER_STATS, fdir_stat); 4090 return 0; 4091 } 4092 4093 #ifdef RTE_LIBRTE_I40E_PMD 4094 if (ret == -ENOTSUP) { 4095 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4096 if (!ret) 4097 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4098 } 4099 #endif 4100 #ifdef RTE_LIBRTE_IXGBE_PMD 4101 if (ret == -ENOTSUP) { 4102 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4103 if (!ret) 4104 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4105 } 4106 #endif 4107 switch (ret) { 4108 case 0: 4109 break; 4110 case -ENOTSUP: 4111 printf("\n FDIR is not supported on port %-2d\n", 4112 port_id); 4113 break; 4114 default: 4115 printf("programming error: (%s)\n", strerror(-ret)); 4116 break; 4117 } 4118 return ret; 4119 } 4120 4121 void 4122 fdir_get_infos(portid_t port_id) 4123 { 4124 struct rte_eth_fdir_stats fdir_stat; 4125 struct rte_eth_fdir_info fdir_info; 4126 4127 static const char *fdir_stats_border = "########################"; 4128 4129 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4130 return; 4131 4132 memset(&fdir_info, 0, sizeof(fdir_info)); 4133 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4134 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4135 return; 4136 4137 printf("\n %s FDIR infos for port %-2d %s\n", 4138 fdir_stats_border, port_id, fdir_stats_border); 4139 printf(" MODE: "); 4140 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4141 printf(" PERFECT\n"); 4142 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4143 printf(" PERFECT-MAC-VLAN\n"); 4144 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4145 printf(" PERFECT-TUNNEL\n"); 4146 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4147 printf(" SIGNATURE\n"); 4148 else 4149 printf(" DISABLE\n"); 4150 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4151 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 4152 printf(" SUPPORTED FLOW TYPE: "); 4153 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 4154 } 4155 printf(" FLEX PAYLOAD INFO:\n"); 4156 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 4157 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 4158 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 4159 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 4160 fdir_info.flex_payload_unit, 4161 fdir_info.max_flex_payload_segment_num, 4162 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 4163 printf(" MASK: "); 4164 print_fdir_mask(&fdir_info.mask); 4165 if (fdir_info.flex_conf.nb_payloads > 0) { 4166 printf(" FLEX PAYLOAD SRC OFFSET:"); 4167 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4168 } 4169 if (fdir_info.flex_conf.nb_flexmasks > 0) { 4170 printf(" FLEX MASK CFG:"); 4171 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4172 } 4173 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 4174 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 4175 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 4176 fdir_info.guarant_spc, fdir_info.best_spc); 4177 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 4178 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 4179 " add: %-10"PRIu64" remove: %"PRIu64"\n" 4180 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 4181 fdir_stat.collision, fdir_stat.free, 4182 fdir_stat.maxhash, fdir_stat.maxlen, 4183 fdir_stat.add, fdir_stat.remove, 4184 fdir_stat.f_add, fdir_stat.f_remove); 4185 printf(" %s############################%s\n", 4186 fdir_stats_border, fdir_stats_border); 4187 } 4188 4189 void 4190 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 4191 { 4192 struct rte_port *port; 4193 struct rte_eth_fdir_flex_conf *flex_conf; 4194 int i, idx = 0; 4195 4196 port = &ports[port_id]; 4197 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4198 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 4199 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 4200 idx = i; 4201 break; 4202 } 4203 } 4204 if (i >= RTE_ETH_FLOW_MAX) { 4205 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 4206 idx = flex_conf->nb_flexmasks; 4207 flex_conf->nb_flexmasks++; 4208 } else { 4209 printf("The flex mask table is full. Can not set flex" 4210 " mask for flow_type(%u).", cfg->flow_type); 4211 return; 4212 } 4213 } 4214 rte_memcpy(&flex_conf->flex_mask[idx], 4215 cfg, 4216 sizeof(struct rte_eth_fdir_flex_mask)); 4217 } 4218 4219 void 4220 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 4221 { 4222 struct rte_port *port; 4223 struct rte_eth_fdir_flex_conf *flex_conf; 4224 int i, idx = 0; 4225 4226 port = &ports[port_id]; 4227 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4228 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 4229 if (cfg->type == flex_conf->flex_set[i].type) { 4230 idx = i; 4231 break; 4232 } 4233 } 4234 if (i >= RTE_ETH_PAYLOAD_MAX) { 4235 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 4236 idx = flex_conf->nb_payloads; 4237 flex_conf->nb_payloads++; 4238 } else { 4239 printf("The flex payload table is full. Can not set" 4240 " flex payload for type(%u).", cfg->type); 4241 return; 4242 } 4243 } 4244 rte_memcpy(&flex_conf->flex_set[idx], 4245 cfg, 4246 sizeof(struct rte_eth_flex_payload_cfg)); 4247 4248 } 4249 4250 void 4251 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 4252 { 4253 #ifdef RTE_LIBRTE_IXGBE_PMD 4254 int diag; 4255 4256 if (is_rx) 4257 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 4258 else 4259 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 4260 4261 if (diag == 0) 4262 return; 4263 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 4264 is_rx ? "rx" : "tx", port_id, diag); 4265 return; 4266 #endif 4267 printf("VF %s setting not supported for port %d\n", 4268 is_rx ? "Rx" : "Tx", port_id); 4269 RTE_SET_USED(vf); 4270 RTE_SET_USED(on); 4271 } 4272 4273 int 4274 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 4275 { 4276 int diag; 4277 struct rte_eth_link link; 4278 int ret; 4279 4280 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4281 return 1; 4282 ret = eth_link_get_nowait_print_err(port_id, &link); 4283 if (ret < 0) 4284 return 1; 4285 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN && 4286 rate > link.link_speed) { 4287 printf("Invalid rate value:%u bigger than link speed: %u\n", 4288 rate, link.link_speed); 4289 return 1; 4290 } 4291 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 4292 if (diag == 0) 4293 return diag; 4294 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 4295 port_id, diag); 4296 return diag; 4297 } 4298 4299 int 4300 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 4301 { 4302 int diag = -ENOTSUP; 4303 4304 RTE_SET_USED(vf); 4305 RTE_SET_USED(rate); 4306 RTE_SET_USED(q_msk); 4307 4308 #ifdef RTE_LIBRTE_IXGBE_PMD 4309 if (diag == -ENOTSUP) 4310 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 4311 q_msk); 4312 #endif 4313 #ifdef RTE_LIBRTE_BNXT_PMD 4314 if (diag == -ENOTSUP) 4315 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 4316 #endif 4317 if (diag == 0) 4318 return diag; 4319 4320 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 4321 port_id, diag); 4322 return diag; 4323 } 4324 4325 /* 4326 * Functions to manage the set of filtered Multicast MAC addresses. 4327 * 4328 * A pool of filtered multicast MAC addresses is associated with each port. 4329 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 4330 * The address of the pool and the number of valid multicast MAC addresses 4331 * recorded in the pool are stored in the fields "mc_addr_pool" and 4332 * "mc_addr_nb" of the "rte_port" data structure. 4333 * 4334 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 4335 * to be supplied a contiguous array of multicast MAC addresses. 4336 * To comply with this constraint, the set of multicast addresses recorded 4337 * into the pool are systematically compacted at the beginning of the pool. 4338 * Hence, when a multicast address is removed from the pool, all following 4339 * addresses, if any, are copied back to keep the set contiguous. 4340 */ 4341 #define MCAST_POOL_INC 32 4342 4343 static int 4344 mcast_addr_pool_extend(struct rte_port *port) 4345 { 4346 struct rte_ether_addr *mc_pool; 4347 size_t mc_pool_size; 4348 4349 /* 4350 * If a free entry is available at the end of the pool, just 4351 * increment the number of recorded multicast addresses. 4352 */ 4353 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 4354 port->mc_addr_nb++; 4355 return 0; 4356 } 4357 4358 /* 4359 * [re]allocate a pool with MCAST_POOL_INC more entries. 4360 * The previous test guarantees that port->mc_addr_nb is a multiple 4361 * of MCAST_POOL_INC. 4362 */ 4363 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 4364 MCAST_POOL_INC); 4365 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 4366 mc_pool_size); 4367 if (mc_pool == NULL) { 4368 printf("allocation of pool of %u multicast addresses failed\n", 4369 port->mc_addr_nb + MCAST_POOL_INC); 4370 return -ENOMEM; 4371 } 4372 4373 port->mc_addr_pool = mc_pool; 4374 port->mc_addr_nb++; 4375 return 0; 4376 4377 } 4378 4379 static void 4380 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 4381 { 4382 if (mcast_addr_pool_extend(port) != 0) 4383 return; 4384 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 4385 } 4386 4387 static void 4388 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 4389 { 4390 port->mc_addr_nb--; 4391 if (addr_idx == port->mc_addr_nb) { 4392 /* No need to recompact the set of multicast addressses. */ 4393 if (port->mc_addr_nb == 0) { 4394 /* free the pool of multicast addresses. */ 4395 free(port->mc_addr_pool); 4396 port->mc_addr_pool = NULL; 4397 } 4398 return; 4399 } 4400 memmove(&port->mc_addr_pool[addr_idx], 4401 &port->mc_addr_pool[addr_idx + 1], 4402 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 4403 } 4404 4405 static int 4406 eth_port_multicast_addr_list_set(portid_t port_id) 4407 { 4408 struct rte_port *port; 4409 int diag; 4410 4411 port = &ports[port_id]; 4412 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 4413 port->mc_addr_nb); 4414 if (diag < 0) 4415 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 4416 port_id, port->mc_addr_nb, diag); 4417 4418 return diag; 4419 } 4420 4421 void 4422 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 4423 { 4424 struct rte_port *port; 4425 uint32_t i; 4426 4427 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4428 return; 4429 4430 port = &ports[port_id]; 4431 4432 /* 4433 * Check that the added multicast MAC address is not already recorded 4434 * in the pool of multicast addresses. 4435 */ 4436 for (i = 0; i < port->mc_addr_nb; i++) { 4437 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 4438 printf("multicast address already filtered by port\n"); 4439 return; 4440 } 4441 } 4442 4443 mcast_addr_pool_append(port, mc_addr); 4444 if (eth_port_multicast_addr_list_set(port_id) < 0) 4445 /* Rollback on failure, remove the address from the pool */ 4446 mcast_addr_pool_remove(port, i); 4447 } 4448 4449 void 4450 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 4451 { 4452 struct rte_port *port; 4453 uint32_t i; 4454 4455 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4456 return; 4457 4458 port = &ports[port_id]; 4459 4460 /* 4461 * Search the pool of multicast MAC addresses for the removed address. 4462 */ 4463 for (i = 0; i < port->mc_addr_nb; i++) { 4464 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 4465 break; 4466 } 4467 if (i == port->mc_addr_nb) { 4468 printf("multicast address not filtered by port %d\n", port_id); 4469 return; 4470 } 4471 4472 mcast_addr_pool_remove(port, i); 4473 if (eth_port_multicast_addr_list_set(port_id) < 0) 4474 /* Rollback on failure, add the address back into the pool */ 4475 mcast_addr_pool_append(port, mc_addr); 4476 } 4477 4478 void 4479 port_dcb_info_display(portid_t port_id) 4480 { 4481 struct rte_eth_dcb_info dcb_info; 4482 uint16_t i; 4483 int ret; 4484 static const char *border = "================"; 4485 4486 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4487 return; 4488 4489 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 4490 if (ret) { 4491 printf("\n Failed to get dcb infos on port %-2d\n", 4492 port_id); 4493 return; 4494 } 4495 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 4496 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 4497 printf("\n TC : "); 4498 for (i = 0; i < dcb_info.nb_tcs; i++) 4499 printf("\t%4d", i); 4500 printf("\n Priority : "); 4501 for (i = 0; i < dcb_info.nb_tcs; i++) 4502 printf("\t%4d", dcb_info.prio_tc[i]); 4503 printf("\n BW percent :"); 4504 for (i = 0; i < dcb_info.nb_tcs; i++) 4505 printf("\t%4d%%", dcb_info.tc_bws[i]); 4506 printf("\n RXQ base : "); 4507 for (i = 0; i < dcb_info.nb_tcs; i++) 4508 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 4509 printf("\n RXQ number :"); 4510 for (i = 0; i < dcb_info.nb_tcs; i++) 4511 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 4512 printf("\n TXQ base : "); 4513 for (i = 0; i < dcb_info.nb_tcs; i++) 4514 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 4515 printf("\n TXQ number :"); 4516 for (i = 0; i < dcb_info.nb_tcs; i++) 4517 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 4518 printf("\n"); 4519 } 4520 4521 uint8_t * 4522 open_file(const char *file_path, uint32_t *size) 4523 { 4524 int fd = open(file_path, O_RDONLY); 4525 off_t pkg_size; 4526 uint8_t *buf = NULL; 4527 int ret = 0; 4528 struct stat st_buf; 4529 4530 if (size) 4531 *size = 0; 4532 4533 if (fd == -1) { 4534 printf("%s: Failed to open %s\n", __func__, file_path); 4535 return buf; 4536 } 4537 4538 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 4539 close(fd); 4540 printf("%s: File operations failed\n", __func__); 4541 return buf; 4542 } 4543 4544 pkg_size = st_buf.st_size; 4545 if (pkg_size < 0) { 4546 close(fd); 4547 printf("%s: File operations failed\n", __func__); 4548 return buf; 4549 } 4550 4551 buf = (uint8_t *)malloc(pkg_size); 4552 if (!buf) { 4553 close(fd); 4554 printf("%s: Failed to malloc memory\n", __func__); 4555 return buf; 4556 } 4557 4558 ret = read(fd, buf, pkg_size); 4559 if (ret < 0) { 4560 close(fd); 4561 printf("%s: File read operation failed\n", __func__); 4562 close_file(buf); 4563 return NULL; 4564 } 4565 4566 if (size) 4567 *size = pkg_size; 4568 4569 close(fd); 4570 4571 return buf; 4572 } 4573 4574 int 4575 save_file(const char *file_path, uint8_t *buf, uint32_t size) 4576 { 4577 FILE *fh = fopen(file_path, "wb"); 4578 4579 if (fh == NULL) { 4580 printf("%s: Failed to open %s\n", __func__, file_path); 4581 return -1; 4582 } 4583 4584 if (fwrite(buf, 1, size, fh) != size) { 4585 fclose(fh); 4586 printf("%s: File write operation failed\n", __func__); 4587 return -1; 4588 } 4589 4590 fclose(fh); 4591 4592 return 0; 4593 } 4594 4595 int 4596 close_file(uint8_t *buf) 4597 { 4598 if (buf) { 4599 free((void *)buf); 4600 return 0; 4601 } 4602 4603 return -1; 4604 } 4605 4606 void 4607 port_queue_region_info_display(portid_t port_id, void *buf) 4608 { 4609 #ifdef RTE_LIBRTE_I40E_PMD 4610 uint16_t i, j; 4611 struct rte_pmd_i40e_queue_regions *info = 4612 (struct rte_pmd_i40e_queue_regions *)buf; 4613 static const char *queue_region_info_stats_border = "-------"; 4614 4615 if (!info->queue_region_number) 4616 printf("there is no region has been set before"); 4617 4618 printf("\n %s All queue region info for port=%2d %s", 4619 queue_region_info_stats_border, port_id, 4620 queue_region_info_stats_border); 4621 printf("\n queue_region_number: %-14u \n", 4622 info->queue_region_number); 4623 4624 for (i = 0; i < info->queue_region_number; i++) { 4625 printf("\n region_id: %-14u queue_number: %-14u " 4626 "queue_start_index: %-14u \n", 4627 info->region[i].region_id, 4628 info->region[i].queue_num, 4629 info->region[i].queue_start_index); 4630 4631 printf(" user_priority_num is %-14u :", 4632 info->region[i].user_priority_num); 4633 for (j = 0; j < info->region[i].user_priority_num; j++) 4634 printf(" %-14u ", info->region[i].user_priority[j]); 4635 4636 printf("\n flowtype_num is %-14u :", 4637 info->region[i].flowtype_num); 4638 for (j = 0; j < info->region[i].flowtype_num; j++) 4639 printf(" %-14u ", info->region[i].hw_flowtype[j]); 4640 } 4641 #else 4642 RTE_SET_USED(port_id); 4643 RTE_SET_USED(buf); 4644 #endif 4645 4646 printf("\n\n"); 4647 } 4648 4649 void 4650 show_macs(portid_t port_id) 4651 { 4652 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 4653 struct rte_eth_dev_info dev_info; 4654 struct rte_ether_addr *addr; 4655 uint32_t i, num_macs = 0; 4656 struct rte_eth_dev *dev; 4657 4658 dev = &rte_eth_devices[port_id]; 4659 4660 rte_eth_dev_info_get(port_id, &dev_info); 4661 4662 for (i = 0; i < dev_info.max_mac_addrs; i++) { 4663 addr = &dev->data->mac_addrs[i]; 4664 4665 /* skip zero address */ 4666 if (rte_is_zero_ether_addr(addr)) 4667 continue; 4668 4669 num_macs++; 4670 } 4671 4672 printf("Number of MAC address added: %d\n", num_macs); 4673 4674 for (i = 0; i < dev_info.max_mac_addrs; i++) { 4675 addr = &dev->data->mac_addrs[i]; 4676 4677 /* skip zero address */ 4678 if (rte_is_zero_ether_addr(addr)) 4679 continue; 4680 4681 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 4682 printf(" %s\n", buf); 4683 } 4684 } 4685 4686 void 4687 show_mcast_macs(portid_t port_id) 4688 { 4689 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 4690 struct rte_ether_addr *addr; 4691 struct rte_port *port; 4692 uint32_t i; 4693 4694 port = &ports[port_id]; 4695 4696 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 4697 4698 for (i = 0; i < port->mc_addr_nb; i++) { 4699 addr = &port->mc_addr_pool[i]; 4700 4701 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 4702 printf(" %s\n", buf); 4703 } 4704 } 4705