/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>

#include "testpmd.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
		ETH_RSS_GTPU},
	{ "none", 0 },
	{ "eth", ETH_RSS_ETH },
	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
	{ "vlan", ETH_RSS_VLAN },
	{ "s-vlan", ETH_RSS_S_VLAN },
	{ "c-vlan", ETH_RSS_C_VLAN },
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
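	/*
	 * The *_SRC_ONLY / *_DST_ONLY flags below restrict the RSS hash
	 * input to only the source or only the destination field of the
	 * L3/L4 header.
	 */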
	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
	{ "esp", ETH_RSS_ESP },
	{ "ah", ETH_RSS_AH },
	{ "l2tpv3", ETH_RSS_L2TPV3 },
	{ "pfcp", ETH_RSS_PFCP },
	{ "pppoe", ETH_RSS_PPPOE },
	{ "gtpu", ETH_RSS_GTPU },
	{ NULL, 0 },
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
		 diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	} else {
		printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i],
			       stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

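	/*
	 * Throughput is computed from the deltas since the previous call:
	 * packet/byte counters use a saturating difference (a counter that
	 * went backwards, e.g. after a reset, yields 0 rather than a huge
	 * unsigned wraparound), scaled to a per-second rate by the elapsed
	 * monotonic time in nanoseconds.
	 */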
	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		printf("%s: Error: failed to reset stats (port %u): %s",
		       __func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		printf("%s: Error: failed to get stats (port %u): %s",
		       __func__, port_id, strerror(ret));
		return;
	}
	printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

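/*
 * Reset the extended statistics of a port, then refresh the basic stats
 * snapshot cached in ports[].stats so that later statistics displays
 * diff against the post-reset values.
 */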
void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		printf("%s: Error: failed to reset xstats (port %u): %s",
		       __func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		printf("%s: Error: failed to get stats (port %u): %s",
		       __func__, port_id, strerror(ret));
		return;
	}
}

void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf(" RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf(" TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf(" %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "RX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
		       " (per queue)" : "");

	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "TX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
		       " (per queue)" : "");

	printf("\n");
}

static int bus_match_all(const struct rte_bus *bus, const void *data)
{
	RTE_SET_USED(bus);
	RTE_SET_USED(data);
	return 0;
}

void
device_infos_display(const char *identifier)
{
	static const char *info_border = "*********************";
	struct rte_bus *start = NULL, *next;
	struct rte_dev_iterator dev_iter;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_ether_addr mac_addr;
	struct rte_device *dev;
	struct rte_devargs da;
	portid_t port_id;
	char devstr[128];

	memset(&da, 0, sizeof(da));
	if (!identifier)
		goto skip_parse;

	if (rte_devargs_parsef(&da, "%s", identifier)) {
		printf("cannot parse identifier\n");
		if (da.args)
			free(da.args);
		return;
	}

skip_parse:
	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

		start = next;
		if (identifier && da.bus != next)
			continue;

		/* Skip buses that don't have iterate method */
		if (!next->dev_iterate)
			continue;

		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

			if (!dev->driver)
				continue;
			/* Check for matching device if identifier is present */
			if (identifier &&
			    strncmp(da.name, dev->name, strlen(dev->name)))
				continue;
			printf("\n%s Infos for device %s %s\n",
			       info_border, dev->name, info_border);
			printf("Bus name: %s", dev->bus->name);
			printf("\nDriver name: %s", dev->driver->name);
			printf("\nDevargs: %s",
			       dev->devargs ?
			       dev->devargs->args : "");
			printf("\nConnect to socket: %d", dev->numa_node);
			printf("\n");

			/* List ports with matching device name */
			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
				printf("\n\tPort id: %-2d", port_id);
				if (eth_macaddr_get_print_err(port_id,
							      &mac_addr) == 0)
					print_ethaddr("\n\tMAC address: ",
						      &mac_addr);
				rte_eth_dev_get_name_by_port(port_id, name);
				printf("\n\tDevice name: %s", name);
				printf("\n");
			}
		}
	}
}

void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	uint16_t mtu;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;
	char fw_version[ETHDEV_FWVERS_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
		print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);

	if (rte_eth_dev_fw_version_get(port_id, fw_version,
				       ETHDEV_FWVERS_LEN) == 0)
		printf("\nFirmware-version: %s", fw_version);
	else
		printf("\nFirmware-version: %s", "not available");

	if (dev_info.device->devargs && dev_info.device->devargs->args)
		printf("\nDevargs: %s", dev_info.device->devargs->args);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u",
		       port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ?
"enabled" : "disabled"); 644 printf("Maximum number of MAC addresses: %u\n", 645 (unsigned int)(port->dev_info.max_mac_addrs)); 646 printf("Maximum number of MAC addresses of hash filtering: %u\n", 647 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 648 649 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 650 if (vlan_offload >= 0){ 651 printf("VLAN offload: \n"); 652 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 653 printf(" strip on, "); 654 else 655 printf(" strip off, "); 656 657 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 658 printf("filter on, "); 659 else 660 printf("filter off, "); 661 662 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 663 printf("extend on, "); 664 else 665 printf("extend off, "); 666 667 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 668 printf("qinq strip on\n"); 669 else 670 printf("qinq strip off\n"); 671 } 672 673 if (dev_info.hash_key_size > 0) 674 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 675 if (dev_info.reta_size > 0) 676 printf("Redirection table size: %u\n", dev_info.reta_size); 677 if (!dev_info.flow_type_rss_offloads) 678 printf("No RSS offload flow type is supported.\n"); 679 else { 680 uint16_t i; 681 char *p; 682 683 printf("Supported RSS offload flow types:\n"); 684 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 685 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 686 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 687 continue; 688 p = flowtype_to_str(i); 689 if (p) 690 printf(" %s\n", p); 691 else 692 printf(" user defined %d\n", i); 693 } 694 } 695 696 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 697 printf("Maximum configurable length of RX packet: %u\n", 698 dev_info.max_rx_pktlen); 699 printf("Maximum configurable size of LRO aggregated packet: %u\n", 700 dev_info.max_lro_pkt_size); 701 if (dev_info.max_vfs) 702 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 703 if (dev_info.max_vmdq_pools) 704 printf("Maximum number of VMDq pools: %u\n", 705 dev_info.max_vmdq_pools); 706 707 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 708 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 709 printf("Max possible number of RXDs per queue: %hu\n", 710 dev_info.rx_desc_lim.nb_max); 711 printf("Min possible number of RXDs per queue: %hu\n", 712 dev_info.rx_desc_lim.nb_min); 713 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 714 715 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 716 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 717 printf("Max possible number of TXDs per queue: %hu\n", 718 dev_info.tx_desc_lim.nb_max); 719 printf("Min possible number of TXDs per queue: %hu\n", 720 dev_info.tx_desc_lim.nb_min); 721 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 722 printf("Max segment number per packet: %hu\n", 723 dev_info.tx_desc_lim.nb_seg_max); 724 printf("Max segment number per MTU/TSO: %hu\n", 725 dev_info.tx_desc_lim.nb_mtu_seg_max); 726 727 /* Show switch info only if valid switch domain and port id is set */ 728 if (dev_info.switch_info.domain_id != 729 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 730 if (dev_info.switch_info.name) 731 printf("Switch name: %s\n", dev_info.switch_info.name); 732 733 printf("Switch domain Id: %u\n", 734 dev_info.switch_info.domain_id); 735 printf("Switch Port Id: %u\n", 736 dev_info.switch_info.port_id); 737 } 738 } 739 740 void 741 port_summary_header_display(void) 742 { 743 uint16_t port_number; 744 745 port_number = 
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
	       "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n",
	       port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
	       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
	       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
	       dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
	       rte_eth_link_speed_to_str(link.link_speed));
}

void
port_offload_cap_display(portid_t port_id)
{
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Port %d supported offload features: %s\n",
	       info_border, port_id, info_border);

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_QINQ_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
		printf("RX UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
		printf("RX TCP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
		printf("RX SCTP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("RX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
		printf("RX Outer UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
		printf("Large receive offload: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_LRO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
		printf("HW timestamp: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TIMESTAMP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
		printf("Rx Keep CRC: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_KEEP_CRC)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
		printf("RX offload security: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_SECURITY)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
		printf("VLAN insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VLAN_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Double VLANs insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_QINQ_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
		printf("TX IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
		printf("TX UDP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
		printf("TX TCP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
		printf("TX SCTP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("TX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
		printf("TX TCP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
		printf("TX UDP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
		printf("TSO for VXLAN tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
		printf("TSO for GRE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GRE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
		printf("TSO for IPIP tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPIP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
		printf("TSO for GENEVE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
		printf("IP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
		printf("UDP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
		printf("TX Outer UDP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) {
		printf("Tx scheduling on timestamp: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP)
			printf("on\n");
		else
			printf("off\n");
	}

}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}

	if (!ports[port_id].dev_info.device) {
		printf("Invalid device\n");
		return 1;
	}

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
	} else {
		printf("Not a PCI device\n");
		return 1;
	}

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

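/*
 * The register helpers below access 32-bit device registers through the
 * port's PCI BAR 0 mapping (port_id_pci_reg_read/write), after the port
 * id, register offset and bit positions have been validated.
 */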
#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

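	/*
	 * Build a mask of (h_bit - l_bit + 1) one-bits; when the field
	 * spans all 32 bits, shifting by 32 would be undefined behaviour,
	 * hence the explicit 0xFFFFFFFF case below.
	 */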
	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;
	struct rte_port *rte_port = &ports[port_id];
	struct rte_eth_dev_info dev_info;
	uint16_t eth_overhead;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
		printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
		       mtu, dev_info.min_mtu, dev_info.max_mtu);
		return;
	}
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag)
		printf("Set MTU failed. diag=%d\n", diag);
	else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		/*
		 * Ether overhead in driver is equal to the difference of
		 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
		 * device supports jumbo frame.
		 */
		eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
		if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
			rte_port->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
			rte_port->dev_conf.rxmode.max_rx_pkt_len =
						mtu + eth_overhead;
		} else
			rte_port->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;
	}
}

/* Generic flow management functions. */

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}

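/*
 * Note: the first rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, ...) call
 * above only returns the buffer size needed for a copy of the rule; the
 * second call performs the actual deep copy into pf->rule.
 */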
/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)",
	       rte_strerror(err));
	return -err;
}

static void
rss_config_display(struct rte_flow_action_rss *rss_conf)
{
	uint8_t i;

	if (rss_conf == NULL) {
		printf("Invalid rule\n");
		return;
	}

	printf("RSS:\n"
	       " queues:");
	if (rss_conf->queue_num == 0)
		printf(" none");
	for (i = 0; i < rss_conf->queue_num; i++)
		printf(" %d", rss_conf->queue[i]);
	printf("\n");

	printf(" function: ");
	switch (rss_conf->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		printf("default\n");
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		printf("toeplitz\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		printf("simple_xor\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
		printf("symmetric_toeplitz\n");
		break;
	default:
		printf("Unknown function\n");
		return;
	}

	printf(" types:\n");
	if (rss_conf->types == 0) {
		printf("  none\n");
		return;
	}
	for (i = 0; rss_type_table[i].str; i++) {
		if ((rss_conf->types &
		     rss_type_table[i].rss_type) ==
		    rss_type_table[i].rss_type &&
		    rss_type_table[i].rss_type != 0)
			printf("  %s\n", rss_type_table[i].str);
	}
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}

/** Update age action context by port_flow pointer. */
void
update_age_action_context(const struct rte_flow_action *actions,
			  struct port_flow *pf)
{
	struct rte_flow_action_age *age = NULL;

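	/*
	 * The action configuration is const in the caller's array; the
	 * uintptr_t cast below intentionally drops that qualifier so the
	 * flow entry can be stored as the AGE action's aging context.
	 */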
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_AGE:
			age = (struct rte_flow_action_age *)
				(uintptr_t)actions->conf;
			age->context = pf;
			return;
		default:
			break;
		}
	}
}

/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id = 0;
	struct rte_flow_error error;

	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first");
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	}
	pf = port_flow_new(attr, pattern, actions, &error);
	if (!pf)
		return port_flow_complain(&error);
	update_age_action_context(actions, pf);
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow) {
		free(pf);
		return port_flow_complain(&error);
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		ret = port_flow_complain(&error);
		if (port_id_is_invalid(port_id, DISABLED_WARN) ||
		    port_id == (portid_t)RTE_PORT_ALL)
			return ret;
	}
	port = &ports[port_id];
	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

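/*
 * Note: port_flow_flush() above still releases the local flow list when the
 * PMD-level flush failed, as long as the port id itself was valid, so the
 * local bookkeeping never keeps pointers to flows the PMD may have dropped.
 */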
/** Dump all flow rules. */
int
port_flow_dump(portid_t port_id, const char *file_name)
{
	int ret = 0;
	FILE *file = stdout;
	struct rte_flow_error error;

	if (file_name && strlen(file_name)) {
		file = fopen(file_name, "w");
		if (!file) {
			printf("Failed to create file %s: %s\n", file_name,
			       strerror(errno));
			return -errno;
		}
	}
	ret = rte_flow_dev_dump(port_id, file, &error);
	if (ret) {
		port_flow_complain(&error);
		printf("Failed to dump flow: %s\n", strerror(-ret));
	} else
		printf("Flow dump finished\n");
	if (file_name && strlen(file_name))
		fclose(file);
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
		struct rte_flow_action_rss rss_conf;
	} query;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
			    &name, sizeof(name),
			    (void *)(uintptr_t)action->type, &error);
	if (ret < 0)
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
	case RTE_FLOW_ACTION_TYPE_RSS:
		break;
	default:
		printf("Cannot query action type %d (%s)\n",
		       action->type, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss_config_display(&query.rss_conf);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action->type, name);
		break;
	}
	return 0;
}

/** List and, optionally, destroy all aged flows. */
void
port_flow_aged(portid_t port_id, uint8_t destroy)
{
	void **contexts;
	int nb_context, total = 0, idx;
	struct rte_flow_error error;
	struct port_flow *pf;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	printf("Port %u total aged flows: %d\n", port_id, total);
	if (total < 0) {
		port_flow_complain(&error);
		return;
	}
	if (total == 0)
		return;
	contexts = malloc(sizeof(void *) * total);
	if (contexts == NULL) {
		printf("Cannot allocate contexts for aged flow\n");
		return;
	}
	printf("ID\tGroup\tPrio\tAttr\n");
	nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
	if (nb_context != total) {
		printf("Port:%d get aged flows count(%d) != total(%d)\n",
		       port_id, nb_context, total);
		free(contexts);
		return;
	}
	for (idx = 0; idx < nb_context; idx++) {
		pf = (struct port_flow *)contexts[idx];
		if (!pf) {
			printf("Error: got NULL context in port %u\n",
			       port_id);
			continue;
		}
		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n",
		       pf->id,
		       pf->rule.attr->group,
		       pf->rule.attr->priority,
		       pf->rule.attr->ingress ? 'i' : '-',
		       pf->rule.attr->egress ? 'e' : '-',
		       pf->rule.attr->transfer ? 't' : '-');
	}
	if (destroy) {
		int ret;
		uint32_t flow_id;

		total = 0;
		printf("\n");
		for (idx = 0; idx < nb_context; idx++) {
			pf = (struct port_flow *)contexts[idx];
			if (!pf)
				continue;
			flow_id = pf->id;
			ret = port_flow_destroy(port_id, 1, &flow_id);
			if (!ret)
				total++;
		}
		printf("%d flows destroyed\n", total);
	}
	free(contexts);
}

/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;
		const struct rte_flow_attr *curr = pf->rule.attr;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (curr->group == group[i])
					break;
			if (i == n)
				continue;
		}
		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
			const struct rte_flow_attr *comp = (*tmp)->rule.attr;

			if (curr->group > comp->group ||
			    (curr->group == comp->group &&
			     curr->priority > comp->priority) ||
			    (curr->group == comp->group &&
			     curr->priority == comp->priority &&
			     pf->id > (*tmp)->id))
				continue;
			break;
		}
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->rule.pattern;
		const struct rte_flow_action *action = pf->rule.actions;
		const char *name;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
		       pf->id,
		       pf->rule.attr->group,
		       pf->rule.attr->priority,
		       pf->rule.attr->ingress ? 'i' : '-',
		       pf->rule.attr->egress ? 'e' : '-',
		       pf->rule.attr->transfer ?
		       't' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
					  &name, sizeof(name),
					  (void *)(uintptr_t)item->type,
					  NULL) <= 0)
				name = "[UNKNOWN]";
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
					  &name, sizeof(name),
					  (void *)(uintptr_t)action->type,
					  NULL) <= 0)
				name = "[UNKNOWN]";
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", name);
			++action;
		}
		printf("\n");
	}
}

/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
	       port_id,
	       set ? "now restricted" : "not restricted anymore");
	return 0;
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
		 port_id, q_id, ring_name);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
port_id, 1965 #else 1966 __rte_unused portid_t port_id, 1967 #endif 1968 uint16_t desc_id) 1969 { 1970 struct igb_ring_desc_16_bytes *ring = 1971 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1972 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1973 int ret; 1974 struct rte_eth_dev_info dev_info; 1975 1976 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1977 if (ret != 0) 1978 return; 1979 1980 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1981 /* 32 bytes RX descriptor, i40e only */ 1982 struct igb_ring_desc_32_bytes *ring = 1983 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1984 ring[desc_id].lo_dword.dword = 1985 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1986 ring_rxd_display_dword(ring[desc_id].lo_dword); 1987 ring[desc_id].hi_dword.dword = 1988 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1989 ring_rxd_display_dword(ring[desc_id].hi_dword); 1990 ring[desc_id].resv1.dword = 1991 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1992 ring_rxd_display_dword(ring[desc_id].resv1); 1993 ring[desc_id].resv2.dword = 1994 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1995 ring_rxd_display_dword(ring[desc_id].resv2); 1996 1997 return; 1998 } 1999 #endif 2000 /* 16 bytes RX descriptor */ 2001 ring[desc_id].lo_dword.dword = 2002 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2003 ring_rxd_display_dword(ring[desc_id].lo_dword); 2004 ring[desc_id].hi_dword.dword = 2005 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2006 ring_rxd_display_dword(ring[desc_id].hi_dword); 2007 } 2008 2009 static void 2010 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2011 { 2012 struct igb_ring_desc_16_bytes *ring; 2013 struct igb_ring_desc_16_bytes txd; 2014 2015 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2016 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2017 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2018 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2019 (unsigned)txd.lo_dword.words.lo, 2020 (unsigned)txd.lo_dword.words.hi, 2021 (unsigned)txd.hi_dword.words.lo, 2022 (unsigned)txd.hi_dword.words.hi); 2023 } 2024 2025 void 2026 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2027 { 2028 const struct rte_memzone *rx_mz; 2029 2030 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2031 return; 2032 if (rx_queue_id_is_invalid(rxq_id)) 2033 return; 2034 if (rx_desc_id_is_invalid(rxd_id)) 2035 return; 2036 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2037 if (rx_mz == NULL) 2038 return; 2039 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2040 } 2041 2042 void 2043 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2044 { 2045 const struct rte_memzone *tx_mz; 2046 2047 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2048 return; 2049 if (tx_queue_id_is_invalid(txq_id)) 2050 return; 2051 if (tx_desc_id_is_invalid(txd_id)) 2052 return; 2053 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2054 if (tx_mz == NULL) 2055 return; 2056 ring_tx_descriptor_display(tx_mz, txd_id); 2057 } 2058 2059 void 2060 fwd_lcores_config_display(void) 2061 { 2062 lcoreid_t lc_id; 2063 2064 printf("List of forwarding lcores:"); 2065 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2066 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2067 printf("\n"); 2068 } 2069 void 2070 rxtx_config_display(void) 2071 { 2072 portid_t pid; 2073 queueid_t qid; 2074 2075 printf(" %s packet forwarding%s packets/burst=%d\n", 2076 cur_fwd_eng->fwd_mode_name, 2077 retry_enabled == 0 ? "" : " with retry", 2078 nb_pkt_per_burst); 2079 2080 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2081 printf(" packet len=%u - nb packet segments=%d\n", 2082 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2083 2084 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2085 nb_fwd_lcores, nb_fwd_ports); 2086 2087 RTE_ETH_FOREACH_DEV(pid) { 2088 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2089 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2090 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2091 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2092 uint16_t nb_rx_desc_tmp; 2093 uint16_t nb_tx_desc_tmp; 2094 struct rte_eth_rxq_info rx_qinfo; 2095 struct rte_eth_txq_info tx_qinfo; 2096 int32_t rc; 2097 2098 /* per port config */ 2099 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2100 (unsigned int)pid, nb_rxq, nb_txq); 2101 2102 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2103 ports[pid].dev_conf.rxmode.offloads, 2104 ports[pid].dev_conf.txmode.offloads); 2105 2106 /* per rx queue config only for first queue to be less verbose */ 2107 for (qid = 0; qid < 1; qid++) { 2108 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2109 if (rc) 2110 nb_rx_desc_tmp = nb_rx_desc[qid]; 2111 else 2112 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2113 2114 printf(" RX queue: %d\n", qid); 2115 printf(" RX desc=%d - RX free threshold=%d\n", 2116 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 2117 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2118 " wthresh=%d\n", 2119 rx_conf[qid].rx_thresh.pthresh, 2120 rx_conf[qid].rx_thresh.hthresh, 2121 rx_conf[qid].rx_thresh.wthresh); 2122 printf(" RX Offloads=0x%"PRIx64"\n", 2123 rx_conf[qid].offloads); 2124 } 2125 2126 /* per tx queue config only for first queue to be less verbose */ 2127 for (qid = 0; qid < 1; qid++) { 2128 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2129 if (rc) 2130 nb_tx_desc_tmp = nb_tx_desc[qid]; 2131 else 2132 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2133 2134 printf(" TX queue: %d\n", qid); 2135 printf(" TX desc=%d - TX free threshold=%d\n", 2136 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 2137 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2138 " wthresh=%d\n", 2139 tx_conf[qid].tx_thresh.pthresh, 2140 tx_conf[qid].tx_thresh.hthresh, 2141 tx_conf[qid].tx_thresh.wthresh); 2142 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2143 tx_conf[qid].offloads, tx_conf[qid].tx_rs_thresh); 2144 } 2145 } 2146 } 2147 2148 void 2149 port_rss_reta_info(portid_t port_id, 2150 struct rte_eth_rss_reta_entry64 *reta_conf, 2151 uint16_t nb_entries) 2152 { 2153 uint16_t i, idx, shift; 2154 int ret; 2155 2156 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2157 return; 2158 2159 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2160 if (ret != 0) { 2161 printf("Failed to get RSS RETA info, return code = %d\n", ret); 2162 return; 2163 } 2164 2165 for (i = 0; i < nb_entries; i++) { 2166 idx = i / RTE_RETA_GROUP_SIZE; 2167 shift = i % RTE_RETA_GROUP_SIZE; 2168 if (!(reta_conf[idx].mask & (1ULL << shift))) 2169 continue; 2170 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2171 i, reta_conf[idx].reta[shift]); 2172 } 2173 } 2174 2175 /* 2176 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 2177 * key of the port.
2178 */ 2179 void 2180 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2181 { 2182 struct rte_eth_rss_conf rss_conf = {0}; 2183 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2184 uint64_t rss_hf; 2185 uint8_t i; 2186 int diag; 2187 struct rte_eth_dev_info dev_info; 2188 uint8_t hash_key_size; 2189 int ret; 2190 2191 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2192 return; 2193 2194 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2195 if (ret != 0) 2196 return; 2197 2198 if (dev_info.hash_key_size > 0 && 2199 dev_info.hash_key_size <= sizeof(rss_key)) 2200 hash_key_size = dev_info.hash_key_size; 2201 else { 2202 printf("dev_info did not provide a valid hash key size\n"); 2203 return; 2204 } 2205 2206 /* Get RSS hash key if asked to display it */ 2207 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 2208 rss_conf.rss_key_len = hash_key_size; 2209 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2210 if (diag != 0) { 2211 switch (diag) { 2212 case -ENODEV: 2213 printf("port index %d invalid\n", port_id); 2214 break; 2215 case -ENOTSUP: 2216 printf("operation not supported by device\n"); 2217 break; 2218 default: 2219 printf("operation failed - diag=%d\n", diag); 2220 break; 2221 } 2222 return; 2223 } 2224 rss_hf = rss_conf.rss_hf; 2225 if (rss_hf == 0) { 2226 printf("RSS disabled\n"); 2227 return; 2228 } 2229 printf("RSS functions:\n "); 2230 for (i = 0; rss_type_table[i].str; i++) { 2231 if (rss_hf & rss_type_table[i].rss_type) 2232 printf("%s ", rss_type_table[i].str); 2233 } 2234 printf("\n"); 2235 if (!show_rss_key) 2236 return; 2237 printf("RSS key:\n"); 2238 for (i = 0; i < hash_key_size; i++) 2239 printf("%02X", rss_key[i]); 2240 printf("\n"); 2241 } 2242 2243 void 2244 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2245 uint hash_key_len) 2246 { 2247 struct rte_eth_rss_conf rss_conf; 2248 int diag; 2249 unsigned int i; 2250 2251 rss_conf.rss_key = NULL; 2252 rss_conf.rss_key_len = hash_key_len; 2253 rss_conf.rss_hf = 0; 2254 for (i = 0; rss_type_table[i].str; i++) { 2255 if (!strcmp(rss_type_table[i].str, rss_type)) 2256 rss_conf.rss_hf = rss_type_table[i].rss_type; 2257 } 2258 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2259 if (diag == 0) { 2260 rss_conf.rss_key = hash_key; 2261 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2262 } 2263 if (diag == 0) 2264 return; 2265 2266 switch (diag) { 2267 case -ENODEV: 2268 printf("port index %d invalid\n", port_id); 2269 break; 2270 case -ENOTSUP: 2271 printf("operation not supported by device\n"); 2272 break; 2273 default: 2274 printf("operation failed - diag=%d\n", diag); 2275 break; 2276 } 2277 } 2278 2279 /* 2280 * Setup forwarding configuration for each logical core. 
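 *
 * Worked example (illustrative, not from the original sources): with
 * nb_fwd_streams = 10 and nb_fwd_lcores = 4, nb_fs_per_lcore = 2 and
 * nb_extra = 2, so the first two lcores get streams {0,1} and {2,3},
 * and the two "extra" lcores get three streams each: {4,5,6} and
 * {7,8,9}.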
2281 */ 2282 static void 2283 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2284 { 2285 streamid_t nb_fs_per_lcore; 2286 streamid_t nb_fs; 2287 streamid_t sm_id; 2288 lcoreid_t nb_extra; 2289 lcoreid_t nb_fc; 2290 lcoreid_t nb_lc; 2291 lcoreid_t lc_id; 2292 2293 nb_fs = cfg->nb_fwd_streams; 2294 nb_fc = cfg->nb_fwd_lcores; 2295 if (nb_fs <= nb_fc) { 2296 nb_fs_per_lcore = 1; 2297 nb_extra = 0; 2298 } else { 2299 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2300 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2301 } 2302 2303 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2304 sm_id = 0; 2305 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2306 fwd_lcores[lc_id]->stream_idx = sm_id; 2307 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2308 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2309 } 2310 2311 /* 2312 * Assign extra remaining streams, if any. 2313 */ 2314 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2315 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2316 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2317 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2318 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2319 } 2320 } 2321 2322 static portid_t 2323 fwd_topology_tx_port_get(portid_t rxp) 2324 { 2325 static int warning_once = 1; 2326 2327 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2328 2329 switch (port_topology) { 2330 default: 2331 case PORT_TOPOLOGY_PAIRED: 2332 if ((rxp & 0x1) == 0) { 2333 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2334 return rxp + 1; 2335 if (warning_once) { 2336 printf("\nWarning! port-topology=paired" 2337 " and an odd number of forwarding ports;" 2338 " the last port will pair with" 2339 " itself.\n\n"); 2340 warning_once = 0; 2341 } 2342 return rxp; 2343 } 2344 return rxp - 1; 2345 case PORT_TOPOLOGY_CHAINED: 2346 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2347 case PORT_TOPOLOGY_LOOP: 2348 return rxp; 2349 } 2350 } 2351 2352 static void 2353 simple_fwd_config_setup(void) 2354 { 2355 portid_t i; 2356 2357 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2358 cur_fwd_config.nb_fwd_streams = 2359 (streamid_t) cur_fwd_config.nb_fwd_ports; 2360 2361 /* reinitialize forwarding streams */ 2362 init_fwd_streams(); 2363 2364 /* 2365 * In the simple forwarding test, the number of forwarding cores 2366 * must be lower than or equal to the number of forwarding ports. 2367 */ 2368 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2369 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2370 cur_fwd_config.nb_fwd_lcores = 2371 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2372 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2373 2374 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2375 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2376 fwd_streams[i]->rx_queue = 0; 2377 fwd_streams[i]->tx_port = 2378 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2379 fwd_streams[i]->tx_queue = 0; 2380 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2381 fwd_streams[i]->retry_enabled = retry_enabled; 2382 } 2383 } 2384 2385 /** 2386 * For the RSS forwarding test, all streams are distributed over the lcores. 2387 * Each stream is composed of an RX queue to poll on an RX port for input 2388 * messages, associated with a TX queue of a TX port to which forwarded packets are sent.
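 *
 * Worked example (illustrative, not from the original sources): with two
 * forwarding ports in the default paired topology and nb_q = 2, the four
 * streams are mapped as
 *   stream 0: RX port 0, queue 0 -> TX port 1, queue 0
 *   stream 1: RX port 1, queue 0 -> TX port 0, queue 0
 *   stream 2: RX port 0, queue 1 -> TX port 1, queue 1
 *   stream 3: RX port 1, queue 1 -> TX port 0, queue 1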
2389 */ 2390 static void 2391 rss_fwd_config_setup(void) 2392 { 2393 portid_t rxp; 2394 portid_t txp; 2395 queueid_t rxq; 2396 queueid_t nb_q; 2397 streamid_t sm_id; 2398 2399 nb_q = nb_rxq; 2400 if (nb_q > nb_txq) 2401 nb_q = nb_txq; 2402 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2403 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2404 cur_fwd_config.nb_fwd_streams = 2405 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2406 2407 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2408 cur_fwd_config.nb_fwd_lcores = 2409 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2410 2411 /* reinitialize forwarding streams */ 2412 init_fwd_streams(); 2413 2414 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2415 rxp = 0; rxq = 0; 2416 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2417 struct fwd_stream *fs; 2418 2419 fs = fwd_streams[sm_id]; 2420 txp = fwd_topology_tx_port_get(rxp); 2421 fs->rx_port = fwd_ports_ids[rxp]; 2422 fs->rx_queue = rxq; 2423 fs->tx_port = fwd_ports_ids[txp]; 2424 fs->tx_queue = rxq; 2425 fs->peer_addr = fs->tx_port; 2426 fs->retry_enabled = retry_enabled; 2427 rxp++; 2428 if (rxp < nb_fwd_ports) 2429 continue; 2430 rxp = 0; 2431 rxq++; 2432 } 2433 } 2434 2435 /** 2436 * For the DCB forwarding test, each core is assigned to one traffic class. 2437 * 2438 * Each core is assigned multiple streams, each stream being composed of 2439 * an RX queue to poll on an RX port for input messages, associated with 2440 * a TX queue of a TX port to which forwarded packets are sent. All RX and 2441 * TX queues are mapped to the same traffic class. 2442 * If VMDQ and DCB co-exist, a given traffic class across the different 2443 * pools shares the same core. 2444 */ 2445 static void 2446 dcb_fwd_config_setup(void) 2447 { 2448 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2449 portid_t txp, rxp = 0; 2450 queueid_t txq, rxq = 0; 2451 lcoreid_t lc_id; 2452 uint16_t nb_rx_queue, nb_tx_queue; 2453 uint16_t i, j, k, sm_id = 0; 2454 uint8_t tc = 0; 2455 2456 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2457 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2458 cur_fwd_config.nb_fwd_streams = 2459 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2460 2461 /* reinitialize forwarding streams */ 2462 init_fwd_streams(); 2463 sm_id = 0; 2464 txp = 1; 2465 /* get the dcb info on the first RX and TX ports */ 2466 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2467 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2468 2469 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2470 fwd_lcores[lc_id]->stream_nb = 0; 2471 fwd_lcores[lc_id]->stream_idx = sm_id; 2472 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2473 /* if nb_queue is zero, this TC is not enabled 2474 * on this pool 2475 */ 2476 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2477 break; 2478 k = fwd_lcores[lc_id]->stream_nb + 2479 fwd_lcores[lc_id]->stream_idx; 2480 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2481 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2482 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2483 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2484 for (j = 0; j < nb_rx_queue; j++) { 2485 struct fwd_stream *fs; 2486 2487 fs = fwd_streams[k + j]; 2488 fs->rx_port = fwd_ports_ids[rxp]; 2489 fs->rx_queue = rxq + j; 2490 fs->tx_port = fwd_ports_ids[txp]; 2491 fs->tx_queue = txq + j % nb_tx_queue; 2492 fs->peer_addr = fs->tx_port; 2493 fs->retry_enabled = retry_enabled; 2494 } 2495
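/* Account for the streams just mapped for this pool/TC pair. */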
fwd_lcores[lc_id]->stream_nb += 2496 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2497 } 2498 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2499 2500 tc++; 2501 if (tc < rxp_dcb_info.nb_tcs) 2502 continue; 2503 /* Restart from TC 0 on next RX port */ 2504 tc = 0; 2505 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2506 rxp = (portid_t) 2507 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2508 else 2509 rxp++; 2510 if (rxp >= nb_fwd_ports) 2511 return; 2512 /* get the dcb information on next RX and TX ports */ 2513 if ((rxp & 0x1) == 0) 2514 txp = (portid_t) (rxp + 1); 2515 else 2516 txp = (portid_t) (rxp - 1); 2517 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2518 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2519 } 2520 } 2521 2522 static void 2523 icmp_echo_config_setup(void) 2524 { 2525 portid_t rxp; 2526 queueid_t rxq; 2527 lcoreid_t lc_id; 2528 uint16_t sm_id; 2529 2530 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2531 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2532 (nb_txq * nb_fwd_ports); 2533 else 2534 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2535 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2536 cur_fwd_config.nb_fwd_streams = 2537 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2538 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2539 cur_fwd_config.nb_fwd_lcores = 2540 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2541 if (verbose_level > 0) { 2542 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2543 __FUNCTION__, 2544 cur_fwd_config.nb_fwd_lcores, 2545 cur_fwd_config.nb_fwd_ports, 2546 cur_fwd_config.nb_fwd_streams); 2547 } 2548 2549 /* reinitialize forwarding streams */ 2550 init_fwd_streams(); 2551 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2552 rxp = 0; rxq = 0; 2553 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2554 if (verbose_level > 0) 2555 printf(" core=%d: \n", lc_id); 2556 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2557 struct fwd_stream *fs; 2558 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2559 fs->rx_port = fwd_ports_ids[rxp]; 2560 fs->rx_queue = rxq; 2561 fs->tx_port = fs->rx_port; 2562 fs->tx_queue = rxq; 2563 fs->peer_addr = fs->tx_port; 2564 fs->retry_enabled = retry_enabled; 2565 if (verbose_level > 0) 2566 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2567 sm_id, fs->rx_port, fs->rx_queue, 2568 fs->tx_queue); 2569 rxq = (queueid_t) (rxq + 1); 2570 if (rxq == nb_rxq) { 2571 rxq = 0; 2572 rxp = (portid_t) (rxp + 1); 2573 } 2574 } 2575 } 2576 } 2577 2578 void 2579 fwd_config_setup(void) 2580 { 2581 cur_fwd_config.fwd_eng = cur_fwd_eng; 2582 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2583 icmp_echo_config_setup(); 2584 return; 2585 } 2586 2587 if ((nb_rxq > 1) && (nb_txq > 1)){ 2588 if (dcb_config) 2589 dcb_fwd_config_setup(); 2590 else 2591 rss_fwd_config_setup(); 2592 } 2593 else 2594 simple_fwd_config_setup(); 2595 } 2596 2597 static const char * 2598 mp_alloc_to_str(uint8_t mode) 2599 { 2600 switch (mode) { 2601 case MP_ALLOC_NATIVE: 2602 return "native"; 2603 case MP_ALLOC_ANON: 2604 return "anon"; 2605 case MP_ALLOC_XMEM: 2606 return "xmem"; 2607 case MP_ALLOC_XMEM_HUGE: 2608 return "xmemhuge"; 2609 case MP_ALLOC_XBUF: 2610 return "xbuf"; 2611 default: 2612 return "invalid"; 2613 } 2614 } 2615 2616 void 2617 pkt_fwd_config_display(struct fwd_config *cfg) 2618 { 2619 struct fwd_stream *fs; 2620 lcoreid_t lc_id; 2621 streamid_t sm_id; 2622 2623 printf("%s packet forwarding%s - ports=%d - cores=%d 
- streams=%d - " 2624 "NUMA support %s, MP allocation mode: %s\n", 2625 cfg->fwd_eng->fwd_mode_name, 2626 retry_enabled == 0 ? "" : " with retry", 2627 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2628 numa_support == 1 ? "enabled" : "disabled", 2629 mp_alloc_to_str(mp_alloc_type)); 2630 2631 if (retry_enabled) 2632 printf("TX retry num: %u, delay between TX retries: %uus\n", 2633 burst_tx_retry_num, burst_tx_delay_time); 2634 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2635 printf("Logical Core %u (socket %u) forwards packets on " 2636 "%d streams:", 2637 fwd_lcores_cpuids[lc_id], 2638 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2639 fwd_lcores[lc_id]->stream_nb); 2640 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2641 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2642 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2643 "P=%d/Q=%d (socket %u) ", 2644 fs->rx_port, fs->rx_queue, 2645 ports[fs->rx_port].socket_id, 2646 fs->tx_port, fs->tx_queue, 2647 ports[fs->tx_port].socket_id); 2648 print_ethaddr("peer=", 2649 &peer_eth_addrs[fs->peer_addr]); 2650 } 2651 printf("\n"); 2652 } 2653 printf("\n"); 2654 } 2655 2656 void 2657 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2658 { 2659 struct rte_ether_addr new_peer_addr; 2660 if (!rte_eth_dev_is_valid_port(port_id)) { 2661 printf("Error: Invalid port number %i\n", port_id); 2662 return; 2663 } 2664 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 2665 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2666 return; 2667 } 2668 peer_eth_addrs[port_id] = new_peer_addr; 2669 } 2670 2671 int 2672 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2673 { 2674 unsigned int i; 2675 unsigned int lcore_cpuid; 2676 int record_now; 2677 2678 record_now = 0; 2679 again: 2680 for (i = 0; i < nb_lc; i++) { 2681 lcore_cpuid = lcorelist[i]; 2682 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2683 printf("lcore %u not enabled\n", lcore_cpuid); 2684 return -1; 2685 } 2686 if (lcore_cpuid == rte_get_master_lcore()) { 2687 printf("lcore %u is the master lcore, which is " 2688 "reserved for command line parsing only; it " 2689 "cannot be used for packet forwarding\n", 2690 lcore_cpuid); 2691 return -1; 2692 } 2693 if (record_now) 2694 fwd_lcores_cpuids[i] = lcore_cpuid; 2695 } 2696 if (record_now == 0) { 2697 record_now = 1; 2698 goto again; 2699 } 2700 nb_cfg_lcores = (lcoreid_t) nb_lc; 2701 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2702 printf("previous number of forwarding cores %u - changed to " 2703 "number of configured cores %u\n", 2704 (unsigned int) nb_fwd_lcores, nb_lc); 2705 nb_fwd_lcores = (lcoreid_t) nb_lc; 2706 } 2707 2708 return 0; 2709 } 2710 2711 int 2712 set_fwd_lcores_mask(uint64_t lcoremask) 2713 { 2714 unsigned int lcorelist[64]; 2715 unsigned int nb_lc; 2716 unsigned int i; 2717 2718 if (lcoremask == 0) { 2719 printf("Invalid zero mask of cores\n"); 2720 return -1; 2721 } 2722 nb_lc = 0; 2723 for (i = 0; i < 64; i++) { 2724 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2725 continue; 2726 lcorelist[nb_lc++] = i; 2727 } 2728 return set_fwd_lcores_list(lcorelist, nb_lc); 2729 } 2730 2731 void 2732 set_fwd_lcores_number(uint16_t nb_lc) 2733 { 2734 if (nb_lc > nb_cfg_lcores) { 2735 printf("nb fwd cores %u > %u (max.
number of configured " 2736 "lcores) - ignored\n", 2737 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2738 return; 2739 } 2740 nb_fwd_lcores = (lcoreid_t) nb_lc; 2741 printf("Number of forwarding cores set to %u\n", 2742 (unsigned int) nb_fwd_lcores); 2743 } 2744 2745 void 2746 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2747 { 2748 unsigned int i; 2749 portid_t port_id; 2750 int record_now; 2751 2752 record_now = 0; 2753 again: 2754 for (i = 0; i < nb_pt; i++) { 2755 port_id = (portid_t) portlist[i]; 2756 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2757 return; 2758 if (record_now) 2759 fwd_ports_ids[i] = port_id; 2760 } 2761 if (record_now == 0) { 2762 record_now = 1; 2763 goto again; 2764 } 2765 nb_cfg_ports = (portid_t) nb_pt; 2766 if (nb_fwd_ports != (portid_t) nb_pt) { 2767 printf("previous number of forwarding ports %u - changed to " 2768 "number of configured ports %u\n", 2769 (unsigned int) nb_fwd_ports, nb_pt); 2770 nb_fwd_ports = (portid_t) nb_pt; 2771 } 2772 } 2773 2774 /** 2775 * Parse the user input and obtain the list of forwarding ports 2776 * 2777 * @param[in] list 2778 * String containing the user input. User can specify 2779 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 2780 * For example, if the user wants to use all the available 2781 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 2782 * If the user wants to use only the ports 1,2 then the input 2783 * is 1,2. 2784 * valid characters are '-' and ',' 2785 * @param[out] values 2786 * This array will be filled with a list of port IDs 2787 * based on the user input 2788 * Note that duplicate entries are discarded and only the first 2789 * count entries in this array are port IDs and all the rest 2790 * will contain default values 2791 * @param[in] maxsize 2792 * This parameter denotes 2 things 2793 * 1) Number of elements in the values array 2794 * 2) Maximum value of each element in the values array 2795 * @return 2796 * On success, returns total count of parsed port IDs 2797 * On failure, returns 0 2798 */ 2799 static unsigned int 2800 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 2801 { 2802 unsigned int count = 0; 2803 char *end = NULL; 2804 int min, max; 2805 int value, i; 2806 unsigned int marked[maxsize]; 2807 2808 if (list == NULL || values == NULL) 2809 return 0; 2810 2811 for (i = 0; i < (int)maxsize; i++) 2812 marked[i] = 0; 2813 2814 min = INT_MAX; 2815 2816 do { 2817 /*Remove the blank spaces if any*/ 2818 while (isblank(*list)) 2819 list++; 2820 if (*list == '\0') 2821 break; 2822 errno = 0; 2823 value = strtol(list, &end, 10); 2824 if (errno || end == NULL) 2825 return 0; 2826 if (value < 0 || value >= (int)maxsize) 2827 return 0; 2828 while (isblank(*end)) 2829 end++; 2830 if (*end == '-' && min == INT_MAX) { 2831 min = value; 2832 } else if ((*end == ',') || (*end == '\0')) { 2833 max = value; 2834 if (min == INT_MAX) 2835 min = value; 2836 for (i = min; i <= max; i++) { 2837 if (count < maxsize) { 2838 if (marked[i]) 2839 continue; 2840 values[count] = i; 2841 marked[i] = 1; 2842 count++; 2843 } 2844 } 2845 min = INT_MAX; 2846 } else 2847 return 0; 2848 list = end + 1; 2849 } while (*end != '\0'); 2850 2851 return count; 2852 } 2853 2854 void 2855 parse_fwd_portlist(const char *portlist) 2856 { 2857 unsigned int portcount; 2858 unsigned int portindex[RTE_MAX_ETHPORTS]; 2859 unsigned int i, valid_port_count = 0; 2860 2861 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 2862 if (!portcount) 2863 
rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 2864 2865 /* 2866 * Here we verify the validity of the ports 2867 * and thereby calculate the total number of 2868 * valid ports 2869 */ 2870 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 2871 if (rte_eth_dev_is_valid_port(portindex[i])) { 2872 portindex[valid_port_count] = portindex[i]; 2873 valid_port_count++; 2874 } 2875 } 2876 2877 set_fwd_ports_list(portindex, valid_port_count); 2878 } 2879 2880 void 2881 set_fwd_ports_mask(uint64_t portmask) 2882 { 2883 unsigned int portlist[64]; 2884 unsigned int nb_pt; 2885 unsigned int i; 2886 2887 if (portmask == 0) { 2888 printf("Invalid zero mask of ports\n"); 2889 return; 2890 } 2891 nb_pt = 0; 2892 RTE_ETH_FOREACH_DEV(i) { 2893 if (! ((uint64_t)(1ULL << i) & portmask)) 2894 continue; 2895 portlist[nb_pt++] = i; 2896 } 2897 set_fwd_ports_list(portlist, nb_pt); 2898 } 2899 2900 void 2901 set_fwd_ports_number(uint16_t nb_pt) 2902 { 2903 if (nb_pt > nb_cfg_ports) { 2904 printf("nb fwd ports %u > %u (number of configured " 2905 "ports) - ignored\n", 2906 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2907 return; 2908 } 2909 nb_fwd_ports = (portid_t) nb_pt; 2910 printf("Number of forwarding ports set to %u\n", 2911 (unsigned int) nb_fwd_ports); 2912 } 2913 2914 int 2915 port_is_forwarding(portid_t port_id) 2916 { 2917 unsigned int i; 2918 2919 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2920 return -1; 2921 2922 for (i = 0; i < nb_fwd_ports; i++) { 2923 if (fwd_ports_ids[i] == port_id) 2924 return 1; 2925 } 2926 2927 return 0; 2928 } 2929 2930 void 2931 set_nb_pkt_per_burst(uint16_t nb) 2932 { 2933 if (nb > MAX_PKT_BURST) { 2934 printf("nb pkt per burst: %u > %u (maximum packets per burst)" 2935 " - ignored\n", 2936 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2937 return; 2938 } 2939 nb_pkt_per_burst = nb; 2940 printf("Number of packets per burst set to %u\n", 2941 (unsigned int) nb_pkt_per_burst); 2942 } 2943 2944 static const char * 2945 tx_split_get_name(enum tx_pkt_split split) 2946 { 2947 uint32_t i; 2948 2949 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2950 if (tx_split_name[i].split == split) 2951 return tx_split_name[i].name; 2952 } 2953 return NULL; 2954 } 2955 2956 void 2957 set_tx_pkt_split(const char *name) 2958 { 2959 uint32_t i; 2960 2961 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2962 if (strcmp(tx_split_name[i].name, name) == 0) { 2963 tx_pkt_split = tx_split_name[i].split; 2964 return; 2965 } 2966 } 2967 printf("unknown value: \"%s\"\n", name); 2968 } 2969 2970 void 2971 show_tx_pkt_segments(void) 2972 { 2973 uint32_t i, n; 2974 const char *split; 2975 2976 n = tx_pkt_nb_segs; 2977 split = tx_split_get_name(tx_pkt_split); 2978 2979 printf("Number of segments: %u\n", n); 2980 printf("Segment sizes: "); 2981 for (i = 0; i != n - 1; i++) 2982 printf("%hu,", tx_pkt_seg_lengths[i]); 2983 printf("%hu\n", tx_pkt_seg_lengths[i]); 2984 printf("Split packet: %s\n", split); 2985 } 2986 2987 void 2988 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2989 { 2990 uint16_t tx_pkt_len; 2991 unsigned i; 2992 2993 if (nb_segs >= (unsigned) nb_txd) { 2994 printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n", 2995 nb_segs, (unsigned int) nb_txd); 2996 return; 2997 } 2998 2999 /* 3000 * Check that each segment length is less than or equal to the 3001 * mbuf data size. 3002 * Check also that the total packet length is greater than or equal to 3003 * the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 3004 * 20 + 8).
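 *
 * Worked example (illustrative, not from the original sources): two
 * segments of 64 and 128 bytes give tx_pkt_len = 192, which is accepted
 * because each segment fits in an mbuf and the total is above the
 * 42-byte minimum (14-byte Ethernet header + 20-byte IP header +
 * 8-byte UDP header).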
3005 */ 3006 tx_pkt_len = 0; 3007 for (i = 0; i < nb_segs; i++) { 3008 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 3009 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 3010 i, seg_lengths[i], (unsigned) mbuf_data_size); 3011 return; 3012 } 3013 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 3014 } 3015 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 3016 printf("total packet length=%u < %d - give up\n", 3017 (unsigned) tx_pkt_len, 3018 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 3019 return; 3020 } 3021 3022 for (i = 0; i < nb_segs; i++) 3023 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3024 3025 tx_pkt_length = tx_pkt_len; 3026 tx_pkt_nb_segs = (uint8_t) nb_segs; 3027 } 3028 3029 void 3030 show_tx_pkt_times(void) 3031 { 3032 printf("Interburst gap: %u\n", tx_pkt_times_inter); 3033 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 3034 } 3035 3036 void 3037 set_tx_pkt_times(unsigned int *tx_times) 3038 { 3039 uint16_t port_id; 3040 int offload_found = 0; 3041 int offset; 3042 int flag; 3043 3044 static const struct rte_mbuf_dynfield desc_offs = { 3045 .name = RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, 3046 .size = sizeof(uint64_t), 3047 .align = __alignof__(uint64_t), 3048 }; 3049 static const struct rte_mbuf_dynflag desc_flag = { 3050 .name = RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, 3051 }; 3052 3053 RTE_ETH_FOREACH_DEV(port_id) { 3054 struct rte_eth_dev_info dev_info = { 0 }; 3055 int ret; 3056 3057 ret = rte_eth_dev_info_get(port_id, &dev_info); 3058 if (ret == 0 && dev_info.tx_offload_capa & 3059 DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) { 3060 offload_found = 1; 3061 break; 3062 } 3063 } 3064 if (!offload_found) { 3065 printf("No device supporting Tx timestamp scheduling found, " 3066 "dynamic flag and field not registered\n"); 3067 return; 3068 } 3069 offset = rte_mbuf_dynfield_register(&desc_offs); 3070 if (offset < 0 && rte_errno != EEXIST) 3071 printf("Dynamic timestamp field registration error: %d\n", 3072 rte_errno); 3073 flag = rte_mbuf_dynflag_register(&desc_flag); 3074 if (flag < 0 && rte_errno != EEXIST) 3075 printf("Dynamic timestamp flag registration error: %d\n", 3076 rte_errno); 3077 tx_pkt_times_inter = tx_times[0]; 3078 tx_pkt_times_intra = tx_times[1]; 3079 } 3080 3081 void 3082 setup_gro(const char *onoff, portid_t port_id) 3083 { 3084 if (!rte_eth_dev_is_valid_port(port_id)) { 3085 printf("invalid port id %u\n", port_id); 3086 return; 3087 } 3088 if (test_done == 0) { 3089 printf("Before enabling/disabling GRO," 3090 " please stop forwarding first\n"); 3091 return; 3092 } 3093 if (strcmp(onoff, "on") == 0) { 3094 if (gro_ports[port_id].enable != 0) { 3095 printf("GRO is already enabled on port %u.
Please" 3096 " disable it first\n", port_id); 3097 return; 3098 } 3099 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3100 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 3101 gro_ports[port_id].param.max_flow_num = 3102 GRO_DEFAULT_FLOW_NUM; 3103 gro_ports[port_id].param.max_item_per_flow = 3104 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 3105 } 3106 gro_ports[port_id].enable = 1; 3107 } else { 3108 if (gro_ports[port_id].enable == 0) { 3109 printf("GRO is already disabled on port %u\n", port_id); 3110 return; 3111 } 3112 gro_ports[port_id].enable = 0; 3113 } 3114 } 3115 3116 void 3117 setup_gro_flush_cycles(uint8_t cycles) 3118 { 3119 if (test_done == 0) { 3120 printf("Before changing the flush interval for GRO," 3121 " please stop forwarding first.\n"); 3122 return; 3123 } 3124 3125 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 3126 GRO_DEFAULT_FLUSH_CYCLES) { 3127 printf("The flushing cycle must be in the range" 3128 " of 1 to %u. Reverting to the default" 3129 " value %u.\n", 3130 GRO_MAX_FLUSH_CYCLES, 3131 GRO_DEFAULT_FLUSH_CYCLES); 3132 cycles = GRO_DEFAULT_FLUSH_CYCLES; 3133 } 3134 3135 gro_flush_cycles = cycles; 3136 } 3137 3138 void 3139 show_gro(portid_t port_id) 3140 { 3141 struct rte_gro_param *param; 3142 uint32_t max_pkts_num; 3143 3144 if (!rte_eth_dev_is_valid_port(port_id)) { 3145 printf("Invalid port id %u.\n", port_id); 3146 return; 3147 } 3148 /* Read the GRO configuration only once the port id is known valid. */ 3149 param = &gro_ports[port_id].param; 3150 if (gro_ports[port_id].enable) { 3151 printf("GRO type: TCP/IPv4\n"); 3152 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3153 max_pkts_num = param->max_flow_num * 3154 param->max_item_per_flow; 3155 } else 3156 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 3157 printf("Max number of packets to perform GRO: %u\n", 3158 max_pkts_num); 3159 printf("Flushing cycles: %u\n", gro_flush_cycles); 3160 } else 3161 printf("GRO is not enabled on port %u.\n", port_id); 3162 } 3163 3164 void 3165 setup_gso(const char *mode, portid_t port_id) 3166 { 3167 if (!rte_eth_dev_is_valid_port(port_id)) { 3168 printf("invalid port id %u\n", port_id); 3169 return; 3170 } 3171 if (strcmp(mode, "on") == 0) { 3172 if (test_done == 0) { 3173 printf("before enabling GSO," 3174 " please stop forwarding first\n"); 3175 return; 3176 } 3177 gso_ports[port_id].enable = 1; 3178 } else if (strcmp(mode, "off") == 0) { 3179 if (test_done == 0) { 3180 printf("before disabling GSO," 3181 " please stop forwarding first\n"); 3182 return; 3183 } 3184 gso_ports[port_id].enable = 0; 3185 } 3186 } 3187 3188 char* 3189 list_pkt_forwarding_modes(void) 3190 { 3191 static char fwd_modes[128] = ""; 3192 const char *separator = "|"; 3193 struct fwd_engine *fwd_eng; 3194 unsigned i = 0; 3195 3196 if (strlen(fwd_modes) == 0) { 3197 while ((fwd_eng = fwd_engines[i++]) != NULL) { 3198 strncat(fwd_modes, fwd_eng->fwd_mode_name, 3199 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 3200 strncat(fwd_modes, separator, 3201 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 3202 } 3203 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 3204 } 3205 3206 return fwd_modes; 3207 } 3208 3209 char* 3210 list_pkt_forwarding_retry_modes(void) 3211 { 3212 static char fwd_modes[128] = ""; 3213 const char *separator = "|"; 3214 struct fwd_engine *fwd_eng; 3215 unsigned i = 0; 3216 3217 if (strlen(fwd_modes) == 0) { 3218 while ((fwd_eng = fwd_engines[i++]) != NULL) { 3219 if (fwd_eng == &rx_only_engine) 3220 continue; 3221 strncat(fwd_modes, fwd_eng->fwd_mode_name, 3222 sizeof(fwd_modes) - 3223 strlen(fwd_modes) - 1); 3224 strncat(fwd_modes, separator, 3225 sizeof(fwd_modes) -
3226 strlen(fwd_modes) - 1); 3227 } 3228 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 3229 } 3230 3231 return fwd_modes; 3232 } 3233 3234 void 3235 set_pkt_forwarding_mode(const char *fwd_mode_name) 3236 { 3237 struct fwd_engine *fwd_eng; 3238 unsigned i; 3239 3240 i = 0; 3241 while ((fwd_eng = fwd_engines[i]) != NULL) { 3242 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 3243 printf("Set %s packet forwarding mode%s\n", 3244 fwd_mode_name, 3245 retry_enabled == 0 ? "" : " with retry"); 3246 cur_fwd_eng = fwd_eng; 3247 return; 3248 } 3249 i++; 3250 } 3251 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 3252 } 3253 3254 void 3255 add_rx_dump_callbacks(portid_t portid) 3256 { 3257 struct rte_eth_dev_info dev_info; 3258 uint16_t queue; 3259 int ret; 3260 3261 if (port_id_is_invalid(portid, ENABLED_WARN)) 3262 return; 3263 3264 ret = eth_dev_info_get_print_err(portid, &dev_info); 3265 if (ret != 0) 3266 return; 3267 3268 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 3269 if (!ports[portid].rx_dump_cb[queue]) 3270 ports[portid].rx_dump_cb[queue] = 3271 rte_eth_add_rx_callback(portid, queue, 3272 dump_rx_pkts, NULL); 3273 } 3274 3275 void 3276 add_tx_dump_callbacks(portid_t portid) 3277 { 3278 struct rte_eth_dev_info dev_info; 3279 uint16_t queue; 3280 int ret; 3281 3282 if (port_id_is_invalid(portid, ENABLED_WARN)) 3283 return; 3284 3285 ret = eth_dev_info_get_print_err(portid, &dev_info); 3286 if (ret != 0) 3287 return; 3288 3289 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 3290 if (!ports[portid].tx_dump_cb[queue]) 3291 ports[portid].tx_dump_cb[queue] = 3292 rte_eth_add_tx_callback(portid, queue, 3293 dump_tx_pkts, NULL); 3294 } 3295 3296 void 3297 remove_rx_dump_callbacks(portid_t portid) 3298 { 3299 struct rte_eth_dev_info dev_info; 3300 uint16_t queue; 3301 int ret; 3302 3303 if (port_id_is_invalid(portid, ENABLED_WARN)) 3304 return; 3305 3306 ret = eth_dev_info_get_print_err(portid, &dev_info); 3307 if (ret != 0) 3308 return; 3309 3310 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 3311 if (ports[portid].rx_dump_cb[queue]) { 3312 rte_eth_remove_rx_callback(portid, queue, 3313 ports[portid].rx_dump_cb[queue]); 3314 ports[portid].rx_dump_cb[queue] = NULL; 3315 } 3316 } 3317 3318 void 3319 remove_tx_dump_callbacks(portid_t portid) 3320 { 3321 struct rte_eth_dev_info dev_info; 3322 uint16_t queue; 3323 int ret; 3324 3325 if (port_id_is_invalid(portid, ENABLED_WARN)) 3326 return; 3327 3328 ret = eth_dev_info_get_print_err(portid, &dev_info); 3329 if (ret != 0) 3330 return; 3331 3332 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 3333 if (ports[portid].tx_dump_cb[queue]) { 3334 rte_eth_remove_tx_callback(portid, queue, 3335 ports[portid].tx_dump_cb[queue]); 3336 ports[portid].tx_dump_cb[queue] = NULL; 3337 } 3338 } 3339 3340 void 3341 configure_rxtx_dump_callbacks(uint16_t verbose) 3342 { 3343 portid_t portid; 3344 3345 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3346 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 3347 return; 3348 #endif 3349 3350 RTE_ETH_FOREACH_DEV(portid) 3351 { 3352 if (verbose == 1 || verbose > 2) 3353 add_rx_dump_callbacks(portid); 3354 else 3355 remove_rx_dump_callbacks(portid); 3356 if (verbose >= 2) 3357 add_tx_dump_callbacks(portid); 3358 else 3359 remove_tx_dump_callbacks(portid); 3360 } 3361 } 3362 3363 void 3364 set_verbose_level(uint16_t vb_level) 3365 { 3366 printf("Change verbose level from %u to %u\n", 3367 (unsigned int) verbose_level, (unsigned int) vb_level); 3368 verbose_level = 
vb_level; 3369 configure_rxtx_dump_callbacks(verbose_level); 3370 } 3371 3372 void 3373 vlan_extend_set(portid_t port_id, int on) 3374 { 3375 int diag; 3376 int vlan_offload; 3377 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3378 3379 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3380 return; 3381 3382 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3383 3384 if (on) { 3385 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 3386 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3387 } else { 3388 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 3389 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3390 } 3391 3392 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3393 if (diag < 0) 3394 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed " 3395 "diag=%d\n", port_id, on, diag); 3396 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3397 } 3398 3399 void 3400 rx_vlan_strip_set(portid_t port_id, int on) 3401 { 3402 int diag; 3403 int vlan_offload; 3404 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3405 3406 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3407 return; 3408 3409 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3410 3411 if (on) { 3412 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 3413 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3414 } else { 3415 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 3416 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3417 } 3418 3419 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3420 if (diag < 0) 3421 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed " 3422 "diag=%d\n", port_id, on, diag); 3423 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3424 } 3425 3426 void 3427 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 3428 { 3429 int diag; 3430 3431 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3432 return; 3433 3434 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 3435 if (diag < 0) 3436 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed " 3437 "diag=%d\n", port_id, queue_id, on, diag); 3438 } 3439 3440 void 3441 rx_vlan_filter_set(portid_t port_id, int on) 3442 { 3443 int diag; 3444 int vlan_offload; 3445 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3446 3447 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3448 return; 3449 3450 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3451 3452 if (on) { 3453 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 3454 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3455 } else { 3456 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 3457 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3458 } 3459 3460 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3461 if (diag < 0) 3462 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed " 3463 "diag=%d\n", port_id, on, diag); 3464 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3465 } 3466 3467 void 3468 rx_vlan_qinq_strip_set(portid_t port_id, int on) 3469 { 3470 int diag; 3471 int vlan_offload; 3472 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3473 3474 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3475 return; 3476 3477 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3478 3479 if (on) { 3480 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 3481 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3482 } else { 3483 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 3484 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3485 } 3486 3487 diag = rte_eth_dev_set_vlan_offload(port_id,
vlan_offload); 3488 if (diag < 0) 3489 printf("%s(port_id=%d, on=%d) failed " 3490 "diag=%d\n", __func__, port_id, on, diag); 3491 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3492 } 3493 3494 int 3495 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 3496 { 3497 int diag; 3498 3499 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3500 return 1; 3501 if (vlan_id_is_invalid(vlan_id)) 3502 return 1; 3503 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 3504 if (diag == 0) 3505 return 0; 3506 printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed " 3507 "diag=%d\n", 3508 port_id, vlan_id, on, diag); 3509 return -1; 3510 } 3511 3512 void 3513 rx_vlan_all_filter_set(portid_t port_id, int on) 3514 { 3515 uint16_t vlan_id; 3516 3517 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3518 return; 3519 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 3520 if (rx_vft_set(port_id, vlan_id, on)) 3521 break; 3522 } 3523 } 3524 3525 void 3526 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 3527 { 3528 int diag; 3529 3530 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3531 return; 3532 3533 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 3534 if (diag == 0) 3535 return; 3536 3537 printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed " 3538 "diag=%d\n", 3539 port_id, vlan_type, tp_id, diag); 3540 } 3541 3542 void 3543 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3544 { 3545 struct rte_eth_dev_info dev_info; 3546 int ret; 3547 3548 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3549 return; 3550 if (vlan_id_is_invalid(vlan_id)) 3551 return; 3552 3553 if (ports[port_id].dev_conf.txmode.offloads & 3554 DEV_TX_OFFLOAD_QINQ_INSERT) { 3555 printf("Error: QinQ insert is enabled, cannot set single VLAN insert\n"); 3556 return; 3557 } 3558 3559 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3560 if (ret != 0) 3561 return; 3562 3563 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3564 printf("Error: vlan insert is not supported by port %d\n", 3565 port_id); 3566 return; 3567 } 3568 3569 tx_vlan_reset(port_id); 3570 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3571 ports[port_id].tx_vlan_id = vlan_id; 3572 } 3573 3574 void 3575 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3576 { 3577 struct rte_eth_dev_info dev_info; 3578 int ret; 3579 3580 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3581 return; 3582 if (vlan_id_is_invalid(vlan_id)) 3583 return; 3584 if (vlan_id_is_invalid(vlan_id_outer)) 3585 return; 3586 3587 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3588 if (ret != 0) 3589 return; 3590 3591 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3592 printf("Error: qinq insert not supported by port %d\n", 3593 port_id); 3594 return; 3595 } 3596 3597 tx_vlan_reset(port_id); 3598 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 3599 DEV_TX_OFFLOAD_QINQ_INSERT); 3600 ports[port_id].tx_vlan_id = vlan_id; 3601 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3602 } 3603 3604 void 3605 tx_vlan_reset(portid_t port_id) 3606 { 3607 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3608 return; 3609 ports[port_id].dev_conf.txmode.offloads &= 3610 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3611 DEV_TX_OFFLOAD_QINQ_INSERT); 3612 ports[port_id].tx_vlan_id = 0; 3613 ports[port_id].tx_vlan_id_outer = 0; 3614 } 3615 3616 void 3617 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3618 { 3619 if (port_id_is_invalid(port_id,
ENABLED_WARN)) 3620 return; 3621 3622 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3623 } 3624 3625 void 3626 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3627 { 3628 uint16_t i; 3629 uint8_t existing_mapping_found = 0; 3630 3631 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3632 return; 3633 3634 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3635 return; 3636 3637 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3638 printf("map_value not in required range 0..%d\n", 3639 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3640 return; 3641 } 3642 3643 if (!is_rx) { /*then tx*/ 3644 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3645 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3646 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3647 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3648 existing_mapping_found = 1; 3649 break; 3650 } 3651 } 3652 if (!existing_mapping_found) { /* A new additional mapping... */ 3653 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3654 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3655 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3656 nb_tx_queue_stats_mappings++; 3657 } 3658 } 3659 else { /*rx*/ 3660 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3661 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3662 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3663 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3664 existing_mapping_found = 1; 3665 break; 3666 } 3667 } 3668 if (!existing_mapping_found) { /* A new additional mapping... */ 3669 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3670 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3671 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3672 nb_rx_queue_stats_mappings++; 3673 } 3674 } 3675 } 3676 3677 void 3678 set_xstats_hide_zero(uint8_t on_off) 3679 { 3680 xstats_hide_zero = on_off; 3681 } 3682 3683 void 3684 set_record_core_cycles(uint8_t on_off) 3685 { 3686 record_core_cycles = on_off; 3687 } 3688 3689 void 3690 set_record_burst_stats(uint8_t on_off) 3691 { 3692 record_burst_stats = on_off; 3693 } 3694 3695 static inline void 3696 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3697 { 3698 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3699 3700 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3701 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3702 " tunnel_id: 0x%08x", 3703 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3704 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3705 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3706 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3707 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3708 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3709 3710 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3711 rte_be_to_cpu_16(mask->src_port_mask), 3712 rte_be_to_cpu_16(mask->dst_port_mask)); 3713 3714 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3715 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3716 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3717 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3718 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3719 3720 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3721 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3722 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3723 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3724 
rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3725 } 3726 3727 printf("\n"); 3728 } 3729 3730 static inline void 3731 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3732 { 3733 struct rte_eth_flex_payload_cfg *cfg; 3734 uint32_t i, j; 3735 3736 for (i = 0; i < flex_conf->nb_payloads; i++) { 3737 cfg = &flex_conf->flex_set[i]; 3738 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3739 printf("\n RAW: "); 3740 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3741 printf("\n L2_PAYLOAD: "); 3742 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3743 printf("\n L3_PAYLOAD: "); 3744 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3745 printf("\n L4_PAYLOAD: "); 3746 else 3747 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3748 for (j = 0; j < num; j++) 3749 printf(" %-5u", cfg->src_offset[j]); 3750 } 3751 printf("\n"); 3752 } 3753 3754 static char * 3755 flowtype_to_str(uint16_t flow_type) 3756 { 3757 struct flow_type_info { 3758 char str[32]; 3759 uint16_t ftype; 3760 }; 3761 3762 uint8_t i; 3763 static struct flow_type_info flowtype_str_table[] = { 3764 {"raw", RTE_ETH_FLOW_RAW}, 3765 {"ipv4", RTE_ETH_FLOW_IPV4}, 3766 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3767 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3768 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3769 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3770 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3771 {"ipv6", RTE_ETH_FLOW_IPV6}, 3772 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3773 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3774 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3775 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3776 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3777 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3778 {"port", RTE_ETH_FLOW_PORT}, 3779 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3780 {"geneve", RTE_ETH_FLOW_GENEVE}, 3781 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3782 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3783 }; 3784 3785 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3786 if (flowtype_str_table[i].ftype == flow_type) 3787 return flowtype_str_table[i].str; 3788 } 3789 3790 return NULL; 3791 } 3792 3793 static inline void 3794 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3795 { 3796 struct rte_eth_fdir_flex_mask *mask; 3797 uint32_t i, j; 3798 char *p; 3799 3800 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3801 mask = &flex_conf->flex_mask[i]; 3802 p = flowtype_to_str(mask->flow_type); 3803 printf("\n %s:\t", p ? 
p : "unknown"); 3804 for (j = 0; j < num; j++) 3805 printf(" %02x", mask->mask[j]); 3806 } 3807 printf("\n"); 3808 } 3809 3810 static inline void 3811 print_fdir_flow_type(uint32_t flow_types_mask) 3812 { 3813 int i; 3814 char *p; 3815 3816 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3817 if (!(flow_types_mask & (1 << i))) 3818 continue; 3819 p = flowtype_to_str(i); 3820 if (p) 3821 printf(" %s", p); 3822 else 3823 printf(" unknown"); 3824 } 3825 printf("\n"); 3826 } 3827 3828 static int 3829 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 3830 struct rte_eth_fdir_stats *fdir_stat) 3831 { 3832 int ret; 3833 3834 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3835 if (!ret) { 3836 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3837 RTE_ETH_FILTER_INFO, fdir_info); 3838 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3839 RTE_ETH_FILTER_STATS, fdir_stat); 3840 return 0; 3841 } 3842 3843 #ifdef RTE_LIBRTE_I40E_PMD 3844 if (ret == -ENOTSUP) { 3845 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 3846 if (!ret) 3847 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 3848 } 3849 #endif 3850 #ifdef RTE_LIBRTE_IXGBE_PMD 3851 if (ret == -ENOTSUP) { 3852 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 3853 if (!ret) 3854 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 3855 } 3856 #endif 3857 switch (ret) { 3858 case 0: 3859 break; 3860 case -ENOTSUP: 3861 printf("\n FDIR is not supported on port %-2d\n", 3862 port_id); 3863 break; 3864 default: 3865 printf("programming error: (%s)\n", strerror(-ret)); 3866 break; 3867 } 3868 return ret; 3869 } 3870 3871 void 3872 fdir_get_infos(portid_t port_id) 3873 { 3874 struct rte_eth_fdir_stats fdir_stat; 3875 struct rte_eth_fdir_info fdir_info; 3876 3877 static const char *fdir_stats_border = "########################"; 3878 3879 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3880 return; 3881 3882 memset(&fdir_info, 0, sizeof(fdir_info)); 3883 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3884 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 3885 return; 3886 3887 printf("\n %s FDIR infos for port %-2d %s\n", 3888 fdir_stats_border, port_id, fdir_stats_border); 3889 printf(" MODE: "); 3890 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3891 printf(" PERFECT\n"); 3892 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3893 printf(" PERFECT-MAC-VLAN\n"); 3894 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3895 printf(" PERFECT-TUNNEL\n"); 3896 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3897 printf(" SIGNATURE\n"); 3898 else 3899 printf(" DISABLE\n"); 3900 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3901 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3902 printf(" SUPPORTED FLOW TYPE: "); 3903 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3904 } 3905 printf(" FLEX PAYLOAD INFO:\n"); 3906 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3907 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3908 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3909 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3910 fdir_info.flex_payload_unit, 3911 fdir_info.max_flex_payload_segment_num, 3912 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3913 printf(" MASK: "); 3914 print_fdir_mask(&fdir_info.mask); 3915 if (fdir_info.flex_conf.nb_payloads > 0) { 3916 printf(" FLEX PAYLOAD SRC OFFSET:"); 3917 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3918 } 3919 if 
(fdir_info.flex_conf.nb_flexmasks > 0) { 3920 printf(" FLEX MASK CFG:"); 3921 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3922 } 3923 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3924 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3925 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3926 fdir_info.guarant_spc, fdir_info.best_spc); 3927 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3928 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3929 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3930 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3931 fdir_stat.collision, fdir_stat.free, 3932 fdir_stat.maxhash, fdir_stat.maxlen, 3933 fdir_stat.add, fdir_stat.remove, 3934 fdir_stat.f_add, fdir_stat.f_remove); 3935 printf(" %s############################%s\n", 3936 fdir_stats_border, fdir_stats_border); 3937 } 3938 3939 void 3940 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3941 { 3942 struct rte_port *port; 3943 struct rte_eth_fdir_flex_conf *flex_conf; 3944 int i, idx = 0; 3945 3946 port = &ports[port_id]; 3947 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3948 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3949 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3950 idx = i; 3951 break; 3952 } 3953 } 3954 if (i >= RTE_ETH_FLOW_MAX) { 3955 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3956 idx = flex_conf->nb_flexmasks; 3957 flex_conf->nb_flexmasks++; 3958 } else { 3959 printf("The flex mask table is full. Can not set flex" 3960 " mask for flow_type(%u).", cfg->flow_type); 3961 return; 3962 } 3963 } 3964 rte_memcpy(&flex_conf->flex_mask[idx], 3965 cfg, 3966 sizeof(struct rte_eth_fdir_flex_mask)); 3967 } 3968 3969 void 3970 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3971 { 3972 struct rte_port *port; 3973 struct rte_eth_fdir_flex_conf *flex_conf; 3974 int i, idx = 0; 3975 3976 port = &ports[port_id]; 3977 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3978 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3979 if (cfg->type == flex_conf->flex_set[i].type) { 3980 idx = i; 3981 break; 3982 } 3983 } 3984 if (i >= RTE_ETH_PAYLOAD_MAX) { 3985 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3986 idx = flex_conf->nb_payloads; 3987 flex_conf->nb_payloads++; 3988 } else { 3989 printf("The flex payload table is full. Can not set" 3990 " flex payload for type(%u).", cfg->type); 3991 return; 3992 } 3993 } 3994 rte_memcpy(&flex_conf->flex_set[idx], 3995 cfg, 3996 sizeof(struct rte_eth_flex_payload_cfg)); 3997 3998 } 3999 4000 void 4001 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 4002 { 4003 #ifdef RTE_LIBRTE_IXGBE_PMD 4004 int diag; 4005 4006 if (is_rx) 4007 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 4008 else 4009 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 4010 4011 if (diag == 0) 4012 return; 4013 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 4014 is_rx ? "rx" : "tx", port_id, diag); 4015 return; 4016 #endif 4017 printf("VF %s setting not supported for port %d\n", 4018 is_rx ? 
"Rx" : "Tx", port_id); 4019 RTE_SET_USED(vf); 4020 RTE_SET_USED(on); 4021 } 4022 4023 int 4024 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 4025 { 4026 int diag; 4027 struct rte_eth_link link; 4028 int ret; 4029 4030 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4031 return 1; 4032 ret = eth_link_get_nowait_print_err(port_id, &link); 4033 if (ret < 0) 4034 return 1; 4035 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN && 4036 rate > link.link_speed) { 4037 printf("Invalid rate value:%u bigger than link speed: %u\n", 4038 rate, link.link_speed); 4039 return 1; 4040 } 4041 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 4042 if (diag == 0) 4043 return diag; 4044 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 4045 port_id, diag); 4046 return diag; 4047 } 4048 4049 int 4050 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 4051 { 4052 int diag = -ENOTSUP; 4053 4054 RTE_SET_USED(vf); 4055 RTE_SET_USED(rate); 4056 RTE_SET_USED(q_msk); 4057 4058 #ifdef RTE_LIBRTE_IXGBE_PMD 4059 if (diag == -ENOTSUP) 4060 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 4061 q_msk); 4062 #endif 4063 #ifdef RTE_LIBRTE_BNXT_PMD 4064 if (diag == -ENOTSUP) 4065 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 4066 #endif 4067 if (diag == 0) 4068 return diag; 4069 4070 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 4071 port_id, diag); 4072 return diag; 4073 } 4074 4075 /* 4076 * Functions to manage the set of filtered Multicast MAC addresses. 4077 * 4078 * A pool of filtered multicast MAC addresses is associated with each port. 4079 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 4080 * The address of the pool and the number of valid multicast MAC addresses 4081 * recorded in the pool are stored in the fields "mc_addr_pool" and 4082 * "mc_addr_nb" of the "rte_port" data structure. 4083 * 4084 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 4085 * to be supplied a contiguous array of multicast MAC addresses. 4086 * To comply with this constraint, the set of multicast addresses recorded 4087 * into the pool are systematically compacted at the beginning of the pool. 4088 * Hence, when a multicast address is removed from the pool, all following 4089 * addresses, if any, are copied back to keep the set contiguous. 4090 */ 4091 #define MCAST_POOL_INC 32 4092 4093 static int 4094 mcast_addr_pool_extend(struct rte_port *port) 4095 { 4096 struct rte_ether_addr *mc_pool; 4097 size_t mc_pool_size; 4098 4099 /* 4100 * If a free entry is available at the end of the pool, just 4101 * increment the number of recorded multicast addresses. 4102 */ 4103 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 4104 port->mc_addr_nb++; 4105 return 0; 4106 } 4107 4108 /* 4109 * [re]allocate a pool with MCAST_POOL_INC more entries. 4110 * The previous test guarantees that port->mc_addr_nb is a multiple 4111 * of MCAST_POOL_INC. 

static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}

static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
		       port_id, port->mc_addr_nb, diag);

	return diag;
}

void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, remove the address from the pool */
		mcast_addr_pool_remove(port, i);
}

void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, add the address back into the pool */
		mcast_addr_pool_append(port, mc_addr);
}
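
/*
 * mcast_addr_add() and mcast_addr_remove() back the testpmd
 * "mcast_addr add|remove (port_id) (mcast_addr)" commands. Both update
 * the shadow pool first and then push the full list to the device with
 * rte_eth_dev_set_mc_addr_list(); on failure the pool change is rolled
 * back, so the pool keeps mirroring what the device actually filters.
 */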

void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get DCB info on port %-2d\n", port_id);
		return;
	}
	printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n TC : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		printf("%s: Failed to malloc memory\n", __func__);
		return buf;
	}

	ret = read(fd, buf, pkg_size);
	if (ret < 0) {
		close(fd);
		printf("%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}

int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}
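
/*
 * Typical use of the three helpers above, e.g. around a device package
 * load (a sketch; "pkg.bin" and "pkg.out" are illustrative paths, not
 * testpmd defaults):
 *
 *	uint32_t size;
 *	uint8_t *buf = open_file("pkg.bin", &size);
 *
 *	if (buf != NULL) {
 *		... inspect or modify buf[0..size-1] ...
 *		save_file("pkg.out", buf, size);
 *		close_file(buf);
 *	}
 */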

void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_LIBRTE_I40E_PMD
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("no queue region has been configured on this port\n");

	printf("\n %s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u\n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u\n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}

void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	struct rte_ether_addr *addr;
	uint32_t i, num_macs = 0;
	struct rte_eth_dev *dev;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %u\n", num_macs);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf(" %s\n", buf);
	}
}

void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	printf("Number of Multicast MAC addresses added: %u\n",
	       port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf(" %s\n", buf);
	}
}
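
/*
 * Sample output of show_mcast_macs() (illustrative; the address below
 * is made up):
 *
 *	Number of Multicast MAC addresses added: 1
 *	 01:00:5E:00:00:01
 */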