1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <rte_config.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 const struct rss_type_info rss_type_table[] = { 77 { "all", ETH_RSS_IP | ETH_RSS_TCP | 78 ETH_RSS_UDP | ETH_RSS_SCTP | 79 ETH_RSS_L2_PAYLOAD }, 80 { "none", 0 }, 81 { "ipv4", ETH_RSS_IPV4 }, 82 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 83 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 84 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 85 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 86 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 87 { "ipv6", ETH_RSS_IPV6 }, 88 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 89 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 90 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 91 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 92 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 93 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 94 { "ipv6-ex", ETH_RSS_IPV6_EX }, 95 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 96 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 97 { "port", ETH_RSS_PORT }, 98 { "vxlan", ETH_RSS_VXLAN }, 99 { "geneve", ETH_RSS_GENEVE }, 100 { "nvgre", ETH_RSS_NVGRE }, 101 { "ip", ETH_RSS_IP }, 102 { "udp", ETH_RSS_UDP }, 103 { "tcp", ETH_RSS_TCP }, 104 { "sctp", ETH_RSS_SCTP }, 105 { "tunnel", ETH_RSS_TUNNEL }, 106 { NULL, 0 }, 107 }; 108 109 static void 110 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 111 { 112 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 113 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 114 printf("%s%s", name, buf); 115 } 116 117 void 118 nic_stats_display(portid_t port_id) 119 { 120 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 121 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 122 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 123 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 124 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 125 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 126 diff_cycles; 127 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 128 struct rte_eth_stats stats; 129 struct rte_port *port = &ports[port_id]; 130 uint8_t i; 131 132 static const char *nic_stats_border = "########################"; 133 134 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 135 print_valid_ports(); 136 return; 137 } 138 rte_eth_stats_get(port_id, &stats); 139 printf("\n %s NIC statistics for port %-2d %s\n", 140 nic_stats_border, port_id, nic_stats_border); 141 142 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 143 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 144 "%-"PRIu64"\n", 145 stats.ipackets, stats.imissed, stats.ibytes); 146 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 147 printf(" RX-nombuf: %-10"PRIu64"\n", 148 stats.rx_nombuf); 149 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 150 "%-"PRIu64"\n", 151 stats.opackets, stats.oerrors, stats.obytes); 152 } 153 else { 154 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 155 " RX-bytes: %10"PRIu64"\n", 156 stats.ipackets, stats.ierrors, stats.ibytes); 157 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 158 printf(" RX-nombuf: %10"PRIu64"\n", 159 stats.rx_nombuf); 160 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 161 " TX-bytes: %10"PRIu64"\n", 162 stats.opackets, stats.oerrors, stats.obytes); 163 } 164 165 if (port->rx_queue_stats_mapping_enabled) { 166 printf("\n"); 167 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 168 printf(" Stats reg %2d RX-packets: %10"PRIu64 169 " RX-errors: %10"PRIu64 170 " RX-bytes: %10"PRIu64"\n", 171 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 172 } 173 } 174 if (port->tx_queue_stats_mapping_enabled) { 175 printf("\n"); 176 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 177 printf(" Stats reg %2d TX-packets: %10"PRIu64 178 " TX-bytes: %10"PRIu64"\n", 179 i, stats.q_opackets[i], stats.q_obytes[i]); 180 } 181 } 182 183 diff_cycles = prev_cycles[port_id]; 184 prev_cycles[port_id] = rte_rdtsc(); 185 if (diff_cycles > 0) 186 diff_cycles = prev_cycles[port_id] - diff_cycles; 187 188 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 189 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 190 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 191 (stats.opackets - prev_pkts_tx[port_id]) : 0; 192 prev_pkts_rx[port_id] = stats.ipackets; 193 prev_pkts_tx[port_id] = stats.opackets; 194 mpps_rx = diff_cycles > 0 ? 195 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 196 mpps_tx = diff_cycles > 0 ? 197 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 198 199 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 200 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 201 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 202 (stats.obytes - prev_bytes_tx[port_id]) : 0; 203 prev_bytes_rx[port_id] = stats.ibytes; 204 prev_bytes_tx[port_id] = stats.obytes; 205 mbps_rx = diff_cycles > 0 ? 206 diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0; 207 mbps_tx = diff_cycles > 0 ? 208 diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0; 209 210 printf("\n Throughput (since last show)\n"); 211 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 212 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 213 mpps_tx, mbps_tx * 8); 214 215 printf(" %s############################%s\n", 216 nic_stats_border, nic_stats_border); 217 } 218 219 void 220 nic_stats_clear(portid_t port_id) 221 { 222 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 223 print_valid_ports(); 224 return; 225 } 226 rte_eth_stats_reset(port_id); 227 printf("\n NIC statistics for port %d cleared\n", port_id); 228 } 229 230 void 231 nic_xstats_display(portid_t port_id) 232 { 233 struct rte_eth_xstat *xstats; 234 int cnt_xstats, idx_xstat; 235 struct rte_eth_xstat_name *xstats_names; 236 237 printf("###### NIC extended statistics for port %-2d\n", port_id); 238 if (!rte_eth_dev_is_valid_port(port_id)) { 239 printf("Error: Invalid port number %i\n", port_id); 240 return; 241 } 242 243 /* Get count */ 244 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 245 if (cnt_xstats < 0) { 246 printf("Error: Cannot get count of xstats\n"); 247 return; 248 } 249 250 /* Get id-name lookup table */ 251 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 252 if (xstats_names == NULL) { 253 printf("Cannot allocate memory for xstats lookup\n"); 254 return; 255 } 256 if (cnt_xstats != rte_eth_xstats_get_names( 257 port_id, xstats_names, cnt_xstats)) { 258 printf("Error: Cannot get xstats lookup\n"); 259 free(xstats_names); 260 return; 261 } 262 263 /* Get stats themselves */ 264 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 265 if (xstats == NULL) { 266 printf("Cannot allocate memory for xstats\n"); 267 free(xstats_names); 268 return; 269 } 270 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 271 printf("Error: Unable to get xstats\n"); 272 free(xstats_names); 273 free(xstats); 274 return; 275 } 276 277 /* Display xstats */ 278 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 279 if (xstats_hide_zero && !xstats[idx_xstat].value) 280 continue; 281 printf("%s: %"PRIu64"\n", 282 xstats_names[idx_xstat].name, 283 xstats[idx_xstat].value); 284 } 285 free(xstats_names); 286 free(xstats); 287 } 288 289 void 290 nic_xstats_clear(portid_t port_id) 291 { 292 int ret; 293 294 ret = rte_eth_xstats_reset(port_id); 295 if (ret != 0) { 296 printf("%s: Error: failed to reset xstats (port %u): %s", 297 __func__, port_id, strerror(ret)); 298 } 299 } 300 301 void 302 nic_stats_mapping_display(portid_t port_id) 303 { 304 struct rte_port *port = &ports[port_id]; 305 uint16_t i; 306 307 static const char *nic_stats_mapping_border = "########################"; 308 309 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 310 print_valid_ports(); 311 return; 312 } 313 314 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 315 printf("Port id %d - either does not support queue statistic mapping or" 316 " no queue statistic mapping set\n", port_id); 317 return; 318 } 319 320 printf("\n %s NIC statistics mapping for port %-2d %s\n", 321 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 322 323 if (port->rx_queue_stats_mapping_enabled) { 324 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 325 if (rx_queue_stats_mappings[i].port_id == port_id) { 326 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 327 rx_queue_stats_mappings[i].queue_id, 328 rx_queue_stats_mappings[i].stats_counter_id); 329 } 330 } 331 printf("\n"); 332 } 333 334 335 if (port->tx_queue_stats_mapping_enabled) { 336 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 337 if (tx_queue_stats_mappings[i].port_id == port_id) { 338 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 339 tx_queue_stats_mappings[i].queue_id, 340 tx_queue_stats_mappings[i].stats_counter_id); 341 } 342 } 343 } 344 345 printf(" %s####################################%s\n", 346 nic_stats_mapping_border, nic_stats_mapping_border); 347 } 348 349 static void 350 burst_mode_options_display(uint64_t options) 351 { 352 int offset; 353 354 while (options != 0) { 355 offset = rte_bsf64(options); 356 357 printf(" %s", 358 rte_eth_burst_mode_option_name(1ULL << offset)); 359 360 options &= ~(1ULL << offset); 361 } 362 } 363 364 void 365 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 366 { 367 struct rte_eth_burst_mode mode; 368 struct rte_eth_rxq_info qinfo; 369 int32_t rc; 370 static const char *info_border = "*********************"; 371 372 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 373 if (rc != 0) { 374 printf("Failed to retrieve information for port: %u, " 375 "RX queue: %hu\nerror desc: %s(%d)\n", 376 port_id, queue_id, strerror(-rc), rc); 377 return; 378 } 379 380 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 381 info_border, port_id, queue_id, info_border); 382 383 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 384 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 385 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 386 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 387 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 388 printf("\nRX drop packets: %s", 389 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 390 printf("\nRX deferred start: %s", 391 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 392 printf("\nRX scattered packets: %s", 393 (qinfo.scattered_rx != 0) ? "on" : "off"); 394 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 395 396 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) { 397 printf("\nBurst mode:"); 398 burst_mode_options_display(mode.options); 399 } 400 401 printf("\n"); 402 } 403 404 void 405 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 406 { 407 struct rte_eth_burst_mode mode; 408 struct rte_eth_txq_info qinfo; 409 int32_t rc; 410 static const char *info_border = "*********************"; 411 412 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 413 if (rc != 0) { 414 printf("Failed to retrieve information for port: %u, " 415 "TX queue: %hu\nerror desc: %s(%d)\n", 416 port_id, queue_id, strerror(-rc), rc); 417 return; 418 } 419 420 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 421 info_border, port_id, queue_id, info_border); 422 423 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 424 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 425 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 426 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 427 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 428 printf("\nTX deferred start: %s", 429 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 430 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 431 432 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) { 433 printf("\nBurst mode:"); 434 burst_mode_options_display(mode.options); 435 } 436 437 printf("\n"); 438 } 439 440 static int bus_match_all(const struct rte_bus *bus, const void *data) 441 { 442 RTE_SET_USED(bus); 443 RTE_SET_USED(data); 444 return 0; 445 } 446 447 void 448 device_infos_display(const char *identifier) 449 { 450 static const char *info_border = "*********************"; 451 struct rte_bus *start = NULL, *next; 452 struct rte_dev_iterator dev_iter; 453 char name[RTE_ETH_NAME_MAX_LEN]; 454 struct rte_ether_addr mac_addr; 455 struct rte_device *dev; 456 struct rte_devargs da; 457 portid_t port_id; 458 char devstr[128]; 459 460 memset(&da, 0, sizeof(da)); 461 if (!identifier) 462 goto skip_parse; 463 464 if (rte_devargs_parsef(&da, "%s", identifier)) { 465 printf("cannot parse identifier\n"); 466 if (da.args) 467 free(da.args); 468 return; 469 } 470 471 skip_parse: 472 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 473 474 start = next; 475 if (identifier && da.bus != next) 476 continue; 477 478 /* Skip buses that don't have iterate method */ 479 if (!next->dev_iterate) 480 continue; 481 482 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 483 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 484 485 if (!dev->driver) 486 continue; 487 /* Check for matching device if identifier is present */ 488 if (identifier && 489 strncmp(da.name, dev->name, strlen(dev->name))) 490 continue; 491 printf("\n%s Infos for device %s %s\n", 492 info_border, dev->name, info_border); 493 printf("Bus name: %s", dev->bus->name); 494 printf("\nDriver name: %s", dev->driver->name); 495 printf("\nDevargs: %s", 496 dev->devargs ? dev->devargs->args : ""); 497 printf("\nConnect to socket: %d", dev->numa_node); 498 printf("\n"); 499 500 /* List ports with matching device name */ 501 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 502 printf("\n\tPort id: %-2d", port_id); 503 if (eth_macaddr_get_print_err(port_id, 504 &mac_addr) == 0) 505 print_ethaddr("\n\tMAC address: ", 506 &mac_addr); 507 rte_eth_dev_get_name_by_port(port_id, name); 508 printf("\n\tDevice name: %s", name); 509 printf("\n"); 510 } 511 } 512 }; 513 } 514 515 void 516 port_infos_display(portid_t port_id) 517 { 518 struct rte_port *port; 519 struct rte_ether_addr mac_addr; 520 struct rte_eth_link link; 521 struct rte_eth_dev_info dev_info; 522 int vlan_offload; 523 struct rte_mempool * mp; 524 static const char *info_border = "*********************"; 525 uint16_t mtu; 526 char name[RTE_ETH_NAME_MAX_LEN]; 527 int ret; 528 529 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 530 print_valid_ports(); 531 return; 532 } 533 port = &ports[port_id]; 534 ret = eth_link_get_nowait_print_err(port_id, &link); 535 if (ret < 0) 536 return; 537 538 ret = eth_dev_info_get_print_err(port_id, &dev_info); 539 if (ret != 0) 540 return; 541 542 printf("\n%s Infos for port %-2d %s\n", 543 info_border, port_id, info_border); 544 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 545 print_ethaddr("MAC address: ", &mac_addr); 546 rte_eth_dev_get_name_by_port(port_id, name); 547 printf("\nDevice name: %s", name); 548 printf("\nDriver name: %s", dev_info.driver_name); 549 if (dev_info.device->devargs && dev_info.device->devargs->args) 550 printf("\nDevargs: %s", dev_info.device->devargs->args); 551 printf("\nConnect to socket: %u", port->socket_id); 552 553 if (port_numa[port_id] != NUMA_NO_CONFIG) { 554 mp = mbuf_pool_find(port_numa[port_id]); 555 if (mp) 556 printf("\nmemory allocation on the socket: %d", 557 port_numa[port_id]); 558 } else 559 printf("\nmemory allocation on the socket: %u",port->socket_id); 560 561 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 562 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 563 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 564 ("full-duplex") : ("half-duplex")); 565 566 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 567 printf("MTU: %u\n", mtu); 568 569 printf("Promiscuous mode: %s\n", 570 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 571 printf("Allmulticast mode: %s\n", 572 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 573 printf("Maximum number of MAC addresses: %u\n", 574 (unsigned int)(port->dev_info.max_mac_addrs)); 575 printf("Maximum number of MAC addresses of hash filtering: %u\n", 576 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 577 578 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 579 if (vlan_offload >= 0){ 580 printf("VLAN offload: \n"); 581 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 582 printf(" strip on, "); 583 else 584 printf(" strip off, "); 585 586 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 587 printf("filter on, "); 588 else 589 printf("filter off, "); 590 591 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 592 printf("extend on, "); 593 else 594 printf("extend off, "); 595 596 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 597 printf("qinq strip on\n"); 598 else 599 printf("qinq strip off\n"); 600 } 601 602 if (dev_info.hash_key_size > 0) 603 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 604 if (dev_info.reta_size > 0) 605 printf("Redirection table size: %u\n", dev_info.reta_size); 606 if (!dev_info.flow_type_rss_offloads) 607 printf("No RSS offload flow type is supported.\n"); 608 else { 609 uint16_t i; 610 char *p; 611 612 printf("Supported RSS offload flow types:\n"); 613 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 614 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 615 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 616 continue; 617 p = flowtype_to_str(i); 618 if (p) 619 printf(" %s\n", p); 620 else 621 printf(" user defined %d\n", i); 622 } 623 } 624 625 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 626 printf("Maximum configurable length of RX packet: %u\n", 627 dev_info.max_rx_pktlen); 628 if (dev_info.max_vfs) 629 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 630 if (dev_info.max_vmdq_pools) 631 printf("Maximum number of VMDq pools: %u\n", 632 dev_info.max_vmdq_pools); 633 634 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 635 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 636 printf("Max possible number of RXDs per queue: %hu\n", 637 dev_info.rx_desc_lim.nb_max); 638 printf("Min possible number of RXDs per queue: %hu\n", 639 dev_info.rx_desc_lim.nb_min); 640 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 641 642 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 643 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 644 printf("Max possible number of TXDs per queue: %hu\n", 645 dev_info.tx_desc_lim.nb_max); 646 printf("Min possible number of TXDs per queue: %hu\n", 647 dev_info.tx_desc_lim.nb_min); 648 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 649 printf("Max segment number per packet: %hu\n", 650 dev_info.tx_desc_lim.nb_seg_max); 651 printf("Max segment number per MTU/TSO: %hu\n", 652 dev_info.tx_desc_lim.nb_mtu_seg_max); 653 654 /* Show switch info only if valid switch domain and port id is set */ 655 if (dev_info.switch_info.domain_id != 656 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 657 if (dev_info.switch_info.name) 658 printf("Switch name: %s\n", dev_info.switch_info.name); 659 660 printf("Switch domain Id: %u\n", 661 dev_info.switch_info.domain_id); 662 printf("Switch Port Id: %u\n", 663 dev_info.switch_info.port_id); 664 } 665 } 666 667 void 668 port_summary_header_display(void) 669 { 670 uint16_t port_number; 671 672 port_number = rte_eth_dev_count_avail(); 673 printf("Number of available ports: %i\n", port_number); 674 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 675 "Driver", "Status", "Link"); 676 } 677 678 void 679 port_summary_display(portid_t port_id) 680 { 681 struct rte_ether_addr mac_addr; 682 struct rte_eth_link link; 683 struct rte_eth_dev_info dev_info; 684 char name[RTE_ETH_NAME_MAX_LEN]; 685 int ret; 686 687 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 688 print_valid_ports(); 689 return; 690 } 691 692 ret = eth_link_get_nowait_print_err(port_id, &link); 693 if (ret < 0) 694 return; 695 696 ret = eth_dev_info_get_print_err(port_id, &dev_info); 697 if (ret != 0) 698 return; 699 700 rte_eth_dev_get_name_by_port(port_id, name); 701 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 702 if (ret != 0) 703 return; 704 705 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n", 706 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 707 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 708 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 709 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 710 (unsigned int) link.link_speed); 711 } 712 713 void 714 port_offload_cap_display(portid_t port_id) 715 { 716 struct rte_eth_dev_info dev_info; 717 static const char *info_border = "************"; 718 int ret; 719 720 if (port_id_is_invalid(port_id, ENABLED_WARN)) 721 return; 722 723 ret = eth_dev_info_get_print_err(port_id, &dev_info); 724 if (ret != 0) 725 return; 726 727 printf("\n%s Port %d supported offload features: %s\n", 728 info_border, port_id, info_border); 729 730 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 731 printf("VLAN stripped: "); 732 if (ports[port_id].dev_conf.rxmode.offloads & 733 DEV_RX_OFFLOAD_VLAN_STRIP) 734 printf("on\n"); 735 else 736 printf("off\n"); 737 } 738 739 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 740 printf("Double VLANs stripped: "); 741 if (ports[port_id].dev_conf.rxmode.offloads & 742 DEV_RX_OFFLOAD_QINQ_STRIP) 743 printf("on\n"); 744 else 745 printf("off\n"); 746 } 747 748 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 749 printf("RX IPv4 checksum: "); 750 if (ports[port_id].dev_conf.rxmode.offloads & 751 DEV_RX_OFFLOAD_IPV4_CKSUM) 752 printf("on\n"); 753 else 754 printf("off\n"); 755 } 756 757 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 758 printf("RX UDP checksum: "); 759 if (ports[port_id].dev_conf.rxmode.offloads & 760 DEV_RX_OFFLOAD_UDP_CKSUM) 761 printf("on\n"); 762 else 763 printf("off\n"); 764 } 765 766 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 767 printf("RX TCP checksum: "); 768 if (ports[port_id].dev_conf.rxmode.offloads & 769 DEV_RX_OFFLOAD_TCP_CKSUM) 770 printf("on\n"); 771 else 772 printf("off\n"); 773 } 774 775 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 776 printf("RX SCTP checksum: "); 777 if (ports[port_id].dev_conf.rxmode.offloads & 778 DEV_RX_OFFLOAD_SCTP_CKSUM) 779 printf("on\n"); 780 else 781 printf("off\n"); 782 } 783 784 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 785 printf("RX Outer IPv4 checksum: "); 786 if (ports[port_id].dev_conf.rxmode.offloads & 787 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 788 printf("on\n"); 789 else 790 printf("off\n"); 791 } 792 793 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 794 printf("RX Outer UDP checksum: "); 795 if (ports[port_id].dev_conf.rxmode.offloads & 796 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 797 printf("on\n"); 798 else 799 printf("off\n"); 800 } 801 802 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 803 printf("Large receive offload: "); 804 if (ports[port_id].dev_conf.rxmode.offloads & 805 DEV_RX_OFFLOAD_TCP_LRO) 806 printf("on\n"); 807 else 808 printf("off\n"); 809 } 810 811 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 812 printf("HW timestamp: "); 813 if (ports[port_id].dev_conf.rxmode.offloads & 814 DEV_RX_OFFLOAD_TIMESTAMP) 815 printf("on\n"); 816 else 817 printf("off\n"); 818 } 819 820 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 821 printf("Rx Keep CRC: "); 822 if (ports[port_id].dev_conf.rxmode.offloads & 823 DEV_RX_OFFLOAD_KEEP_CRC) 824 printf("on\n"); 825 else 826 printf("off\n"); 827 } 828 829 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 830 printf("RX offload security: "); 831 if (ports[port_id].dev_conf.rxmode.offloads & 832 DEV_RX_OFFLOAD_SECURITY) 833 printf("on\n"); 834 else 835 printf("off\n"); 836 } 837 838 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 839 printf("VLAN insert: "); 840 if (ports[port_id].dev_conf.txmode.offloads & 841 DEV_TX_OFFLOAD_VLAN_INSERT) 842 printf("on\n"); 843 else 844 printf("off\n"); 845 } 846 847 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 848 printf("Double VLANs insert: "); 849 if (ports[port_id].dev_conf.txmode.offloads & 850 DEV_TX_OFFLOAD_QINQ_INSERT) 851 printf("on\n"); 852 else 853 printf("off\n"); 854 } 855 856 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 857 printf("TX IPv4 checksum: "); 858 if (ports[port_id].dev_conf.txmode.offloads & 859 DEV_TX_OFFLOAD_IPV4_CKSUM) 860 printf("on\n"); 861 else 862 printf("off\n"); 863 } 864 865 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 866 printf("TX UDP checksum: "); 867 if (ports[port_id].dev_conf.txmode.offloads & 868 DEV_TX_OFFLOAD_UDP_CKSUM) 869 printf("on\n"); 870 else 871 printf("off\n"); 872 } 873 874 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 875 printf("TX TCP checksum: "); 876 if (ports[port_id].dev_conf.txmode.offloads & 877 DEV_TX_OFFLOAD_TCP_CKSUM) 878 printf("on\n"); 879 else 880 printf("off\n"); 881 } 882 883 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 884 printf("TX SCTP checksum: "); 885 if (ports[port_id].dev_conf.txmode.offloads & 886 DEV_TX_OFFLOAD_SCTP_CKSUM) 887 printf("on\n"); 888 else 889 printf("off\n"); 890 } 891 892 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 893 printf("TX Outer IPv4 checksum: "); 894 if (ports[port_id].dev_conf.txmode.offloads & 895 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 896 printf("on\n"); 897 else 898 printf("off\n"); 899 } 900 901 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 902 printf("TX TCP segmentation: "); 903 if (ports[port_id].dev_conf.txmode.offloads & 904 DEV_TX_OFFLOAD_TCP_TSO) 905 printf("on\n"); 906 else 907 printf("off\n"); 908 } 909 910 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 911 printf("TX UDP segmentation: "); 912 if (ports[port_id].dev_conf.txmode.offloads & 913 DEV_TX_OFFLOAD_UDP_TSO) 914 printf("on\n"); 915 else 916 printf("off\n"); 917 } 918 919 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 920 printf("TSO for VXLAN tunnel packet: "); 921 if (ports[port_id].dev_conf.txmode.offloads & 922 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 923 printf("on\n"); 924 else 925 printf("off\n"); 926 } 927 928 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 929 printf("TSO for GRE tunnel packet: "); 930 if (ports[port_id].dev_conf.txmode.offloads & 931 DEV_TX_OFFLOAD_GRE_TNL_TSO) 932 printf("on\n"); 933 else 934 printf("off\n"); 935 } 936 937 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 938 printf("TSO for IPIP tunnel packet: "); 939 if (ports[port_id].dev_conf.txmode.offloads & 940 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 941 printf("on\n"); 942 else 943 printf("off\n"); 944 } 945 946 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 947 printf("TSO for GENEVE tunnel packet: "); 948 if (ports[port_id].dev_conf.txmode.offloads & 949 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 950 printf("on\n"); 951 else 952 printf("off\n"); 953 } 954 955 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 956 printf("IP tunnel TSO: "); 957 if (ports[port_id].dev_conf.txmode.offloads & 958 DEV_TX_OFFLOAD_IP_TNL_TSO) 959 printf("on\n"); 960 else 961 printf("off\n"); 962 } 963 964 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 965 printf("UDP tunnel TSO: "); 966 if (ports[port_id].dev_conf.txmode.offloads & 967 DEV_TX_OFFLOAD_UDP_TNL_TSO) 968 printf("on\n"); 969 else 970 printf("off\n"); 971 } 972 973 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 974 printf("TX Outer UDP checksum: "); 975 if (ports[port_id].dev_conf.txmode.offloads & 976 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 977 printf("on\n"); 978 else 979 printf("off\n"); 980 } 981 982 } 983 984 int 985 port_id_is_invalid(portid_t port_id, enum print_warning warning) 986 { 987 uint16_t pid; 988 989 if (port_id == (portid_t)RTE_PORT_ALL) 990 return 0; 991 992 RTE_ETH_FOREACH_DEV(pid) 993 if (port_id == pid) 994 return 0; 995 996 if (warning == ENABLED_WARN) 997 printf("Invalid port %d\n", port_id); 998 999 return 1; 1000 } 1001 1002 void print_valid_ports(void) 1003 { 1004 portid_t pid; 1005 1006 printf("The valid ports array is ["); 1007 RTE_ETH_FOREACH_DEV(pid) { 1008 printf(" %d", pid); 1009 } 1010 printf(" ]\n"); 1011 } 1012 1013 static int 1014 vlan_id_is_invalid(uint16_t vlan_id) 1015 { 1016 if (vlan_id < 4096) 1017 return 0; 1018 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1019 return 1; 1020 } 1021 1022 static int 1023 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1024 { 1025 const struct rte_pci_device *pci_dev; 1026 const struct rte_bus *bus; 1027 uint64_t pci_len; 1028 1029 if (reg_off & 0x3) { 1030 printf("Port register offset 0x%X not aligned on a 4-byte " 1031 "boundary\n", 1032 (unsigned)reg_off); 1033 return 1; 1034 } 1035 1036 if (!ports[port_id].dev_info.device) { 1037 printf("Invalid device\n"); 1038 return 0; 1039 } 1040 1041 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1042 if (bus && !strcmp(bus->name, "pci")) { 1043 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1044 } else { 1045 printf("Not a PCI device\n"); 1046 return 1; 1047 } 1048 1049 pci_len = pci_dev->mem_resource[0].len; 1050 if (reg_off >= pci_len) { 1051 printf("Port %d: register offset %u (0x%X) out of port PCI " 1052 "resource (length=%"PRIu64")\n", 1053 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 1054 return 1; 1055 } 1056 return 0; 1057 } 1058 1059 static int 1060 reg_bit_pos_is_invalid(uint8_t bit_pos) 1061 { 1062 if (bit_pos <= 31) 1063 return 0; 1064 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 1065 return 1; 1066 } 1067 1068 #define display_port_and_reg_off(port_id, reg_off) \ 1069 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1070 1071 static inline void 1072 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1073 { 1074 display_port_and_reg_off(port_id, (unsigned)reg_off); 1075 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1076 } 1077 1078 void 1079 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1080 { 1081 uint32_t reg_v; 1082 1083 1084 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1085 return; 1086 if (port_reg_off_is_invalid(port_id, reg_off)) 1087 return; 1088 if (reg_bit_pos_is_invalid(bit_x)) 1089 return; 1090 reg_v = port_id_pci_reg_read(port_id, reg_off); 1091 display_port_and_reg_off(port_id, (unsigned)reg_off); 1092 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1093 } 1094 1095 void 1096 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1097 uint8_t bit1_pos, uint8_t bit2_pos) 1098 { 1099 uint32_t reg_v; 1100 uint8_t l_bit; 1101 uint8_t h_bit; 1102 1103 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1104 return; 1105 if (port_reg_off_is_invalid(port_id, reg_off)) 1106 return; 1107 if (reg_bit_pos_is_invalid(bit1_pos)) 1108 return; 1109 if (reg_bit_pos_is_invalid(bit2_pos)) 1110 return; 1111 if (bit1_pos > bit2_pos) 1112 l_bit = bit2_pos, h_bit = bit1_pos; 1113 else 1114 l_bit = bit1_pos, h_bit = bit2_pos; 1115 1116 reg_v = port_id_pci_reg_read(port_id, reg_off); 1117 reg_v >>= l_bit; 1118 if (h_bit < 31) 1119 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1120 display_port_and_reg_off(port_id, (unsigned)reg_off); 1121 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1122 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1123 } 1124 1125 void 1126 port_reg_display(portid_t port_id, uint32_t reg_off) 1127 { 1128 uint32_t reg_v; 1129 1130 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1131 return; 1132 if (port_reg_off_is_invalid(port_id, reg_off)) 1133 return; 1134 reg_v = port_id_pci_reg_read(port_id, reg_off); 1135 display_port_reg_value(port_id, reg_off, reg_v); 1136 } 1137 1138 void 1139 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1140 uint8_t bit_v) 1141 { 1142 uint32_t reg_v; 1143 1144 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1145 return; 1146 if (port_reg_off_is_invalid(port_id, reg_off)) 1147 return; 1148 if (reg_bit_pos_is_invalid(bit_pos)) 1149 return; 1150 if (bit_v > 1) { 1151 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1152 return; 1153 } 1154 reg_v = port_id_pci_reg_read(port_id, reg_off); 1155 if (bit_v == 0) 1156 reg_v &= ~(1 << bit_pos); 1157 else 1158 reg_v |= (1 << bit_pos); 1159 port_id_pci_reg_write(port_id, reg_off, reg_v); 1160 display_port_reg_value(port_id, reg_off, reg_v); 1161 } 1162 1163 void 1164 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1165 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1166 { 1167 uint32_t max_v; 1168 uint32_t reg_v; 1169 uint8_t l_bit; 1170 uint8_t h_bit; 1171 1172 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1173 return; 1174 if (port_reg_off_is_invalid(port_id, reg_off)) 1175 return; 1176 if (reg_bit_pos_is_invalid(bit1_pos)) 1177 return; 1178 if (reg_bit_pos_is_invalid(bit2_pos)) 1179 return; 1180 if (bit1_pos > bit2_pos) 1181 l_bit = bit2_pos, h_bit = bit1_pos; 1182 else 1183 l_bit = bit1_pos, h_bit = bit2_pos; 1184 1185 if ((h_bit - l_bit) < 31) 1186 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1187 else 1188 max_v = 0xFFFFFFFF; 1189 1190 if (value > max_v) { 1191 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1192 (unsigned)value, (unsigned)value, 1193 (unsigned)max_v, (unsigned)max_v); 1194 return; 1195 } 1196 reg_v = port_id_pci_reg_read(port_id, reg_off); 1197 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1198 reg_v |= (value << l_bit); /* Set changed bits */ 1199 port_id_pci_reg_write(port_id, reg_off, reg_v); 1200 display_port_reg_value(port_id, reg_off, reg_v); 1201 } 1202 1203 void 1204 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1205 { 1206 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1207 return; 1208 if (port_reg_off_is_invalid(port_id, reg_off)) 1209 return; 1210 port_id_pci_reg_write(port_id, reg_off, reg_v); 1211 display_port_reg_value(port_id, reg_off, reg_v); 1212 } 1213 1214 void 1215 port_mtu_set(portid_t port_id, uint16_t mtu) 1216 { 1217 int diag; 1218 struct rte_eth_dev_info dev_info; 1219 int ret; 1220 1221 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1222 return; 1223 1224 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1225 if (ret != 0) 1226 return; 1227 1228 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1229 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1230 mtu, dev_info.min_mtu, dev_info.max_mtu); 1231 return; 1232 } 1233 diag = rte_eth_dev_set_mtu(port_id, mtu); 1234 if (diag == 0) 1235 return; 1236 printf("Set MTU failed. diag=%d\n", diag); 1237 } 1238 1239 /* Generic flow management functions. */ 1240 1241 /** Generate a port_flow entry from attributes/pattern/actions. */ 1242 static struct port_flow * 1243 port_flow_new(const struct rte_flow_attr *attr, 1244 const struct rte_flow_item *pattern, 1245 const struct rte_flow_action *actions, 1246 struct rte_flow_error *error) 1247 { 1248 const struct rte_flow_conv_rule rule = { 1249 .attr_ro = attr, 1250 .pattern_ro = pattern, 1251 .actions_ro = actions, 1252 }; 1253 struct port_flow *pf; 1254 int ret; 1255 1256 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1257 if (ret < 0) 1258 return NULL; 1259 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1260 if (!pf) { 1261 rte_flow_error_set 1262 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1263 "calloc() failed"); 1264 return NULL; 1265 } 1266 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1267 error) >= 0) 1268 return pf; 1269 free(pf); 1270 return NULL; 1271 } 1272 1273 /** Print a message out of a flow error. */ 1274 static int 1275 port_flow_complain(struct rte_flow_error *error) 1276 { 1277 static const char *const errstrlist[] = { 1278 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1279 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1280 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1281 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1282 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1283 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1284 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1285 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1286 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1287 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1288 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1289 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1290 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1291 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1292 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1293 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1294 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1295 }; 1296 const char *errstr; 1297 char buf[32]; 1298 int err = rte_errno; 1299 1300 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1301 !errstrlist[error->type]) 1302 errstr = "unknown type"; 1303 else 1304 errstr = errstrlist[error->type]; 1305 printf("Caught error type %d (%s): %s%s: %s\n", 1306 error->type, errstr, 1307 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1308 error->cause), buf) : "", 1309 error->message ? error->message : "(no stated reason)", 1310 rte_strerror(err)); 1311 return -err; 1312 } 1313 1314 /** Validate flow rule. */ 1315 int 1316 port_flow_validate(portid_t port_id, 1317 const struct rte_flow_attr *attr, 1318 const struct rte_flow_item *pattern, 1319 const struct rte_flow_action *actions) 1320 { 1321 struct rte_flow_error error; 1322 1323 /* Poisoning to make sure PMDs update it in case of error. */ 1324 memset(&error, 0x11, sizeof(error)); 1325 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1326 return port_flow_complain(&error); 1327 printf("Flow rule validated\n"); 1328 return 0; 1329 } 1330 1331 /** Create flow rule. */ 1332 int 1333 port_flow_create(portid_t port_id, 1334 const struct rte_flow_attr *attr, 1335 const struct rte_flow_item *pattern, 1336 const struct rte_flow_action *actions) 1337 { 1338 struct rte_flow *flow; 1339 struct rte_port *port; 1340 struct port_flow *pf; 1341 uint32_t id; 1342 struct rte_flow_error error; 1343 1344 /* Poisoning to make sure PMDs update it in case of error. */ 1345 memset(&error, 0x22, sizeof(error)); 1346 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1347 if (!flow) 1348 return port_flow_complain(&error); 1349 port = &ports[port_id]; 1350 if (port->flow_list) { 1351 if (port->flow_list->id == UINT32_MAX) { 1352 printf("Highest rule ID is already assigned, delete" 1353 " it first"); 1354 rte_flow_destroy(port_id, flow, NULL); 1355 return -ENOMEM; 1356 } 1357 id = port->flow_list->id + 1; 1358 } else 1359 id = 0; 1360 pf = port_flow_new(attr, pattern, actions, &error); 1361 if (!pf) { 1362 rte_flow_destroy(port_id, flow, NULL); 1363 return port_flow_complain(&error); 1364 } 1365 pf->next = port->flow_list; 1366 pf->id = id; 1367 pf->flow = flow; 1368 port->flow_list = pf; 1369 printf("Flow rule #%u created\n", pf->id); 1370 return 0; 1371 } 1372 1373 /** Destroy a number of flow rules. */ 1374 int 1375 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1376 { 1377 struct rte_port *port; 1378 struct port_flow **tmp; 1379 uint32_t c = 0; 1380 int ret = 0; 1381 1382 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1383 port_id == (portid_t)RTE_PORT_ALL) 1384 return -EINVAL; 1385 port = &ports[port_id]; 1386 tmp = &port->flow_list; 1387 while (*tmp) { 1388 uint32_t i; 1389 1390 for (i = 0; i != n; ++i) { 1391 struct rte_flow_error error; 1392 struct port_flow *pf = *tmp; 1393 1394 if (rule[i] != pf->id) 1395 continue; 1396 /* 1397 * Poisoning to make sure PMDs update it in case 1398 * of error. 1399 */ 1400 memset(&error, 0x33, sizeof(error)); 1401 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1402 ret = port_flow_complain(&error); 1403 continue; 1404 } 1405 printf("Flow rule #%u destroyed\n", pf->id); 1406 *tmp = pf->next; 1407 free(pf); 1408 break; 1409 } 1410 if (i == n) 1411 tmp = &(*tmp)->next; 1412 ++c; 1413 } 1414 return ret; 1415 } 1416 1417 /** Remove all flow rules. */ 1418 int 1419 port_flow_flush(portid_t port_id) 1420 { 1421 struct rte_flow_error error; 1422 struct rte_port *port; 1423 int ret = 0; 1424 1425 /* Poisoning to make sure PMDs update it in case of error. */ 1426 memset(&error, 0x44, sizeof(error)); 1427 if (rte_flow_flush(port_id, &error)) { 1428 ret = port_flow_complain(&error); 1429 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1430 port_id == (portid_t)RTE_PORT_ALL) 1431 return ret; 1432 } 1433 port = &ports[port_id]; 1434 while (port->flow_list) { 1435 struct port_flow *pf = port->flow_list->next; 1436 1437 free(port->flow_list); 1438 port->flow_list = pf; 1439 } 1440 return ret; 1441 } 1442 1443 /** Query a flow rule. */ 1444 int 1445 port_flow_query(portid_t port_id, uint32_t rule, 1446 const struct rte_flow_action *action) 1447 { 1448 struct rte_flow_error error; 1449 struct rte_port *port; 1450 struct port_flow *pf; 1451 const char *name; 1452 union { 1453 struct rte_flow_query_count count; 1454 } query; 1455 int ret; 1456 1457 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1458 port_id == (portid_t)RTE_PORT_ALL) 1459 return -EINVAL; 1460 port = &ports[port_id]; 1461 for (pf = port->flow_list; pf; pf = pf->next) 1462 if (pf->id == rule) 1463 break; 1464 if (!pf) { 1465 printf("Flow rule #%u not found\n", rule); 1466 return -ENOENT; 1467 } 1468 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1469 &name, sizeof(name), 1470 (void *)(uintptr_t)action->type, &error); 1471 if (ret < 0) 1472 return port_flow_complain(&error); 1473 switch (action->type) { 1474 case RTE_FLOW_ACTION_TYPE_COUNT: 1475 break; 1476 default: 1477 printf("Cannot query action type %d (%s)\n", 1478 action->type, name); 1479 return -ENOTSUP; 1480 } 1481 /* Poisoning to make sure PMDs update it in case of error. */ 1482 memset(&error, 0x55, sizeof(error)); 1483 memset(&query, 0, sizeof(query)); 1484 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1485 return port_flow_complain(&error); 1486 switch (action->type) { 1487 case RTE_FLOW_ACTION_TYPE_COUNT: 1488 printf("%s:\n" 1489 " hits_set: %u\n" 1490 " bytes_set: %u\n" 1491 " hits: %" PRIu64 "\n" 1492 " bytes: %" PRIu64 "\n", 1493 name, 1494 query.count.hits_set, 1495 query.count.bytes_set, 1496 query.count.hits, 1497 query.count.bytes); 1498 break; 1499 default: 1500 printf("Cannot display result for action type %d (%s)\n", 1501 action->type, name); 1502 break; 1503 } 1504 return 0; 1505 } 1506 1507 /** List flow rules. */ 1508 void 1509 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1510 { 1511 struct rte_port *port; 1512 struct port_flow *pf; 1513 struct port_flow *list = NULL; 1514 uint32_t i; 1515 1516 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1517 port_id == (portid_t)RTE_PORT_ALL) 1518 return; 1519 port = &ports[port_id]; 1520 if (!port->flow_list) 1521 return; 1522 /* Sort flows by group, priority and ID. */ 1523 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1524 struct port_flow **tmp; 1525 const struct rte_flow_attr *curr = pf->rule.attr; 1526 1527 if (n) { 1528 /* Filter out unwanted groups. */ 1529 for (i = 0; i != n; ++i) 1530 if (curr->group == group[i]) 1531 break; 1532 if (i == n) 1533 continue; 1534 } 1535 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 1536 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 1537 1538 if (curr->group > comp->group || 1539 (curr->group == comp->group && 1540 curr->priority > comp->priority) || 1541 (curr->group == comp->group && 1542 curr->priority == comp->priority && 1543 pf->id > (*tmp)->id)) 1544 continue; 1545 break; 1546 } 1547 pf->tmp = *tmp; 1548 *tmp = pf; 1549 } 1550 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1551 for (pf = list; pf != NULL; pf = pf->tmp) { 1552 const struct rte_flow_item *item = pf->rule.pattern; 1553 const struct rte_flow_action *action = pf->rule.actions; 1554 const char *name; 1555 1556 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1557 pf->id, 1558 pf->rule.attr->group, 1559 pf->rule.attr->priority, 1560 pf->rule.attr->ingress ? 'i' : '-', 1561 pf->rule.attr->egress ? 'e' : '-', 1562 pf->rule.attr->transfer ? 't' : '-'); 1563 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1564 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 1565 &name, sizeof(name), 1566 (void *)(uintptr_t)item->type, 1567 NULL) <= 0) 1568 name = "[UNKNOWN]"; 1569 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1570 printf("%s ", name); 1571 ++item; 1572 } 1573 printf("=>"); 1574 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1575 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1576 &name, sizeof(name), 1577 (void *)(uintptr_t)action->type, 1578 NULL) <= 0) 1579 name = "[UNKNOWN]"; 1580 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1581 printf(" %s", name); 1582 ++action; 1583 } 1584 printf("\n"); 1585 } 1586 } 1587 1588 /** Restrict ingress traffic to the defined flow rules. */ 1589 int 1590 port_flow_isolate(portid_t port_id, int set) 1591 { 1592 struct rte_flow_error error; 1593 1594 /* Poisoning to make sure PMDs update it in case of error. */ 1595 memset(&error, 0x66, sizeof(error)); 1596 if (rte_flow_isolate(port_id, set, &error)) 1597 return port_flow_complain(&error); 1598 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1599 port_id, 1600 set ? "now restricted" : "not restricted anymore"); 1601 return 0; 1602 } 1603 1604 /* 1605 * RX/TX ring descriptors display functions. 1606 */ 1607 int 1608 rx_queue_id_is_invalid(queueid_t rxq_id) 1609 { 1610 if (rxq_id < nb_rxq) 1611 return 0; 1612 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1613 return 1; 1614 } 1615 1616 int 1617 tx_queue_id_is_invalid(queueid_t txq_id) 1618 { 1619 if (txq_id < nb_txq) 1620 return 0; 1621 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1622 return 1; 1623 } 1624 1625 static int 1626 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1627 { 1628 if (rxdesc_id < nb_rxd) 1629 return 0; 1630 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1631 rxdesc_id, nb_rxd); 1632 return 1; 1633 } 1634 1635 static int 1636 tx_desc_id_is_invalid(uint16_t txdesc_id) 1637 { 1638 if (txdesc_id < nb_txd) 1639 return 0; 1640 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1641 txdesc_id, nb_txd); 1642 return 1; 1643 } 1644 1645 static const struct rte_memzone * 1646 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1647 { 1648 char mz_name[RTE_MEMZONE_NAMESIZE]; 1649 const struct rte_memzone *mz; 1650 1651 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 1652 port_id, q_id, ring_name); 1653 mz = rte_memzone_lookup(mz_name); 1654 if (mz == NULL) 1655 printf("%s ring memory zoneof (port %d, queue %d) not" 1656 "found (zone name = %s\n", 1657 ring_name, port_id, q_id, mz_name); 1658 return mz; 1659 } 1660 1661 union igb_ring_dword { 1662 uint64_t dword; 1663 struct { 1664 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1665 uint32_t lo; 1666 uint32_t hi; 1667 #else 1668 uint32_t hi; 1669 uint32_t lo; 1670 #endif 1671 } words; 1672 }; 1673 1674 struct igb_ring_desc_32_bytes { 1675 union igb_ring_dword lo_dword; 1676 union igb_ring_dword hi_dword; 1677 union igb_ring_dword resv1; 1678 union igb_ring_dword resv2; 1679 }; 1680 1681 struct igb_ring_desc_16_bytes { 1682 union igb_ring_dword lo_dword; 1683 union igb_ring_dword hi_dword; 1684 }; 1685 1686 static void 1687 ring_rxd_display_dword(union igb_ring_dword dword) 1688 { 1689 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1690 (unsigned)dword.words.hi); 1691 } 1692 1693 static void 1694 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1695 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1696 portid_t port_id, 1697 #else 1698 __rte_unused portid_t port_id, 1699 #endif 1700 uint16_t desc_id) 1701 { 1702 struct igb_ring_desc_16_bytes *ring = 1703 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1704 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1705 int ret; 1706 struct rte_eth_dev_info dev_info; 1707 1708 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1709 if (ret != 0) 1710 return; 1711 1712 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1713 /* 32 bytes RX descriptor, i40e only */ 1714 struct igb_ring_desc_32_bytes *ring = 1715 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1716 ring[desc_id].lo_dword.dword = 1717 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1718 ring_rxd_display_dword(ring[desc_id].lo_dword); 1719 ring[desc_id].hi_dword.dword = 1720 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1721 ring_rxd_display_dword(ring[desc_id].hi_dword); 1722 ring[desc_id].resv1.dword = 1723 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1724 ring_rxd_display_dword(ring[desc_id].resv1); 1725 ring[desc_id].resv2.dword = 1726 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1727 ring_rxd_display_dword(ring[desc_id].resv2); 1728 1729 return; 1730 } 1731 #endif 1732 /* 16 bytes RX descriptor */ 1733 ring[desc_id].lo_dword.dword = 1734 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1735 ring_rxd_display_dword(ring[desc_id].lo_dword); 1736 ring[desc_id].hi_dword.dword = 1737 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1738 ring_rxd_display_dword(ring[desc_id].hi_dword); 1739 } 1740 1741 static void 1742 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1743 { 1744 struct igb_ring_desc_16_bytes *ring; 1745 struct igb_ring_desc_16_bytes txd; 1746 1747 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1748 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1749 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1750 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1751 (unsigned)txd.lo_dword.words.lo, 1752 (unsigned)txd.lo_dword.words.hi, 1753 (unsigned)txd.hi_dword.words.lo, 1754 (unsigned)txd.hi_dword.words.hi); 1755 } 1756 1757 void 1758 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1759 { 1760 const struct rte_memzone *rx_mz; 1761 1762 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1763 return; 1764 if (rx_queue_id_is_invalid(rxq_id)) 1765 return; 1766 if (rx_desc_id_is_invalid(rxd_id)) 1767 return; 1768 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1769 if (rx_mz == NULL) 1770 return; 1771 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1772 } 1773 1774 void 1775 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1776 { 1777 const struct rte_memzone *tx_mz; 1778 1779 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1780 return; 1781 if (tx_queue_id_is_invalid(txq_id)) 1782 return; 1783 if (tx_desc_id_is_invalid(txd_id)) 1784 return; 1785 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1786 if (tx_mz == NULL) 1787 return; 1788 ring_tx_descriptor_display(tx_mz, txd_id); 1789 } 1790 1791 void 1792 fwd_lcores_config_display(void) 1793 { 1794 lcoreid_t lc_id; 1795 1796 printf("List of forwarding lcores:"); 1797 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1798 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1799 printf("\n"); 1800 } 1801 void 1802 rxtx_config_display(void) 1803 { 1804 portid_t pid; 1805 queueid_t qid; 1806 1807 printf(" %s packet forwarding%s packets/burst=%d\n", 1808 cur_fwd_eng->fwd_mode_name, 1809 retry_enabled == 0 ? "" : " with retry", 1810 nb_pkt_per_burst); 1811 1812 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1813 printf(" packet len=%u - nb packet segments=%d\n", 1814 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1815 1816 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1817 nb_fwd_lcores, nb_fwd_ports); 1818 1819 RTE_ETH_FOREACH_DEV(pid) { 1820 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1821 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1822 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1823 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1824 uint16_t nb_rx_desc_tmp; 1825 uint16_t nb_tx_desc_tmp; 1826 struct rte_eth_rxq_info rx_qinfo; 1827 struct rte_eth_txq_info tx_qinfo; 1828 int32_t rc; 1829 1830 /* per port config */ 1831 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1832 (unsigned int)pid, nb_rxq, nb_txq); 1833 1834 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1835 ports[pid].dev_conf.rxmode.offloads, 1836 ports[pid].dev_conf.txmode.offloads); 1837 1838 /* per rx queue config only for first queue to be less verbose */ 1839 for (qid = 0; qid < 1; qid++) { 1840 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1841 if (rc) 1842 nb_rx_desc_tmp = nb_rx_desc[qid]; 1843 else 1844 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1845 1846 printf(" RX queue: %d\n", qid); 1847 printf(" RX desc=%d - RX free threshold=%d\n", 1848 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1849 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1850 " wthresh=%d\n", 1851 rx_conf[qid].rx_thresh.pthresh, 1852 rx_conf[qid].rx_thresh.hthresh, 1853 rx_conf[qid].rx_thresh.wthresh); 1854 printf(" RX Offloads=0x%"PRIx64"\n", 1855 rx_conf[qid].offloads); 1856 } 1857 1858 /* per tx queue config only for first queue to be less verbose */ 1859 for (qid = 0; qid < 1; qid++) { 1860 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1861 if (rc) 1862 nb_tx_desc_tmp = nb_tx_desc[qid]; 1863 else 1864 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1865 1866 printf(" TX queue: %d\n", qid); 1867 printf(" TX desc=%d - TX free threshold=%d\n", 1868 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1869 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1870 " wthresh=%d\n", 1871 tx_conf[qid].tx_thresh.pthresh, 1872 tx_conf[qid].tx_thresh.hthresh, 1873 tx_conf[qid].tx_thresh.wthresh); 1874 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1875 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1876 } 1877 } 1878 } 1879 1880 void 1881 port_rss_reta_info(portid_t port_id, 1882 struct rte_eth_rss_reta_entry64 *reta_conf, 1883 uint16_t nb_entries) 1884 { 1885 uint16_t i, idx, shift; 1886 int ret; 1887 1888 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1889 return; 1890 1891 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1892 if (ret != 0) { 1893 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1894 return; 1895 } 1896 1897 for (i = 0; i < nb_entries; i++) { 1898 idx = i / RTE_RETA_GROUP_SIZE; 1899 shift = i % RTE_RETA_GROUP_SIZE; 1900 if (!(reta_conf[idx].mask & (1ULL << shift))) 1901 continue; 1902 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1903 i, reta_conf[idx].reta[shift]); 1904 } 1905 } 1906 1907 /* 1908 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1909 * key of the port. 1910 */ 1911 void 1912 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 1913 { 1914 struct rte_eth_rss_conf rss_conf = {0}; 1915 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1916 uint64_t rss_hf; 1917 uint8_t i; 1918 int diag; 1919 struct rte_eth_dev_info dev_info; 1920 uint8_t hash_key_size; 1921 int ret; 1922 1923 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1924 return; 1925 1926 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1927 if (ret != 0) 1928 return; 1929 1930 if (dev_info.hash_key_size > 0 && 1931 dev_info.hash_key_size <= sizeof(rss_key)) 1932 hash_key_size = dev_info.hash_key_size; 1933 else { 1934 printf("dev_info did not provide a valid hash key size\n"); 1935 return; 1936 } 1937 1938 /* Get RSS hash key if asked to display it */ 1939 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1940 rss_conf.rss_key_len = hash_key_size; 1941 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1942 if (diag != 0) { 1943 switch (diag) { 1944 case -ENODEV: 1945 printf("port index %d invalid\n", port_id); 1946 break; 1947 case -ENOTSUP: 1948 printf("operation not supported by device\n"); 1949 break; 1950 default: 1951 printf("operation failed - diag=%d\n", diag); 1952 break; 1953 } 1954 return; 1955 } 1956 rss_hf = rss_conf.rss_hf; 1957 if (rss_hf == 0) { 1958 printf("RSS disabled\n"); 1959 return; 1960 } 1961 printf("RSS functions:\n "); 1962 for (i = 0; rss_type_table[i].str; i++) { 1963 if (rss_hf & rss_type_table[i].rss_type) 1964 printf("%s ", rss_type_table[i].str); 1965 } 1966 printf("\n"); 1967 if (!show_rss_key) 1968 return; 1969 printf("RSS key:\n"); 1970 for (i = 0; i < hash_key_size; i++) 1971 printf("%02X", rss_key[i]); 1972 printf("\n"); 1973 } 1974 1975 void 1976 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1977 uint hash_key_len) 1978 { 1979 struct rte_eth_rss_conf rss_conf; 1980 int diag; 1981 unsigned int i; 1982 1983 rss_conf.rss_key = NULL; 1984 rss_conf.rss_key_len = hash_key_len; 1985 rss_conf.rss_hf = 0; 1986 for (i = 0; rss_type_table[i].str; i++) { 1987 if (!strcmp(rss_type_table[i].str, rss_type)) 1988 rss_conf.rss_hf = rss_type_table[i].rss_type; 1989 } 1990 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1991 if (diag == 0) { 1992 rss_conf.rss_key = hash_key; 1993 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1994 } 1995 if (diag == 0) 1996 return; 1997 1998 switch (diag) { 1999 case -ENODEV: 2000 printf("port index %d invalid\n", port_id); 2001 break; 2002 case -ENOTSUP: 2003 printf("operation not supported by device\n"); 2004 break; 2005 default: 2006 printf("operation failed - diag=%d\n", diag); 2007 break; 2008 } 2009 } 2010 2011 /* 2012 * Setup forwarding configuration for each logical core. 2013 */ 2014 static void 2015 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2016 { 2017 streamid_t nb_fs_per_lcore; 2018 streamid_t nb_fs; 2019 streamid_t sm_id; 2020 lcoreid_t nb_extra; 2021 lcoreid_t nb_fc; 2022 lcoreid_t nb_lc; 2023 lcoreid_t lc_id; 2024 2025 nb_fs = cfg->nb_fwd_streams; 2026 nb_fc = cfg->nb_fwd_lcores; 2027 if (nb_fs <= nb_fc) { 2028 nb_fs_per_lcore = 1; 2029 nb_extra = 0; 2030 } else { 2031 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2032 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2033 } 2034 2035 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2036 sm_id = 0; 2037 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2038 fwd_lcores[lc_id]->stream_idx = sm_id; 2039 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2040 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2041 } 2042 2043 /* 2044 * Assign extra remaining streams, if any. 2045 */ 2046 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2047 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2048 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2049 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2050 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2051 } 2052 } 2053 2054 static portid_t 2055 fwd_topology_tx_port_get(portid_t rxp) 2056 { 2057 static int warning_once = 1; 2058 2059 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2060 2061 switch (port_topology) { 2062 default: 2063 case PORT_TOPOLOGY_PAIRED: 2064 if ((rxp & 0x1) == 0) { 2065 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2066 return rxp + 1; 2067 if (warning_once) { 2068 printf("\nWarning! port-topology=paired" 2069 " and odd forward ports number," 2070 " the last port will pair with" 2071 " itself.\n\n"); 2072 warning_once = 0; 2073 } 2074 return rxp; 2075 } 2076 return rxp - 1; 2077 case PORT_TOPOLOGY_CHAINED: 2078 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2079 case PORT_TOPOLOGY_LOOP: 2080 return rxp; 2081 } 2082 } 2083 2084 static void 2085 simple_fwd_config_setup(void) 2086 { 2087 portid_t i; 2088 2089 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2090 cur_fwd_config.nb_fwd_streams = 2091 (streamid_t) cur_fwd_config.nb_fwd_ports; 2092 2093 /* reinitialize forwarding streams */ 2094 init_fwd_streams(); 2095 2096 /* 2097 * In the simple forwarding test, the number of forwarding cores 2098 * must be lower or equal to the number of forwarding ports. 2099 */ 2100 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2101 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2102 cur_fwd_config.nb_fwd_lcores = 2103 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2104 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2105 2106 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2107 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2108 fwd_streams[i]->rx_queue = 0; 2109 fwd_streams[i]->tx_port = 2110 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2111 fwd_streams[i]->tx_queue = 0; 2112 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2113 fwd_streams[i]->retry_enabled = retry_enabled; 2114 } 2115 } 2116 2117 /** 2118 * For the RSS forwarding test all streams distributed over lcores. Each stream 2119 * being composed of a RX queue to poll on a RX port for input messages, 2120 * associated with a TX queue of a TX port where to send forwarded packets. 2121 */ 2122 static void 2123 rss_fwd_config_setup(void) 2124 { 2125 portid_t rxp; 2126 portid_t txp; 2127 queueid_t rxq; 2128 queueid_t nb_q; 2129 streamid_t sm_id; 2130 2131 nb_q = nb_rxq; 2132 if (nb_q > nb_txq) 2133 nb_q = nb_txq; 2134 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2135 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2136 cur_fwd_config.nb_fwd_streams = 2137 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2138 2139 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2140 cur_fwd_config.nb_fwd_lcores = 2141 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2142 2143 /* reinitialize forwarding streams */ 2144 init_fwd_streams(); 2145 2146 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2147 rxp = 0; rxq = 0; 2148 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2149 struct fwd_stream *fs; 2150 2151 fs = fwd_streams[sm_id]; 2152 txp = fwd_topology_tx_port_get(rxp); 2153 fs->rx_port = fwd_ports_ids[rxp]; 2154 fs->rx_queue = rxq; 2155 fs->tx_port = fwd_ports_ids[txp]; 2156 fs->tx_queue = rxq; 2157 fs->peer_addr = fs->tx_port; 2158 fs->retry_enabled = retry_enabled; 2159 rxp++; 2160 if (rxp < nb_fwd_ports) 2161 continue; 2162 rxp = 0; 2163 rxq++; 2164 } 2165 } 2166 2167 /** 2168 * For the DCB forwarding test, each core is assigned on each traffic class. 2169 * 2170 * Each core is assigned a multi-stream, each stream being composed of 2171 * a RX queue to poll on a RX port for input messages, associated with 2172 * a TX queue of a TX port where to send forwarded packets. All RX and 2173 * TX queues are mapping to the same traffic class. 2174 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2175 * the same core 2176 */ 2177 static void 2178 dcb_fwd_config_setup(void) 2179 { 2180 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2181 portid_t txp, rxp = 0; 2182 queueid_t txq, rxq = 0; 2183 lcoreid_t lc_id; 2184 uint16_t nb_rx_queue, nb_tx_queue; 2185 uint16_t i, j, k, sm_id = 0; 2186 uint8_t tc = 0; 2187 2188 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2189 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2190 cur_fwd_config.nb_fwd_streams = 2191 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2192 2193 /* reinitialize forwarding streams */ 2194 init_fwd_streams(); 2195 sm_id = 0; 2196 txp = 1; 2197 /* get the dcb info on the first RX and TX ports */ 2198 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2199 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2200 2201 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2202 fwd_lcores[lc_id]->stream_nb = 0; 2203 fwd_lcores[lc_id]->stream_idx = sm_id; 2204 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2205 /* if the nb_queue is zero, means this tc is 2206 * not enabled on the POOL 2207 */ 2208 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2209 break; 2210 k = fwd_lcores[lc_id]->stream_nb + 2211 fwd_lcores[lc_id]->stream_idx; 2212 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2213 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2214 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2215 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2216 for (j = 0; j < nb_rx_queue; j++) { 2217 struct fwd_stream *fs; 2218 2219 fs = fwd_streams[k + j]; 2220 fs->rx_port = fwd_ports_ids[rxp]; 2221 fs->rx_queue = rxq + j; 2222 fs->tx_port = fwd_ports_ids[txp]; 2223 fs->tx_queue = txq + j % nb_tx_queue; 2224 fs->peer_addr = fs->tx_port; 2225 fs->retry_enabled = retry_enabled; 2226 } 2227 fwd_lcores[lc_id]->stream_nb += 2228 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2229 } 2230 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2231 2232 tc++; 2233 if (tc < rxp_dcb_info.nb_tcs) 2234 continue; 2235 /* Restart from TC 0 on next RX port */ 2236 tc = 0; 2237 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2238 rxp = (portid_t) 2239 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2240 else 2241 rxp++; 2242 if (rxp >= nb_fwd_ports) 2243 return; 2244 /* get the dcb information on next RX and TX ports */ 2245 if ((rxp & 0x1) == 0) 2246 txp = (portid_t) (rxp + 1); 2247 else 2248 txp = (portid_t) (rxp - 1); 2249 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2250 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2251 } 2252 } 2253 2254 static void 2255 icmp_echo_config_setup(void) 2256 { 2257 portid_t rxp; 2258 queueid_t rxq; 2259 lcoreid_t lc_id; 2260 uint16_t sm_id; 2261 2262 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2263 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2264 (nb_txq * nb_fwd_ports); 2265 else 2266 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2267 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2268 cur_fwd_config.nb_fwd_streams = 2269 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2270 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2271 cur_fwd_config.nb_fwd_lcores = 2272 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2273 if (verbose_level > 0) { 2274 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2275 __FUNCTION__, 2276 cur_fwd_config.nb_fwd_lcores, 2277 cur_fwd_config.nb_fwd_ports, 2278 cur_fwd_config.nb_fwd_streams); 2279 } 2280 2281 /* reinitialize forwarding streams */ 2282 init_fwd_streams(); 2283 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2284 rxp = 0; rxq = 0; 2285 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2286 if (verbose_level > 0) 2287 printf(" core=%d: \n", lc_id); 2288 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2289 struct fwd_stream *fs; 2290 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2291 fs->rx_port = fwd_ports_ids[rxp]; 2292 fs->rx_queue = rxq; 2293 fs->tx_port = fs->rx_port; 2294 fs->tx_queue = rxq; 2295 fs->peer_addr = fs->tx_port; 2296 fs->retry_enabled = retry_enabled; 2297 if (verbose_level > 0) 2298 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2299 sm_id, fs->rx_port, fs->rx_queue, 2300 fs->tx_queue); 2301 rxq = (queueid_t) (rxq + 1); 2302 if (rxq == nb_rxq) { 2303 rxq = 0; 2304 rxp = (portid_t) (rxp + 1); 2305 } 2306 } 2307 } 2308 } 2309 2310 #if defined RTE_LIBRTE_PMD_SOFTNIC 2311 static void 2312 softnic_fwd_config_setup(void) 2313 { 2314 struct rte_port *port; 2315 portid_t pid, softnic_portid; 2316 queueid_t i; 2317 uint8_t softnic_enable = 0; 2318 2319 RTE_ETH_FOREACH_DEV(pid) { 2320 port = &ports[pid]; 2321 const char *driver = port->dev_info.driver_name; 2322 2323 if (strcmp(driver, "net_softnic") == 0) { 2324 softnic_portid = pid; 2325 softnic_enable = 1; 2326 break; 2327 } 2328 } 2329 2330 if (softnic_enable == 0) { 2331 printf("Softnic mode not configured(%s)!\n", __func__); 2332 return; 2333 } 2334 2335 cur_fwd_config.nb_fwd_ports = 1; 2336 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; 2337 2338 /* Re-initialize forwarding streams */ 2339 init_fwd_streams(); 2340 2341 /* 2342 * In the softnic forwarding test, the number of forwarding cores 2343 * is set to one and remaining are used for softnic packet processing. 2344 */ 2345 cur_fwd_config.nb_fwd_lcores = 1; 2346 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2347 2348 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { 2349 fwd_streams[i]->rx_port = softnic_portid; 2350 fwd_streams[i]->rx_queue = i; 2351 fwd_streams[i]->tx_port = softnic_portid; 2352 fwd_streams[i]->tx_queue = i; 2353 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2354 fwd_streams[i]->retry_enabled = retry_enabled; 2355 } 2356 } 2357 #endif 2358 2359 void 2360 fwd_config_setup(void) 2361 { 2362 cur_fwd_config.fwd_eng = cur_fwd_eng; 2363 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2364 icmp_echo_config_setup(); 2365 return; 2366 } 2367 2368 #if defined RTE_LIBRTE_PMD_SOFTNIC 2369 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 2370 softnic_fwd_config_setup(); 2371 return; 2372 } 2373 #endif 2374 2375 if ((nb_rxq > 1) && (nb_txq > 1)){ 2376 if (dcb_config) 2377 dcb_fwd_config_setup(); 2378 else 2379 rss_fwd_config_setup(); 2380 } 2381 else 2382 simple_fwd_config_setup(); 2383 } 2384 2385 static const char * 2386 mp_alloc_to_str(uint8_t mode) 2387 { 2388 switch (mode) { 2389 case MP_ALLOC_NATIVE: 2390 return "native"; 2391 case MP_ALLOC_ANON: 2392 return "anon"; 2393 case MP_ALLOC_XMEM: 2394 return "xmem"; 2395 case MP_ALLOC_XMEM_HUGE: 2396 return "xmemhuge"; 2397 default: 2398 return "invalid"; 2399 } 2400 } 2401 2402 void 2403 pkt_fwd_config_display(struct fwd_config *cfg) 2404 { 2405 struct fwd_stream *fs; 2406 lcoreid_t lc_id; 2407 streamid_t sm_id; 2408 2409 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2410 "NUMA support %s, MP allocation mode: %s\n", 2411 cfg->fwd_eng->fwd_mode_name, 2412 retry_enabled == 0 ? "" : " with retry", 2413 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2414 numa_support == 1 ? "enabled" : "disabled", 2415 mp_alloc_to_str(mp_alloc_type)); 2416 2417 if (retry_enabled) 2418 printf("TX retry num: %u, delay between TX retries: %uus\n", 2419 burst_tx_retry_num, burst_tx_delay_time); 2420 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2421 printf("Logical Core %u (socket %u) forwards packets on " 2422 "%d streams:", 2423 fwd_lcores_cpuids[lc_id], 2424 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2425 fwd_lcores[lc_id]->stream_nb); 2426 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2427 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2428 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2429 "P=%d/Q=%d (socket %u) ", 2430 fs->rx_port, fs->rx_queue, 2431 ports[fs->rx_port].socket_id, 2432 fs->tx_port, fs->tx_queue, 2433 ports[fs->tx_port].socket_id); 2434 print_ethaddr("peer=", 2435 &peer_eth_addrs[fs->peer_addr]); 2436 } 2437 printf("\n"); 2438 } 2439 printf("\n"); 2440 } 2441 2442 void 2443 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2444 { 2445 struct rte_ether_addr new_peer_addr; 2446 if (!rte_eth_dev_is_valid_port(port_id)) { 2447 printf("Error: Invalid port number %i\n", port_id); 2448 return; 2449 } 2450 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 2451 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2452 return; 2453 } 2454 peer_eth_addrs[port_id] = new_peer_addr; 2455 } 2456 2457 int 2458 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2459 { 2460 unsigned int i; 2461 unsigned int lcore_cpuid; 2462 int record_now; 2463 2464 record_now = 0; 2465 again: 2466 for (i = 0; i < nb_lc; i++) { 2467 lcore_cpuid = lcorelist[i]; 2468 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2469 printf("lcore %u not enabled\n", lcore_cpuid); 2470 return -1; 2471 } 2472 if (lcore_cpuid == rte_get_master_lcore()) { 2473 printf("lcore %u cannot be masked on for running " 2474 "packet forwarding, which is the master lcore " 2475 "and reserved for command line parsing only\n", 2476 lcore_cpuid); 2477 return -1; 2478 } 2479 if (record_now) 2480 fwd_lcores_cpuids[i] = lcore_cpuid; 2481 } 2482 if (record_now == 0) { 2483 record_now = 1; 2484 goto again; 2485 } 2486 nb_cfg_lcores = (lcoreid_t) nb_lc; 2487 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2488 printf("previous number of forwarding cores %u - changed to " 2489 "number of configured cores %u\n", 2490 (unsigned int) nb_fwd_lcores, nb_lc); 2491 nb_fwd_lcores = (lcoreid_t) nb_lc; 2492 } 2493 2494 return 0; 2495 } 2496 2497 int 2498 set_fwd_lcores_mask(uint64_t lcoremask) 2499 { 2500 unsigned int lcorelist[64]; 2501 unsigned int nb_lc; 2502 unsigned int i; 2503 2504 if (lcoremask == 0) { 2505 printf("Invalid NULL mask of cores\n"); 2506 return -1; 2507 } 2508 nb_lc = 0; 2509 for (i = 0; i < 64; i++) { 2510 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2511 continue; 2512 lcorelist[nb_lc++] = i; 2513 } 2514 return set_fwd_lcores_list(lcorelist, nb_lc); 2515 } 2516 2517 void 2518 set_fwd_lcores_number(uint16_t nb_lc) 2519 { 2520 if (nb_lc > nb_cfg_lcores) { 2521 printf("nb fwd cores %u > %u (max. number of configured " 2522 "lcores) - ignored\n", 2523 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2524 return; 2525 } 2526 nb_fwd_lcores = (lcoreid_t) nb_lc; 2527 printf("Number of forwarding cores set to %u\n", 2528 (unsigned int) nb_fwd_lcores); 2529 } 2530 2531 void 2532 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2533 { 2534 unsigned int i; 2535 portid_t port_id; 2536 int record_now; 2537 2538 record_now = 0; 2539 again: 2540 for (i = 0; i < nb_pt; i++) { 2541 port_id = (portid_t) portlist[i]; 2542 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2543 return; 2544 if (record_now) 2545 fwd_ports_ids[i] = port_id; 2546 } 2547 if (record_now == 0) { 2548 record_now = 1; 2549 goto again; 2550 } 2551 nb_cfg_ports = (portid_t) nb_pt; 2552 if (nb_fwd_ports != (portid_t) nb_pt) { 2553 printf("previous number of forwarding ports %u - changed to " 2554 "number of configured ports %u\n", 2555 (unsigned int) nb_fwd_ports, nb_pt); 2556 nb_fwd_ports = (portid_t) nb_pt; 2557 } 2558 } 2559 2560 void 2561 set_fwd_ports_mask(uint64_t portmask) 2562 { 2563 unsigned int portlist[64]; 2564 unsigned int nb_pt; 2565 unsigned int i; 2566 2567 if (portmask == 0) { 2568 printf("Invalid NULL mask of ports\n"); 2569 return; 2570 } 2571 nb_pt = 0; 2572 RTE_ETH_FOREACH_DEV(i) { 2573 if (! ((uint64_t)(1ULL << i) & portmask)) 2574 continue; 2575 portlist[nb_pt++] = i; 2576 } 2577 set_fwd_ports_list(portlist, nb_pt); 2578 } 2579 2580 void 2581 set_fwd_ports_number(uint16_t nb_pt) 2582 { 2583 if (nb_pt > nb_cfg_ports) { 2584 printf("nb fwd ports %u > %u (number of configured " 2585 "ports) - ignored\n", 2586 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2587 return; 2588 } 2589 nb_fwd_ports = (portid_t) nb_pt; 2590 printf("Number of forwarding ports set to %u\n", 2591 (unsigned int) nb_fwd_ports); 2592 } 2593 2594 int 2595 port_is_forwarding(portid_t port_id) 2596 { 2597 unsigned int i; 2598 2599 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2600 return -1; 2601 2602 for (i = 0; i < nb_fwd_ports; i++) { 2603 if (fwd_ports_ids[i] == port_id) 2604 return 1; 2605 } 2606 2607 return 0; 2608 } 2609 2610 void 2611 set_nb_pkt_per_burst(uint16_t nb) 2612 { 2613 if (nb > MAX_PKT_BURST) { 2614 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2615 " ignored\n", 2616 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2617 return; 2618 } 2619 nb_pkt_per_burst = nb; 2620 printf("Number of packets per burst set to %u\n", 2621 (unsigned int) nb_pkt_per_burst); 2622 } 2623 2624 static const char * 2625 tx_split_get_name(enum tx_pkt_split split) 2626 { 2627 uint32_t i; 2628 2629 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2630 if (tx_split_name[i].split == split) 2631 return tx_split_name[i].name; 2632 } 2633 return NULL; 2634 } 2635 2636 void 2637 set_tx_pkt_split(const char *name) 2638 { 2639 uint32_t i; 2640 2641 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2642 if (strcmp(tx_split_name[i].name, name) == 0) { 2643 tx_pkt_split = tx_split_name[i].split; 2644 return; 2645 } 2646 } 2647 printf("unknown value: \"%s\"\n", name); 2648 } 2649 2650 void 2651 show_tx_pkt_segments(void) 2652 { 2653 uint32_t i, n; 2654 const char *split; 2655 2656 n = tx_pkt_nb_segs; 2657 split = tx_split_get_name(tx_pkt_split); 2658 2659 printf("Number of segments: %u\n", n); 2660 printf("Segment sizes: "); 2661 for (i = 0; i != n - 1; i++) 2662 printf("%hu,", tx_pkt_seg_lengths[i]); 2663 printf("%hu\n", tx_pkt_seg_lengths[i]); 2664 printf("Split packet: %s\n", split); 2665 } 2666 2667 void 2668 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2669 { 2670 uint16_t tx_pkt_len; 2671 unsigned i; 2672 2673 if (nb_segs >= (unsigned) nb_txd) { 2674 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2675 nb_segs, (unsigned int) nb_txd); 2676 return; 2677 } 2678 2679 /* 2680 * Check that each segment length is greater or equal than 2681 * the mbuf data sise. 2682 * Check also that the total packet length is greater or equal than the 2683 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 2684 * 20 + 8). 2685 */ 2686 tx_pkt_len = 0; 2687 for (i = 0; i < nb_segs; i++) { 2688 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2689 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2690 i, seg_lengths[i], (unsigned) mbuf_data_size); 2691 return; 2692 } 2693 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2694 } 2695 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 2696 printf("total packet length=%u < %d - give up\n", 2697 (unsigned) tx_pkt_len, 2698 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 2699 return; 2700 } 2701 2702 for (i = 0; i < nb_segs; i++) 2703 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2704 2705 tx_pkt_length = tx_pkt_len; 2706 tx_pkt_nb_segs = (uint8_t) nb_segs; 2707 } 2708 2709 void 2710 setup_gro(const char *onoff, portid_t port_id) 2711 { 2712 if (!rte_eth_dev_is_valid_port(port_id)) { 2713 printf("invalid port id %u\n", port_id); 2714 return; 2715 } 2716 if (test_done == 0) { 2717 printf("Before enable/disable GRO," 2718 " please stop forwarding first\n"); 2719 return; 2720 } 2721 if (strcmp(onoff, "on") == 0) { 2722 if (gro_ports[port_id].enable != 0) { 2723 printf("Port %u has enabled GRO. Please" 2724 " disable GRO first\n", port_id); 2725 return; 2726 } 2727 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2728 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2729 gro_ports[port_id].param.max_flow_num = 2730 GRO_DEFAULT_FLOW_NUM; 2731 gro_ports[port_id].param.max_item_per_flow = 2732 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2733 } 2734 gro_ports[port_id].enable = 1; 2735 } else { 2736 if (gro_ports[port_id].enable == 0) { 2737 printf("Port %u has disabled GRO\n", port_id); 2738 return; 2739 } 2740 gro_ports[port_id].enable = 0; 2741 } 2742 } 2743 2744 void 2745 setup_gro_flush_cycles(uint8_t cycles) 2746 { 2747 if (test_done == 0) { 2748 printf("Before change flush interval for GRO," 2749 " please stop forwarding first.\n"); 2750 return; 2751 } 2752 2753 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2754 GRO_DEFAULT_FLUSH_CYCLES) { 2755 printf("The flushing cycle be in the range" 2756 " of 1 to %u. Revert to the default" 2757 " value %u.\n", 2758 GRO_MAX_FLUSH_CYCLES, 2759 GRO_DEFAULT_FLUSH_CYCLES); 2760 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2761 } 2762 2763 gro_flush_cycles = cycles; 2764 } 2765 2766 void 2767 show_gro(portid_t port_id) 2768 { 2769 struct rte_gro_param *param; 2770 uint32_t max_pkts_num; 2771 2772 param = &gro_ports[port_id].param; 2773 2774 if (!rte_eth_dev_is_valid_port(port_id)) { 2775 printf("Invalid port id %u.\n", port_id); 2776 return; 2777 } 2778 if (gro_ports[port_id].enable) { 2779 printf("GRO type: TCP/IPv4\n"); 2780 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2781 max_pkts_num = param->max_flow_num * 2782 param->max_item_per_flow; 2783 } else 2784 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2785 printf("Max number of packets to perform GRO: %u\n", 2786 max_pkts_num); 2787 printf("Flushing cycles: %u\n", gro_flush_cycles); 2788 } else 2789 printf("Port %u doesn't enable GRO.\n", port_id); 2790 } 2791 2792 void 2793 setup_gso(const char *mode, portid_t port_id) 2794 { 2795 if (!rte_eth_dev_is_valid_port(port_id)) { 2796 printf("invalid port id %u\n", port_id); 2797 return; 2798 } 2799 if (strcmp(mode, "on") == 0) { 2800 if (test_done == 0) { 2801 printf("before enabling GSO," 2802 " please stop forwarding first\n"); 2803 return; 2804 } 2805 gso_ports[port_id].enable = 1; 2806 } else if (strcmp(mode, "off") == 0) { 2807 if (test_done == 0) { 2808 printf("before disabling GSO," 2809 " please stop forwarding first\n"); 2810 return; 2811 } 2812 gso_ports[port_id].enable = 0; 2813 } 2814 } 2815 2816 char* 2817 list_pkt_forwarding_modes(void) 2818 { 2819 static char fwd_modes[128] = ""; 2820 const char *separator = "|"; 2821 struct fwd_engine *fwd_eng; 2822 unsigned i = 0; 2823 2824 if (strlen (fwd_modes) == 0) { 2825 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2826 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2827 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2828 strncat(fwd_modes, separator, 2829 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2830 } 2831 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2832 } 2833 2834 return fwd_modes; 2835 } 2836 2837 char* 2838 list_pkt_forwarding_retry_modes(void) 2839 { 2840 static char fwd_modes[128] = ""; 2841 const char *separator = "|"; 2842 struct fwd_engine *fwd_eng; 2843 unsigned i = 0; 2844 2845 if (strlen(fwd_modes) == 0) { 2846 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2847 if (fwd_eng == &rx_only_engine) 2848 continue; 2849 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2850 sizeof(fwd_modes) - 2851 strlen(fwd_modes) - 1); 2852 strncat(fwd_modes, separator, 2853 sizeof(fwd_modes) - 2854 strlen(fwd_modes) - 1); 2855 } 2856 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2857 } 2858 2859 return fwd_modes; 2860 } 2861 2862 void 2863 set_pkt_forwarding_mode(const char *fwd_mode_name) 2864 { 2865 struct fwd_engine *fwd_eng; 2866 unsigned i; 2867 2868 i = 0; 2869 while ((fwd_eng = fwd_engines[i]) != NULL) { 2870 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2871 printf("Set %s packet forwarding mode%s\n", 2872 fwd_mode_name, 2873 retry_enabled == 0 ? "" : " with retry"); 2874 cur_fwd_eng = fwd_eng; 2875 return; 2876 } 2877 i++; 2878 } 2879 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2880 } 2881 2882 void 2883 add_rx_dump_callbacks(portid_t portid) 2884 { 2885 struct rte_eth_dev_info dev_info; 2886 uint16_t queue; 2887 int ret; 2888 2889 if (port_id_is_invalid(portid, ENABLED_WARN)) 2890 return; 2891 2892 ret = eth_dev_info_get_print_err(portid, &dev_info); 2893 if (ret != 0) 2894 return; 2895 2896 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2897 if (!ports[portid].rx_dump_cb[queue]) 2898 ports[portid].rx_dump_cb[queue] = 2899 rte_eth_add_rx_callback(portid, queue, 2900 dump_rx_pkts, NULL); 2901 } 2902 2903 void 2904 add_tx_dump_callbacks(portid_t portid) 2905 { 2906 struct rte_eth_dev_info dev_info; 2907 uint16_t queue; 2908 int ret; 2909 2910 if (port_id_is_invalid(portid, ENABLED_WARN)) 2911 return; 2912 2913 ret = eth_dev_info_get_print_err(portid, &dev_info); 2914 if (ret != 0) 2915 return; 2916 2917 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2918 if (!ports[portid].tx_dump_cb[queue]) 2919 ports[portid].tx_dump_cb[queue] = 2920 rte_eth_add_tx_callback(portid, queue, 2921 dump_tx_pkts, NULL); 2922 } 2923 2924 void 2925 remove_rx_dump_callbacks(portid_t portid) 2926 { 2927 struct rte_eth_dev_info dev_info; 2928 uint16_t queue; 2929 int ret; 2930 2931 if (port_id_is_invalid(portid, ENABLED_WARN)) 2932 return; 2933 2934 ret = eth_dev_info_get_print_err(portid, &dev_info); 2935 if (ret != 0) 2936 return; 2937 2938 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2939 if (ports[portid].rx_dump_cb[queue]) { 2940 rte_eth_remove_rx_callback(portid, queue, 2941 ports[portid].rx_dump_cb[queue]); 2942 ports[portid].rx_dump_cb[queue] = NULL; 2943 } 2944 } 2945 2946 void 2947 remove_tx_dump_callbacks(portid_t portid) 2948 { 2949 struct rte_eth_dev_info dev_info; 2950 uint16_t queue; 2951 int ret; 2952 2953 if (port_id_is_invalid(portid, ENABLED_WARN)) 2954 return; 2955 2956 ret = eth_dev_info_get_print_err(portid, &dev_info); 2957 if (ret != 0) 2958 return; 2959 2960 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2961 if (ports[portid].tx_dump_cb[queue]) { 2962 rte_eth_remove_tx_callback(portid, queue, 2963 ports[portid].tx_dump_cb[queue]); 2964 ports[portid].tx_dump_cb[queue] = NULL; 2965 } 2966 } 2967 2968 void 2969 configure_rxtx_dump_callbacks(uint16_t verbose) 2970 { 2971 portid_t portid; 2972 2973 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 2974 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 2975 return; 2976 #endif 2977 2978 RTE_ETH_FOREACH_DEV(portid) 2979 { 2980 if (verbose == 1 || verbose > 2) 2981 add_rx_dump_callbacks(portid); 2982 else 2983 remove_rx_dump_callbacks(portid); 2984 if (verbose >= 2) 2985 add_tx_dump_callbacks(portid); 2986 else 2987 remove_tx_dump_callbacks(portid); 2988 } 2989 } 2990 2991 void 2992 set_verbose_level(uint16_t vb_level) 2993 { 2994 printf("Change verbose level from %u to %u\n", 2995 (unsigned int) verbose_level, (unsigned int) vb_level); 2996 verbose_level = vb_level; 2997 configure_rxtx_dump_callbacks(verbose_level); 2998 } 2999 3000 void 3001 vlan_extend_set(portid_t port_id, int on) 3002 { 3003 int diag; 3004 int vlan_offload; 3005 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3006 3007 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3008 return; 3009 3010 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3011 3012 if (on) { 3013 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 3014 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3015 } else { 3016 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 3017 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3018 } 3019 3020 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3021 if (diag < 0) 3022 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 3023 "diag=%d\n", port_id, on, diag); 3024 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3025 } 3026 3027 void 3028 rx_vlan_strip_set(portid_t port_id, int on) 3029 { 3030 int diag; 3031 int vlan_offload; 3032 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3033 3034 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3035 return; 3036 3037 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3038 3039 if (on) { 3040 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 3041 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3042 } else { 3043 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 3044 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3045 } 3046 3047 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3048 if (diag < 0) 3049 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 3050 "diag=%d\n", port_id, on, diag); 3051 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3052 } 3053 3054 void 3055 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 3056 { 3057 int diag; 3058 3059 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3060 return; 3061 3062 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 3063 if (diag < 0) 3064 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 3065 "diag=%d\n", port_id, queue_id, on, diag); 3066 } 3067 3068 void 3069 rx_vlan_filter_set(portid_t port_id, int on) 3070 { 3071 int diag; 3072 int vlan_offload; 3073 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3074 3075 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3076 return; 3077 3078 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3079 3080 if (on) { 3081 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 3082 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3083 } else { 3084 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 3085 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3086 } 3087 3088 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3089 if (diag < 0) 3090 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 3091 "diag=%d\n", port_id, on, diag); 3092 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3093 } 3094 3095 void 3096 rx_vlan_qinq_strip_set(portid_t port_id, int on) 3097 { 3098 int diag; 3099 int vlan_offload; 3100 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3101 3102 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3103 return; 3104 3105 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3106 3107 if (on) { 3108 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 3109 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3110 } else { 3111 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 3112 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3113 } 3114 3115 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3116 if (diag < 0) 3117 printf("%s(port_pi=%d, on=%d) failed " 3118 "diag=%d\n", __func__, port_id, on, diag); 3119 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3120 } 3121 3122 int 3123 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 3124 { 3125 int diag; 3126 3127 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3128 return 1; 3129 if (vlan_id_is_invalid(vlan_id)) 3130 return 1; 3131 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 3132 if (diag == 0) 3133 return 0; 3134 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 3135 "diag=%d\n", 3136 port_id, vlan_id, on, diag); 3137 return -1; 3138 } 3139 3140 void 3141 rx_vlan_all_filter_set(portid_t port_id, int on) 3142 { 3143 uint16_t vlan_id; 3144 3145 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3146 return; 3147 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 3148 if (rx_vft_set(port_id, vlan_id, on)) 3149 break; 3150 } 3151 } 3152 3153 void 3154 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 3155 { 3156 int diag; 3157 3158 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3159 return; 3160 3161 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 3162 if (diag == 0) 3163 return; 3164 3165 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 3166 "diag=%d\n", 3167 port_id, vlan_type, tp_id, diag); 3168 } 3169 3170 void 3171 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3172 { 3173 struct rte_eth_dev_info dev_info; 3174 int ret; 3175 3176 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3177 return; 3178 if (vlan_id_is_invalid(vlan_id)) 3179 return; 3180 3181 if (ports[port_id].dev_conf.txmode.offloads & 3182 DEV_TX_OFFLOAD_QINQ_INSERT) { 3183 printf("Error, as QinQ has been enabled.\n"); 3184 return; 3185 } 3186 3187 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3188 if (ret != 0) 3189 return; 3190 3191 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3192 printf("Error: vlan insert is not supported by port %d\n", 3193 port_id); 3194 return; 3195 } 3196 3197 tx_vlan_reset(port_id); 3198 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3199 ports[port_id].tx_vlan_id = vlan_id; 3200 } 3201 3202 void 3203 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3204 { 3205 struct rte_eth_dev_info dev_info; 3206 int ret; 3207 3208 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3209 return; 3210 if (vlan_id_is_invalid(vlan_id)) 3211 return; 3212 if (vlan_id_is_invalid(vlan_id_outer)) 3213 return; 3214 3215 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3216 if (ret != 0) 3217 return; 3218 3219 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3220 printf("Error: qinq insert not supported by port %d\n", 3221 port_id); 3222 return; 3223 } 3224 3225 tx_vlan_reset(port_id); 3226 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 3227 DEV_TX_OFFLOAD_QINQ_INSERT); 3228 ports[port_id].tx_vlan_id = vlan_id; 3229 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3230 } 3231 3232 void 3233 tx_vlan_reset(portid_t port_id) 3234 { 3235 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3236 return; 3237 ports[port_id].dev_conf.txmode.offloads &= 3238 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3239 DEV_TX_OFFLOAD_QINQ_INSERT); 3240 ports[port_id].tx_vlan_id = 0; 3241 ports[port_id].tx_vlan_id_outer = 0; 3242 } 3243 3244 void 3245 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3246 { 3247 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3248 return; 3249 3250 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3251 } 3252 3253 void 3254 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3255 { 3256 uint16_t i; 3257 uint8_t existing_mapping_found = 0; 3258 3259 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3260 return; 3261 3262 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3263 return; 3264 3265 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3266 printf("map_value not in required range 0..%d\n", 3267 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3268 return; 3269 } 3270 3271 if (!is_rx) { /*then tx*/ 3272 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3273 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3274 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3275 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3276 existing_mapping_found = 1; 3277 break; 3278 } 3279 } 3280 if (!existing_mapping_found) { /* A new additional mapping... */ 3281 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3282 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3283 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3284 nb_tx_queue_stats_mappings++; 3285 } 3286 } 3287 else { /*rx*/ 3288 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3289 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3290 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3291 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3292 existing_mapping_found = 1; 3293 break; 3294 } 3295 } 3296 if (!existing_mapping_found) { /* A new additional mapping... */ 3297 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3298 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3299 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3300 nb_rx_queue_stats_mappings++; 3301 } 3302 } 3303 } 3304 3305 void 3306 set_xstats_hide_zero(uint8_t on_off) 3307 { 3308 xstats_hide_zero = on_off; 3309 } 3310 3311 static inline void 3312 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3313 { 3314 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3315 3316 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3317 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3318 " tunnel_id: 0x%08x", 3319 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3320 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3321 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3322 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3323 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3324 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3325 3326 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3327 rte_be_to_cpu_16(mask->src_port_mask), 3328 rte_be_to_cpu_16(mask->dst_port_mask)); 3329 3330 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3331 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3332 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3333 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3334 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3335 3336 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3337 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3338 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3339 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3340 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3341 } 3342 3343 printf("\n"); 3344 } 3345 3346 static inline void 3347 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3348 { 3349 struct rte_eth_flex_payload_cfg *cfg; 3350 uint32_t i, j; 3351 3352 for (i = 0; i < flex_conf->nb_payloads; i++) { 3353 cfg = &flex_conf->flex_set[i]; 3354 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3355 printf("\n RAW: "); 3356 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3357 printf("\n L2_PAYLOAD: "); 3358 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3359 printf("\n L3_PAYLOAD: "); 3360 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3361 printf("\n L4_PAYLOAD: "); 3362 else 3363 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3364 for (j = 0; j < num; j++) 3365 printf(" %-5u", cfg->src_offset[j]); 3366 } 3367 printf("\n"); 3368 } 3369 3370 static char * 3371 flowtype_to_str(uint16_t flow_type) 3372 { 3373 struct flow_type_info { 3374 char str[32]; 3375 uint16_t ftype; 3376 }; 3377 3378 uint8_t i; 3379 static struct flow_type_info flowtype_str_table[] = { 3380 {"raw", RTE_ETH_FLOW_RAW}, 3381 {"ipv4", RTE_ETH_FLOW_IPV4}, 3382 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3383 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3384 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3385 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3386 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3387 {"ipv6", RTE_ETH_FLOW_IPV6}, 3388 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3389 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3390 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3391 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3392 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3393 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3394 {"port", RTE_ETH_FLOW_PORT}, 3395 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3396 {"geneve", RTE_ETH_FLOW_GENEVE}, 3397 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3398 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3399 }; 3400 3401 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3402 if (flowtype_str_table[i].ftype == flow_type) 3403 return flowtype_str_table[i].str; 3404 } 3405 3406 return NULL; 3407 } 3408 3409 static inline void 3410 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3411 { 3412 struct rte_eth_fdir_flex_mask *mask; 3413 uint32_t i, j; 3414 char *p; 3415 3416 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3417 mask = &flex_conf->flex_mask[i]; 3418 p = flowtype_to_str(mask->flow_type); 3419 printf("\n %s:\t", p ? p : "unknown"); 3420 for (j = 0; j < num; j++) 3421 printf(" %02x", mask->mask[j]); 3422 } 3423 printf("\n"); 3424 } 3425 3426 static inline void 3427 print_fdir_flow_type(uint32_t flow_types_mask) 3428 { 3429 int i; 3430 char *p; 3431 3432 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3433 if (!(flow_types_mask & (1 << i))) 3434 continue; 3435 p = flowtype_to_str(i); 3436 if (p) 3437 printf(" %s", p); 3438 else 3439 printf(" unknown"); 3440 } 3441 printf("\n"); 3442 } 3443 3444 void 3445 fdir_get_infos(portid_t port_id) 3446 { 3447 struct rte_eth_fdir_stats fdir_stat; 3448 struct rte_eth_fdir_info fdir_info; 3449 int ret; 3450 3451 static const char *fdir_stats_border = "########################"; 3452 3453 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3454 return; 3455 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3456 if (ret < 0) { 3457 printf("\n FDIR is not supported on port %-2d\n", 3458 port_id); 3459 return; 3460 } 3461 3462 memset(&fdir_info, 0, sizeof(fdir_info)); 3463 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3464 RTE_ETH_FILTER_INFO, &fdir_info); 3465 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3466 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3467 RTE_ETH_FILTER_STATS, &fdir_stat); 3468 printf("\n %s FDIR infos for port %-2d %s\n", 3469 fdir_stats_border, port_id, fdir_stats_border); 3470 printf(" MODE: "); 3471 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3472 printf(" PERFECT\n"); 3473 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3474 printf(" PERFECT-MAC-VLAN\n"); 3475 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3476 printf(" PERFECT-TUNNEL\n"); 3477 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3478 printf(" SIGNATURE\n"); 3479 else 3480 printf(" DISABLE\n"); 3481 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3482 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3483 printf(" SUPPORTED FLOW TYPE: "); 3484 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3485 } 3486 printf(" FLEX PAYLOAD INFO:\n"); 3487 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3488 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3489 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3490 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3491 fdir_info.flex_payload_unit, 3492 fdir_info.max_flex_payload_segment_num, 3493 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3494 printf(" MASK: "); 3495 print_fdir_mask(&fdir_info.mask); 3496 if (fdir_info.flex_conf.nb_payloads > 0) { 3497 printf(" FLEX PAYLOAD SRC OFFSET:"); 3498 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3499 } 3500 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3501 printf(" FLEX MASK CFG:"); 3502 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3503 } 3504 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3505 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3506 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3507 fdir_info.guarant_spc, fdir_info.best_spc); 3508 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3509 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3510 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3511 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3512 fdir_stat.collision, fdir_stat.free, 3513 fdir_stat.maxhash, fdir_stat.maxlen, 3514 fdir_stat.add, fdir_stat.remove, 3515 fdir_stat.f_add, fdir_stat.f_remove); 3516 printf(" %s############################%s\n", 3517 fdir_stats_border, fdir_stats_border); 3518 } 3519 3520 void 3521 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3522 { 3523 struct rte_port *port; 3524 struct rte_eth_fdir_flex_conf *flex_conf; 3525 int i, idx = 0; 3526 3527 port = &ports[port_id]; 3528 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3529 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3530 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3531 idx = i; 3532 break; 3533 } 3534 } 3535 if (i >= RTE_ETH_FLOW_MAX) { 3536 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3537 idx = flex_conf->nb_flexmasks; 3538 flex_conf->nb_flexmasks++; 3539 } else { 3540 printf("The flex mask table is full. Can not set flex" 3541 " mask for flow_type(%u).", cfg->flow_type); 3542 return; 3543 } 3544 } 3545 rte_memcpy(&flex_conf->flex_mask[idx], 3546 cfg, 3547 sizeof(struct rte_eth_fdir_flex_mask)); 3548 } 3549 3550 void 3551 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3552 { 3553 struct rte_port *port; 3554 struct rte_eth_fdir_flex_conf *flex_conf; 3555 int i, idx = 0; 3556 3557 port = &ports[port_id]; 3558 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3559 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3560 if (cfg->type == flex_conf->flex_set[i].type) { 3561 idx = i; 3562 break; 3563 } 3564 } 3565 if (i >= RTE_ETH_PAYLOAD_MAX) { 3566 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3567 idx = flex_conf->nb_payloads; 3568 flex_conf->nb_payloads++; 3569 } else { 3570 printf("The flex payload table is full. Can not set" 3571 " flex payload for type(%u).", cfg->type); 3572 return; 3573 } 3574 } 3575 rte_memcpy(&flex_conf->flex_set[idx], 3576 cfg, 3577 sizeof(struct rte_eth_flex_payload_cfg)); 3578 3579 } 3580 3581 void 3582 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3583 { 3584 #ifdef RTE_LIBRTE_IXGBE_PMD 3585 int diag; 3586 3587 if (is_rx) 3588 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3589 else 3590 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3591 3592 if (diag == 0) 3593 return; 3594 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3595 is_rx ? "rx" : "tx", port_id, diag); 3596 return; 3597 #endif 3598 printf("VF %s setting not supported for port %d\n", 3599 is_rx ? "Rx" : "Tx", port_id); 3600 RTE_SET_USED(vf); 3601 RTE_SET_USED(on); 3602 } 3603 3604 int 3605 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3606 { 3607 int diag; 3608 struct rte_eth_link link; 3609 int ret; 3610 3611 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3612 return 1; 3613 ret = eth_link_get_nowait_print_err(port_id, &link); 3614 if (ret < 0) 3615 return 1; 3616 if (rate > link.link_speed) { 3617 printf("Invalid rate value:%u bigger than link speed: %u\n", 3618 rate, link.link_speed); 3619 return 1; 3620 } 3621 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3622 if (diag == 0) 3623 return diag; 3624 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3625 port_id, diag); 3626 return diag; 3627 } 3628 3629 int 3630 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3631 { 3632 int diag = -ENOTSUP; 3633 3634 RTE_SET_USED(vf); 3635 RTE_SET_USED(rate); 3636 RTE_SET_USED(q_msk); 3637 3638 #ifdef RTE_LIBRTE_IXGBE_PMD 3639 if (diag == -ENOTSUP) 3640 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3641 q_msk); 3642 #endif 3643 #ifdef RTE_LIBRTE_BNXT_PMD 3644 if (diag == -ENOTSUP) 3645 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3646 #endif 3647 if (diag == 0) 3648 return diag; 3649 3650 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3651 port_id, diag); 3652 return diag; 3653 } 3654 3655 /* 3656 * Functions to manage the set of filtered Multicast MAC addresses. 3657 * 3658 * A pool of filtered multicast MAC addresses is associated with each port. 3659 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3660 * The address of the pool and the number of valid multicast MAC addresses 3661 * recorded in the pool are stored in the fields "mc_addr_pool" and 3662 * "mc_addr_nb" of the "rte_port" data structure. 3663 * 3664 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3665 * to be supplied a contiguous array of multicast MAC addresses. 3666 * To comply with this constraint, the set of multicast addresses recorded 3667 * into the pool are systematically compacted at the beginning of the pool. 3668 * Hence, when a multicast address is removed from the pool, all following 3669 * addresses, if any, are copied back to keep the set contiguous. 3670 */ 3671 #define MCAST_POOL_INC 32 3672 3673 static int 3674 mcast_addr_pool_extend(struct rte_port *port) 3675 { 3676 struct rte_ether_addr *mc_pool; 3677 size_t mc_pool_size; 3678 3679 /* 3680 * If a free entry is available at the end of the pool, just 3681 * increment the number of recorded multicast addresses. 3682 */ 3683 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3684 port->mc_addr_nb++; 3685 return 0; 3686 } 3687 3688 /* 3689 * [re]allocate a pool with MCAST_POOL_INC more entries. 3690 * The previous test guarantees that port->mc_addr_nb is a multiple 3691 * of MCAST_POOL_INC. 3692 */ 3693 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 3694 MCAST_POOL_INC); 3695 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 3696 mc_pool_size); 3697 if (mc_pool == NULL) { 3698 printf("allocation of pool of %u multicast addresses failed\n", 3699 port->mc_addr_nb + MCAST_POOL_INC); 3700 return -ENOMEM; 3701 } 3702 3703 port->mc_addr_pool = mc_pool; 3704 port->mc_addr_nb++; 3705 return 0; 3706 3707 } 3708 3709 static void 3710 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3711 { 3712 port->mc_addr_nb--; 3713 if (addr_idx == port->mc_addr_nb) { 3714 /* No need to recompact the set of multicast addressses. */ 3715 if (port->mc_addr_nb == 0) { 3716 /* free the pool of multicast addresses. */ 3717 free(port->mc_addr_pool); 3718 port->mc_addr_pool = NULL; 3719 } 3720 return; 3721 } 3722 memmove(&port->mc_addr_pool[addr_idx], 3723 &port->mc_addr_pool[addr_idx + 1], 3724 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 3725 } 3726 3727 static void 3728 eth_port_multicast_addr_list_set(portid_t port_id) 3729 { 3730 struct rte_port *port; 3731 int diag; 3732 3733 port = &ports[port_id]; 3734 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3735 port->mc_addr_nb); 3736 if (diag == 0) 3737 return; 3738 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3739 port->mc_addr_nb, port_id, -diag); 3740 } 3741 3742 void 3743 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 3744 { 3745 struct rte_port *port; 3746 uint32_t i; 3747 3748 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3749 return; 3750 3751 port = &ports[port_id]; 3752 3753 /* 3754 * Check that the added multicast MAC address is not already recorded 3755 * in the pool of multicast addresses. 3756 */ 3757 for (i = 0; i < port->mc_addr_nb; i++) { 3758 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3759 printf("multicast address already filtered by port\n"); 3760 return; 3761 } 3762 } 3763 3764 if (mcast_addr_pool_extend(port) != 0) 3765 return; 3766 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3767 eth_port_multicast_addr_list_set(port_id); 3768 } 3769 3770 void 3771 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 3772 { 3773 struct rte_port *port; 3774 uint32_t i; 3775 3776 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3777 return; 3778 3779 port = &ports[port_id]; 3780 3781 /* 3782 * Search the pool of multicast MAC addresses for the removed address. 3783 */ 3784 for (i = 0; i < port->mc_addr_nb; i++) { 3785 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3786 break; 3787 } 3788 if (i == port->mc_addr_nb) { 3789 printf("multicast address not filtered by port %d\n", port_id); 3790 return; 3791 } 3792 3793 mcast_addr_pool_remove(port, i); 3794 eth_port_multicast_addr_list_set(port_id); 3795 } 3796 3797 void 3798 port_dcb_info_display(portid_t port_id) 3799 { 3800 struct rte_eth_dcb_info dcb_info; 3801 uint16_t i; 3802 int ret; 3803 static const char *border = "================"; 3804 3805 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3806 return; 3807 3808 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3809 if (ret) { 3810 printf("\n Failed to get dcb infos on port %-2d\n", 3811 port_id); 3812 return; 3813 } 3814 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3815 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3816 printf("\n TC : "); 3817 for (i = 0; i < dcb_info.nb_tcs; i++) 3818 printf("\t%4d", i); 3819 printf("\n Priority : "); 3820 for (i = 0; i < dcb_info.nb_tcs; i++) 3821 printf("\t%4d", dcb_info.prio_tc[i]); 3822 printf("\n BW percent :"); 3823 for (i = 0; i < dcb_info.nb_tcs; i++) 3824 printf("\t%4d%%", dcb_info.tc_bws[i]); 3825 printf("\n RXQ base : "); 3826 for (i = 0; i < dcb_info.nb_tcs; i++) 3827 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3828 printf("\n RXQ number :"); 3829 for (i = 0; i < dcb_info.nb_tcs; i++) 3830 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3831 printf("\n TXQ base : "); 3832 for (i = 0; i < dcb_info.nb_tcs; i++) 3833 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3834 printf("\n TXQ number :"); 3835 for (i = 0; i < dcb_info.nb_tcs; i++) 3836 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3837 printf("\n"); 3838 } 3839 3840 uint8_t * 3841 open_file(const char *file_path, uint32_t *size) 3842 { 3843 int fd = open(file_path, O_RDONLY); 3844 off_t pkg_size; 3845 uint8_t *buf = NULL; 3846 int ret = 0; 3847 struct stat st_buf; 3848 3849 if (size) 3850 *size = 0; 3851 3852 if (fd == -1) { 3853 printf("%s: Failed to open %s\n", __func__, file_path); 3854 return buf; 3855 } 3856 3857 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3858 close(fd); 3859 printf("%s: File operations failed\n", __func__); 3860 return buf; 3861 } 3862 3863 pkg_size = st_buf.st_size; 3864 if (pkg_size < 0) { 3865 close(fd); 3866 printf("%s: File operations failed\n", __func__); 3867 return buf; 3868 } 3869 3870 buf = (uint8_t *)malloc(pkg_size); 3871 if (!buf) { 3872 close(fd); 3873 printf("%s: Failed to malloc memory\n", __func__); 3874 return buf; 3875 } 3876 3877 ret = read(fd, buf, pkg_size); 3878 if (ret < 0) { 3879 close(fd); 3880 printf("%s: File read operation failed\n", __func__); 3881 close_file(buf); 3882 return NULL; 3883 } 3884 3885 if (size) 3886 *size = pkg_size; 3887 3888 close(fd); 3889 3890 return buf; 3891 } 3892 3893 int 3894 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3895 { 3896 FILE *fh = fopen(file_path, "wb"); 3897 3898 if (fh == NULL) { 3899 printf("%s: Failed to open %s\n", __func__, file_path); 3900 return -1; 3901 } 3902 3903 if (fwrite(buf, 1, size, fh) != size) { 3904 fclose(fh); 3905 printf("%s: File write operation failed\n", __func__); 3906 return -1; 3907 } 3908 3909 fclose(fh); 3910 3911 return 0; 3912 } 3913 3914 int 3915 close_file(uint8_t *buf) 3916 { 3917 if (buf) { 3918 free((void *)buf); 3919 return 0; 3920 } 3921 3922 return -1; 3923 } 3924 3925 void 3926 port_queue_region_info_display(portid_t port_id, void *buf) 3927 { 3928 #ifdef RTE_LIBRTE_I40E_PMD 3929 uint16_t i, j; 3930 struct rte_pmd_i40e_queue_regions *info = 3931 (struct rte_pmd_i40e_queue_regions *)buf; 3932 static const char *queue_region_info_stats_border = "-------"; 3933 3934 if (!info->queue_region_number) 3935 printf("there is no region has been set before"); 3936 3937 printf("\n %s All queue region info for port=%2d %s", 3938 queue_region_info_stats_border, port_id, 3939 queue_region_info_stats_border); 3940 printf("\n queue_region_number: %-14u \n", 3941 info->queue_region_number); 3942 3943 for (i = 0; i < info->queue_region_number; i++) { 3944 printf("\n region_id: %-14u queue_number: %-14u " 3945 "queue_start_index: %-14u \n", 3946 info->region[i].region_id, 3947 info->region[i].queue_num, 3948 info->region[i].queue_start_index); 3949 3950 printf(" user_priority_num is %-14u :", 3951 info->region[i].user_priority_num); 3952 for (j = 0; j < info->region[i].user_priority_num; j++) 3953 printf(" %-14u ", info->region[i].user_priority[j]); 3954 3955 printf("\n flowtype_num is %-14u :", 3956 info->region[i].flowtype_num); 3957 for (j = 0; j < info->region[i].flowtype_num; j++) 3958 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3959 } 3960 #else 3961 RTE_SET_USED(port_id); 3962 RTE_SET_USED(buf); 3963 #endif 3964 3965 printf("\n\n"); 3966 } 3967