1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <rte_config.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 const struct rss_type_info rss_type_table[] = { 77 { "all", ETH_RSS_IP | ETH_RSS_TCP | 78 ETH_RSS_UDP | ETH_RSS_SCTP | 79 ETH_RSS_L2_PAYLOAD }, 80 { "none", 0 }, 81 { "ipv4", ETH_RSS_IPV4 }, 82 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 83 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 84 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 85 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 86 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 87 { "ipv6", ETH_RSS_IPV6 }, 88 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 89 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 90 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 91 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 92 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 93 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 94 { "ipv6-ex", ETH_RSS_IPV6_EX }, 95 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 96 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 97 { "port", ETH_RSS_PORT }, 98 { "vxlan", ETH_RSS_VXLAN }, 99 { "geneve", ETH_RSS_GENEVE }, 100 { "nvgre", ETH_RSS_NVGRE }, 101 { "ip", ETH_RSS_IP }, 102 { "udp", ETH_RSS_UDP }, 103 { "tcp", ETH_RSS_TCP }, 104 { "sctp", ETH_RSS_SCTP }, 105 { "tunnel", ETH_RSS_TUNNEL }, 106 { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, 107 { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, 108 { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, 109 { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, 110 { NULL, 0 }, 111 }; 112 113 static void 114 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 115 { 116 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 117 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 118 printf("%s%s", name, buf); 119 } 120 121 void 122 nic_stats_display(portid_t port_id) 123 { 124 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 125 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 126 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 127 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 128 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 129 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 130 diff_cycles; 131 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 132 struct rte_eth_stats stats; 133 struct rte_port *port = &ports[port_id]; 134 uint8_t i; 135 136 static const char *nic_stats_border = "########################"; 137 138 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 139 print_valid_ports(); 140 return; 141 } 142 rte_eth_stats_get(port_id, &stats); 143 printf("\n %s NIC statistics for port %-2d %s\n", 144 nic_stats_border, port_id, nic_stats_border); 145 146 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 147 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 148 "%-"PRIu64"\n", 149 stats.ipackets, stats.imissed, stats.ibytes); 150 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 151 printf(" RX-nombuf: %-10"PRIu64"\n", 152 stats.rx_nombuf); 153 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 154 "%-"PRIu64"\n", 155 stats.opackets, stats.oerrors, stats.obytes); 156 } 157 else { 158 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 159 " RX-bytes: %10"PRIu64"\n", 160 stats.ipackets, stats.ierrors, stats.ibytes); 161 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 162 printf(" RX-nombuf: %10"PRIu64"\n", 163 stats.rx_nombuf); 164 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 165 " TX-bytes: %10"PRIu64"\n", 166 stats.opackets, stats.oerrors, stats.obytes); 167 } 168 169 if (port->rx_queue_stats_mapping_enabled) { 170 printf("\n"); 171 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 172 printf(" Stats reg %2d RX-packets: %10"PRIu64 173 " RX-errors: %10"PRIu64 174 " RX-bytes: %10"PRIu64"\n", 175 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 176 } 177 } 178 if (port->tx_queue_stats_mapping_enabled) { 179 printf("\n"); 180 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 181 printf(" Stats reg %2d TX-packets: %10"PRIu64 182 " TX-bytes: %10"PRIu64"\n", 183 i, stats.q_opackets[i], stats.q_obytes[i]); 184 } 185 } 186 187 diff_cycles = prev_cycles[port_id]; 188 prev_cycles[port_id] = rte_rdtsc(); 189 if (diff_cycles > 0) 190 diff_cycles = prev_cycles[port_id] - diff_cycles; 191 192 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 193 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 194 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 195 (stats.opackets - prev_pkts_tx[port_id]) : 0; 196 prev_pkts_rx[port_id] = stats.ipackets; 197 prev_pkts_tx[port_id] = stats.opackets; 198 mpps_rx = diff_cycles > 0 ? 199 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 200 mpps_tx = diff_cycles > 0 ? 201 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 202 203 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 204 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 205 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 206 (stats.obytes - prev_bytes_tx[port_id]) : 0; 207 prev_bytes_rx[port_id] = stats.ibytes; 208 prev_bytes_tx[port_id] = stats.obytes; 209 mbps_rx = diff_cycles > 0 ? 210 diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0; 211 mbps_tx = diff_cycles > 0 ? 212 diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0; 213 214 printf("\n Throughput (since last show)\n"); 215 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 216 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 217 mpps_tx, mbps_tx * 8); 218 219 printf(" %s############################%s\n", 220 nic_stats_border, nic_stats_border); 221 } 222 223 void 224 nic_stats_clear(portid_t port_id) 225 { 226 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 227 print_valid_ports(); 228 return; 229 } 230 rte_eth_stats_reset(port_id); 231 printf("\n NIC statistics for port %d cleared\n", port_id); 232 } 233 234 void 235 nic_xstats_display(portid_t port_id) 236 { 237 struct rte_eth_xstat *xstats; 238 int cnt_xstats, idx_xstat; 239 struct rte_eth_xstat_name *xstats_names; 240 241 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 242 print_valid_ports(); 243 return; 244 } 245 printf("###### NIC extended statistics for port %-2d\n", port_id); 246 if (!rte_eth_dev_is_valid_port(port_id)) { 247 printf("Error: Invalid port number %i\n", port_id); 248 return; 249 } 250 251 /* Get count */ 252 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 253 if (cnt_xstats < 0) { 254 printf("Error: Cannot get count of xstats\n"); 255 return; 256 } 257 258 /* Get id-name lookup table */ 259 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 260 if (xstats_names == NULL) { 261 printf("Cannot allocate memory for xstats lookup\n"); 262 return; 263 } 264 if (cnt_xstats != rte_eth_xstats_get_names( 265 port_id, xstats_names, cnt_xstats)) { 266 printf("Error: Cannot get xstats lookup\n"); 267 free(xstats_names); 268 return; 269 } 270 271 /* Get stats themselves */ 272 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 273 if (xstats == NULL) { 274 printf("Cannot allocate memory for xstats\n"); 275 free(xstats_names); 276 return; 277 } 278 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 279 printf("Error: Unable to get xstats\n"); 280 free(xstats_names); 281 free(xstats); 282 return; 283 } 284 285 /* Display xstats */ 286 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 287 if (xstats_hide_zero && !xstats[idx_xstat].value) 288 continue; 289 printf("%s: %"PRIu64"\n", 290 xstats_names[idx_xstat].name, 291 xstats[idx_xstat].value); 292 } 293 free(xstats_names); 294 free(xstats); 295 } 296 297 void 298 nic_xstats_clear(portid_t port_id) 299 { 300 int ret; 301 302 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 303 print_valid_ports(); 304 return; 305 } 306 ret = rte_eth_xstats_reset(port_id); 307 if (ret != 0) { 308 printf("%s: Error: failed to reset xstats (port %u): %s", 309 __func__, port_id, strerror(ret)); 310 } 311 } 312 313 void 314 nic_stats_mapping_display(portid_t port_id) 315 { 316 struct rte_port *port = &ports[port_id]; 317 uint16_t i; 318 319 static const char *nic_stats_mapping_border = "########################"; 320 321 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 322 print_valid_ports(); 323 return; 324 } 325 326 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 327 printf("Port id %d - either does not support queue statistic mapping or" 328 " no queue statistic mapping set\n", port_id); 329 return; 330 } 331 332 printf("\n %s NIC statistics mapping for port %-2d %s\n", 333 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 334 335 if (port->rx_queue_stats_mapping_enabled) { 336 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 337 if (rx_queue_stats_mappings[i].port_id == port_id) { 338 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 339 rx_queue_stats_mappings[i].queue_id, 340 rx_queue_stats_mappings[i].stats_counter_id); 341 } 342 } 343 printf("\n"); 344 } 345 346 347 if (port->tx_queue_stats_mapping_enabled) { 348 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 349 if (tx_queue_stats_mappings[i].port_id == port_id) { 350 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 351 tx_queue_stats_mappings[i].queue_id, 352 tx_queue_stats_mappings[i].stats_counter_id); 353 } 354 } 355 } 356 357 printf(" %s####################################%s\n", 358 nic_stats_mapping_border, nic_stats_mapping_border); 359 } 360 361 void 362 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 363 { 364 struct rte_eth_burst_mode mode; 365 struct rte_eth_rxq_info qinfo; 366 int32_t rc; 367 static const char *info_border = "*********************"; 368 369 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 370 if (rc != 0) { 371 printf("Failed to retrieve information for port: %u, " 372 "RX queue: %hu\nerror desc: %s(%d)\n", 373 port_id, queue_id, strerror(-rc), rc); 374 return; 375 } 376 377 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 378 info_border, port_id, queue_id, info_border); 379 380 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 381 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 382 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 383 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 384 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 385 printf("\nRX drop packets: %s", 386 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 387 printf("\nRX deferred start: %s", 388 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 389 printf("\nRX scattered packets: %s", 390 (qinfo.scattered_rx != 0) ? "on" : "off"); 391 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 392 393 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 394 printf("\nBurst mode: %s%s", 395 mode.info, 396 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 397 " (per queue)" : ""); 398 399 printf("\n"); 400 } 401 402 void 403 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 404 { 405 struct rte_eth_burst_mode mode; 406 struct rte_eth_txq_info qinfo; 407 int32_t rc; 408 static const char *info_border = "*********************"; 409 410 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 411 if (rc != 0) { 412 printf("Failed to retrieve information for port: %u, " 413 "TX queue: %hu\nerror desc: %s(%d)\n", 414 port_id, queue_id, strerror(-rc), rc); 415 return; 416 } 417 418 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 419 info_border, port_id, queue_id, info_border); 420 421 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 422 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 423 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 424 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 425 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 426 printf("\nTX deferred start: %s", 427 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 428 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 429 430 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 431 printf("\nBurst mode: %s%s", 432 mode.info, 433 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 434 " (per queue)" : ""); 435 436 printf("\n"); 437 } 438 439 static int bus_match_all(const struct rte_bus *bus, const void *data) 440 { 441 RTE_SET_USED(bus); 442 RTE_SET_USED(data); 443 return 0; 444 } 445 446 void 447 device_infos_display(const char *identifier) 448 { 449 static const char *info_border = "*********************"; 450 struct rte_bus *start = NULL, *next; 451 struct rte_dev_iterator dev_iter; 452 char name[RTE_ETH_NAME_MAX_LEN]; 453 struct rte_ether_addr mac_addr; 454 struct rte_device *dev; 455 struct rte_devargs da; 456 portid_t port_id; 457 char devstr[128]; 458 459 memset(&da, 0, sizeof(da)); 460 if (!identifier) 461 goto skip_parse; 462 463 if (rte_devargs_parsef(&da, "%s", identifier)) { 464 printf("cannot parse identifier\n"); 465 if (da.args) 466 free(da.args); 467 return; 468 } 469 470 skip_parse: 471 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 472 473 start = next; 474 if (identifier && da.bus != next) 475 continue; 476 477 /* Skip buses that don't have iterate method */ 478 if (!next->dev_iterate) 479 continue; 480 481 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 482 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 483 484 if (!dev->driver) 485 continue; 486 /* Check for matching device if identifier is present */ 487 if (identifier && 488 strncmp(da.name, dev->name, strlen(dev->name))) 489 continue; 490 printf("\n%s Infos for device %s %s\n", 491 info_border, dev->name, info_border); 492 printf("Bus name: %s", dev->bus->name); 493 printf("\nDriver name: %s", dev->driver->name); 494 printf("\nDevargs: %s", 495 dev->devargs ? dev->devargs->args : ""); 496 printf("\nConnect to socket: %d", dev->numa_node); 497 printf("\n"); 498 499 /* List ports with matching device name */ 500 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 501 printf("\n\tPort id: %-2d", port_id); 502 if (eth_macaddr_get_print_err(port_id, 503 &mac_addr) == 0) 504 print_ethaddr("\n\tMAC address: ", 505 &mac_addr); 506 rte_eth_dev_get_name_by_port(port_id, name); 507 printf("\n\tDevice name: %s", name); 508 printf("\n"); 509 } 510 } 511 }; 512 } 513 514 void 515 port_infos_display(portid_t port_id) 516 { 517 struct rte_port *port; 518 struct rte_ether_addr mac_addr; 519 struct rte_eth_link link; 520 struct rte_eth_dev_info dev_info; 521 int vlan_offload; 522 struct rte_mempool * mp; 523 static const char *info_border = "*********************"; 524 uint16_t mtu; 525 char name[RTE_ETH_NAME_MAX_LEN]; 526 int ret; 527 528 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 529 print_valid_ports(); 530 return; 531 } 532 port = &ports[port_id]; 533 ret = eth_link_get_nowait_print_err(port_id, &link); 534 if (ret < 0) 535 return; 536 537 ret = eth_dev_info_get_print_err(port_id, &dev_info); 538 if (ret != 0) 539 return; 540 541 printf("\n%s Infos for port %-2d %s\n", 542 info_border, port_id, info_border); 543 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 544 print_ethaddr("MAC address: ", &mac_addr); 545 rte_eth_dev_get_name_by_port(port_id, name); 546 printf("\nDevice name: %s", name); 547 printf("\nDriver name: %s", dev_info.driver_name); 548 if (dev_info.device->devargs && dev_info.device->devargs->args) 549 printf("\nDevargs: %s", dev_info.device->devargs->args); 550 printf("\nConnect to socket: %u", port->socket_id); 551 552 if (port_numa[port_id] != NUMA_NO_CONFIG) { 553 mp = mbuf_pool_find(port_numa[port_id]); 554 if (mp) 555 printf("\nmemory allocation on the socket: %d", 556 port_numa[port_id]); 557 } else 558 printf("\nmemory allocation on the socket: %u",port->socket_id); 559 560 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 561 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 562 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 563 ("full-duplex") : ("half-duplex")); 564 565 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 566 printf("MTU: %u\n", mtu); 567 568 printf("Promiscuous mode: %s\n", 569 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 570 printf("Allmulticast mode: %s\n", 571 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 572 printf("Maximum number of MAC addresses: %u\n", 573 (unsigned int)(port->dev_info.max_mac_addrs)); 574 printf("Maximum number of MAC addresses of hash filtering: %u\n", 575 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 576 577 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 578 if (vlan_offload >= 0){ 579 printf("VLAN offload: \n"); 580 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 581 printf(" strip on, "); 582 else 583 printf(" strip off, "); 584 585 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 586 printf("filter on, "); 587 else 588 printf("filter off, "); 589 590 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 591 printf("extend on, "); 592 else 593 printf("extend off, "); 594 595 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 596 printf("qinq strip on\n"); 597 else 598 printf("qinq strip off\n"); 599 } 600 601 if (dev_info.hash_key_size > 0) 602 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 603 if (dev_info.reta_size > 0) 604 printf("Redirection table size: %u\n", dev_info.reta_size); 605 if (!dev_info.flow_type_rss_offloads) 606 printf("No RSS offload flow type is supported.\n"); 607 else { 608 uint16_t i; 609 char *p; 610 611 printf("Supported RSS offload flow types:\n"); 612 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 613 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 614 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 615 continue; 616 p = flowtype_to_str(i); 617 if (p) 618 printf(" %s\n", p); 619 else 620 printf(" user defined %d\n", i); 621 } 622 } 623 624 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 625 printf("Maximum configurable length of RX packet: %u\n", 626 dev_info.max_rx_pktlen); 627 printf("Maximum configurable size of LRO aggregated packet: %u\n", 628 dev_info.max_lro_pkt_size); 629 if (dev_info.max_vfs) 630 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 631 if (dev_info.max_vmdq_pools) 632 printf("Maximum number of VMDq pools: %u\n", 633 dev_info.max_vmdq_pools); 634 635 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 636 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 637 printf("Max possible number of RXDs per queue: %hu\n", 638 dev_info.rx_desc_lim.nb_max); 639 printf("Min possible number of RXDs per queue: %hu\n", 640 dev_info.rx_desc_lim.nb_min); 641 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 642 643 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 644 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 645 printf("Max possible number of TXDs per queue: %hu\n", 646 dev_info.tx_desc_lim.nb_max); 647 printf("Min possible number of TXDs per queue: %hu\n", 648 dev_info.tx_desc_lim.nb_min); 649 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 650 printf("Max segment number per packet: %hu\n", 651 dev_info.tx_desc_lim.nb_seg_max); 652 printf("Max segment number per MTU/TSO: %hu\n", 653 dev_info.tx_desc_lim.nb_mtu_seg_max); 654 655 /* Show switch info only if valid switch domain and port id is set */ 656 if (dev_info.switch_info.domain_id != 657 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 658 if (dev_info.switch_info.name) 659 printf("Switch name: %s\n", dev_info.switch_info.name); 660 661 printf("Switch domain Id: %u\n", 662 dev_info.switch_info.domain_id); 663 printf("Switch Port Id: %u\n", 664 dev_info.switch_info.port_id); 665 } 666 } 667 668 void 669 port_summary_header_display(void) 670 { 671 uint16_t port_number; 672 673 port_number = rte_eth_dev_count_avail(); 674 printf("Number of available ports: %i\n", port_number); 675 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 676 "Driver", "Status", "Link"); 677 } 678 679 void 680 port_summary_display(portid_t port_id) 681 { 682 struct rte_ether_addr mac_addr; 683 struct rte_eth_link link; 684 struct rte_eth_dev_info dev_info; 685 char name[RTE_ETH_NAME_MAX_LEN]; 686 int ret; 687 688 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 689 print_valid_ports(); 690 return; 691 } 692 693 ret = eth_link_get_nowait_print_err(port_id, &link); 694 if (ret < 0) 695 return; 696 697 ret = eth_dev_info_get_print_err(port_id, &dev_info); 698 if (ret != 0) 699 return; 700 701 rte_eth_dev_get_name_by_port(port_id, name); 702 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 703 if (ret != 0) 704 return; 705 706 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n", 707 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 708 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 709 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 710 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 711 (unsigned int) link.link_speed); 712 } 713 714 void 715 port_offload_cap_display(portid_t port_id) 716 { 717 struct rte_eth_dev_info dev_info; 718 static const char *info_border = "************"; 719 int ret; 720 721 if (port_id_is_invalid(port_id, ENABLED_WARN)) 722 return; 723 724 ret = eth_dev_info_get_print_err(port_id, &dev_info); 725 if (ret != 0) 726 return; 727 728 printf("\n%s Port %d supported offload features: %s\n", 729 info_border, port_id, info_border); 730 731 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 732 printf("VLAN stripped: "); 733 if (ports[port_id].dev_conf.rxmode.offloads & 734 DEV_RX_OFFLOAD_VLAN_STRIP) 735 printf("on\n"); 736 else 737 printf("off\n"); 738 } 739 740 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 741 printf("Double VLANs stripped: "); 742 if (ports[port_id].dev_conf.rxmode.offloads & 743 DEV_RX_OFFLOAD_QINQ_STRIP) 744 printf("on\n"); 745 else 746 printf("off\n"); 747 } 748 749 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 750 printf("RX IPv4 checksum: "); 751 if (ports[port_id].dev_conf.rxmode.offloads & 752 DEV_RX_OFFLOAD_IPV4_CKSUM) 753 printf("on\n"); 754 else 755 printf("off\n"); 756 } 757 758 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 759 printf("RX UDP checksum: "); 760 if (ports[port_id].dev_conf.rxmode.offloads & 761 DEV_RX_OFFLOAD_UDP_CKSUM) 762 printf("on\n"); 763 else 764 printf("off\n"); 765 } 766 767 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 768 printf("RX TCP checksum: "); 769 if (ports[port_id].dev_conf.rxmode.offloads & 770 DEV_RX_OFFLOAD_TCP_CKSUM) 771 printf("on\n"); 772 else 773 printf("off\n"); 774 } 775 776 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 777 printf("RX SCTP checksum: "); 778 if (ports[port_id].dev_conf.rxmode.offloads & 779 DEV_RX_OFFLOAD_SCTP_CKSUM) 780 printf("on\n"); 781 else 782 printf("off\n"); 783 } 784 785 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 786 printf("RX Outer IPv4 checksum: "); 787 if (ports[port_id].dev_conf.rxmode.offloads & 788 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 789 printf("on\n"); 790 else 791 printf("off\n"); 792 } 793 794 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 795 printf("RX Outer UDP checksum: "); 796 if (ports[port_id].dev_conf.rxmode.offloads & 797 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 798 printf("on\n"); 799 else 800 printf("off\n"); 801 } 802 803 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 804 printf("Large receive offload: "); 805 if (ports[port_id].dev_conf.rxmode.offloads & 806 DEV_RX_OFFLOAD_TCP_LRO) 807 printf("on\n"); 808 else 809 printf("off\n"); 810 } 811 812 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 813 printf("HW timestamp: "); 814 if (ports[port_id].dev_conf.rxmode.offloads & 815 DEV_RX_OFFLOAD_TIMESTAMP) 816 printf("on\n"); 817 else 818 printf("off\n"); 819 } 820 821 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 822 printf("Rx Keep CRC: "); 823 if (ports[port_id].dev_conf.rxmode.offloads & 824 DEV_RX_OFFLOAD_KEEP_CRC) 825 printf("on\n"); 826 else 827 printf("off\n"); 828 } 829 830 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 831 printf("RX offload security: "); 832 if (ports[port_id].dev_conf.rxmode.offloads & 833 DEV_RX_OFFLOAD_SECURITY) 834 printf("on\n"); 835 else 836 printf("off\n"); 837 } 838 839 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 840 printf("VLAN insert: "); 841 if (ports[port_id].dev_conf.txmode.offloads & 842 DEV_TX_OFFLOAD_VLAN_INSERT) 843 printf("on\n"); 844 else 845 printf("off\n"); 846 } 847 848 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 849 printf("Double VLANs insert: "); 850 if (ports[port_id].dev_conf.txmode.offloads & 851 DEV_TX_OFFLOAD_QINQ_INSERT) 852 printf("on\n"); 853 else 854 printf("off\n"); 855 } 856 857 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 858 printf("TX IPv4 checksum: "); 859 if (ports[port_id].dev_conf.txmode.offloads & 860 DEV_TX_OFFLOAD_IPV4_CKSUM) 861 printf("on\n"); 862 else 863 printf("off\n"); 864 } 865 866 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 867 printf("TX UDP checksum: "); 868 if (ports[port_id].dev_conf.txmode.offloads & 869 DEV_TX_OFFLOAD_UDP_CKSUM) 870 printf("on\n"); 871 else 872 printf("off\n"); 873 } 874 875 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 876 printf("TX TCP checksum: "); 877 if (ports[port_id].dev_conf.txmode.offloads & 878 DEV_TX_OFFLOAD_TCP_CKSUM) 879 printf("on\n"); 880 else 881 printf("off\n"); 882 } 883 884 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 885 printf("TX SCTP checksum: "); 886 if (ports[port_id].dev_conf.txmode.offloads & 887 DEV_TX_OFFLOAD_SCTP_CKSUM) 888 printf("on\n"); 889 else 890 printf("off\n"); 891 } 892 893 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 894 printf("TX Outer IPv4 checksum: "); 895 if (ports[port_id].dev_conf.txmode.offloads & 896 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 897 printf("on\n"); 898 else 899 printf("off\n"); 900 } 901 902 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 903 printf("TX TCP segmentation: "); 904 if (ports[port_id].dev_conf.txmode.offloads & 905 DEV_TX_OFFLOAD_TCP_TSO) 906 printf("on\n"); 907 else 908 printf("off\n"); 909 } 910 911 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 912 printf("TX UDP segmentation: "); 913 if (ports[port_id].dev_conf.txmode.offloads & 914 DEV_TX_OFFLOAD_UDP_TSO) 915 printf("on\n"); 916 else 917 printf("off\n"); 918 } 919 920 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 921 printf("TSO for VXLAN tunnel packet: "); 922 if (ports[port_id].dev_conf.txmode.offloads & 923 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 924 printf("on\n"); 925 else 926 printf("off\n"); 927 } 928 929 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 930 printf("TSO for GRE tunnel packet: "); 931 if (ports[port_id].dev_conf.txmode.offloads & 932 DEV_TX_OFFLOAD_GRE_TNL_TSO) 933 printf("on\n"); 934 else 935 printf("off\n"); 936 } 937 938 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 939 printf("TSO for IPIP tunnel packet: "); 940 if (ports[port_id].dev_conf.txmode.offloads & 941 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 942 printf("on\n"); 943 else 944 printf("off\n"); 945 } 946 947 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 948 printf("TSO for GENEVE tunnel packet: "); 949 if (ports[port_id].dev_conf.txmode.offloads & 950 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 951 printf("on\n"); 952 else 953 printf("off\n"); 954 } 955 956 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 957 printf("IP tunnel TSO: "); 958 if (ports[port_id].dev_conf.txmode.offloads & 959 DEV_TX_OFFLOAD_IP_TNL_TSO) 960 printf("on\n"); 961 else 962 printf("off\n"); 963 } 964 965 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 966 printf("UDP tunnel TSO: "); 967 if (ports[port_id].dev_conf.txmode.offloads & 968 DEV_TX_OFFLOAD_UDP_TNL_TSO) 969 printf("on\n"); 970 else 971 printf("off\n"); 972 } 973 974 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 975 printf("TX Outer UDP checksum: "); 976 if (ports[port_id].dev_conf.txmode.offloads & 977 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 978 printf("on\n"); 979 else 980 printf("off\n"); 981 } 982 983 } 984 985 int 986 port_id_is_invalid(portid_t port_id, enum print_warning warning) 987 { 988 uint16_t pid; 989 990 if (port_id == (portid_t)RTE_PORT_ALL) 991 return 0; 992 993 RTE_ETH_FOREACH_DEV(pid) 994 if (port_id == pid) 995 return 0; 996 997 if (warning == ENABLED_WARN) 998 printf("Invalid port %d\n", port_id); 999 1000 return 1; 1001 } 1002 1003 void print_valid_ports(void) 1004 { 1005 portid_t pid; 1006 1007 printf("The valid ports array is ["); 1008 RTE_ETH_FOREACH_DEV(pid) { 1009 printf(" %d", pid); 1010 } 1011 printf(" ]\n"); 1012 } 1013 1014 static int 1015 vlan_id_is_invalid(uint16_t vlan_id) 1016 { 1017 if (vlan_id < 4096) 1018 return 0; 1019 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1020 return 1; 1021 } 1022 1023 static int 1024 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1025 { 1026 const struct rte_pci_device *pci_dev; 1027 const struct rte_bus *bus; 1028 uint64_t pci_len; 1029 1030 if (reg_off & 0x3) { 1031 printf("Port register offset 0x%X not aligned on a 4-byte " 1032 "boundary\n", 1033 (unsigned)reg_off); 1034 return 1; 1035 } 1036 1037 if (!ports[port_id].dev_info.device) { 1038 printf("Invalid device\n"); 1039 return 0; 1040 } 1041 1042 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1043 if (bus && !strcmp(bus->name, "pci")) { 1044 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1045 } else { 1046 printf("Not a PCI device\n"); 1047 return 1; 1048 } 1049 1050 pci_len = pci_dev->mem_resource[0].len; 1051 if (reg_off >= pci_len) { 1052 printf("Port %d: register offset %u (0x%X) out of port PCI " 1053 "resource (length=%"PRIu64")\n", 1054 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 1055 return 1; 1056 } 1057 return 0; 1058 } 1059 1060 static int 1061 reg_bit_pos_is_invalid(uint8_t bit_pos) 1062 { 1063 if (bit_pos <= 31) 1064 return 0; 1065 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 1066 return 1; 1067 } 1068 1069 #define display_port_and_reg_off(port_id, reg_off) \ 1070 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1071 1072 static inline void 1073 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1074 { 1075 display_port_and_reg_off(port_id, (unsigned)reg_off); 1076 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1077 } 1078 1079 void 1080 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1081 { 1082 uint32_t reg_v; 1083 1084 1085 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1086 return; 1087 if (port_reg_off_is_invalid(port_id, reg_off)) 1088 return; 1089 if (reg_bit_pos_is_invalid(bit_x)) 1090 return; 1091 reg_v = port_id_pci_reg_read(port_id, reg_off); 1092 display_port_and_reg_off(port_id, (unsigned)reg_off); 1093 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1094 } 1095 1096 void 1097 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1098 uint8_t bit1_pos, uint8_t bit2_pos) 1099 { 1100 uint32_t reg_v; 1101 uint8_t l_bit; 1102 uint8_t h_bit; 1103 1104 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1105 return; 1106 if (port_reg_off_is_invalid(port_id, reg_off)) 1107 return; 1108 if (reg_bit_pos_is_invalid(bit1_pos)) 1109 return; 1110 if (reg_bit_pos_is_invalid(bit2_pos)) 1111 return; 1112 if (bit1_pos > bit2_pos) 1113 l_bit = bit2_pos, h_bit = bit1_pos; 1114 else 1115 l_bit = bit1_pos, h_bit = bit2_pos; 1116 1117 reg_v = port_id_pci_reg_read(port_id, reg_off); 1118 reg_v >>= l_bit; 1119 if (h_bit < 31) 1120 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1121 display_port_and_reg_off(port_id, (unsigned)reg_off); 1122 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1123 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1124 } 1125 1126 void 1127 port_reg_display(portid_t port_id, uint32_t reg_off) 1128 { 1129 uint32_t reg_v; 1130 1131 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1132 return; 1133 if (port_reg_off_is_invalid(port_id, reg_off)) 1134 return; 1135 reg_v = port_id_pci_reg_read(port_id, reg_off); 1136 display_port_reg_value(port_id, reg_off, reg_v); 1137 } 1138 1139 void 1140 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1141 uint8_t bit_v) 1142 { 1143 uint32_t reg_v; 1144 1145 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1146 return; 1147 if (port_reg_off_is_invalid(port_id, reg_off)) 1148 return; 1149 if (reg_bit_pos_is_invalid(bit_pos)) 1150 return; 1151 if (bit_v > 1) { 1152 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1153 return; 1154 } 1155 reg_v = port_id_pci_reg_read(port_id, reg_off); 1156 if (bit_v == 0) 1157 reg_v &= ~(1 << bit_pos); 1158 else 1159 reg_v |= (1 << bit_pos); 1160 port_id_pci_reg_write(port_id, reg_off, reg_v); 1161 display_port_reg_value(port_id, reg_off, reg_v); 1162 } 1163 1164 void 1165 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1166 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1167 { 1168 uint32_t max_v; 1169 uint32_t reg_v; 1170 uint8_t l_bit; 1171 uint8_t h_bit; 1172 1173 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1174 return; 1175 if (port_reg_off_is_invalid(port_id, reg_off)) 1176 return; 1177 if (reg_bit_pos_is_invalid(bit1_pos)) 1178 return; 1179 if (reg_bit_pos_is_invalid(bit2_pos)) 1180 return; 1181 if (bit1_pos > bit2_pos) 1182 l_bit = bit2_pos, h_bit = bit1_pos; 1183 else 1184 l_bit = bit1_pos, h_bit = bit2_pos; 1185 1186 if ((h_bit - l_bit) < 31) 1187 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1188 else 1189 max_v = 0xFFFFFFFF; 1190 1191 if (value > max_v) { 1192 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1193 (unsigned)value, (unsigned)value, 1194 (unsigned)max_v, (unsigned)max_v); 1195 return; 1196 } 1197 reg_v = port_id_pci_reg_read(port_id, reg_off); 1198 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1199 reg_v |= (value << l_bit); /* Set changed bits */ 1200 port_id_pci_reg_write(port_id, reg_off, reg_v); 1201 display_port_reg_value(port_id, reg_off, reg_v); 1202 } 1203 1204 void 1205 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1206 { 1207 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1208 return; 1209 if (port_reg_off_is_invalid(port_id, reg_off)) 1210 return; 1211 port_id_pci_reg_write(port_id, reg_off, reg_v); 1212 display_port_reg_value(port_id, reg_off, reg_v); 1213 } 1214 1215 void 1216 port_mtu_set(portid_t port_id, uint16_t mtu) 1217 { 1218 int diag; 1219 struct rte_eth_dev_info dev_info; 1220 int ret; 1221 1222 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1223 return; 1224 1225 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1226 if (ret != 0) 1227 return; 1228 1229 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1230 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1231 mtu, dev_info.min_mtu, dev_info.max_mtu); 1232 return; 1233 } 1234 diag = rte_eth_dev_set_mtu(port_id, mtu); 1235 if (diag == 0) 1236 return; 1237 printf("Set MTU failed. diag=%d\n", diag); 1238 } 1239 1240 /* Generic flow management functions. */ 1241 1242 /** Generate a port_flow entry from attributes/pattern/actions. */ 1243 static struct port_flow * 1244 port_flow_new(const struct rte_flow_attr *attr, 1245 const struct rte_flow_item *pattern, 1246 const struct rte_flow_action *actions, 1247 struct rte_flow_error *error) 1248 { 1249 const struct rte_flow_conv_rule rule = { 1250 .attr_ro = attr, 1251 .pattern_ro = pattern, 1252 .actions_ro = actions, 1253 }; 1254 struct port_flow *pf; 1255 int ret; 1256 1257 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1258 if (ret < 0) 1259 return NULL; 1260 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1261 if (!pf) { 1262 rte_flow_error_set 1263 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1264 "calloc() failed"); 1265 return NULL; 1266 } 1267 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1268 error) >= 0) 1269 return pf; 1270 free(pf); 1271 return NULL; 1272 } 1273 1274 /** Print a message out of a flow error. */ 1275 static int 1276 port_flow_complain(struct rte_flow_error *error) 1277 { 1278 static const char *const errstrlist[] = { 1279 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1280 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1281 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1282 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1283 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1284 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1285 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1286 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1287 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1288 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1289 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1290 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1291 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1292 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1293 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1294 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1295 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1296 }; 1297 const char *errstr; 1298 char buf[32]; 1299 int err = rte_errno; 1300 1301 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1302 !errstrlist[error->type]) 1303 errstr = "unknown type"; 1304 else 1305 errstr = errstrlist[error->type]; 1306 printf("Caught error type %d (%s): %s%s: %s\n", 1307 error->type, errstr, 1308 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1309 error->cause), buf) : "", 1310 error->message ? error->message : "(no stated reason)", 1311 rte_strerror(err)); 1312 return -err; 1313 } 1314 1315 /** Validate flow rule. */ 1316 int 1317 port_flow_validate(portid_t port_id, 1318 const struct rte_flow_attr *attr, 1319 const struct rte_flow_item *pattern, 1320 const struct rte_flow_action *actions) 1321 { 1322 struct rte_flow_error error; 1323 1324 /* Poisoning to make sure PMDs update it in case of error. */ 1325 memset(&error, 0x11, sizeof(error)); 1326 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1327 return port_flow_complain(&error); 1328 printf("Flow rule validated\n"); 1329 return 0; 1330 } 1331 1332 /** Create flow rule. */ 1333 int 1334 port_flow_create(portid_t port_id, 1335 const struct rte_flow_attr *attr, 1336 const struct rte_flow_item *pattern, 1337 const struct rte_flow_action *actions) 1338 { 1339 struct rte_flow *flow; 1340 struct rte_port *port; 1341 struct port_flow *pf; 1342 uint32_t id; 1343 struct rte_flow_error error; 1344 1345 /* Poisoning to make sure PMDs update it in case of error. */ 1346 memset(&error, 0x22, sizeof(error)); 1347 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1348 if (!flow) 1349 return port_flow_complain(&error); 1350 port = &ports[port_id]; 1351 if (port->flow_list) { 1352 if (port->flow_list->id == UINT32_MAX) { 1353 printf("Highest rule ID is already assigned, delete" 1354 " it first"); 1355 rte_flow_destroy(port_id, flow, NULL); 1356 return -ENOMEM; 1357 } 1358 id = port->flow_list->id + 1; 1359 } else 1360 id = 0; 1361 pf = port_flow_new(attr, pattern, actions, &error); 1362 if (!pf) { 1363 rte_flow_destroy(port_id, flow, NULL); 1364 return port_flow_complain(&error); 1365 } 1366 pf->next = port->flow_list; 1367 pf->id = id; 1368 pf->flow = flow; 1369 port->flow_list = pf; 1370 printf("Flow rule #%u created\n", pf->id); 1371 return 0; 1372 } 1373 1374 /** Destroy a number of flow rules. */ 1375 int 1376 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1377 { 1378 struct rte_port *port; 1379 struct port_flow **tmp; 1380 uint32_t c = 0; 1381 int ret = 0; 1382 1383 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1384 port_id == (portid_t)RTE_PORT_ALL) 1385 return -EINVAL; 1386 port = &ports[port_id]; 1387 tmp = &port->flow_list; 1388 while (*tmp) { 1389 uint32_t i; 1390 1391 for (i = 0; i != n; ++i) { 1392 struct rte_flow_error error; 1393 struct port_flow *pf = *tmp; 1394 1395 if (rule[i] != pf->id) 1396 continue; 1397 /* 1398 * Poisoning to make sure PMDs update it in case 1399 * of error. 1400 */ 1401 memset(&error, 0x33, sizeof(error)); 1402 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1403 ret = port_flow_complain(&error); 1404 continue; 1405 } 1406 printf("Flow rule #%u destroyed\n", pf->id); 1407 *tmp = pf->next; 1408 free(pf); 1409 break; 1410 } 1411 if (i == n) 1412 tmp = &(*tmp)->next; 1413 ++c; 1414 } 1415 return ret; 1416 } 1417 1418 /** Remove all flow rules. */ 1419 int 1420 port_flow_flush(portid_t port_id) 1421 { 1422 struct rte_flow_error error; 1423 struct rte_port *port; 1424 int ret = 0; 1425 1426 /* Poisoning to make sure PMDs update it in case of error. */ 1427 memset(&error, 0x44, sizeof(error)); 1428 if (rte_flow_flush(port_id, &error)) { 1429 ret = port_flow_complain(&error); 1430 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1431 port_id == (portid_t)RTE_PORT_ALL) 1432 return ret; 1433 } 1434 port = &ports[port_id]; 1435 while (port->flow_list) { 1436 struct port_flow *pf = port->flow_list->next; 1437 1438 free(port->flow_list); 1439 port->flow_list = pf; 1440 } 1441 return ret; 1442 } 1443 1444 /** Query a flow rule. */ 1445 int 1446 port_flow_query(portid_t port_id, uint32_t rule, 1447 const struct rte_flow_action *action) 1448 { 1449 struct rte_flow_error error; 1450 struct rte_port *port; 1451 struct port_flow *pf; 1452 const char *name; 1453 union { 1454 struct rte_flow_query_count count; 1455 } query; 1456 int ret; 1457 1458 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1459 port_id == (portid_t)RTE_PORT_ALL) 1460 return -EINVAL; 1461 port = &ports[port_id]; 1462 for (pf = port->flow_list; pf; pf = pf->next) 1463 if (pf->id == rule) 1464 break; 1465 if (!pf) { 1466 printf("Flow rule #%u not found\n", rule); 1467 return -ENOENT; 1468 } 1469 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1470 &name, sizeof(name), 1471 (void *)(uintptr_t)action->type, &error); 1472 if (ret < 0) 1473 return port_flow_complain(&error); 1474 switch (action->type) { 1475 case RTE_FLOW_ACTION_TYPE_COUNT: 1476 break; 1477 default: 1478 printf("Cannot query action type %d (%s)\n", 1479 action->type, name); 1480 return -ENOTSUP; 1481 } 1482 /* Poisoning to make sure PMDs update it in case of error. */ 1483 memset(&error, 0x55, sizeof(error)); 1484 memset(&query, 0, sizeof(query)); 1485 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1486 return port_flow_complain(&error); 1487 switch (action->type) { 1488 case RTE_FLOW_ACTION_TYPE_COUNT: 1489 printf("%s:\n" 1490 " hits_set: %u\n" 1491 " bytes_set: %u\n" 1492 " hits: %" PRIu64 "\n" 1493 " bytes: %" PRIu64 "\n", 1494 name, 1495 query.count.hits_set, 1496 query.count.bytes_set, 1497 query.count.hits, 1498 query.count.bytes); 1499 break; 1500 default: 1501 printf("Cannot display result for action type %d (%s)\n", 1502 action->type, name); 1503 break; 1504 } 1505 return 0; 1506 } 1507 1508 /** List flow rules. */ 1509 void 1510 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1511 { 1512 struct rte_port *port; 1513 struct port_flow *pf; 1514 struct port_flow *list = NULL; 1515 uint32_t i; 1516 1517 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1518 port_id == (portid_t)RTE_PORT_ALL) 1519 return; 1520 port = &ports[port_id]; 1521 if (!port->flow_list) 1522 return; 1523 /* Sort flows by group, priority and ID. */ 1524 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1525 struct port_flow **tmp; 1526 const struct rte_flow_attr *curr = pf->rule.attr; 1527 1528 if (n) { 1529 /* Filter out unwanted groups. */ 1530 for (i = 0; i != n; ++i) 1531 if (curr->group == group[i]) 1532 break; 1533 if (i == n) 1534 continue; 1535 } 1536 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 1537 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 1538 1539 if (curr->group > comp->group || 1540 (curr->group == comp->group && 1541 curr->priority > comp->priority) || 1542 (curr->group == comp->group && 1543 curr->priority == comp->priority && 1544 pf->id > (*tmp)->id)) 1545 continue; 1546 break; 1547 } 1548 pf->tmp = *tmp; 1549 *tmp = pf; 1550 } 1551 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1552 for (pf = list; pf != NULL; pf = pf->tmp) { 1553 const struct rte_flow_item *item = pf->rule.pattern; 1554 const struct rte_flow_action *action = pf->rule.actions; 1555 const char *name; 1556 1557 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1558 pf->id, 1559 pf->rule.attr->group, 1560 pf->rule.attr->priority, 1561 pf->rule.attr->ingress ? 'i' : '-', 1562 pf->rule.attr->egress ? 'e' : '-', 1563 pf->rule.attr->transfer ? 't' : '-'); 1564 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1565 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 1566 &name, sizeof(name), 1567 (void *)(uintptr_t)item->type, 1568 NULL) <= 0) 1569 name = "[UNKNOWN]"; 1570 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1571 printf("%s ", name); 1572 ++item; 1573 } 1574 printf("=>"); 1575 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1576 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1577 &name, sizeof(name), 1578 (void *)(uintptr_t)action->type, 1579 NULL) <= 0) 1580 name = "[UNKNOWN]"; 1581 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1582 printf(" %s", name); 1583 ++action; 1584 } 1585 printf("\n"); 1586 } 1587 } 1588 1589 /** Restrict ingress traffic to the defined flow rules. */ 1590 int 1591 port_flow_isolate(portid_t port_id, int set) 1592 { 1593 struct rte_flow_error error; 1594 1595 /* Poisoning to make sure PMDs update it in case of error. */ 1596 memset(&error, 0x66, sizeof(error)); 1597 if (rte_flow_isolate(port_id, set, &error)) 1598 return port_flow_complain(&error); 1599 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1600 port_id, 1601 set ? "now restricted" : "not restricted anymore"); 1602 return 0; 1603 } 1604 1605 /* 1606 * RX/TX ring descriptors display functions. 1607 */ 1608 int 1609 rx_queue_id_is_invalid(queueid_t rxq_id) 1610 { 1611 if (rxq_id < nb_rxq) 1612 return 0; 1613 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1614 return 1; 1615 } 1616 1617 int 1618 tx_queue_id_is_invalid(queueid_t txq_id) 1619 { 1620 if (txq_id < nb_txq) 1621 return 0; 1622 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1623 return 1; 1624 } 1625 1626 static int 1627 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1628 { 1629 if (rxdesc_id < nb_rxd) 1630 return 0; 1631 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1632 rxdesc_id, nb_rxd); 1633 return 1; 1634 } 1635 1636 static int 1637 tx_desc_id_is_invalid(uint16_t txdesc_id) 1638 { 1639 if (txdesc_id < nb_txd) 1640 return 0; 1641 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1642 txdesc_id, nb_txd); 1643 return 1; 1644 } 1645 1646 static const struct rte_memzone * 1647 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1648 { 1649 char mz_name[RTE_MEMZONE_NAMESIZE]; 1650 const struct rte_memzone *mz; 1651 1652 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 1653 port_id, q_id, ring_name); 1654 mz = rte_memzone_lookup(mz_name); 1655 if (mz == NULL) 1656 printf("%s ring memory zoneof (port %d, queue %d) not" 1657 "found (zone name = %s\n", 1658 ring_name, port_id, q_id, mz_name); 1659 return mz; 1660 } 1661 1662 union igb_ring_dword { 1663 uint64_t dword; 1664 struct { 1665 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1666 uint32_t lo; 1667 uint32_t hi; 1668 #else 1669 uint32_t hi; 1670 uint32_t lo; 1671 #endif 1672 } words; 1673 }; 1674 1675 struct igb_ring_desc_32_bytes { 1676 union igb_ring_dword lo_dword; 1677 union igb_ring_dword hi_dword; 1678 union igb_ring_dword resv1; 1679 union igb_ring_dword resv2; 1680 }; 1681 1682 struct igb_ring_desc_16_bytes { 1683 union igb_ring_dword lo_dword; 1684 union igb_ring_dword hi_dword; 1685 }; 1686 1687 static void 1688 ring_rxd_display_dword(union igb_ring_dword dword) 1689 { 1690 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1691 (unsigned)dword.words.hi); 1692 } 1693 1694 static void 1695 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1696 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1697 portid_t port_id, 1698 #else 1699 __rte_unused portid_t port_id, 1700 #endif 1701 uint16_t desc_id) 1702 { 1703 struct igb_ring_desc_16_bytes *ring = 1704 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1705 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1706 int ret; 1707 struct rte_eth_dev_info dev_info; 1708 1709 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1710 if (ret != 0) 1711 return; 1712 1713 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1714 /* 32 bytes RX descriptor, i40e only */ 1715 struct igb_ring_desc_32_bytes *ring = 1716 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1717 ring[desc_id].lo_dword.dword = 1718 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1719 ring_rxd_display_dword(ring[desc_id].lo_dword); 1720 ring[desc_id].hi_dword.dword = 1721 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1722 ring_rxd_display_dword(ring[desc_id].hi_dword); 1723 ring[desc_id].resv1.dword = 1724 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1725 ring_rxd_display_dword(ring[desc_id].resv1); 1726 ring[desc_id].resv2.dword = 1727 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1728 ring_rxd_display_dword(ring[desc_id].resv2); 1729 1730 return; 1731 } 1732 #endif 1733 /* 16 bytes RX descriptor */ 1734 ring[desc_id].lo_dword.dword = 1735 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1736 ring_rxd_display_dword(ring[desc_id].lo_dword); 1737 ring[desc_id].hi_dword.dword = 1738 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1739 ring_rxd_display_dword(ring[desc_id].hi_dword); 1740 } 1741 1742 static void 1743 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1744 { 1745 struct igb_ring_desc_16_bytes *ring; 1746 struct igb_ring_desc_16_bytes txd; 1747 1748 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1749 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1750 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1751 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1752 (unsigned)txd.lo_dword.words.lo, 1753 (unsigned)txd.lo_dword.words.hi, 1754 (unsigned)txd.hi_dword.words.lo, 1755 (unsigned)txd.hi_dword.words.hi); 1756 } 1757 1758 void 1759 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1760 { 1761 const struct rte_memzone *rx_mz; 1762 1763 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1764 return; 1765 if (rx_queue_id_is_invalid(rxq_id)) 1766 return; 1767 if (rx_desc_id_is_invalid(rxd_id)) 1768 return; 1769 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1770 if (rx_mz == NULL) 1771 return; 1772 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1773 } 1774 1775 void 1776 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1777 { 1778 const struct rte_memzone *tx_mz; 1779 1780 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1781 return; 1782 if (tx_queue_id_is_invalid(txq_id)) 1783 return; 1784 if (tx_desc_id_is_invalid(txd_id)) 1785 return; 1786 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1787 if (tx_mz == NULL) 1788 return; 1789 ring_tx_descriptor_display(tx_mz, txd_id); 1790 } 1791 1792 void 1793 fwd_lcores_config_display(void) 1794 { 1795 lcoreid_t lc_id; 1796 1797 printf("List of forwarding lcores:"); 1798 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1799 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1800 printf("\n"); 1801 } 1802 void 1803 rxtx_config_display(void) 1804 { 1805 portid_t pid; 1806 queueid_t qid; 1807 1808 printf(" %s packet forwarding%s packets/burst=%d\n", 1809 cur_fwd_eng->fwd_mode_name, 1810 retry_enabled == 0 ? "" : " with retry", 1811 nb_pkt_per_burst); 1812 1813 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1814 printf(" packet len=%u - nb packet segments=%d\n", 1815 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1816 1817 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1818 nb_fwd_lcores, nb_fwd_ports); 1819 1820 RTE_ETH_FOREACH_DEV(pid) { 1821 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1822 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1823 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1824 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1825 uint16_t nb_rx_desc_tmp; 1826 uint16_t nb_tx_desc_tmp; 1827 struct rte_eth_rxq_info rx_qinfo; 1828 struct rte_eth_txq_info tx_qinfo; 1829 int32_t rc; 1830 1831 /* per port config */ 1832 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1833 (unsigned int)pid, nb_rxq, nb_txq); 1834 1835 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1836 ports[pid].dev_conf.rxmode.offloads, 1837 ports[pid].dev_conf.txmode.offloads); 1838 1839 /* per rx queue config only for first queue to be less verbose */ 1840 for (qid = 0; qid < 1; qid++) { 1841 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1842 if (rc) 1843 nb_rx_desc_tmp = nb_rx_desc[qid]; 1844 else 1845 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1846 1847 printf(" RX queue: %d\n", qid); 1848 printf(" RX desc=%d - RX free threshold=%d\n", 1849 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1850 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1851 " wthresh=%d\n", 1852 rx_conf[qid].rx_thresh.pthresh, 1853 rx_conf[qid].rx_thresh.hthresh, 1854 rx_conf[qid].rx_thresh.wthresh); 1855 printf(" RX Offloads=0x%"PRIx64"\n", 1856 rx_conf[qid].offloads); 1857 } 1858 1859 /* per tx queue config only for first queue to be less verbose */ 1860 for (qid = 0; qid < 1; qid++) { 1861 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1862 if (rc) 1863 nb_tx_desc_tmp = nb_tx_desc[qid]; 1864 else 1865 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1866 1867 printf(" TX queue: %d\n", qid); 1868 printf(" TX desc=%d - TX free threshold=%d\n", 1869 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1870 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1871 " wthresh=%d\n", 1872 tx_conf[qid].tx_thresh.pthresh, 1873 tx_conf[qid].tx_thresh.hthresh, 1874 tx_conf[qid].tx_thresh.wthresh); 1875 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1876 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1877 } 1878 } 1879 } 1880 1881 void 1882 port_rss_reta_info(portid_t port_id, 1883 struct rte_eth_rss_reta_entry64 *reta_conf, 1884 uint16_t nb_entries) 1885 { 1886 uint16_t i, idx, shift; 1887 int ret; 1888 1889 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1890 return; 1891 1892 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1893 if (ret != 0) { 1894 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1895 return; 1896 } 1897 1898 for (i = 0; i < nb_entries; i++) { 1899 idx = i / RTE_RETA_GROUP_SIZE; 1900 shift = i % RTE_RETA_GROUP_SIZE; 1901 if (!(reta_conf[idx].mask & (1ULL << shift))) 1902 continue; 1903 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1904 i, reta_conf[idx].reta[shift]); 1905 } 1906 } 1907 1908 /* 1909 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1910 * key of the port. 1911 */ 1912 void 1913 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 1914 { 1915 struct rte_eth_rss_conf rss_conf = {0}; 1916 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1917 uint64_t rss_hf; 1918 uint8_t i; 1919 int diag; 1920 struct rte_eth_dev_info dev_info; 1921 uint8_t hash_key_size; 1922 int ret; 1923 1924 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1925 return; 1926 1927 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1928 if (ret != 0) 1929 return; 1930 1931 if (dev_info.hash_key_size > 0 && 1932 dev_info.hash_key_size <= sizeof(rss_key)) 1933 hash_key_size = dev_info.hash_key_size; 1934 else { 1935 printf("dev_info did not provide a valid hash key size\n"); 1936 return; 1937 } 1938 1939 /* Get RSS hash key if asked to display it */ 1940 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1941 rss_conf.rss_key_len = hash_key_size; 1942 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1943 if (diag != 0) { 1944 switch (diag) { 1945 case -ENODEV: 1946 printf("port index %d invalid\n", port_id); 1947 break; 1948 case -ENOTSUP: 1949 printf("operation not supported by device\n"); 1950 break; 1951 default: 1952 printf("operation failed - diag=%d\n", diag); 1953 break; 1954 } 1955 return; 1956 } 1957 rss_hf = rss_conf.rss_hf; 1958 if (rss_hf == 0) { 1959 printf("RSS disabled\n"); 1960 return; 1961 } 1962 printf("RSS functions:\n "); 1963 for (i = 0; rss_type_table[i].str; i++) { 1964 if (rss_hf & rss_type_table[i].rss_type) 1965 printf("%s ", rss_type_table[i].str); 1966 } 1967 printf("\n"); 1968 if (!show_rss_key) 1969 return; 1970 printf("RSS key:\n"); 1971 for (i = 0; i < hash_key_size; i++) 1972 printf("%02X", rss_key[i]); 1973 printf("\n"); 1974 } 1975 1976 void 1977 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1978 uint hash_key_len) 1979 { 1980 struct rte_eth_rss_conf rss_conf; 1981 int diag; 1982 unsigned int i; 1983 1984 rss_conf.rss_key = NULL; 1985 rss_conf.rss_key_len = hash_key_len; 1986 rss_conf.rss_hf = 0; 1987 for (i = 0; rss_type_table[i].str; i++) { 1988 if (!strcmp(rss_type_table[i].str, rss_type)) 1989 rss_conf.rss_hf = rss_type_table[i].rss_type; 1990 } 1991 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1992 if (diag == 0) { 1993 rss_conf.rss_key = hash_key; 1994 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1995 } 1996 if (diag == 0) 1997 return; 1998 1999 switch (diag) { 2000 case -ENODEV: 2001 printf("port index %d invalid\n", port_id); 2002 break; 2003 case -ENOTSUP: 2004 printf("operation not supported by device\n"); 2005 break; 2006 default: 2007 printf("operation failed - diag=%d\n", diag); 2008 break; 2009 } 2010 } 2011 2012 /* 2013 * Setup forwarding configuration for each logical core. 2014 */ 2015 static void 2016 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2017 { 2018 streamid_t nb_fs_per_lcore; 2019 streamid_t nb_fs; 2020 streamid_t sm_id; 2021 lcoreid_t nb_extra; 2022 lcoreid_t nb_fc; 2023 lcoreid_t nb_lc; 2024 lcoreid_t lc_id; 2025 2026 nb_fs = cfg->nb_fwd_streams; 2027 nb_fc = cfg->nb_fwd_lcores; 2028 if (nb_fs <= nb_fc) { 2029 nb_fs_per_lcore = 1; 2030 nb_extra = 0; 2031 } else { 2032 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2033 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2034 } 2035 2036 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2037 sm_id = 0; 2038 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2039 fwd_lcores[lc_id]->stream_idx = sm_id; 2040 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2041 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2042 } 2043 2044 /* 2045 * Assign extra remaining streams, if any. 2046 */ 2047 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2048 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2049 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2050 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2051 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2052 } 2053 } 2054 2055 static portid_t 2056 fwd_topology_tx_port_get(portid_t rxp) 2057 { 2058 static int warning_once = 1; 2059 2060 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2061 2062 switch (port_topology) { 2063 default: 2064 case PORT_TOPOLOGY_PAIRED: 2065 if ((rxp & 0x1) == 0) { 2066 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2067 return rxp + 1; 2068 if (warning_once) { 2069 printf("\nWarning! port-topology=paired" 2070 " and odd forward ports number," 2071 " the last port will pair with" 2072 " itself.\n\n"); 2073 warning_once = 0; 2074 } 2075 return rxp; 2076 } 2077 return rxp - 1; 2078 case PORT_TOPOLOGY_CHAINED: 2079 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2080 case PORT_TOPOLOGY_LOOP: 2081 return rxp; 2082 } 2083 } 2084 2085 static void 2086 simple_fwd_config_setup(void) 2087 { 2088 portid_t i; 2089 2090 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2091 cur_fwd_config.nb_fwd_streams = 2092 (streamid_t) cur_fwd_config.nb_fwd_ports; 2093 2094 /* reinitialize forwarding streams */ 2095 init_fwd_streams(); 2096 2097 /* 2098 * In the simple forwarding test, the number of forwarding cores 2099 * must be lower or equal to the number of forwarding ports. 2100 */ 2101 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2102 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2103 cur_fwd_config.nb_fwd_lcores = 2104 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2105 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2106 2107 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2108 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2109 fwd_streams[i]->rx_queue = 0; 2110 fwd_streams[i]->tx_port = 2111 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2112 fwd_streams[i]->tx_queue = 0; 2113 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2114 fwd_streams[i]->retry_enabled = retry_enabled; 2115 } 2116 } 2117 2118 /** 2119 * For the RSS forwarding test all streams distributed over lcores. Each stream 2120 * being composed of a RX queue to poll on a RX port for input messages, 2121 * associated with a TX queue of a TX port where to send forwarded packets. 2122 */ 2123 static void 2124 rss_fwd_config_setup(void) 2125 { 2126 portid_t rxp; 2127 portid_t txp; 2128 queueid_t rxq; 2129 queueid_t nb_q; 2130 streamid_t sm_id; 2131 2132 nb_q = nb_rxq; 2133 if (nb_q > nb_txq) 2134 nb_q = nb_txq; 2135 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2136 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2137 cur_fwd_config.nb_fwd_streams = 2138 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2139 2140 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2141 cur_fwd_config.nb_fwd_lcores = 2142 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2143 2144 /* reinitialize forwarding streams */ 2145 init_fwd_streams(); 2146 2147 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2148 rxp = 0; rxq = 0; 2149 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2150 struct fwd_stream *fs; 2151 2152 fs = fwd_streams[sm_id]; 2153 txp = fwd_topology_tx_port_get(rxp); 2154 fs->rx_port = fwd_ports_ids[rxp]; 2155 fs->rx_queue = rxq; 2156 fs->tx_port = fwd_ports_ids[txp]; 2157 fs->tx_queue = rxq; 2158 fs->peer_addr = fs->tx_port; 2159 fs->retry_enabled = retry_enabled; 2160 rxp++; 2161 if (rxp < nb_fwd_ports) 2162 continue; 2163 rxp = 0; 2164 rxq++; 2165 } 2166 } 2167 2168 /** 2169 * For the DCB forwarding test, each core is assigned on each traffic class. 2170 * 2171 * Each core is assigned a multi-stream, each stream being composed of 2172 * a RX queue to poll on a RX port for input messages, associated with 2173 * a TX queue of a TX port where to send forwarded packets. All RX and 2174 * TX queues are mapping to the same traffic class. 2175 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2176 * the same core 2177 */ 2178 static void 2179 dcb_fwd_config_setup(void) 2180 { 2181 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2182 portid_t txp, rxp = 0; 2183 queueid_t txq, rxq = 0; 2184 lcoreid_t lc_id; 2185 uint16_t nb_rx_queue, nb_tx_queue; 2186 uint16_t i, j, k, sm_id = 0; 2187 uint8_t tc = 0; 2188 2189 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2190 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2191 cur_fwd_config.nb_fwd_streams = 2192 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2193 2194 /* reinitialize forwarding streams */ 2195 init_fwd_streams(); 2196 sm_id = 0; 2197 txp = 1; 2198 /* get the dcb info on the first RX and TX ports */ 2199 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2200 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2201 2202 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2203 fwd_lcores[lc_id]->stream_nb = 0; 2204 fwd_lcores[lc_id]->stream_idx = sm_id; 2205 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2206 /* if the nb_queue is zero, means this tc is 2207 * not enabled on the POOL 2208 */ 2209 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2210 break; 2211 k = fwd_lcores[lc_id]->stream_nb + 2212 fwd_lcores[lc_id]->stream_idx; 2213 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2214 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2215 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2216 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2217 for (j = 0; j < nb_rx_queue; j++) { 2218 struct fwd_stream *fs; 2219 2220 fs = fwd_streams[k + j]; 2221 fs->rx_port = fwd_ports_ids[rxp]; 2222 fs->rx_queue = rxq + j; 2223 fs->tx_port = fwd_ports_ids[txp]; 2224 fs->tx_queue = txq + j % nb_tx_queue; 2225 fs->peer_addr = fs->tx_port; 2226 fs->retry_enabled = retry_enabled; 2227 } 2228 fwd_lcores[lc_id]->stream_nb += 2229 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2230 } 2231 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2232 2233 tc++; 2234 if (tc < rxp_dcb_info.nb_tcs) 2235 continue; 2236 /* Restart from TC 0 on next RX port */ 2237 tc = 0; 2238 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2239 rxp = (portid_t) 2240 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2241 else 2242 rxp++; 2243 if (rxp >= nb_fwd_ports) 2244 return; 2245 /* get the dcb information on next RX and TX ports */ 2246 if ((rxp & 0x1) == 0) 2247 txp = (portid_t) (rxp + 1); 2248 else 2249 txp = (portid_t) (rxp - 1); 2250 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2251 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2252 } 2253 } 2254 2255 static void 2256 icmp_echo_config_setup(void) 2257 { 2258 portid_t rxp; 2259 queueid_t rxq; 2260 lcoreid_t lc_id; 2261 uint16_t sm_id; 2262 2263 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2264 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2265 (nb_txq * nb_fwd_ports); 2266 else 2267 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2268 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2269 cur_fwd_config.nb_fwd_streams = 2270 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2271 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2272 cur_fwd_config.nb_fwd_lcores = 2273 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2274 if (verbose_level > 0) { 2275 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2276 __FUNCTION__, 2277 cur_fwd_config.nb_fwd_lcores, 2278 cur_fwd_config.nb_fwd_ports, 2279 cur_fwd_config.nb_fwd_streams); 2280 } 2281 2282 /* reinitialize forwarding streams */ 2283 init_fwd_streams(); 2284 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2285 rxp = 0; rxq = 0; 2286 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2287 if (verbose_level > 0) 2288 printf(" core=%d: \n", lc_id); 2289 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2290 struct fwd_stream *fs; 2291 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2292 fs->rx_port = fwd_ports_ids[rxp]; 2293 fs->rx_queue = rxq; 2294 fs->tx_port = fs->rx_port; 2295 fs->tx_queue = rxq; 2296 fs->peer_addr = fs->tx_port; 2297 fs->retry_enabled = retry_enabled; 2298 if (verbose_level > 0) 2299 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2300 sm_id, fs->rx_port, fs->rx_queue, 2301 fs->tx_queue); 2302 rxq = (queueid_t) (rxq + 1); 2303 if (rxq == nb_rxq) { 2304 rxq = 0; 2305 rxp = (portid_t) (rxp + 1); 2306 } 2307 } 2308 } 2309 } 2310 2311 #if defined RTE_LIBRTE_PMD_SOFTNIC 2312 static void 2313 softnic_fwd_config_setup(void) 2314 { 2315 struct rte_port *port; 2316 portid_t pid, softnic_portid; 2317 queueid_t i; 2318 uint8_t softnic_enable = 0; 2319 2320 RTE_ETH_FOREACH_DEV(pid) { 2321 port = &ports[pid]; 2322 const char *driver = port->dev_info.driver_name; 2323 2324 if (strcmp(driver, "net_softnic") == 0) { 2325 softnic_portid = pid; 2326 softnic_enable = 1; 2327 break; 2328 } 2329 } 2330 2331 if (softnic_enable == 0) { 2332 printf("Softnic mode not configured(%s)!\n", __func__); 2333 return; 2334 } 2335 2336 cur_fwd_config.nb_fwd_ports = 1; 2337 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; 2338 2339 /* Re-initialize forwarding streams */ 2340 init_fwd_streams(); 2341 2342 /* 2343 * In the softnic forwarding test, the number of forwarding cores 2344 * is set to one and remaining are used for softnic packet processing. 2345 */ 2346 cur_fwd_config.nb_fwd_lcores = 1; 2347 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2348 2349 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { 2350 fwd_streams[i]->rx_port = softnic_portid; 2351 fwd_streams[i]->rx_queue = i; 2352 fwd_streams[i]->tx_port = softnic_portid; 2353 fwd_streams[i]->tx_queue = i; 2354 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2355 fwd_streams[i]->retry_enabled = retry_enabled; 2356 } 2357 } 2358 #endif 2359 2360 void 2361 fwd_config_setup(void) 2362 { 2363 cur_fwd_config.fwd_eng = cur_fwd_eng; 2364 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2365 icmp_echo_config_setup(); 2366 return; 2367 } 2368 2369 #if defined RTE_LIBRTE_PMD_SOFTNIC 2370 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 2371 softnic_fwd_config_setup(); 2372 return; 2373 } 2374 #endif 2375 2376 if ((nb_rxq > 1) && (nb_txq > 1)){ 2377 if (dcb_config) 2378 dcb_fwd_config_setup(); 2379 else 2380 rss_fwd_config_setup(); 2381 } 2382 else 2383 simple_fwd_config_setup(); 2384 } 2385 2386 static const char * 2387 mp_alloc_to_str(uint8_t mode) 2388 { 2389 switch (mode) { 2390 case MP_ALLOC_NATIVE: 2391 return "native"; 2392 case MP_ALLOC_ANON: 2393 return "anon"; 2394 case MP_ALLOC_XMEM: 2395 return "xmem"; 2396 case MP_ALLOC_XMEM_HUGE: 2397 return "xmemhuge"; 2398 default: 2399 return "invalid"; 2400 } 2401 } 2402 2403 void 2404 pkt_fwd_config_display(struct fwd_config *cfg) 2405 { 2406 struct fwd_stream *fs; 2407 lcoreid_t lc_id; 2408 streamid_t sm_id; 2409 2410 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2411 "NUMA support %s, MP allocation mode: %s\n", 2412 cfg->fwd_eng->fwd_mode_name, 2413 retry_enabled == 0 ? "" : " with retry", 2414 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2415 numa_support == 1 ? "enabled" : "disabled", 2416 mp_alloc_to_str(mp_alloc_type)); 2417 2418 if (retry_enabled) 2419 printf("TX retry num: %u, delay between TX retries: %uus\n", 2420 burst_tx_retry_num, burst_tx_delay_time); 2421 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2422 printf("Logical Core %u (socket %u) forwards packets on " 2423 "%d streams:", 2424 fwd_lcores_cpuids[lc_id], 2425 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2426 fwd_lcores[lc_id]->stream_nb); 2427 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2428 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2429 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2430 "P=%d/Q=%d (socket %u) ", 2431 fs->rx_port, fs->rx_queue, 2432 ports[fs->rx_port].socket_id, 2433 fs->tx_port, fs->tx_queue, 2434 ports[fs->tx_port].socket_id); 2435 print_ethaddr("peer=", 2436 &peer_eth_addrs[fs->peer_addr]); 2437 } 2438 printf("\n"); 2439 } 2440 printf("\n"); 2441 } 2442 2443 void 2444 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2445 { 2446 struct rte_ether_addr new_peer_addr; 2447 if (!rte_eth_dev_is_valid_port(port_id)) { 2448 printf("Error: Invalid port number %i\n", port_id); 2449 return; 2450 } 2451 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 2452 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2453 return; 2454 } 2455 peer_eth_addrs[port_id] = new_peer_addr; 2456 } 2457 2458 int 2459 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2460 { 2461 unsigned int i; 2462 unsigned int lcore_cpuid; 2463 int record_now; 2464 2465 record_now = 0; 2466 again: 2467 for (i = 0; i < nb_lc; i++) { 2468 lcore_cpuid = lcorelist[i]; 2469 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2470 printf("lcore %u not enabled\n", lcore_cpuid); 2471 return -1; 2472 } 2473 if (lcore_cpuid == rte_get_master_lcore()) { 2474 printf("lcore %u cannot be masked on for running " 2475 "packet forwarding, which is the master lcore " 2476 "and reserved for command line parsing only\n", 2477 lcore_cpuid); 2478 return -1; 2479 } 2480 if (record_now) 2481 fwd_lcores_cpuids[i] = lcore_cpuid; 2482 } 2483 if (record_now == 0) { 2484 record_now = 1; 2485 goto again; 2486 } 2487 nb_cfg_lcores = (lcoreid_t) nb_lc; 2488 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2489 printf("previous number of forwarding cores %u - changed to " 2490 "number of configured cores %u\n", 2491 (unsigned int) nb_fwd_lcores, nb_lc); 2492 nb_fwd_lcores = (lcoreid_t) nb_lc; 2493 } 2494 2495 return 0; 2496 } 2497 2498 int 2499 set_fwd_lcores_mask(uint64_t lcoremask) 2500 { 2501 unsigned int lcorelist[64]; 2502 unsigned int nb_lc; 2503 unsigned int i; 2504 2505 if (lcoremask == 0) { 2506 printf("Invalid NULL mask of cores\n"); 2507 return -1; 2508 } 2509 nb_lc = 0; 2510 for (i = 0; i < 64; i++) { 2511 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2512 continue; 2513 lcorelist[nb_lc++] = i; 2514 } 2515 return set_fwd_lcores_list(lcorelist, nb_lc); 2516 } 2517 2518 void 2519 set_fwd_lcores_number(uint16_t nb_lc) 2520 { 2521 if (nb_lc > nb_cfg_lcores) { 2522 printf("nb fwd cores %u > %u (max. number of configured " 2523 "lcores) - ignored\n", 2524 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2525 return; 2526 } 2527 nb_fwd_lcores = (lcoreid_t) nb_lc; 2528 printf("Number of forwarding cores set to %u\n", 2529 (unsigned int) nb_fwd_lcores); 2530 } 2531 2532 void 2533 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2534 { 2535 unsigned int i; 2536 portid_t port_id; 2537 int record_now; 2538 2539 record_now = 0; 2540 again: 2541 for (i = 0; i < nb_pt; i++) { 2542 port_id = (portid_t) portlist[i]; 2543 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2544 return; 2545 if (record_now) 2546 fwd_ports_ids[i] = port_id; 2547 } 2548 if (record_now == 0) { 2549 record_now = 1; 2550 goto again; 2551 } 2552 nb_cfg_ports = (portid_t) nb_pt; 2553 if (nb_fwd_ports != (portid_t) nb_pt) { 2554 printf("previous number of forwarding ports %u - changed to " 2555 "number of configured ports %u\n", 2556 (unsigned int) nb_fwd_ports, nb_pt); 2557 nb_fwd_ports = (portid_t) nb_pt; 2558 } 2559 } 2560 2561 void 2562 set_fwd_ports_mask(uint64_t portmask) 2563 { 2564 unsigned int portlist[64]; 2565 unsigned int nb_pt; 2566 unsigned int i; 2567 2568 if (portmask == 0) { 2569 printf("Invalid NULL mask of ports\n"); 2570 return; 2571 } 2572 nb_pt = 0; 2573 RTE_ETH_FOREACH_DEV(i) { 2574 if (! ((uint64_t)(1ULL << i) & portmask)) 2575 continue; 2576 portlist[nb_pt++] = i; 2577 } 2578 set_fwd_ports_list(portlist, nb_pt); 2579 } 2580 2581 void 2582 set_fwd_ports_number(uint16_t nb_pt) 2583 { 2584 if (nb_pt > nb_cfg_ports) { 2585 printf("nb fwd ports %u > %u (number of configured " 2586 "ports) - ignored\n", 2587 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2588 return; 2589 } 2590 nb_fwd_ports = (portid_t) nb_pt; 2591 printf("Number of forwarding ports set to %u\n", 2592 (unsigned int) nb_fwd_ports); 2593 } 2594 2595 int 2596 port_is_forwarding(portid_t port_id) 2597 { 2598 unsigned int i; 2599 2600 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2601 return -1; 2602 2603 for (i = 0; i < nb_fwd_ports; i++) { 2604 if (fwd_ports_ids[i] == port_id) 2605 return 1; 2606 } 2607 2608 return 0; 2609 } 2610 2611 void 2612 set_nb_pkt_per_burst(uint16_t nb) 2613 { 2614 if (nb > MAX_PKT_BURST) { 2615 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2616 " ignored\n", 2617 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2618 return; 2619 } 2620 nb_pkt_per_burst = nb; 2621 printf("Number of packets per burst set to %u\n", 2622 (unsigned int) nb_pkt_per_burst); 2623 } 2624 2625 static const char * 2626 tx_split_get_name(enum tx_pkt_split split) 2627 { 2628 uint32_t i; 2629 2630 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2631 if (tx_split_name[i].split == split) 2632 return tx_split_name[i].name; 2633 } 2634 return NULL; 2635 } 2636 2637 void 2638 set_tx_pkt_split(const char *name) 2639 { 2640 uint32_t i; 2641 2642 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2643 if (strcmp(tx_split_name[i].name, name) == 0) { 2644 tx_pkt_split = tx_split_name[i].split; 2645 return; 2646 } 2647 } 2648 printf("unknown value: \"%s\"\n", name); 2649 } 2650 2651 void 2652 show_tx_pkt_segments(void) 2653 { 2654 uint32_t i, n; 2655 const char *split; 2656 2657 n = tx_pkt_nb_segs; 2658 split = tx_split_get_name(tx_pkt_split); 2659 2660 printf("Number of segments: %u\n", n); 2661 printf("Segment sizes: "); 2662 for (i = 0; i != n - 1; i++) 2663 printf("%hu,", tx_pkt_seg_lengths[i]); 2664 printf("%hu\n", tx_pkt_seg_lengths[i]); 2665 printf("Split packet: %s\n", split); 2666 } 2667 2668 void 2669 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2670 { 2671 uint16_t tx_pkt_len; 2672 unsigned i; 2673 2674 if (nb_segs >= (unsigned) nb_txd) { 2675 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2676 nb_segs, (unsigned int) nb_txd); 2677 return; 2678 } 2679 2680 /* 2681 * Check that each segment length is greater or equal than 2682 * the mbuf data sise. 2683 * Check also that the total packet length is greater or equal than the 2684 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 2685 * 20 + 8). 2686 */ 2687 tx_pkt_len = 0; 2688 for (i = 0; i < nb_segs; i++) { 2689 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2690 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2691 i, seg_lengths[i], (unsigned) mbuf_data_size); 2692 return; 2693 } 2694 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2695 } 2696 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 2697 printf("total packet length=%u < %d - give up\n", 2698 (unsigned) tx_pkt_len, 2699 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 2700 return; 2701 } 2702 2703 for (i = 0; i < nb_segs; i++) 2704 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2705 2706 tx_pkt_length = tx_pkt_len; 2707 tx_pkt_nb_segs = (uint8_t) nb_segs; 2708 } 2709 2710 void 2711 setup_gro(const char *onoff, portid_t port_id) 2712 { 2713 if (!rte_eth_dev_is_valid_port(port_id)) { 2714 printf("invalid port id %u\n", port_id); 2715 return; 2716 } 2717 if (test_done == 0) { 2718 printf("Before enable/disable GRO," 2719 " please stop forwarding first\n"); 2720 return; 2721 } 2722 if (strcmp(onoff, "on") == 0) { 2723 if (gro_ports[port_id].enable != 0) { 2724 printf("Port %u has enabled GRO. Please" 2725 " disable GRO first\n", port_id); 2726 return; 2727 } 2728 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2729 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2730 gro_ports[port_id].param.max_flow_num = 2731 GRO_DEFAULT_FLOW_NUM; 2732 gro_ports[port_id].param.max_item_per_flow = 2733 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2734 } 2735 gro_ports[port_id].enable = 1; 2736 } else { 2737 if (gro_ports[port_id].enable == 0) { 2738 printf("Port %u has disabled GRO\n", port_id); 2739 return; 2740 } 2741 gro_ports[port_id].enable = 0; 2742 } 2743 } 2744 2745 void 2746 setup_gro_flush_cycles(uint8_t cycles) 2747 { 2748 if (test_done == 0) { 2749 printf("Before change flush interval for GRO," 2750 " please stop forwarding first.\n"); 2751 return; 2752 } 2753 2754 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2755 GRO_DEFAULT_FLUSH_CYCLES) { 2756 printf("The flushing cycle be in the range" 2757 " of 1 to %u. Revert to the default" 2758 " value %u.\n", 2759 GRO_MAX_FLUSH_CYCLES, 2760 GRO_DEFAULT_FLUSH_CYCLES); 2761 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2762 } 2763 2764 gro_flush_cycles = cycles; 2765 } 2766 2767 void 2768 show_gro(portid_t port_id) 2769 { 2770 struct rte_gro_param *param; 2771 uint32_t max_pkts_num; 2772 2773 param = &gro_ports[port_id].param; 2774 2775 if (!rte_eth_dev_is_valid_port(port_id)) { 2776 printf("Invalid port id %u.\n", port_id); 2777 return; 2778 } 2779 if (gro_ports[port_id].enable) { 2780 printf("GRO type: TCP/IPv4\n"); 2781 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2782 max_pkts_num = param->max_flow_num * 2783 param->max_item_per_flow; 2784 } else 2785 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2786 printf("Max number of packets to perform GRO: %u\n", 2787 max_pkts_num); 2788 printf("Flushing cycles: %u\n", gro_flush_cycles); 2789 } else 2790 printf("Port %u doesn't enable GRO.\n", port_id); 2791 } 2792 2793 void 2794 setup_gso(const char *mode, portid_t port_id) 2795 { 2796 if (!rte_eth_dev_is_valid_port(port_id)) { 2797 printf("invalid port id %u\n", port_id); 2798 return; 2799 } 2800 if (strcmp(mode, "on") == 0) { 2801 if (test_done == 0) { 2802 printf("before enabling GSO," 2803 " please stop forwarding first\n"); 2804 return; 2805 } 2806 gso_ports[port_id].enable = 1; 2807 } else if (strcmp(mode, "off") == 0) { 2808 if (test_done == 0) { 2809 printf("before disabling GSO," 2810 " please stop forwarding first\n"); 2811 return; 2812 } 2813 gso_ports[port_id].enable = 0; 2814 } 2815 } 2816 2817 char* 2818 list_pkt_forwarding_modes(void) 2819 { 2820 static char fwd_modes[128] = ""; 2821 const char *separator = "|"; 2822 struct fwd_engine *fwd_eng; 2823 unsigned i = 0; 2824 2825 if (strlen (fwd_modes) == 0) { 2826 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2827 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2828 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2829 strncat(fwd_modes, separator, 2830 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2831 } 2832 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2833 } 2834 2835 return fwd_modes; 2836 } 2837 2838 char* 2839 list_pkt_forwarding_retry_modes(void) 2840 { 2841 static char fwd_modes[128] = ""; 2842 const char *separator = "|"; 2843 struct fwd_engine *fwd_eng; 2844 unsigned i = 0; 2845 2846 if (strlen(fwd_modes) == 0) { 2847 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2848 if (fwd_eng == &rx_only_engine) 2849 continue; 2850 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2851 sizeof(fwd_modes) - 2852 strlen(fwd_modes) - 1); 2853 strncat(fwd_modes, separator, 2854 sizeof(fwd_modes) - 2855 strlen(fwd_modes) - 1); 2856 } 2857 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2858 } 2859 2860 return fwd_modes; 2861 } 2862 2863 void 2864 set_pkt_forwarding_mode(const char *fwd_mode_name) 2865 { 2866 struct fwd_engine *fwd_eng; 2867 unsigned i; 2868 2869 i = 0; 2870 while ((fwd_eng = fwd_engines[i]) != NULL) { 2871 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2872 printf("Set %s packet forwarding mode%s\n", 2873 fwd_mode_name, 2874 retry_enabled == 0 ? "" : " with retry"); 2875 cur_fwd_eng = fwd_eng; 2876 return; 2877 } 2878 i++; 2879 } 2880 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2881 } 2882 2883 void 2884 add_rx_dump_callbacks(portid_t portid) 2885 { 2886 struct rte_eth_dev_info dev_info; 2887 uint16_t queue; 2888 int ret; 2889 2890 if (port_id_is_invalid(portid, ENABLED_WARN)) 2891 return; 2892 2893 ret = eth_dev_info_get_print_err(portid, &dev_info); 2894 if (ret != 0) 2895 return; 2896 2897 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2898 if (!ports[portid].rx_dump_cb[queue]) 2899 ports[portid].rx_dump_cb[queue] = 2900 rte_eth_add_rx_callback(portid, queue, 2901 dump_rx_pkts, NULL); 2902 } 2903 2904 void 2905 add_tx_dump_callbacks(portid_t portid) 2906 { 2907 struct rte_eth_dev_info dev_info; 2908 uint16_t queue; 2909 int ret; 2910 2911 if (port_id_is_invalid(portid, ENABLED_WARN)) 2912 return; 2913 2914 ret = eth_dev_info_get_print_err(portid, &dev_info); 2915 if (ret != 0) 2916 return; 2917 2918 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2919 if (!ports[portid].tx_dump_cb[queue]) 2920 ports[portid].tx_dump_cb[queue] = 2921 rte_eth_add_tx_callback(portid, queue, 2922 dump_tx_pkts, NULL); 2923 } 2924 2925 void 2926 remove_rx_dump_callbacks(portid_t portid) 2927 { 2928 struct rte_eth_dev_info dev_info; 2929 uint16_t queue; 2930 int ret; 2931 2932 if (port_id_is_invalid(portid, ENABLED_WARN)) 2933 return; 2934 2935 ret = eth_dev_info_get_print_err(portid, &dev_info); 2936 if (ret != 0) 2937 return; 2938 2939 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2940 if (ports[portid].rx_dump_cb[queue]) { 2941 rte_eth_remove_rx_callback(portid, queue, 2942 ports[portid].rx_dump_cb[queue]); 2943 ports[portid].rx_dump_cb[queue] = NULL; 2944 } 2945 } 2946 2947 void 2948 remove_tx_dump_callbacks(portid_t portid) 2949 { 2950 struct rte_eth_dev_info dev_info; 2951 uint16_t queue; 2952 int ret; 2953 2954 if (port_id_is_invalid(portid, ENABLED_WARN)) 2955 return; 2956 2957 ret = eth_dev_info_get_print_err(portid, &dev_info); 2958 if (ret != 0) 2959 return; 2960 2961 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2962 if (ports[portid].tx_dump_cb[queue]) { 2963 rte_eth_remove_tx_callback(portid, queue, 2964 ports[portid].tx_dump_cb[queue]); 2965 ports[portid].tx_dump_cb[queue] = NULL; 2966 } 2967 } 2968 2969 void 2970 configure_rxtx_dump_callbacks(uint16_t verbose) 2971 { 2972 portid_t portid; 2973 2974 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 2975 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 2976 return; 2977 #endif 2978 2979 RTE_ETH_FOREACH_DEV(portid) 2980 { 2981 if (verbose == 1 || verbose > 2) 2982 add_rx_dump_callbacks(portid); 2983 else 2984 remove_rx_dump_callbacks(portid); 2985 if (verbose >= 2) 2986 add_tx_dump_callbacks(portid); 2987 else 2988 remove_tx_dump_callbacks(portid); 2989 } 2990 } 2991 2992 void 2993 set_verbose_level(uint16_t vb_level) 2994 { 2995 printf("Change verbose level from %u to %u\n", 2996 (unsigned int) verbose_level, (unsigned int) vb_level); 2997 verbose_level = vb_level; 2998 configure_rxtx_dump_callbacks(verbose_level); 2999 } 3000 3001 void 3002 vlan_extend_set(portid_t port_id, int on) 3003 { 3004 int diag; 3005 int vlan_offload; 3006 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3007 3008 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3009 return; 3010 3011 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3012 3013 if (on) { 3014 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 3015 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3016 } else { 3017 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 3018 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3019 } 3020 3021 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3022 if (diag < 0) 3023 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 3024 "diag=%d\n", port_id, on, diag); 3025 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3026 } 3027 3028 void 3029 rx_vlan_strip_set(portid_t port_id, int on) 3030 { 3031 int diag; 3032 int vlan_offload; 3033 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3034 3035 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3036 return; 3037 3038 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3039 3040 if (on) { 3041 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 3042 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3043 } else { 3044 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 3045 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3046 } 3047 3048 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3049 if (diag < 0) 3050 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 3051 "diag=%d\n", port_id, on, diag); 3052 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3053 } 3054 3055 void 3056 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 3057 { 3058 int diag; 3059 3060 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3061 return; 3062 3063 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 3064 if (diag < 0) 3065 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 3066 "diag=%d\n", port_id, queue_id, on, diag); 3067 } 3068 3069 void 3070 rx_vlan_filter_set(portid_t port_id, int on) 3071 { 3072 int diag; 3073 int vlan_offload; 3074 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3075 3076 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3077 return; 3078 3079 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3080 3081 if (on) { 3082 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 3083 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3084 } else { 3085 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 3086 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3087 } 3088 3089 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3090 if (diag < 0) 3091 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 3092 "diag=%d\n", port_id, on, diag); 3093 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3094 } 3095 3096 void 3097 rx_vlan_qinq_strip_set(portid_t port_id, int on) 3098 { 3099 int diag; 3100 int vlan_offload; 3101 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3102 3103 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3104 return; 3105 3106 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3107 3108 if (on) { 3109 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 3110 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3111 } else { 3112 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 3113 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3114 } 3115 3116 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3117 if (diag < 0) 3118 printf("%s(port_pi=%d, on=%d) failed " 3119 "diag=%d\n", __func__, port_id, on, diag); 3120 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3121 } 3122 3123 int 3124 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 3125 { 3126 int diag; 3127 3128 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3129 return 1; 3130 if (vlan_id_is_invalid(vlan_id)) 3131 return 1; 3132 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 3133 if (diag == 0) 3134 return 0; 3135 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 3136 "diag=%d\n", 3137 port_id, vlan_id, on, diag); 3138 return -1; 3139 } 3140 3141 void 3142 rx_vlan_all_filter_set(portid_t port_id, int on) 3143 { 3144 uint16_t vlan_id; 3145 3146 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3147 return; 3148 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 3149 if (rx_vft_set(port_id, vlan_id, on)) 3150 break; 3151 } 3152 } 3153 3154 void 3155 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 3156 { 3157 int diag; 3158 3159 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3160 return; 3161 3162 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 3163 if (diag == 0) 3164 return; 3165 3166 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 3167 "diag=%d\n", 3168 port_id, vlan_type, tp_id, diag); 3169 } 3170 3171 void 3172 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3173 { 3174 struct rte_eth_dev_info dev_info; 3175 int ret; 3176 3177 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3178 return; 3179 if (vlan_id_is_invalid(vlan_id)) 3180 return; 3181 3182 if (ports[port_id].dev_conf.txmode.offloads & 3183 DEV_TX_OFFLOAD_QINQ_INSERT) { 3184 printf("Error, as QinQ has been enabled.\n"); 3185 return; 3186 } 3187 3188 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3189 if (ret != 0) 3190 return; 3191 3192 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3193 printf("Error: vlan insert is not supported by port %d\n", 3194 port_id); 3195 return; 3196 } 3197 3198 tx_vlan_reset(port_id); 3199 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3200 ports[port_id].tx_vlan_id = vlan_id; 3201 } 3202 3203 void 3204 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3205 { 3206 struct rte_eth_dev_info dev_info; 3207 int ret; 3208 3209 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3210 return; 3211 if (vlan_id_is_invalid(vlan_id)) 3212 return; 3213 if (vlan_id_is_invalid(vlan_id_outer)) 3214 return; 3215 3216 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3217 if (ret != 0) 3218 return; 3219 3220 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3221 printf("Error: qinq insert not supported by port %d\n", 3222 port_id); 3223 return; 3224 } 3225 3226 tx_vlan_reset(port_id); 3227 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 3228 DEV_TX_OFFLOAD_QINQ_INSERT); 3229 ports[port_id].tx_vlan_id = vlan_id; 3230 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3231 } 3232 3233 void 3234 tx_vlan_reset(portid_t port_id) 3235 { 3236 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3237 return; 3238 ports[port_id].dev_conf.txmode.offloads &= 3239 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3240 DEV_TX_OFFLOAD_QINQ_INSERT); 3241 ports[port_id].tx_vlan_id = 0; 3242 ports[port_id].tx_vlan_id_outer = 0; 3243 } 3244 3245 void 3246 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3247 { 3248 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3249 return; 3250 3251 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3252 } 3253 3254 void 3255 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3256 { 3257 uint16_t i; 3258 uint8_t existing_mapping_found = 0; 3259 3260 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3261 return; 3262 3263 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3264 return; 3265 3266 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3267 printf("map_value not in required range 0..%d\n", 3268 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3269 return; 3270 } 3271 3272 if (!is_rx) { /*then tx*/ 3273 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3274 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3275 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3276 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3277 existing_mapping_found = 1; 3278 break; 3279 } 3280 } 3281 if (!existing_mapping_found) { /* A new additional mapping... */ 3282 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3283 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3284 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3285 nb_tx_queue_stats_mappings++; 3286 } 3287 } 3288 else { /*rx*/ 3289 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3290 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3291 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3292 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3293 existing_mapping_found = 1; 3294 break; 3295 } 3296 } 3297 if (!existing_mapping_found) { /* A new additional mapping... */ 3298 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3299 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3300 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3301 nb_rx_queue_stats_mappings++; 3302 } 3303 } 3304 } 3305 3306 void 3307 set_xstats_hide_zero(uint8_t on_off) 3308 { 3309 xstats_hide_zero = on_off; 3310 } 3311 3312 static inline void 3313 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3314 { 3315 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3316 3317 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3318 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3319 " tunnel_id: 0x%08x", 3320 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3321 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3322 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3323 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3324 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3325 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3326 3327 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3328 rte_be_to_cpu_16(mask->src_port_mask), 3329 rte_be_to_cpu_16(mask->dst_port_mask)); 3330 3331 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3332 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3333 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3334 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3335 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3336 3337 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3338 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3339 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3340 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3341 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3342 } 3343 3344 printf("\n"); 3345 } 3346 3347 static inline void 3348 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3349 { 3350 struct rte_eth_flex_payload_cfg *cfg; 3351 uint32_t i, j; 3352 3353 for (i = 0; i < flex_conf->nb_payloads; i++) { 3354 cfg = &flex_conf->flex_set[i]; 3355 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3356 printf("\n RAW: "); 3357 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3358 printf("\n L2_PAYLOAD: "); 3359 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3360 printf("\n L3_PAYLOAD: "); 3361 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3362 printf("\n L4_PAYLOAD: "); 3363 else 3364 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3365 for (j = 0; j < num; j++) 3366 printf(" %-5u", cfg->src_offset[j]); 3367 } 3368 printf("\n"); 3369 } 3370 3371 static char * 3372 flowtype_to_str(uint16_t flow_type) 3373 { 3374 struct flow_type_info { 3375 char str[32]; 3376 uint16_t ftype; 3377 }; 3378 3379 uint8_t i; 3380 static struct flow_type_info flowtype_str_table[] = { 3381 {"raw", RTE_ETH_FLOW_RAW}, 3382 {"ipv4", RTE_ETH_FLOW_IPV4}, 3383 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3384 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3385 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3386 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3387 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3388 {"ipv6", RTE_ETH_FLOW_IPV6}, 3389 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3390 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3391 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3392 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3393 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3394 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3395 {"port", RTE_ETH_FLOW_PORT}, 3396 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3397 {"geneve", RTE_ETH_FLOW_GENEVE}, 3398 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3399 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3400 }; 3401 3402 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3403 if (flowtype_str_table[i].ftype == flow_type) 3404 return flowtype_str_table[i].str; 3405 } 3406 3407 return NULL; 3408 } 3409 3410 static inline void 3411 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3412 { 3413 struct rte_eth_fdir_flex_mask *mask; 3414 uint32_t i, j; 3415 char *p; 3416 3417 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3418 mask = &flex_conf->flex_mask[i]; 3419 p = flowtype_to_str(mask->flow_type); 3420 printf("\n %s:\t", p ? p : "unknown"); 3421 for (j = 0; j < num; j++) 3422 printf(" %02x", mask->mask[j]); 3423 } 3424 printf("\n"); 3425 } 3426 3427 static inline void 3428 print_fdir_flow_type(uint32_t flow_types_mask) 3429 { 3430 int i; 3431 char *p; 3432 3433 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3434 if (!(flow_types_mask & (1 << i))) 3435 continue; 3436 p = flowtype_to_str(i); 3437 if (p) 3438 printf(" %s", p); 3439 else 3440 printf(" unknown"); 3441 } 3442 printf("\n"); 3443 } 3444 3445 void 3446 fdir_get_infos(portid_t port_id) 3447 { 3448 struct rte_eth_fdir_stats fdir_stat; 3449 struct rte_eth_fdir_info fdir_info; 3450 int ret; 3451 3452 static const char *fdir_stats_border = "########################"; 3453 3454 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3455 return; 3456 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3457 if (ret < 0) { 3458 printf("\n FDIR is not supported on port %-2d\n", 3459 port_id); 3460 return; 3461 } 3462 3463 memset(&fdir_info, 0, sizeof(fdir_info)); 3464 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3465 RTE_ETH_FILTER_INFO, &fdir_info); 3466 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3467 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3468 RTE_ETH_FILTER_STATS, &fdir_stat); 3469 printf("\n %s FDIR infos for port %-2d %s\n", 3470 fdir_stats_border, port_id, fdir_stats_border); 3471 printf(" MODE: "); 3472 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3473 printf(" PERFECT\n"); 3474 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3475 printf(" PERFECT-MAC-VLAN\n"); 3476 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3477 printf(" PERFECT-TUNNEL\n"); 3478 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3479 printf(" SIGNATURE\n"); 3480 else 3481 printf(" DISABLE\n"); 3482 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3483 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3484 printf(" SUPPORTED FLOW TYPE: "); 3485 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3486 } 3487 printf(" FLEX PAYLOAD INFO:\n"); 3488 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3489 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3490 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3491 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3492 fdir_info.flex_payload_unit, 3493 fdir_info.max_flex_payload_segment_num, 3494 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3495 printf(" MASK: "); 3496 print_fdir_mask(&fdir_info.mask); 3497 if (fdir_info.flex_conf.nb_payloads > 0) { 3498 printf(" FLEX PAYLOAD SRC OFFSET:"); 3499 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3500 } 3501 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3502 printf(" FLEX MASK CFG:"); 3503 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3504 } 3505 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3506 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3507 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3508 fdir_info.guarant_spc, fdir_info.best_spc); 3509 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3510 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3511 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3512 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3513 fdir_stat.collision, fdir_stat.free, 3514 fdir_stat.maxhash, fdir_stat.maxlen, 3515 fdir_stat.add, fdir_stat.remove, 3516 fdir_stat.f_add, fdir_stat.f_remove); 3517 printf(" %s############################%s\n", 3518 fdir_stats_border, fdir_stats_border); 3519 } 3520 3521 void 3522 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3523 { 3524 struct rte_port *port; 3525 struct rte_eth_fdir_flex_conf *flex_conf; 3526 int i, idx = 0; 3527 3528 port = &ports[port_id]; 3529 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3530 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3531 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3532 idx = i; 3533 break; 3534 } 3535 } 3536 if (i >= RTE_ETH_FLOW_MAX) { 3537 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3538 idx = flex_conf->nb_flexmasks; 3539 flex_conf->nb_flexmasks++; 3540 } else { 3541 printf("The flex mask table is full. Can not set flex" 3542 " mask for flow_type(%u).", cfg->flow_type); 3543 return; 3544 } 3545 } 3546 rte_memcpy(&flex_conf->flex_mask[idx], 3547 cfg, 3548 sizeof(struct rte_eth_fdir_flex_mask)); 3549 } 3550 3551 void 3552 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3553 { 3554 struct rte_port *port; 3555 struct rte_eth_fdir_flex_conf *flex_conf; 3556 int i, idx = 0; 3557 3558 port = &ports[port_id]; 3559 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3560 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3561 if (cfg->type == flex_conf->flex_set[i].type) { 3562 idx = i; 3563 break; 3564 } 3565 } 3566 if (i >= RTE_ETH_PAYLOAD_MAX) { 3567 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3568 idx = flex_conf->nb_payloads; 3569 flex_conf->nb_payloads++; 3570 } else { 3571 printf("The flex payload table is full. Can not set" 3572 " flex payload for type(%u).", cfg->type); 3573 return; 3574 } 3575 } 3576 rte_memcpy(&flex_conf->flex_set[idx], 3577 cfg, 3578 sizeof(struct rte_eth_flex_payload_cfg)); 3579 3580 } 3581 3582 void 3583 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3584 { 3585 #ifdef RTE_LIBRTE_IXGBE_PMD 3586 int diag; 3587 3588 if (is_rx) 3589 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3590 else 3591 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3592 3593 if (diag == 0) 3594 return; 3595 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3596 is_rx ? "rx" : "tx", port_id, diag); 3597 return; 3598 #endif 3599 printf("VF %s setting not supported for port %d\n", 3600 is_rx ? "Rx" : "Tx", port_id); 3601 RTE_SET_USED(vf); 3602 RTE_SET_USED(on); 3603 } 3604 3605 int 3606 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3607 { 3608 int diag; 3609 struct rte_eth_link link; 3610 int ret; 3611 3612 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3613 return 1; 3614 ret = eth_link_get_nowait_print_err(port_id, &link); 3615 if (ret < 0) 3616 return 1; 3617 if (rate > link.link_speed) { 3618 printf("Invalid rate value:%u bigger than link speed: %u\n", 3619 rate, link.link_speed); 3620 return 1; 3621 } 3622 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3623 if (diag == 0) 3624 return diag; 3625 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3626 port_id, diag); 3627 return diag; 3628 } 3629 3630 int 3631 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3632 { 3633 int diag = -ENOTSUP; 3634 3635 RTE_SET_USED(vf); 3636 RTE_SET_USED(rate); 3637 RTE_SET_USED(q_msk); 3638 3639 #ifdef RTE_LIBRTE_IXGBE_PMD 3640 if (diag == -ENOTSUP) 3641 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3642 q_msk); 3643 #endif 3644 #ifdef RTE_LIBRTE_BNXT_PMD 3645 if (diag == -ENOTSUP) 3646 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3647 #endif 3648 if (diag == 0) 3649 return diag; 3650 3651 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3652 port_id, diag); 3653 return diag; 3654 } 3655 3656 /* 3657 * Functions to manage the set of filtered Multicast MAC addresses. 3658 * 3659 * A pool of filtered multicast MAC addresses is associated with each port. 3660 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3661 * The address of the pool and the number of valid multicast MAC addresses 3662 * recorded in the pool are stored in the fields "mc_addr_pool" and 3663 * "mc_addr_nb" of the "rte_port" data structure. 3664 * 3665 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3666 * to be supplied a contiguous array of multicast MAC addresses. 3667 * To comply with this constraint, the set of multicast addresses recorded 3668 * into the pool are systematically compacted at the beginning of the pool. 3669 * Hence, when a multicast address is removed from the pool, all following 3670 * addresses, if any, are copied back to keep the set contiguous. 3671 */ 3672 #define MCAST_POOL_INC 32 3673 3674 static int 3675 mcast_addr_pool_extend(struct rte_port *port) 3676 { 3677 struct rte_ether_addr *mc_pool; 3678 size_t mc_pool_size; 3679 3680 /* 3681 * If a free entry is available at the end of the pool, just 3682 * increment the number of recorded multicast addresses. 3683 */ 3684 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3685 port->mc_addr_nb++; 3686 return 0; 3687 } 3688 3689 /* 3690 * [re]allocate a pool with MCAST_POOL_INC more entries. 3691 * The previous test guarantees that port->mc_addr_nb is a multiple 3692 * of MCAST_POOL_INC. 3693 */ 3694 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 3695 MCAST_POOL_INC); 3696 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 3697 mc_pool_size); 3698 if (mc_pool == NULL) { 3699 printf("allocation of pool of %u multicast addresses failed\n", 3700 port->mc_addr_nb + MCAST_POOL_INC); 3701 return -ENOMEM; 3702 } 3703 3704 port->mc_addr_pool = mc_pool; 3705 port->mc_addr_nb++; 3706 return 0; 3707 3708 } 3709 3710 static void 3711 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3712 { 3713 port->mc_addr_nb--; 3714 if (addr_idx == port->mc_addr_nb) { 3715 /* No need to recompact the set of multicast addressses. */ 3716 if (port->mc_addr_nb == 0) { 3717 /* free the pool of multicast addresses. */ 3718 free(port->mc_addr_pool); 3719 port->mc_addr_pool = NULL; 3720 } 3721 return; 3722 } 3723 memmove(&port->mc_addr_pool[addr_idx], 3724 &port->mc_addr_pool[addr_idx + 1], 3725 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 3726 } 3727 3728 static void 3729 eth_port_multicast_addr_list_set(portid_t port_id) 3730 { 3731 struct rte_port *port; 3732 int diag; 3733 3734 port = &ports[port_id]; 3735 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3736 port->mc_addr_nb); 3737 if (diag == 0) 3738 return; 3739 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3740 port->mc_addr_nb, port_id, -diag); 3741 } 3742 3743 void 3744 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 3745 { 3746 struct rte_port *port; 3747 uint32_t i; 3748 3749 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3750 return; 3751 3752 port = &ports[port_id]; 3753 3754 /* 3755 * Check that the added multicast MAC address is not already recorded 3756 * in the pool of multicast addresses. 3757 */ 3758 for (i = 0; i < port->mc_addr_nb; i++) { 3759 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3760 printf("multicast address already filtered by port\n"); 3761 return; 3762 } 3763 } 3764 3765 if (mcast_addr_pool_extend(port) != 0) 3766 return; 3767 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3768 eth_port_multicast_addr_list_set(port_id); 3769 } 3770 3771 void 3772 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 3773 { 3774 struct rte_port *port; 3775 uint32_t i; 3776 3777 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3778 return; 3779 3780 port = &ports[port_id]; 3781 3782 /* 3783 * Search the pool of multicast MAC addresses for the removed address. 3784 */ 3785 for (i = 0; i < port->mc_addr_nb; i++) { 3786 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3787 break; 3788 } 3789 if (i == port->mc_addr_nb) { 3790 printf("multicast address not filtered by port %d\n", port_id); 3791 return; 3792 } 3793 3794 mcast_addr_pool_remove(port, i); 3795 eth_port_multicast_addr_list_set(port_id); 3796 } 3797 3798 void 3799 port_dcb_info_display(portid_t port_id) 3800 { 3801 struct rte_eth_dcb_info dcb_info; 3802 uint16_t i; 3803 int ret; 3804 static const char *border = "================"; 3805 3806 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3807 return; 3808 3809 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3810 if (ret) { 3811 printf("\n Failed to get dcb infos on port %-2d\n", 3812 port_id); 3813 return; 3814 } 3815 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3816 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3817 printf("\n TC : "); 3818 for (i = 0; i < dcb_info.nb_tcs; i++) 3819 printf("\t%4d", i); 3820 printf("\n Priority : "); 3821 for (i = 0; i < dcb_info.nb_tcs; i++) 3822 printf("\t%4d", dcb_info.prio_tc[i]); 3823 printf("\n BW percent :"); 3824 for (i = 0; i < dcb_info.nb_tcs; i++) 3825 printf("\t%4d%%", dcb_info.tc_bws[i]); 3826 printf("\n RXQ base : "); 3827 for (i = 0; i < dcb_info.nb_tcs; i++) 3828 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3829 printf("\n RXQ number :"); 3830 for (i = 0; i < dcb_info.nb_tcs; i++) 3831 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3832 printf("\n TXQ base : "); 3833 for (i = 0; i < dcb_info.nb_tcs; i++) 3834 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3835 printf("\n TXQ number :"); 3836 for (i = 0; i < dcb_info.nb_tcs; i++) 3837 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3838 printf("\n"); 3839 } 3840 3841 uint8_t * 3842 open_file(const char *file_path, uint32_t *size) 3843 { 3844 int fd = open(file_path, O_RDONLY); 3845 off_t pkg_size; 3846 uint8_t *buf = NULL; 3847 int ret = 0; 3848 struct stat st_buf; 3849 3850 if (size) 3851 *size = 0; 3852 3853 if (fd == -1) { 3854 printf("%s: Failed to open %s\n", __func__, file_path); 3855 return buf; 3856 } 3857 3858 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3859 close(fd); 3860 printf("%s: File operations failed\n", __func__); 3861 return buf; 3862 } 3863 3864 pkg_size = st_buf.st_size; 3865 if (pkg_size < 0) { 3866 close(fd); 3867 printf("%s: File operations failed\n", __func__); 3868 return buf; 3869 } 3870 3871 buf = (uint8_t *)malloc(pkg_size); 3872 if (!buf) { 3873 close(fd); 3874 printf("%s: Failed to malloc memory\n", __func__); 3875 return buf; 3876 } 3877 3878 ret = read(fd, buf, pkg_size); 3879 if (ret < 0) { 3880 close(fd); 3881 printf("%s: File read operation failed\n", __func__); 3882 close_file(buf); 3883 return NULL; 3884 } 3885 3886 if (size) 3887 *size = pkg_size; 3888 3889 close(fd); 3890 3891 return buf; 3892 } 3893 3894 int 3895 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3896 { 3897 FILE *fh = fopen(file_path, "wb"); 3898 3899 if (fh == NULL) { 3900 printf("%s: Failed to open %s\n", __func__, file_path); 3901 return -1; 3902 } 3903 3904 if (fwrite(buf, 1, size, fh) != size) { 3905 fclose(fh); 3906 printf("%s: File write operation failed\n", __func__); 3907 return -1; 3908 } 3909 3910 fclose(fh); 3911 3912 return 0; 3913 } 3914 3915 int 3916 close_file(uint8_t *buf) 3917 { 3918 if (buf) { 3919 free((void *)buf); 3920 return 0; 3921 } 3922 3923 return -1; 3924 } 3925 3926 void 3927 port_queue_region_info_display(portid_t port_id, void *buf) 3928 { 3929 #ifdef RTE_LIBRTE_I40E_PMD 3930 uint16_t i, j; 3931 struct rte_pmd_i40e_queue_regions *info = 3932 (struct rte_pmd_i40e_queue_regions *)buf; 3933 static const char *queue_region_info_stats_border = "-------"; 3934 3935 if (!info->queue_region_number) 3936 printf("there is no region has been set before"); 3937 3938 printf("\n %s All queue region info for port=%2d %s", 3939 queue_region_info_stats_border, port_id, 3940 queue_region_info_stats_border); 3941 printf("\n queue_region_number: %-14u \n", 3942 info->queue_region_number); 3943 3944 for (i = 0; i < info->queue_region_number; i++) { 3945 printf("\n region_id: %-14u queue_number: %-14u " 3946 "queue_start_index: %-14u \n", 3947 info->region[i].region_id, 3948 info->region[i].queue_num, 3949 info->region[i].queue_start_index); 3950 3951 printf(" user_priority_num is %-14u :", 3952 info->region[i].user_priority_num); 3953 for (j = 0; j < info->region[i].user_priority_num; j++) 3954 printf(" %-14u ", info->region[i].user_priority[j]); 3955 3956 printf("\n flowtype_num is %-14u :", 3957 info->region[i].flowtype_num); 3958 for (j = 0; j < info->region[i].flowtype_num; j++) 3959 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3960 } 3961 #else 3962 RTE_SET_USED(port_id); 3963 RTE_SET_USED(buf); 3964 #endif 3965 3966 printf("\n\n"); 3967 } 3968