1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <rte_config.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 const struct rss_type_info rss_type_table[] = { 77 { "all", ETH_RSS_IP | ETH_RSS_TCP | 78 ETH_RSS_UDP | ETH_RSS_SCTP | 79 ETH_RSS_L2_PAYLOAD }, 80 { "none", 0 }, 81 { "ipv4", ETH_RSS_IPV4 }, 82 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 83 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 84 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 85 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 86 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 87 { "ipv6", ETH_RSS_IPV6 }, 88 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 89 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 90 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 91 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 92 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 93 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 94 { "ipv6-ex", ETH_RSS_IPV6_EX }, 95 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 96 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 97 { "port", ETH_RSS_PORT }, 98 { "vxlan", ETH_RSS_VXLAN }, 99 { "geneve", ETH_RSS_GENEVE }, 100 { "nvgre", ETH_RSS_NVGRE }, 101 { "ip", ETH_RSS_IP }, 102 { "udp", ETH_RSS_UDP }, 103 { "tcp", ETH_RSS_TCP }, 104 { "sctp", ETH_RSS_SCTP }, 105 { "tunnel", ETH_RSS_TUNNEL }, 106 { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, 107 { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, 108 { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, 109 { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, 110 { NULL, 0 }, 111 }; 112 113 static void 114 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 115 { 116 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 117 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 118 printf("%s%s", name, buf); 119 } 120 121 void 122 nic_stats_display(portid_t port_id) 123 { 124 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 125 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 126 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 127 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 128 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 129 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 130 diff_cycles; 131 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 132 struct rte_eth_stats stats; 133 struct rte_port *port = &ports[port_id]; 134 uint8_t i; 135 136 static const char *nic_stats_border = "########################"; 137 138 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 139 print_valid_ports(); 140 return; 141 } 142 rte_eth_stats_get(port_id, &stats); 143 printf("\n %s NIC statistics for port %-2d %s\n", 144 nic_stats_border, port_id, nic_stats_border); 145 146 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 147 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 148 "%-"PRIu64"\n", 149 stats.ipackets, stats.imissed, stats.ibytes); 150 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 151 printf(" RX-nombuf: %-10"PRIu64"\n", 152 stats.rx_nombuf); 153 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 154 "%-"PRIu64"\n", 155 stats.opackets, stats.oerrors, stats.obytes); 156 } 157 else { 158 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 159 " RX-bytes: %10"PRIu64"\n", 160 stats.ipackets, stats.ierrors, stats.ibytes); 161 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 162 printf(" RX-nombuf: %10"PRIu64"\n", 163 stats.rx_nombuf); 164 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 165 " TX-bytes: %10"PRIu64"\n", 166 stats.opackets, stats.oerrors, stats.obytes); 167 } 168 169 if (port->rx_queue_stats_mapping_enabled) { 170 printf("\n"); 171 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 172 printf(" Stats reg %2d RX-packets: %10"PRIu64 173 " RX-errors: %10"PRIu64 174 " RX-bytes: %10"PRIu64"\n", 175 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 176 } 177 } 178 if (port->tx_queue_stats_mapping_enabled) { 179 printf("\n"); 180 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 181 printf(" Stats reg %2d TX-packets: %10"PRIu64 182 " TX-bytes: %10"PRIu64"\n", 183 i, stats.q_opackets[i], stats.q_obytes[i]); 184 } 185 } 186 187 diff_cycles = prev_cycles[port_id]; 188 prev_cycles[port_id] = rte_rdtsc(); 189 if (diff_cycles > 0) 190 diff_cycles = prev_cycles[port_id] - diff_cycles; 191 192 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 193 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 194 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 195 (stats.opackets - prev_pkts_tx[port_id]) : 0; 196 prev_pkts_rx[port_id] = stats.ipackets; 197 prev_pkts_tx[port_id] = stats.opackets; 198 mpps_rx = diff_cycles > 0 ? 199 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 200 mpps_tx = diff_cycles > 0 ? 201 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 202 203 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 204 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 205 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 206 (stats.obytes - prev_bytes_tx[port_id]) : 0; 207 prev_bytes_rx[port_id] = stats.ibytes; 208 prev_bytes_tx[port_id] = stats.obytes; 209 mbps_rx = diff_cycles > 0 ? 210 diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0; 211 mbps_tx = diff_cycles > 0 ? 212 diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0; 213 214 printf("\n Throughput (since last show)\n"); 215 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 216 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 217 mpps_tx, mbps_tx * 8); 218 219 printf(" %s############################%s\n", 220 nic_stats_border, nic_stats_border); 221 } 222 223 void 224 nic_stats_clear(portid_t port_id) 225 { 226 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 227 print_valid_ports(); 228 return; 229 } 230 rte_eth_stats_reset(port_id); 231 printf("\n NIC statistics for port %d cleared\n", port_id); 232 } 233 234 void 235 nic_xstats_display(portid_t port_id) 236 { 237 struct rte_eth_xstat *xstats; 238 int cnt_xstats, idx_xstat; 239 struct rte_eth_xstat_name *xstats_names; 240 241 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 242 print_valid_ports(); 243 return; 244 } 245 printf("###### NIC extended statistics for port %-2d\n", port_id); 246 if (!rte_eth_dev_is_valid_port(port_id)) { 247 printf("Error: Invalid port number %i\n", port_id); 248 return; 249 } 250 251 /* Get count */ 252 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 253 if (cnt_xstats < 0) { 254 printf("Error: Cannot get count of xstats\n"); 255 return; 256 } 257 258 /* Get id-name lookup table */ 259 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 260 if (xstats_names == NULL) { 261 printf("Cannot allocate memory for xstats lookup\n"); 262 return; 263 } 264 if (cnt_xstats != rte_eth_xstats_get_names( 265 port_id, xstats_names, cnt_xstats)) { 266 printf("Error: Cannot get xstats lookup\n"); 267 free(xstats_names); 268 return; 269 } 270 271 /* Get stats themselves */ 272 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 273 if (xstats == NULL) { 274 printf("Cannot allocate memory for xstats\n"); 275 free(xstats_names); 276 return; 277 } 278 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 279 printf("Error: Unable to get xstats\n"); 280 free(xstats_names); 281 free(xstats); 282 return; 283 } 284 285 /* Display xstats */ 286 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 287 if (xstats_hide_zero && !xstats[idx_xstat].value) 288 continue; 289 printf("%s: %"PRIu64"\n", 290 xstats_names[idx_xstat].name, 291 xstats[idx_xstat].value); 292 } 293 free(xstats_names); 294 free(xstats); 295 } 296 297 void 298 nic_xstats_clear(portid_t port_id) 299 { 300 int ret; 301 302 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 303 print_valid_ports(); 304 return; 305 } 306 ret = rte_eth_xstats_reset(port_id); 307 if (ret != 0) { 308 printf("%s: Error: failed to reset xstats (port %u): %s", 309 __func__, port_id, strerror(ret)); 310 } 311 } 312 313 void 314 nic_stats_mapping_display(portid_t port_id) 315 { 316 struct rte_port *port = &ports[port_id]; 317 uint16_t i; 318 319 static const char *nic_stats_mapping_border = "########################"; 320 321 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 322 print_valid_ports(); 323 return; 324 } 325 326 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 327 printf("Port id %d - either does not support queue statistic mapping or" 328 " no queue statistic mapping set\n", port_id); 329 return; 330 } 331 332 printf("\n %s NIC statistics mapping for port %-2d %s\n", 333 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 334 335 if (port->rx_queue_stats_mapping_enabled) { 336 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 337 if (rx_queue_stats_mappings[i].port_id == port_id) { 338 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 339 rx_queue_stats_mappings[i].queue_id, 340 rx_queue_stats_mappings[i].stats_counter_id); 341 } 342 } 343 printf("\n"); 344 } 345 346 347 if (port->tx_queue_stats_mapping_enabled) { 348 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 349 if (tx_queue_stats_mappings[i].port_id == port_id) { 350 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 351 tx_queue_stats_mappings[i].queue_id, 352 tx_queue_stats_mappings[i].stats_counter_id); 353 } 354 } 355 } 356 357 printf(" %s####################################%s\n", 358 nic_stats_mapping_border, nic_stats_mapping_border); 359 } 360 361 void 362 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 363 { 364 struct rte_eth_burst_mode mode; 365 struct rte_eth_rxq_info qinfo; 366 int32_t rc; 367 static const char *info_border = "*********************"; 368 369 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 370 if (rc != 0) { 371 printf("Failed to retrieve information for port: %u, " 372 "RX queue: %hu\nerror desc: %s(%d)\n", 373 port_id, queue_id, strerror(-rc), rc); 374 return; 375 } 376 377 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 378 info_border, port_id, queue_id, info_border); 379 380 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 381 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 382 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 383 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 384 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 385 printf("\nRX drop packets: %s", 386 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 387 printf("\nRX deferred start: %s", 388 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 389 printf("\nRX scattered packets: %s", 390 (qinfo.scattered_rx != 0) ? "on" : "off"); 391 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 392 393 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 394 printf("\nBurst mode: %s%s", 395 mode.info, 396 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 397 " (per queue)" : ""); 398 399 printf("\n"); 400 } 401 402 void 403 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 404 { 405 struct rte_eth_burst_mode mode; 406 struct rte_eth_txq_info qinfo; 407 int32_t rc; 408 static const char *info_border = "*********************"; 409 410 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 411 if (rc != 0) { 412 printf("Failed to retrieve information for port: %u, " 413 "TX queue: %hu\nerror desc: %s(%d)\n", 414 port_id, queue_id, strerror(-rc), rc); 415 return; 416 } 417 418 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 419 info_border, port_id, queue_id, info_border); 420 421 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 422 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 423 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 424 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 425 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 426 printf("\nTX deferred start: %s", 427 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 428 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 429 430 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 431 printf("\nBurst mode: %s%s", 432 mode.info, 433 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 434 " (per queue)" : ""); 435 436 printf("\n"); 437 } 438 439 static int bus_match_all(const struct rte_bus *bus, const void *data) 440 { 441 RTE_SET_USED(bus); 442 RTE_SET_USED(data); 443 return 0; 444 } 445 446 void 447 device_infos_display(const char *identifier) 448 { 449 static const char *info_border = "*********************"; 450 struct rte_bus *start = NULL, *next; 451 struct rte_dev_iterator dev_iter; 452 char name[RTE_ETH_NAME_MAX_LEN]; 453 struct rte_ether_addr mac_addr; 454 struct rte_device *dev; 455 struct rte_devargs da; 456 portid_t port_id; 457 char devstr[128]; 458 459 memset(&da, 0, sizeof(da)); 460 if (!identifier) 461 goto skip_parse; 462 463 if (rte_devargs_parsef(&da, "%s", identifier)) { 464 printf("cannot parse identifier\n"); 465 if (da.args) 466 free(da.args); 467 return; 468 } 469 470 skip_parse: 471 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 472 473 start = next; 474 if (identifier && da.bus != next) 475 continue; 476 477 /* Skip buses that don't have iterate method */ 478 if (!next->dev_iterate) 479 continue; 480 481 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 482 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 483 484 if (!dev->driver) 485 continue; 486 /* Check for matching device if identifier is present */ 487 if (identifier && 488 strncmp(da.name, dev->name, strlen(dev->name))) 489 continue; 490 printf("\n%s Infos for device %s %s\n", 491 info_border, dev->name, info_border); 492 printf("Bus name: %s", dev->bus->name); 493 printf("\nDriver name: %s", dev->driver->name); 494 printf("\nDevargs: %s", 495 dev->devargs ? dev->devargs->args : ""); 496 printf("\nConnect to socket: %d", dev->numa_node); 497 printf("\n"); 498 499 /* List ports with matching device name */ 500 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 501 printf("\n\tPort id: %-2d", port_id); 502 if (eth_macaddr_get_print_err(port_id, 503 &mac_addr) == 0) 504 print_ethaddr("\n\tMAC address: ", 505 &mac_addr); 506 rte_eth_dev_get_name_by_port(port_id, name); 507 printf("\n\tDevice name: %s", name); 508 printf("\n"); 509 } 510 } 511 }; 512 } 513 514 void 515 port_infos_display(portid_t port_id) 516 { 517 struct rte_port *port; 518 struct rte_ether_addr mac_addr; 519 struct rte_eth_link link; 520 struct rte_eth_dev_info dev_info; 521 int vlan_offload; 522 struct rte_mempool * mp; 523 static const char *info_border = "*********************"; 524 uint16_t mtu; 525 char name[RTE_ETH_NAME_MAX_LEN]; 526 int ret; 527 528 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 529 print_valid_ports(); 530 return; 531 } 532 port = &ports[port_id]; 533 ret = eth_link_get_nowait_print_err(port_id, &link); 534 if (ret < 0) 535 return; 536 537 ret = eth_dev_info_get_print_err(port_id, &dev_info); 538 if (ret != 0) 539 return; 540 541 printf("\n%s Infos for port %-2d %s\n", 542 info_border, port_id, info_border); 543 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 544 print_ethaddr("MAC address: ", &mac_addr); 545 rte_eth_dev_get_name_by_port(port_id, name); 546 printf("\nDevice name: %s", name); 547 printf("\nDriver name: %s", dev_info.driver_name); 548 if (dev_info.device->devargs && dev_info.device->devargs->args) 549 printf("\nDevargs: %s", dev_info.device->devargs->args); 550 printf("\nConnect to socket: %u", port->socket_id); 551 552 if (port_numa[port_id] != NUMA_NO_CONFIG) { 553 mp = mbuf_pool_find(port_numa[port_id]); 554 if (mp) 555 printf("\nmemory allocation on the socket: %d", 556 port_numa[port_id]); 557 } else 558 printf("\nmemory allocation on the socket: %u",port->socket_id); 559 560 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 561 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 562 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 563 ("full-duplex") : ("half-duplex")); 564 565 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 566 printf("MTU: %u\n", mtu); 567 568 printf("Promiscuous mode: %s\n", 569 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 570 printf("Allmulticast mode: %s\n", 571 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 572 printf("Maximum number of MAC addresses: %u\n", 573 (unsigned int)(port->dev_info.max_mac_addrs)); 574 printf("Maximum number of MAC addresses of hash filtering: %u\n", 575 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 576 577 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 578 if (vlan_offload >= 0){ 579 printf("VLAN offload: \n"); 580 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 581 printf(" strip on, "); 582 else 583 printf(" strip off, "); 584 585 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 586 printf("filter on, "); 587 else 588 printf("filter off, "); 589 590 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 591 printf("extend on, "); 592 else 593 printf("extend off, "); 594 595 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 596 printf("qinq strip on\n"); 597 else 598 printf("qinq strip off\n"); 599 } 600 601 if (dev_info.hash_key_size > 0) 602 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 603 if (dev_info.reta_size > 0) 604 printf("Redirection table size: %u\n", dev_info.reta_size); 605 if (!dev_info.flow_type_rss_offloads) 606 printf("No RSS offload flow type is supported.\n"); 607 else { 608 uint16_t i; 609 char *p; 610 611 printf("Supported RSS offload flow types:\n"); 612 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 613 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 614 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 615 continue; 616 p = flowtype_to_str(i); 617 if (p) 618 printf(" %s\n", p); 619 else 620 printf(" user defined %d\n", i); 621 } 622 } 623 624 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 625 printf("Maximum configurable length of RX packet: %u\n", 626 dev_info.max_rx_pktlen); 627 printf("Maximum configurable size of LRO aggregated packet: %u\n", 628 dev_info.max_lro_pkt_size); 629 if (dev_info.max_vfs) 630 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 631 if (dev_info.max_vmdq_pools) 632 printf("Maximum number of VMDq pools: %u\n", 633 dev_info.max_vmdq_pools); 634 635 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 636 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 637 printf("Max possible number of RXDs per queue: %hu\n", 638 dev_info.rx_desc_lim.nb_max); 639 printf("Min possible number of RXDs per queue: %hu\n", 640 dev_info.rx_desc_lim.nb_min); 641 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 642 643 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 644 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 645 printf("Max possible number of TXDs per queue: %hu\n", 646 dev_info.tx_desc_lim.nb_max); 647 printf("Min possible number of TXDs per queue: %hu\n", 648 dev_info.tx_desc_lim.nb_min); 649 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 650 printf("Max segment number per packet: %hu\n", 651 dev_info.tx_desc_lim.nb_seg_max); 652 printf("Max segment number per MTU/TSO: %hu\n", 653 dev_info.tx_desc_lim.nb_mtu_seg_max); 654 655 /* Show switch info only if valid switch domain and port id is set */ 656 if (dev_info.switch_info.domain_id != 657 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 658 if (dev_info.switch_info.name) 659 printf("Switch name: %s\n", dev_info.switch_info.name); 660 661 printf("Switch domain Id: %u\n", 662 dev_info.switch_info.domain_id); 663 printf("Switch Port Id: %u\n", 664 dev_info.switch_info.port_id); 665 } 666 } 667 668 void 669 port_summary_header_display(void) 670 { 671 uint16_t port_number; 672 673 port_number = rte_eth_dev_count_avail(); 674 printf("Number of available ports: %i\n", port_number); 675 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 676 "Driver", "Status", "Link"); 677 } 678 679 void 680 port_summary_display(portid_t port_id) 681 { 682 struct rte_ether_addr mac_addr; 683 struct rte_eth_link link; 684 struct rte_eth_dev_info dev_info; 685 char name[RTE_ETH_NAME_MAX_LEN]; 686 int ret; 687 688 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 689 print_valid_ports(); 690 return; 691 } 692 693 ret = eth_link_get_nowait_print_err(port_id, &link); 694 if (ret < 0) 695 return; 696 697 ret = eth_dev_info_get_print_err(port_id, &dev_info); 698 if (ret != 0) 699 return; 700 701 rte_eth_dev_get_name_by_port(port_id, name); 702 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 703 if (ret != 0) 704 return; 705 706 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n", 707 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 708 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 709 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 710 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 711 (unsigned int) link.link_speed); 712 } 713 714 void 715 port_offload_cap_display(portid_t port_id) 716 { 717 struct rte_eth_dev_info dev_info; 718 static const char *info_border = "************"; 719 int ret; 720 721 if (port_id_is_invalid(port_id, ENABLED_WARN)) 722 return; 723 724 ret = eth_dev_info_get_print_err(port_id, &dev_info); 725 if (ret != 0) 726 return; 727 728 printf("\n%s Port %d supported offload features: %s\n", 729 info_border, port_id, info_border); 730 731 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 732 printf("VLAN stripped: "); 733 if (ports[port_id].dev_conf.rxmode.offloads & 734 DEV_RX_OFFLOAD_VLAN_STRIP) 735 printf("on\n"); 736 else 737 printf("off\n"); 738 } 739 740 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 741 printf("Double VLANs stripped: "); 742 if (ports[port_id].dev_conf.rxmode.offloads & 743 DEV_RX_OFFLOAD_QINQ_STRIP) 744 printf("on\n"); 745 else 746 printf("off\n"); 747 } 748 749 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 750 printf("RX IPv4 checksum: "); 751 if (ports[port_id].dev_conf.rxmode.offloads & 752 DEV_RX_OFFLOAD_IPV4_CKSUM) 753 printf("on\n"); 754 else 755 printf("off\n"); 756 } 757 758 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 759 printf("RX UDP checksum: "); 760 if (ports[port_id].dev_conf.rxmode.offloads & 761 DEV_RX_OFFLOAD_UDP_CKSUM) 762 printf("on\n"); 763 else 764 printf("off\n"); 765 } 766 767 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 768 printf("RX TCP checksum: "); 769 if (ports[port_id].dev_conf.rxmode.offloads & 770 DEV_RX_OFFLOAD_TCP_CKSUM) 771 printf("on\n"); 772 else 773 printf("off\n"); 774 } 775 776 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 777 printf("RX SCTP checksum: "); 778 if (ports[port_id].dev_conf.rxmode.offloads & 779 DEV_RX_OFFLOAD_SCTP_CKSUM) 780 printf("on\n"); 781 else 782 printf("off\n"); 783 } 784 785 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 786 printf("RX Outer IPv4 checksum: "); 787 if (ports[port_id].dev_conf.rxmode.offloads & 788 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 789 printf("on\n"); 790 else 791 printf("off\n"); 792 } 793 794 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 795 printf("RX Outer UDP checksum: "); 796 if (ports[port_id].dev_conf.rxmode.offloads & 797 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 798 printf("on\n"); 799 else 800 printf("off\n"); 801 } 802 803 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 804 printf("Large receive offload: "); 805 if (ports[port_id].dev_conf.rxmode.offloads & 806 DEV_RX_OFFLOAD_TCP_LRO) 807 printf("on\n"); 808 else 809 printf("off\n"); 810 } 811 812 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 813 printf("HW timestamp: "); 814 if (ports[port_id].dev_conf.rxmode.offloads & 815 DEV_RX_OFFLOAD_TIMESTAMP) 816 printf("on\n"); 817 else 818 printf("off\n"); 819 } 820 821 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 822 printf("Rx Keep CRC: "); 823 if (ports[port_id].dev_conf.rxmode.offloads & 824 DEV_RX_OFFLOAD_KEEP_CRC) 825 printf("on\n"); 826 else 827 printf("off\n"); 828 } 829 830 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 831 printf("RX offload security: "); 832 if (ports[port_id].dev_conf.rxmode.offloads & 833 DEV_RX_OFFLOAD_SECURITY) 834 printf("on\n"); 835 else 836 printf("off\n"); 837 } 838 839 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 840 printf("VLAN insert: "); 841 if (ports[port_id].dev_conf.txmode.offloads & 842 DEV_TX_OFFLOAD_VLAN_INSERT) 843 printf("on\n"); 844 else 845 printf("off\n"); 846 } 847 848 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 849 printf("Double VLANs insert: "); 850 if (ports[port_id].dev_conf.txmode.offloads & 851 DEV_TX_OFFLOAD_QINQ_INSERT) 852 printf("on\n"); 853 else 854 printf("off\n"); 855 } 856 857 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 858 printf("TX IPv4 checksum: "); 859 if (ports[port_id].dev_conf.txmode.offloads & 860 DEV_TX_OFFLOAD_IPV4_CKSUM) 861 printf("on\n"); 862 else 863 printf("off\n"); 864 } 865 866 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 867 printf("TX UDP checksum: "); 868 if (ports[port_id].dev_conf.txmode.offloads & 869 DEV_TX_OFFLOAD_UDP_CKSUM) 870 printf("on\n"); 871 else 872 printf("off\n"); 873 } 874 875 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 876 printf("TX TCP checksum: "); 877 if (ports[port_id].dev_conf.txmode.offloads & 878 DEV_TX_OFFLOAD_TCP_CKSUM) 879 printf("on\n"); 880 else 881 printf("off\n"); 882 } 883 884 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 885 printf("TX SCTP checksum: "); 886 if (ports[port_id].dev_conf.txmode.offloads & 887 DEV_TX_OFFLOAD_SCTP_CKSUM) 888 printf("on\n"); 889 else 890 printf("off\n"); 891 } 892 893 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 894 printf("TX Outer IPv4 checksum: "); 895 if (ports[port_id].dev_conf.txmode.offloads & 896 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 897 printf("on\n"); 898 else 899 printf("off\n"); 900 } 901 902 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 903 printf("TX TCP segmentation: "); 904 if (ports[port_id].dev_conf.txmode.offloads & 905 DEV_TX_OFFLOAD_TCP_TSO) 906 printf("on\n"); 907 else 908 printf("off\n"); 909 } 910 911 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 912 printf("TX UDP segmentation: "); 913 if (ports[port_id].dev_conf.txmode.offloads & 914 DEV_TX_OFFLOAD_UDP_TSO) 915 printf("on\n"); 916 else 917 printf("off\n"); 918 } 919 920 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 921 printf("TSO for VXLAN tunnel packet: "); 922 if (ports[port_id].dev_conf.txmode.offloads & 923 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 924 printf("on\n"); 925 else 926 printf("off\n"); 927 } 928 929 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 930 printf("TSO for GRE tunnel packet: "); 931 if (ports[port_id].dev_conf.txmode.offloads & 932 DEV_TX_OFFLOAD_GRE_TNL_TSO) 933 printf("on\n"); 934 else 935 printf("off\n"); 936 } 937 938 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 939 printf("TSO for IPIP tunnel packet: "); 940 if (ports[port_id].dev_conf.txmode.offloads & 941 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 942 printf("on\n"); 943 else 944 printf("off\n"); 945 } 946 947 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 948 printf("TSO for GENEVE tunnel packet: "); 949 if (ports[port_id].dev_conf.txmode.offloads & 950 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 951 printf("on\n"); 952 else 953 printf("off\n"); 954 } 955 956 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 957 printf("IP tunnel TSO: "); 958 if (ports[port_id].dev_conf.txmode.offloads & 959 DEV_TX_OFFLOAD_IP_TNL_TSO) 960 printf("on\n"); 961 else 962 printf("off\n"); 963 } 964 965 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 966 printf("UDP tunnel TSO: "); 967 if (ports[port_id].dev_conf.txmode.offloads & 968 DEV_TX_OFFLOAD_UDP_TNL_TSO) 969 printf("on\n"); 970 else 971 printf("off\n"); 972 } 973 974 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 975 printf("TX Outer UDP checksum: "); 976 if (ports[port_id].dev_conf.txmode.offloads & 977 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 978 printf("on\n"); 979 else 980 printf("off\n"); 981 } 982 983 } 984 985 int 986 port_id_is_invalid(portid_t port_id, enum print_warning warning) 987 { 988 uint16_t pid; 989 990 if (port_id == (portid_t)RTE_PORT_ALL) 991 return 0; 992 993 RTE_ETH_FOREACH_DEV(pid) 994 if (port_id == pid) 995 return 0; 996 997 if (warning == ENABLED_WARN) 998 printf("Invalid port %d\n", port_id); 999 1000 return 1; 1001 } 1002 1003 void print_valid_ports(void) 1004 { 1005 portid_t pid; 1006 1007 printf("The valid ports array is ["); 1008 RTE_ETH_FOREACH_DEV(pid) { 1009 printf(" %d", pid); 1010 } 1011 printf(" ]\n"); 1012 } 1013 1014 static int 1015 vlan_id_is_invalid(uint16_t vlan_id) 1016 { 1017 if (vlan_id < 4096) 1018 return 0; 1019 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1020 return 1; 1021 } 1022 1023 static int 1024 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1025 { 1026 const struct rte_pci_device *pci_dev; 1027 const struct rte_bus *bus; 1028 uint64_t pci_len; 1029 1030 if (reg_off & 0x3) { 1031 printf("Port register offset 0x%X not aligned on a 4-byte " 1032 "boundary\n", 1033 (unsigned)reg_off); 1034 return 1; 1035 } 1036 1037 if (!ports[port_id].dev_info.device) { 1038 printf("Invalid device\n"); 1039 return 0; 1040 } 1041 1042 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1043 if (bus && !strcmp(bus->name, "pci")) { 1044 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1045 } else { 1046 printf("Not a PCI device\n"); 1047 return 1; 1048 } 1049 1050 pci_len = pci_dev->mem_resource[0].len; 1051 if (reg_off >= pci_len) { 1052 printf("Port %d: register offset %u (0x%X) out of port PCI " 1053 "resource (length=%"PRIu64")\n", 1054 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 1055 return 1; 1056 } 1057 return 0; 1058 } 1059 1060 static int 1061 reg_bit_pos_is_invalid(uint8_t bit_pos) 1062 { 1063 if (bit_pos <= 31) 1064 return 0; 1065 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 1066 return 1; 1067 } 1068 1069 #define display_port_and_reg_off(port_id, reg_off) \ 1070 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1071 1072 static inline void 1073 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1074 { 1075 display_port_and_reg_off(port_id, (unsigned)reg_off); 1076 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1077 } 1078 1079 void 1080 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1081 { 1082 uint32_t reg_v; 1083 1084 1085 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1086 return; 1087 if (port_reg_off_is_invalid(port_id, reg_off)) 1088 return; 1089 if (reg_bit_pos_is_invalid(bit_x)) 1090 return; 1091 reg_v = port_id_pci_reg_read(port_id, reg_off); 1092 display_port_and_reg_off(port_id, (unsigned)reg_off); 1093 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1094 } 1095 1096 void 1097 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1098 uint8_t bit1_pos, uint8_t bit2_pos) 1099 { 1100 uint32_t reg_v; 1101 uint8_t l_bit; 1102 uint8_t h_bit; 1103 1104 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1105 return; 1106 if (port_reg_off_is_invalid(port_id, reg_off)) 1107 return; 1108 if (reg_bit_pos_is_invalid(bit1_pos)) 1109 return; 1110 if (reg_bit_pos_is_invalid(bit2_pos)) 1111 return; 1112 if (bit1_pos > bit2_pos) 1113 l_bit = bit2_pos, h_bit = bit1_pos; 1114 else 1115 l_bit = bit1_pos, h_bit = bit2_pos; 1116 1117 reg_v = port_id_pci_reg_read(port_id, reg_off); 1118 reg_v >>= l_bit; 1119 if (h_bit < 31) 1120 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1121 display_port_and_reg_off(port_id, (unsigned)reg_off); 1122 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1123 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1124 } 1125 1126 void 1127 port_reg_display(portid_t port_id, uint32_t reg_off) 1128 { 1129 uint32_t reg_v; 1130 1131 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1132 return; 1133 if (port_reg_off_is_invalid(port_id, reg_off)) 1134 return; 1135 reg_v = port_id_pci_reg_read(port_id, reg_off); 1136 display_port_reg_value(port_id, reg_off, reg_v); 1137 } 1138 1139 void 1140 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1141 uint8_t bit_v) 1142 { 1143 uint32_t reg_v; 1144 1145 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1146 return; 1147 if (port_reg_off_is_invalid(port_id, reg_off)) 1148 return; 1149 if (reg_bit_pos_is_invalid(bit_pos)) 1150 return; 1151 if (bit_v > 1) { 1152 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1153 return; 1154 } 1155 reg_v = port_id_pci_reg_read(port_id, reg_off); 1156 if (bit_v == 0) 1157 reg_v &= ~(1 << bit_pos); 1158 else 1159 reg_v |= (1 << bit_pos); 1160 port_id_pci_reg_write(port_id, reg_off, reg_v); 1161 display_port_reg_value(port_id, reg_off, reg_v); 1162 } 1163 1164 void 1165 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1166 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1167 { 1168 uint32_t max_v; 1169 uint32_t reg_v; 1170 uint8_t l_bit; 1171 uint8_t h_bit; 1172 1173 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1174 return; 1175 if (port_reg_off_is_invalid(port_id, reg_off)) 1176 return; 1177 if (reg_bit_pos_is_invalid(bit1_pos)) 1178 return; 1179 if (reg_bit_pos_is_invalid(bit2_pos)) 1180 return; 1181 if (bit1_pos > bit2_pos) 1182 l_bit = bit2_pos, h_bit = bit1_pos; 1183 else 1184 l_bit = bit1_pos, h_bit = bit2_pos; 1185 1186 if ((h_bit - l_bit) < 31) 1187 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1188 else 1189 max_v = 0xFFFFFFFF; 1190 1191 if (value > max_v) { 1192 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1193 (unsigned)value, (unsigned)value, 1194 (unsigned)max_v, (unsigned)max_v); 1195 return; 1196 } 1197 reg_v = port_id_pci_reg_read(port_id, reg_off); 1198 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1199 reg_v |= (value << l_bit); /* Set changed bits */ 1200 port_id_pci_reg_write(port_id, reg_off, reg_v); 1201 display_port_reg_value(port_id, reg_off, reg_v); 1202 } 1203 1204 void 1205 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1206 { 1207 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1208 return; 1209 if (port_reg_off_is_invalid(port_id, reg_off)) 1210 return; 1211 port_id_pci_reg_write(port_id, reg_off, reg_v); 1212 display_port_reg_value(port_id, reg_off, reg_v); 1213 } 1214 1215 void 1216 port_mtu_set(portid_t port_id, uint16_t mtu) 1217 { 1218 int diag; 1219 struct rte_eth_dev_info dev_info; 1220 int ret; 1221 1222 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1223 return; 1224 1225 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1226 if (ret != 0) 1227 return; 1228 1229 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1230 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1231 mtu, dev_info.min_mtu, dev_info.max_mtu); 1232 return; 1233 } 1234 diag = rte_eth_dev_set_mtu(port_id, mtu); 1235 if (diag == 0) 1236 return; 1237 printf("Set MTU failed. diag=%d\n", diag); 1238 } 1239 1240 /* Generic flow management functions. */ 1241 1242 /** Generate a port_flow entry from attributes/pattern/actions. */ 1243 static struct port_flow * 1244 port_flow_new(const struct rte_flow_attr *attr, 1245 const struct rte_flow_item *pattern, 1246 const struct rte_flow_action *actions, 1247 struct rte_flow_error *error) 1248 { 1249 const struct rte_flow_conv_rule rule = { 1250 .attr_ro = attr, 1251 .pattern_ro = pattern, 1252 .actions_ro = actions, 1253 }; 1254 struct port_flow *pf; 1255 int ret; 1256 1257 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1258 if (ret < 0) 1259 return NULL; 1260 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1261 if (!pf) { 1262 rte_flow_error_set 1263 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1264 "calloc() failed"); 1265 return NULL; 1266 } 1267 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1268 error) >= 0) 1269 return pf; 1270 free(pf); 1271 return NULL; 1272 } 1273 1274 /** Print a message out of a flow error. */ 1275 static int 1276 port_flow_complain(struct rte_flow_error *error) 1277 { 1278 static const char *const errstrlist[] = { 1279 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1280 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1281 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1282 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1283 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1284 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1285 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1286 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1287 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1288 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1289 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1290 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1291 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1292 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1293 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1294 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1295 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1296 }; 1297 const char *errstr; 1298 char buf[32]; 1299 int err = rte_errno; 1300 1301 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1302 !errstrlist[error->type]) 1303 errstr = "unknown type"; 1304 else 1305 errstr = errstrlist[error->type]; 1306 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__, 1307 error->type, errstr, 1308 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1309 error->cause), buf) : "", 1310 error->message ? error->message : "(no stated reason)", 1311 rte_strerror(err)); 1312 return -err; 1313 } 1314 1315 /** Validate flow rule. */ 1316 int 1317 port_flow_validate(portid_t port_id, 1318 const struct rte_flow_attr *attr, 1319 const struct rte_flow_item *pattern, 1320 const struct rte_flow_action *actions) 1321 { 1322 struct rte_flow_error error; 1323 1324 /* Poisoning to make sure PMDs update it in case of error. */ 1325 memset(&error, 0x11, sizeof(error)); 1326 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1327 return port_flow_complain(&error); 1328 printf("Flow rule validated\n"); 1329 return 0; 1330 } 1331 1332 /** Create flow rule. */ 1333 int 1334 port_flow_create(portid_t port_id, 1335 const struct rte_flow_attr *attr, 1336 const struct rte_flow_item *pattern, 1337 const struct rte_flow_action *actions) 1338 { 1339 struct rte_flow *flow; 1340 struct rte_port *port; 1341 struct port_flow *pf; 1342 uint32_t id; 1343 struct rte_flow_error error; 1344 1345 /* Poisoning to make sure PMDs update it in case of error. */ 1346 memset(&error, 0x22, sizeof(error)); 1347 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1348 if (!flow) 1349 return port_flow_complain(&error); 1350 port = &ports[port_id]; 1351 if (port->flow_list) { 1352 if (port->flow_list->id == UINT32_MAX) { 1353 printf("Highest rule ID is already assigned, delete" 1354 " it first"); 1355 rte_flow_destroy(port_id, flow, NULL); 1356 return -ENOMEM; 1357 } 1358 id = port->flow_list->id + 1; 1359 } else 1360 id = 0; 1361 pf = port_flow_new(attr, pattern, actions, &error); 1362 if (!pf) { 1363 rte_flow_destroy(port_id, flow, NULL); 1364 return port_flow_complain(&error); 1365 } 1366 pf->next = port->flow_list; 1367 pf->id = id; 1368 pf->flow = flow; 1369 port->flow_list = pf; 1370 printf("Flow rule #%u created\n", pf->id); 1371 return 0; 1372 } 1373 1374 /** Destroy a number of flow rules. */ 1375 int 1376 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1377 { 1378 struct rte_port *port; 1379 struct port_flow **tmp; 1380 uint32_t c = 0; 1381 int ret = 0; 1382 1383 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1384 port_id == (portid_t)RTE_PORT_ALL) 1385 return -EINVAL; 1386 port = &ports[port_id]; 1387 tmp = &port->flow_list; 1388 while (*tmp) { 1389 uint32_t i; 1390 1391 for (i = 0; i != n; ++i) { 1392 struct rte_flow_error error; 1393 struct port_flow *pf = *tmp; 1394 1395 if (rule[i] != pf->id) 1396 continue; 1397 /* 1398 * Poisoning to make sure PMDs update it in case 1399 * of error. 1400 */ 1401 memset(&error, 0x33, sizeof(error)); 1402 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1403 ret = port_flow_complain(&error); 1404 continue; 1405 } 1406 printf("Flow rule #%u destroyed\n", pf->id); 1407 *tmp = pf->next; 1408 free(pf); 1409 break; 1410 } 1411 if (i == n) 1412 tmp = &(*tmp)->next; 1413 ++c; 1414 } 1415 return ret; 1416 } 1417 1418 /** Remove all flow rules. */ 1419 int 1420 port_flow_flush(portid_t port_id) 1421 { 1422 struct rte_flow_error error; 1423 struct rte_port *port; 1424 int ret = 0; 1425 1426 /* Poisoning to make sure PMDs update it in case of error. */ 1427 memset(&error, 0x44, sizeof(error)); 1428 if (rte_flow_flush(port_id, &error)) { 1429 ret = port_flow_complain(&error); 1430 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1431 port_id == (portid_t)RTE_PORT_ALL) 1432 return ret; 1433 } 1434 port = &ports[port_id]; 1435 while (port->flow_list) { 1436 struct port_flow *pf = port->flow_list->next; 1437 1438 free(port->flow_list); 1439 port->flow_list = pf; 1440 } 1441 return ret; 1442 } 1443 1444 /** Dump all flow rules. */ 1445 int 1446 port_flow_dump(portid_t port_id, const char *file_name) 1447 { 1448 int ret = 0; 1449 FILE *file = stdout; 1450 struct rte_flow_error error; 1451 1452 if (file_name && strlen(file_name)) { 1453 file = fopen(file_name, "w"); 1454 if (!file) { 1455 printf("Failed to create file %s: %s\n", file_name, 1456 strerror(errno)); 1457 return -errno; 1458 } 1459 } 1460 ret = rte_flow_dev_dump(port_id, file, &error); 1461 if (ret) { 1462 port_flow_complain(&error); 1463 printf("Failed to dump flow: %s\n", strerror(-ret)); 1464 } else 1465 printf("Flow dump finished\n"); 1466 if (file_name && strlen(file_name)) 1467 fclose(file); 1468 return ret; 1469 } 1470 1471 /** Query a flow rule. */ 1472 int 1473 port_flow_query(portid_t port_id, uint32_t rule, 1474 const struct rte_flow_action *action) 1475 { 1476 struct rte_flow_error error; 1477 struct rte_port *port; 1478 struct port_flow *pf; 1479 const char *name; 1480 union { 1481 struct rte_flow_query_count count; 1482 } query; 1483 int ret; 1484 1485 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1486 port_id == (portid_t)RTE_PORT_ALL) 1487 return -EINVAL; 1488 port = &ports[port_id]; 1489 for (pf = port->flow_list; pf; pf = pf->next) 1490 if (pf->id == rule) 1491 break; 1492 if (!pf) { 1493 printf("Flow rule #%u not found\n", rule); 1494 return -ENOENT; 1495 } 1496 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1497 &name, sizeof(name), 1498 (void *)(uintptr_t)action->type, &error); 1499 if (ret < 0) 1500 return port_flow_complain(&error); 1501 switch (action->type) { 1502 case RTE_FLOW_ACTION_TYPE_COUNT: 1503 break; 1504 default: 1505 printf("Cannot query action type %d (%s)\n", 1506 action->type, name); 1507 return -ENOTSUP; 1508 } 1509 /* Poisoning to make sure PMDs update it in case of error. */ 1510 memset(&error, 0x55, sizeof(error)); 1511 memset(&query, 0, sizeof(query)); 1512 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1513 return port_flow_complain(&error); 1514 switch (action->type) { 1515 case RTE_FLOW_ACTION_TYPE_COUNT: 1516 printf("%s:\n" 1517 " hits_set: %u\n" 1518 " bytes_set: %u\n" 1519 " hits: %" PRIu64 "\n" 1520 " bytes: %" PRIu64 "\n", 1521 name, 1522 query.count.hits_set, 1523 query.count.bytes_set, 1524 query.count.hits, 1525 query.count.bytes); 1526 break; 1527 default: 1528 printf("Cannot display result for action type %d (%s)\n", 1529 action->type, name); 1530 break; 1531 } 1532 return 0; 1533 } 1534 1535 /** List flow rules. */ 1536 void 1537 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1538 { 1539 struct rte_port *port; 1540 struct port_flow *pf; 1541 struct port_flow *list = NULL; 1542 uint32_t i; 1543 1544 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1545 port_id == (portid_t)RTE_PORT_ALL) 1546 return; 1547 port = &ports[port_id]; 1548 if (!port->flow_list) 1549 return; 1550 /* Sort flows by group, priority and ID. */ 1551 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1552 struct port_flow **tmp; 1553 const struct rte_flow_attr *curr = pf->rule.attr; 1554 1555 if (n) { 1556 /* Filter out unwanted groups. */ 1557 for (i = 0; i != n; ++i) 1558 if (curr->group == group[i]) 1559 break; 1560 if (i == n) 1561 continue; 1562 } 1563 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 1564 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 1565 1566 if (curr->group > comp->group || 1567 (curr->group == comp->group && 1568 curr->priority > comp->priority) || 1569 (curr->group == comp->group && 1570 curr->priority == comp->priority && 1571 pf->id > (*tmp)->id)) 1572 continue; 1573 break; 1574 } 1575 pf->tmp = *tmp; 1576 *tmp = pf; 1577 } 1578 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1579 for (pf = list; pf != NULL; pf = pf->tmp) { 1580 const struct rte_flow_item *item = pf->rule.pattern; 1581 const struct rte_flow_action *action = pf->rule.actions; 1582 const char *name; 1583 1584 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1585 pf->id, 1586 pf->rule.attr->group, 1587 pf->rule.attr->priority, 1588 pf->rule.attr->ingress ? 'i' : '-', 1589 pf->rule.attr->egress ? 'e' : '-', 1590 pf->rule.attr->transfer ? 't' : '-'); 1591 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1592 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 1593 &name, sizeof(name), 1594 (void *)(uintptr_t)item->type, 1595 NULL) <= 0) 1596 name = "[UNKNOWN]"; 1597 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1598 printf("%s ", name); 1599 ++item; 1600 } 1601 printf("=>"); 1602 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1603 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1604 &name, sizeof(name), 1605 (void *)(uintptr_t)action->type, 1606 NULL) <= 0) 1607 name = "[UNKNOWN]"; 1608 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1609 printf(" %s", name); 1610 ++action; 1611 } 1612 printf("\n"); 1613 } 1614 } 1615 1616 /** Restrict ingress traffic to the defined flow rules. */ 1617 int 1618 port_flow_isolate(portid_t port_id, int set) 1619 { 1620 struct rte_flow_error error; 1621 1622 /* Poisoning to make sure PMDs update it in case of error. */ 1623 memset(&error, 0x66, sizeof(error)); 1624 if (rte_flow_isolate(port_id, set, &error)) 1625 return port_flow_complain(&error); 1626 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1627 port_id, 1628 set ? "now restricted" : "not restricted anymore"); 1629 return 0; 1630 } 1631 1632 /* 1633 * RX/TX ring descriptors display functions. 1634 */ 1635 int 1636 rx_queue_id_is_invalid(queueid_t rxq_id) 1637 { 1638 if (rxq_id < nb_rxq) 1639 return 0; 1640 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1641 return 1; 1642 } 1643 1644 int 1645 tx_queue_id_is_invalid(queueid_t txq_id) 1646 { 1647 if (txq_id < nb_txq) 1648 return 0; 1649 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1650 return 1; 1651 } 1652 1653 static int 1654 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1655 { 1656 if (rxdesc_id < nb_rxd) 1657 return 0; 1658 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1659 rxdesc_id, nb_rxd); 1660 return 1; 1661 } 1662 1663 static int 1664 tx_desc_id_is_invalid(uint16_t txdesc_id) 1665 { 1666 if (txdesc_id < nb_txd) 1667 return 0; 1668 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1669 txdesc_id, nb_txd); 1670 return 1; 1671 } 1672 1673 static const struct rte_memzone * 1674 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1675 { 1676 char mz_name[RTE_MEMZONE_NAMESIZE]; 1677 const struct rte_memzone *mz; 1678 1679 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 1680 port_id, q_id, ring_name); 1681 mz = rte_memzone_lookup(mz_name); 1682 if (mz == NULL) 1683 printf("%s ring memory zoneof (port %d, queue %d) not" 1684 "found (zone name = %s\n", 1685 ring_name, port_id, q_id, mz_name); 1686 return mz; 1687 } 1688 1689 union igb_ring_dword { 1690 uint64_t dword; 1691 struct { 1692 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1693 uint32_t lo; 1694 uint32_t hi; 1695 #else 1696 uint32_t hi; 1697 uint32_t lo; 1698 #endif 1699 } words; 1700 }; 1701 1702 struct igb_ring_desc_32_bytes { 1703 union igb_ring_dword lo_dword; 1704 union igb_ring_dword hi_dword; 1705 union igb_ring_dword resv1; 1706 union igb_ring_dword resv2; 1707 }; 1708 1709 struct igb_ring_desc_16_bytes { 1710 union igb_ring_dword lo_dword; 1711 union igb_ring_dword hi_dword; 1712 }; 1713 1714 static void 1715 ring_rxd_display_dword(union igb_ring_dword dword) 1716 { 1717 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1718 (unsigned)dword.words.hi); 1719 } 1720 1721 static void 1722 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1723 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1724 portid_t port_id, 1725 #else 1726 __rte_unused portid_t port_id, 1727 #endif 1728 uint16_t desc_id) 1729 { 1730 struct igb_ring_desc_16_bytes *ring = 1731 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1732 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1733 int ret; 1734 struct rte_eth_dev_info dev_info; 1735 1736 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1737 if (ret != 0) 1738 return; 1739 1740 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1741 /* 32 bytes RX descriptor, i40e only */ 1742 struct igb_ring_desc_32_bytes *ring = 1743 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1744 ring[desc_id].lo_dword.dword = 1745 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1746 ring_rxd_display_dword(ring[desc_id].lo_dword); 1747 ring[desc_id].hi_dword.dword = 1748 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1749 ring_rxd_display_dword(ring[desc_id].hi_dword); 1750 ring[desc_id].resv1.dword = 1751 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1752 ring_rxd_display_dword(ring[desc_id].resv1); 1753 ring[desc_id].resv2.dword = 1754 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1755 ring_rxd_display_dword(ring[desc_id].resv2); 1756 1757 return; 1758 } 1759 #endif 1760 /* 16 bytes RX descriptor */ 1761 ring[desc_id].lo_dword.dword = 1762 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1763 ring_rxd_display_dword(ring[desc_id].lo_dword); 1764 ring[desc_id].hi_dword.dword = 1765 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1766 ring_rxd_display_dword(ring[desc_id].hi_dword); 1767 } 1768 1769 static void 1770 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1771 { 1772 struct igb_ring_desc_16_bytes *ring; 1773 struct igb_ring_desc_16_bytes txd; 1774 1775 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1776 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1777 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1778 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1779 (unsigned)txd.lo_dword.words.lo, 1780 (unsigned)txd.lo_dword.words.hi, 1781 (unsigned)txd.hi_dword.words.lo, 1782 (unsigned)txd.hi_dword.words.hi); 1783 } 1784 1785 void 1786 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1787 { 1788 const struct rte_memzone *rx_mz; 1789 1790 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1791 return; 1792 if (rx_queue_id_is_invalid(rxq_id)) 1793 return; 1794 if (rx_desc_id_is_invalid(rxd_id)) 1795 return; 1796 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1797 if (rx_mz == NULL) 1798 return; 1799 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1800 } 1801 1802 void 1803 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1804 { 1805 const struct rte_memzone *tx_mz; 1806 1807 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1808 return; 1809 if (tx_queue_id_is_invalid(txq_id)) 1810 return; 1811 if (tx_desc_id_is_invalid(txd_id)) 1812 return; 1813 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1814 if (tx_mz == NULL) 1815 return; 1816 ring_tx_descriptor_display(tx_mz, txd_id); 1817 } 1818 1819 void 1820 fwd_lcores_config_display(void) 1821 { 1822 lcoreid_t lc_id; 1823 1824 printf("List of forwarding lcores:"); 1825 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1826 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1827 printf("\n"); 1828 } 1829 void 1830 rxtx_config_display(void) 1831 { 1832 portid_t pid; 1833 queueid_t qid; 1834 1835 printf(" %s packet forwarding%s packets/burst=%d\n", 1836 cur_fwd_eng->fwd_mode_name, 1837 retry_enabled == 0 ? "" : " with retry", 1838 nb_pkt_per_burst); 1839 1840 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1841 printf(" packet len=%u - nb packet segments=%d\n", 1842 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1843 1844 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1845 nb_fwd_lcores, nb_fwd_ports); 1846 1847 RTE_ETH_FOREACH_DEV(pid) { 1848 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1849 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1850 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1851 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1852 uint16_t nb_rx_desc_tmp; 1853 uint16_t nb_tx_desc_tmp; 1854 struct rte_eth_rxq_info rx_qinfo; 1855 struct rte_eth_txq_info tx_qinfo; 1856 int32_t rc; 1857 1858 /* per port config */ 1859 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1860 (unsigned int)pid, nb_rxq, nb_txq); 1861 1862 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1863 ports[pid].dev_conf.rxmode.offloads, 1864 ports[pid].dev_conf.txmode.offloads); 1865 1866 /* per rx queue config only for first queue to be less verbose */ 1867 for (qid = 0; qid < 1; qid++) { 1868 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1869 if (rc) 1870 nb_rx_desc_tmp = nb_rx_desc[qid]; 1871 else 1872 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1873 1874 printf(" RX queue: %d\n", qid); 1875 printf(" RX desc=%d - RX free threshold=%d\n", 1876 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1877 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1878 " wthresh=%d\n", 1879 rx_conf[qid].rx_thresh.pthresh, 1880 rx_conf[qid].rx_thresh.hthresh, 1881 rx_conf[qid].rx_thresh.wthresh); 1882 printf(" RX Offloads=0x%"PRIx64"\n", 1883 rx_conf[qid].offloads); 1884 } 1885 1886 /* per tx queue config only for first queue to be less verbose */ 1887 for (qid = 0; qid < 1; qid++) { 1888 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1889 if (rc) 1890 nb_tx_desc_tmp = nb_tx_desc[qid]; 1891 else 1892 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1893 1894 printf(" TX queue: %d\n", qid); 1895 printf(" TX desc=%d - TX free threshold=%d\n", 1896 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1897 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1898 " wthresh=%d\n", 1899 tx_conf[qid].tx_thresh.pthresh, 1900 tx_conf[qid].tx_thresh.hthresh, 1901 tx_conf[qid].tx_thresh.wthresh); 1902 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1903 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1904 } 1905 } 1906 } 1907 1908 void 1909 port_rss_reta_info(portid_t port_id, 1910 struct rte_eth_rss_reta_entry64 *reta_conf, 1911 uint16_t nb_entries) 1912 { 1913 uint16_t i, idx, shift; 1914 int ret; 1915 1916 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1917 return; 1918 1919 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1920 if (ret != 0) { 1921 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1922 return; 1923 } 1924 1925 for (i = 0; i < nb_entries; i++) { 1926 idx = i / RTE_RETA_GROUP_SIZE; 1927 shift = i % RTE_RETA_GROUP_SIZE; 1928 if (!(reta_conf[idx].mask & (1ULL << shift))) 1929 continue; 1930 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1931 i, reta_conf[idx].reta[shift]); 1932 } 1933 } 1934 1935 /* 1936 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1937 * key of the port. 1938 */ 1939 void 1940 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 1941 { 1942 struct rte_eth_rss_conf rss_conf = {0}; 1943 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1944 uint64_t rss_hf; 1945 uint8_t i; 1946 int diag; 1947 struct rte_eth_dev_info dev_info; 1948 uint8_t hash_key_size; 1949 int ret; 1950 1951 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1952 return; 1953 1954 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1955 if (ret != 0) 1956 return; 1957 1958 if (dev_info.hash_key_size > 0 && 1959 dev_info.hash_key_size <= sizeof(rss_key)) 1960 hash_key_size = dev_info.hash_key_size; 1961 else { 1962 printf("dev_info did not provide a valid hash key size\n"); 1963 return; 1964 } 1965 1966 /* Get RSS hash key if asked to display it */ 1967 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1968 rss_conf.rss_key_len = hash_key_size; 1969 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1970 if (diag != 0) { 1971 switch (diag) { 1972 case -ENODEV: 1973 printf("port index %d invalid\n", port_id); 1974 break; 1975 case -ENOTSUP: 1976 printf("operation not supported by device\n"); 1977 break; 1978 default: 1979 printf("operation failed - diag=%d\n", diag); 1980 break; 1981 } 1982 return; 1983 } 1984 rss_hf = rss_conf.rss_hf; 1985 if (rss_hf == 0) { 1986 printf("RSS disabled\n"); 1987 return; 1988 } 1989 printf("RSS functions:\n "); 1990 for (i = 0; rss_type_table[i].str; i++) { 1991 if (rss_hf & rss_type_table[i].rss_type) 1992 printf("%s ", rss_type_table[i].str); 1993 } 1994 printf("\n"); 1995 if (!show_rss_key) 1996 return; 1997 printf("RSS key:\n"); 1998 for (i = 0; i < hash_key_size; i++) 1999 printf("%02X", rss_key[i]); 2000 printf("\n"); 2001 } 2002 2003 void 2004 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2005 uint hash_key_len) 2006 { 2007 struct rte_eth_rss_conf rss_conf; 2008 int diag; 2009 unsigned int i; 2010 2011 rss_conf.rss_key = NULL; 2012 rss_conf.rss_key_len = hash_key_len; 2013 rss_conf.rss_hf = 0; 2014 for (i = 0; rss_type_table[i].str; i++) { 2015 if (!strcmp(rss_type_table[i].str, rss_type)) 2016 rss_conf.rss_hf = rss_type_table[i].rss_type; 2017 } 2018 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2019 if (diag == 0) { 2020 rss_conf.rss_key = hash_key; 2021 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2022 } 2023 if (diag == 0) 2024 return; 2025 2026 switch (diag) { 2027 case -ENODEV: 2028 printf("port index %d invalid\n", port_id); 2029 break; 2030 case -ENOTSUP: 2031 printf("operation not supported by device\n"); 2032 break; 2033 default: 2034 printf("operation failed - diag=%d\n", diag); 2035 break; 2036 } 2037 } 2038 2039 /* 2040 * Setup forwarding configuration for each logical core. 2041 */ 2042 static void 2043 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2044 { 2045 streamid_t nb_fs_per_lcore; 2046 streamid_t nb_fs; 2047 streamid_t sm_id; 2048 lcoreid_t nb_extra; 2049 lcoreid_t nb_fc; 2050 lcoreid_t nb_lc; 2051 lcoreid_t lc_id; 2052 2053 nb_fs = cfg->nb_fwd_streams; 2054 nb_fc = cfg->nb_fwd_lcores; 2055 if (nb_fs <= nb_fc) { 2056 nb_fs_per_lcore = 1; 2057 nb_extra = 0; 2058 } else { 2059 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2060 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2061 } 2062 2063 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2064 sm_id = 0; 2065 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2066 fwd_lcores[lc_id]->stream_idx = sm_id; 2067 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2068 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2069 } 2070 2071 /* 2072 * Assign extra remaining streams, if any. 2073 */ 2074 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2075 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2076 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2077 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2078 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2079 } 2080 } 2081 2082 static portid_t 2083 fwd_topology_tx_port_get(portid_t rxp) 2084 { 2085 static int warning_once = 1; 2086 2087 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2088 2089 switch (port_topology) { 2090 default: 2091 case PORT_TOPOLOGY_PAIRED: 2092 if ((rxp & 0x1) == 0) { 2093 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2094 return rxp + 1; 2095 if (warning_once) { 2096 printf("\nWarning! port-topology=paired" 2097 " and odd forward ports number," 2098 " the last port will pair with" 2099 " itself.\n\n"); 2100 warning_once = 0; 2101 } 2102 return rxp; 2103 } 2104 return rxp - 1; 2105 case PORT_TOPOLOGY_CHAINED: 2106 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2107 case PORT_TOPOLOGY_LOOP: 2108 return rxp; 2109 } 2110 } 2111 2112 static void 2113 simple_fwd_config_setup(void) 2114 { 2115 portid_t i; 2116 2117 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2118 cur_fwd_config.nb_fwd_streams = 2119 (streamid_t) cur_fwd_config.nb_fwd_ports; 2120 2121 /* reinitialize forwarding streams */ 2122 init_fwd_streams(); 2123 2124 /* 2125 * In the simple forwarding test, the number of forwarding cores 2126 * must be lower or equal to the number of forwarding ports. 2127 */ 2128 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2129 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2130 cur_fwd_config.nb_fwd_lcores = 2131 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2132 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2133 2134 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2135 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2136 fwd_streams[i]->rx_queue = 0; 2137 fwd_streams[i]->tx_port = 2138 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2139 fwd_streams[i]->tx_queue = 0; 2140 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2141 fwd_streams[i]->retry_enabled = retry_enabled; 2142 } 2143 } 2144 2145 /** 2146 * For the RSS forwarding test all streams distributed over lcores. Each stream 2147 * being composed of a RX queue to poll on a RX port for input messages, 2148 * associated with a TX queue of a TX port where to send forwarded packets. 2149 */ 2150 static void 2151 rss_fwd_config_setup(void) 2152 { 2153 portid_t rxp; 2154 portid_t txp; 2155 queueid_t rxq; 2156 queueid_t nb_q; 2157 streamid_t sm_id; 2158 2159 nb_q = nb_rxq; 2160 if (nb_q > nb_txq) 2161 nb_q = nb_txq; 2162 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2163 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2164 cur_fwd_config.nb_fwd_streams = 2165 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2166 2167 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2168 cur_fwd_config.nb_fwd_lcores = 2169 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2170 2171 /* reinitialize forwarding streams */ 2172 init_fwd_streams(); 2173 2174 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2175 rxp = 0; rxq = 0; 2176 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2177 struct fwd_stream *fs; 2178 2179 fs = fwd_streams[sm_id]; 2180 txp = fwd_topology_tx_port_get(rxp); 2181 fs->rx_port = fwd_ports_ids[rxp]; 2182 fs->rx_queue = rxq; 2183 fs->tx_port = fwd_ports_ids[txp]; 2184 fs->tx_queue = rxq; 2185 fs->peer_addr = fs->tx_port; 2186 fs->retry_enabled = retry_enabled; 2187 rxp++; 2188 if (rxp < nb_fwd_ports) 2189 continue; 2190 rxp = 0; 2191 rxq++; 2192 } 2193 } 2194 2195 /** 2196 * For the DCB forwarding test, each core is assigned on each traffic class. 2197 * 2198 * Each core is assigned a multi-stream, each stream being composed of 2199 * a RX queue to poll on a RX port for input messages, associated with 2200 * a TX queue of a TX port where to send forwarded packets. All RX and 2201 * TX queues are mapping to the same traffic class. 2202 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2203 * the same core 2204 */ 2205 static void 2206 dcb_fwd_config_setup(void) 2207 { 2208 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2209 portid_t txp, rxp = 0; 2210 queueid_t txq, rxq = 0; 2211 lcoreid_t lc_id; 2212 uint16_t nb_rx_queue, nb_tx_queue; 2213 uint16_t i, j, k, sm_id = 0; 2214 uint8_t tc = 0; 2215 2216 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2217 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2218 cur_fwd_config.nb_fwd_streams = 2219 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2220 2221 /* reinitialize forwarding streams */ 2222 init_fwd_streams(); 2223 sm_id = 0; 2224 txp = 1; 2225 /* get the dcb info on the first RX and TX ports */ 2226 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2227 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2228 2229 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2230 fwd_lcores[lc_id]->stream_nb = 0; 2231 fwd_lcores[lc_id]->stream_idx = sm_id; 2232 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2233 /* if the nb_queue is zero, means this tc is 2234 * not enabled on the POOL 2235 */ 2236 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2237 break; 2238 k = fwd_lcores[lc_id]->stream_nb + 2239 fwd_lcores[lc_id]->stream_idx; 2240 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2241 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2242 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2243 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2244 for (j = 0; j < nb_rx_queue; j++) { 2245 struct fwd_stream *fs; 2246 2247 fs = fwd_streams[k + j]; 2248 fs->rx_port = fwd_ports_ids[rxp]; 2249 fs->rx_queue = rxq + j; 2250 fs->tx_port = fwd_ports_ids[txp]; 2251 fs->tx_queue = txq + j % nb_tx_queue; 2252 fs->peer_addr = fs->tx_port; 2253 fs->retry_enabled = retry_enabled; 2254 } 2255 fwd_lcores[lc_id]->stream_nb += 2256 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2257 } 2258 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2259 2260 tc++; 2261 if (tc < rxp_dcb_info.nb_tcs) 2262 continue; 2263 /* Restart from TC 0 on next RX port */ 2264 tc = 0; 2265 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2266 rxp = (portid_t) 2267 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2268 else 2269 rxp++; 2270 if (rxp >= nb_fwd_ports) 2271 return; 2272 /* get the dcb information on next RX and TX ports */ 2273 if ((rxp & 0x1) == 0) 2274 txp = (portid_t) (rxp + 1); 2275 else 2276 txp = (portid_t) (rxp - 1); 2277 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2278 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2279 } 2280 } 2281 2282 static void 2283 icmp_echo_config_setup(void) 2284 { 2285 portid_t rxp; 2286 queueid_t rxq; 2287 lcoreid_t lc_id; 2288 uint16_t sm_id; 2289 2290 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2291 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2292 (nb_txq * nb_fwd_ports); 2293 else 2294 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2295 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2296 cur_fwd_config.nb_fwd_streams = 2297 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2298 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2299 cur_fwd_config.nb_fwd_lcores = 2300 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2301 if (verbose_level > 0) { 2302 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2303 __FUNCTION__, 2304 cur_fwd_config.nb_fwd_lcores, 2305 cur_fwd_config.nb_fwd_ports, 2306 cur_fwd_config.nb_fwd_streams); 2307 } 2308 2309 /* reinitialize forwarding streams */ 2310 init_fwd_streams(); 2311 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2312 rxp = 0; rxq = 0; 2313 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2314 if (verbose_level > 0) 2315 printf(" core=%d: \n", lc_id); 2316 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2317 struct fwd_stream *fs; 2318 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2319 fs->rx_port = fwd_ports_ids[rxp]; 2320 fs->rx_queue = rxq; 2321 fs->tx_port = fs->rx_port; 2322 fs->tx_queue = rxq; 2323 fs->peer_addr = fs->tx_port; 2324 fs->retry_enabled = retry_enabled; 2325 if (verbose_level > 0) 2326 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2327 sm_id, fs->rx_port, fs->rx_queue, 2328 fs->tx_queue); 2329 rxq = (queueid_t) (rxq + 1); 2330 if (rxq == nb_rxq) { 2331 rxq = 0; 2332 rxp = (portid_t) (rxp + 1); 2333 } 2334 } 2335 } 2336 } 2337 2338 #if defined RTE_LIBRTE_PMD_SOFTNIC 2339 static void 2340 softnic_fwd_config_setup(void) 2341 { 2342 struct rte_port *port; 2343 portid_t pid, softnic_portid; 2344 queueid_t i; 2345 uint8_t softnic_enable = 0; 2346 2347 RTE_ETH_FOREACH_DEV(pid) { 2348 port = &ports[pid]; 2349 const char *driver = port->dev_info.driver_name; 2350 2351 if (strcmp(driver, "net_softnic") == 0) { 2352 softnic_portid = pid; 2353 softnic_enable = 1; 2354 break; 2355 } 2356 } 2357 2358 if (softnic_enable == 0) { 2359 printf("Softnic mode not configured(%s)!\n", __func__); 2360 return; 2361 } 2362 2363 cur_fwd_config.nb_fwd_ports = 1; 2364 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; 2365 2366 /* Re-initialize forwarding streams */ 2367 init_fwd_streams(); 2368 2369 /* 2370 * In the softnic forwarding test, the number of forwarding cores 2371 * is set to one and remaining are used for softnic packet processing. 2372 */ 2373 cur_fwd_config.nb_fwd_lcores = 1; 2374 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2375 2376 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { 2377 fwd_streams[i]->rx_port = softnic_portid; 2378 fwd_streams[i]->rx_queue = i; 2379 fwd_streams[i]->tx_port = softnic_portid; 2380 fwd_streams[i]->tx_queue = i; 2381 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2382 fwd_streams[i]->retry_enabled = retry_enabled; 2383 } 2384 } 2385 #endif 2386 2387 void 2388 fwd_config_setup(void) 2389 { 2390 cur_fwd_config.fwd_eng = cur_fwd_eng; 2391 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2392 icmp_echo_config_setup(); 2393 return; 2394 } 2395 2396 #if defined RTE_LIBRTE_PMD_SOFTNIC 2397 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 2398 softnic_fwd_config_setup(); 2399 return; 2400 } 2401 #endif 2402 2403 if ((nb_rxq > 1) && (nb_txq > 1)){ 2404 if (dcb_config) 2405 dcb_fwd_config_setup(); 2406 else 2407 rss_fwd_config_setup(); 2408 } 2409 else 2410 simple_fwd_config_setup(); 2411 } 2412 2413 static const char * 2414 mp_alloc_to_str(uint8_t mode) 2415 { 2416 switch (mode) { 2417 case MP_ALLOC_NATIVE: 2418 return "native"; 2419 case MP_ALLOC_ANON: 2420 return "anon"; 2421 case MP_ALLOC_XMEM: 2422 return "xmem"; 2423 case MP_ALLOC_XMEM_HUGE: 2424 return "xmemhuge"; 2425 case MP_ALLOC_XBUF: 2426 return "xbuf"; 2427 default: 2428 return "invalid"; 2429 } 2430 } 2431 2432 void 2433 pkt_fwd_config_display(struct fwd_config *cfg) 2434 { 2435 struct fwd_stream *fs; 2436 lcoreid_t lc_id; 2437 streamid_t sm_id; 2438 2439 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2440 "NUMA support %s, MP allocation mode: %s\n", 2441 cfg->fwd_eng->fwd_mode_name, 2442 retry_enabled == 0 ? "" : " with retry", 2443 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2444 numa_support == 1 ? "enabled" : "disabled", 2445 mp_alloc_to_str(mp_alloc_type)); 2446 2447 if (retry_enabled) 2448 printf("TX retry num: %u, delay between TX retries: %uus\n", 2449 burst_tx_retry_num, burst_tx_delay_time); 2450 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2451 printf("Logical Core %u (socket %u) forwards packets on " 2452 "%d streams:", 2453 fwd_lcores_cpuids[lc_id], 2454 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2455 fwd_lcores[lc_id]->stream_nb); 2456 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2457 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2458 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2459 "P=%d/Q=%d (socket %u) ", 2460 fs->rx_port, fs->rx_queue, 2461 ports[fs->rx_port].socket_id, 2462 fs->tx_port, fs->tx_queue, 2463 ports[fs->tx_port].socket_id); 2464 print_ethaddr("peer=", 2465 &peer_eth_addrs[fs->peer_addr]); 2466 } 2467 printf("\n"); 2468 } 2469 printf("\n"); 2470 } 2471 2472 void 2473 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2474 { 2475 struct rte_ether_addr new_peer_addr; 2476 if (!rte_eth_dev_is_valid_port(port_id)) { 2477 printf("Error: Invalid port number %i\n", port_id); 2478 return; 2479 } 2480 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 2481 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2482 return; 2483 } 2484 peer_eth_addrs[port_id] = new_peer_addr; 2485 } 2486 2487 int 2488 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2489 { 2490 unsigned int i; 2491 unsigned int lcore_cpuid; 2492 int record_now; 2493 2494 record_now = 0; 2495 again: 2496 for (i = 0; i < nb_lc; i++) { 2497 lcore_cpuid = lcorelist[i]; 2498 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2499 printf("lcore %u not enabled\n", lcore_cpuid); 2500 return -1; 2501 } 2502 if (lcore_cpuid == rte_get_master_lcore()) { 2503 printf("lcore %u cannot be masked on for running " 2504 "packet forwarding, which is the master lcore " 2505 "and reserved for command line parsing only\n", 2506 lcore_cpuid); 2507 return -1; 2508 } 2509 if (record_now) 2510 fwd_lcores_cpuids[i] = lcore_cpuid; 2511 } 2512 if (record_now == 0) { 2513 record_now = 1; 2514 goto again; 2515 } 2516 nb_cfg_lcores = (lcoreid_t) nb_lc; 2517 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2518 printf("previous number of forwarding cores %u - changed to " 2519 "number of configured cores %u\n", 2520 (unsigned int) nb_fwd_lcores, nb_lc); 2521 nb_fwd_lcores = (lcoreid_t) nb_lc; 2522 } 2523 2524 return 0; 2525 } 2526 2527 int 2528 set_fwd_lcores_mask(uint64_t lcoremask) 2529 { 2530 unsigned int lcorelist[64]; 2531 unsigned int nb_lc; 2532 unsigned int i; 2533 2534 if (lcoremask == 0) { 2535 printf("Invalid NULL mask of cores\n"); 2536 return -1; 2537 } 2538 nb_lc = 0; 2539 for (i = 0; i < 64; i++) { 2540 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2541 continue; 2542 lcorelist[nb_lc++] = i; 2543 } 2544 return set_fwd_lcores_list(lcorelist, nb_lc); 2545 } 2546 2547 void 2548 set_fwd_lcores_number(uint16_t nb_lc) 2549 { 2550 if (nb_lc > nb_cfg_lcores) { 2551 printf("nb fwd cores %u > %u (max. number of configured " 2552 "lcores) - ignored\n", 2553 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2554 return; 2555 } 2556 nb_fwd_lcores = (lcoreid_t) nb_lc; 2557 printf("Number of forwarding cores set to %u\n", 2558 (unsigned int) nb_fwd_lcores); 2559 } 2560 2561 void 2562 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2563 { 2564 unsigned int i; 2565 portid_t port_id; 2566 int record_now; 2567 2568 record_now = 0; 2569 again: 2570 for (i = 0; i < nb_pt; i++) { 2571 port_id = (portid_t) portlist[i]; 2572 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2573 return; 2574 if (record_now) 2575 fwd_ports_ids[i] = port_id; 2576 } 2577 if (record_now == 0) { 2578 record_now = 1; 2579 goto again; 2580 } 2581 nb_cfg_ports = (portid_t) nb_pt; 2582 if (nb_fwd_ports != (portid_t) nb_pt) { 2583 printf("previous number of forwarding ports %u - changed to " 2584 "number of configured ports %u\n", 2585 (unsigned int) nb_fwd_ports, nb_pt); 2586 nb_fwd_ports = (portid_t) nb_pt; 2587 } 2588 } 2589 2590 void 2591 set_fwd_ports_mask(uint64_t portmask) 2592 { 2593 unsigned int portlist[64]; 2594 unsigned int nb_pt; 2595 unsigned int i; 2596 2597 if (portmask == 0) { 2598 printf("Invalid NULL mask of ports\n"); 2599 return; 2600 } 2601 nb_pt = 0; 2602 RTE_ETH_FOREACH_DEV(i) { 2603 if (! ((uint64_t)(1ULL << i) & portmask)) 2604 continue; 2605 portlist[nb_pt++] = i; 2606 } 2607 set_fwd_ports_list(portlist, nb_pt); 2608 } 2609 2610 void 2611 set_fwd_ports_number(uint16_t nb_pt) 2612 { 2613 if (nb_pt > nb_cfg_ports) { 2614 printf("nb fwd ports %u > %u (number of configured " 2615 "ports) - ignored\n", 2616 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2617 return; 2618 } 2619 nb_fwd_ports = (portid_t) nb_pt; 2620 printf("Number of forwarding ports set to %u\n", 2621 (unsigned int) nb_fwd_ports); 2622 } 2623 2624 int 2625 port_is_forwarding(portid_t port_id) 2626 { 2627 unsigned int i; 2628 2629 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2630 return -1; 2631 2632 for (i = 0; i < nb_fwd_ports; i++) { 2633 if (fwd_ports_ids[i] == port_id) 2634 return 1; 2635 } 2636 2637 return 0; 2638 } 2639 2640 void 2641 set_nb_pkt_per_burst(uint16_t nb) 2642 { 2643 if (nb > MAX_PKT_BURST) { 2644 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2645 " ignored\n", 2646 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2647 return; 2648 } 2649 nb_pkt_per_burst = nb; 2650 printf("Number of packets per burst set to %u\n", 2651 (unsigned int) nb_pkt_per_burst); 2652 } 2653 2654 static const char * 2655 tx_split_get_name(enum tx_pkt_split split) 2656 { 2657 uint32_t i; 2658 2659 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2660 if (tx_split_name[i].split == split) 2661 return tx_split_name[i].name; 2662 } 2663 return NULL; 2664 } 2665 2666 void 2667 set_tx_pkt_split(const char *name) 2668 { 2669 uint32_t i; 2670 2671 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2672 if (strcmp(tx_split_name[i].name, name) == 0) { 2673 tx_pkt_split = tx_split_name[i].split; 2674 return; 2675 } 2676 } 2677 printf("unknown value: \"%s\"\n", name); 2678 } 2679 2680 void 2681 show_tx_pkt_segments(void) 2682 { 2683 uint32_t i, n; 2684 const char *split; 2685 2686 n = tx_pkt_nb_segs; 2687 split = tx_split_get_name(tx_pkt_split); 2688 2689 printf("Number of segments: %u\n", n); 2690 printf("Segment sizes: "); 2691 for (i = 0; i != n - 1; i++) 2692 printf("%hu,", tx_pkt_seg_lengths[i]); 2693 printf("%hu\n", tx_pkt_seg_lengths[i]); 2694 printf("Split packet: %s\n", split); 2695 } 2696 2697 void 2698 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2699 { 2700 uint16_t tx_pkt_len; 2701 unsigned i; 2702 2703 if (nb_segs >= (unsigned) nb_txd) { 2704 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2705 nb_segs, (unsigned int) nb_txd); 2706 return; 2707 } 2708 2709 /* 2710 * Check that each segment length is greater or equal than 2711 * the mbuf data sise. 2712 * Check also that the total packet length is greater or equal than the 2713 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 2714 * 20 + 8). 2715 */ 2716 tx_pkt_len = 0; 2717 for (i = 0; i < nb_segs; i++) { 2718 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2719 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2720 i, seg_lengths[i], (unsigned) mbuf_data_size); 2721 return; 2722 } 2723 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2724 } 2725 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 2726 printf("total packet length=%u < %d - give up\n", 2727 (unsigned) tx_pkt_len, 2728 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 2729 return; 2730 } 2731 2732 for (i = 0; i < nb_segs; i++) 2733 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2734 2735 tx_pkt_length = tx_pkt_len; 2736 tx_pkt_nb_segs = (uint8_t) nb_segs; 2737 } 2738 2739 void 2740 setup_gro(const char *onoff, portid_t port_id) 2741 { 2742 if (!rte_eth_dev_is_valid_port(port_id)) { 2743 printf("invalid port id %u\n", port_id); 2744 return; 2745 } 2746 if (test_done == 0) { 2747 printf("Before enable/disable GRO," 2748 " please stop forwarding first\n"); 2749 return; 2750 } 2751 if (strcmp(onoff, "on") == 0) { 2752 if (gro_ports[port_id].enable != 0) { 2753 printf("Port %u has enabled GRO. Please" 2754 " disable GRO first\n", port_id); 2755 return; 2756 } 2757 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2758 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2759 gro_ports[port_id].param.max_flow_num = 2760 GRO_DEFAULT_FLOW_NUM; 2761 gro_ports[port_id].param.max_item_per_flow = 2762 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2763 } 2764 gro_ports[port_id].enable = 1; 2765 } else { 2766 if (gro_ports[port_id].enable == 0) { 2767 printf("Port %u has disabled GRO\n", port_id); 2768 return; 2769 } 2770 gro_ports[port_id].enable = 0; 2771 } 2772 } 2773 2774 void 2775 setup_gro_flush_cycles(uint8_t cycles) 2776 { 2777 if (test_done == 0) { 2778 printf("Before change flush interval for GRO," 2779 " please stop forwarding first.\n"); 2780 return; 2781 } 2782 2783 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2784 GRO_DEFAULT_FLUSH_CYCLES) { 2785 printf("The flushing cycle be in the range" 2786 " of 1 to %u. Revert to the default" 2787 " value %u.\n", 2788 GRO_MAX_FLUSH_CYCLES, 2789 GRO_DEFAULT_FLUSH_CYCLES); 2790 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2791 } 2792 2793 gro_flush_cycles = cycles; 2794 } 2795 2796 void 2797 show_gro(portid_t port_id) 2798 { 2799 struct rte_gro_param *param; 2800 uint32_t max_pkts_num; 2801 2802 param = &gro_ports[port_id].param; 2803 2804 if (!rte_eth_dev_is_valid_port(port_id)) { 2805 printf("Invalid port id %u.\n", port_id); 2806 return; 2807 } 2808 if (gro_ports[port_id].enable) { 2809 printf("GRO type: TCP/IPv4\n"); 2810 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2811 max_pkts_num = param->max_flow_num * 2812 param->max_item_per_flow; 2813 } else 2814 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2815 printf("Max number of packets to perform GRO: %u\n", 2816 max_pkts_num); 2817 printf("Flushing cycles: %u\n", gro_flush_cycles); 2818 } else 2819 printf("Port %u doesn't enable GRO.\n", port_id); 2820 } 2821 2822 void 2823 setup_gso(const char *mode, portid_t port_id) 2824 { 2825 if (!rte_eth_dev_is_valid_port(port_id)) { 2826 printf("invalid port id %u\n", port_id); 2827 return; 2828 } 2829 if (strcmp(mode, "on") == 0) { 2830 if (test_done == 0) { 2831 printf("before enabling GSO," 2832 " please stop forwarding first\n"); 2833 return; 2834 } 2835 gso_ports[port_id].enable = 1; 2836 } else if (strcmp(mode, "off") == 0) { 2837 if (test_done == 0) { 2838 printf("before disabling GSO," 2839 " please stop forwarding first\n"); 2840 return; 2841 } 2842 gso_ports[port_id].enable = 0; 2843 } 2844 } 2845 2846 char* 2847 list_pkt_forwarding_modes(void) 2848 { 2849 static char fwd_modes[128] = ""; 2850 const char *separator = "|"; 2851 struct fwd_engine *fwd_eng; 2852 unsigned i = 0; 2853 2854 if (strlen (fwd_modes) == 0) { 2855 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2856 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2857 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2858 strncat(fwd_modes, separator, 2859 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2860 } 2861 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2862 } 2863 2864 return fwd_modes; 2865 } 2866 2867 char* 2868 list_pkt_forwarding_retry_modes(void) 2869 { 2870 static char fwd_modes[128] = ""; 2871 const char *separator = "|"; 2872 struct fwd_engine *fwd_eng; 2873 unsigned i = 0; 2874 2875 if (strlen(fwd_modes) == 0) { 2876 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2877 if (fwd_eng == &rx_only_engine) 2878 continue; 2879 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2880 sizeof(fwd_modes) - 2881 strlen(fwd_modes) - 1); 2882 strncat(fwd_modes, separator, 2883 sizeof(fwd_modes) - 2884 strlen(fwd_modes) - 1); 2885 } 2886 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2887 } 2888 2889 return fwd_modes; 2890 } 2891 2892 void 2893 set_pkt_forwarding_mode(const char *fwd_mode_name) 2894 { 2895 struct fwd_engine *fwd_eng; 2896 unsigned i; 2897 2898 i = 0; 2899 while ((fwd_eng = fwd_engines[i]) != NULL) { 2900 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2901 printf("Set %s packet forwarding mode%s\n", 2902 fwd_mode_name, 2903 retry_enabled == 0 ? "" : " with retry"); 2904 cur_fwd_eng = fwd_eng; 2905 return; 2906 } 2907 i++; 2908 } 2909 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2910 } 2911 2912 void 2913 add_rx_dump_callbacks(portid_t portid) 2914 { 2915 struct rte_eth_dev_info dev_info; 2916 uint16_t queue; 2917 int ret; 2918 2919 if (port_id_is_invalid(portid, ENABLED_WARN)) 2920 return; 2921 2922 ret = eth_dev_info_get_print_err(portid, &dev_info); 2923 if (ret != 0) 2924 return; 2925 2926 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2927 if (!ports[portid].rx_dump_cb[queue]) 2928 ports[portid].rx_dump_cb[queue] = 2929 rte_eth_add_rx_callback(portid, queue, 2930 dump_rx_pkts, NULL); 2931 } 2932 2933 void 2934 add_tx_dump_callbacks(portid_t portid) 2935 { 2936 struct rte_eth_dev_info dev_info; 2937 uint16_t queue; 2938 int ret; 2939 2940 if (port_id_is_invalid(portid, ENABLED_WARN)) 2941 return; 2942 2943 ret = eth_dev_info_get_print_err(portid, &dev_info); 2944 if (ret != 0) 2945 return; 2946 2947 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2948 if (!ports[portid].tx_dump_cb[queue]) 2949 ports[portid].tx_dump_cb[queue] = 2950 rte_eth_add_tx_callback(portid, queue, 2951 dump_tx_pkts, NULL); 2952 } 2953 2954 void 2955 remove_rx_dump_callbacks(portid_t portid) 2956 { 2957 struct rte_eth_dev_info dev_info; 2958 uint16_t queue; 2959 int ret; 2960 2961 if (port_id_is_invalid(portid, ENABLED_WARN)) 2962 return; 2963 2964 ret = eth_dev_info_get_print_err(portid, &dev_info); 2965 if (ret != 0) 2966 return; 2967 2968 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2969 if (ports[portid].rx_dump_cb[queue]) { 2970 rte_eth_remove_rx_callback(portid, queue, 2971 ports[portid].rx_dump_cb[queue]); 2972 ports[portid].rx_dump_cb[queue] = NULL; 2973 } 2974 } 2975 2976 void 2977 remove_tx_dump_callbacks(portid_t portid) 2978 { 2979 struct rte_eth_dev_info dev_info; 2980 uint16_t queue; 2981 int ret; 2982 2983 if (port_id_is_invalid(portid, ENABLED_WARN)) 2984 return; 2985 2986 ret = eth_dev_info_get_print_err(portid, &dev_info); 2987 if (ret != 0) 2988 return; 2989 2990 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2991 if (ports[portid].tx_dump_cb[queue]) { 2992 rte_eth_remove_tx_callback(portid, queue, 2993 ports[portid].tx_dump_cb[queue]); 2994 ports[portid].tx_dump_cb[queue] = NULL; 2995 } 2996 } 2997 2998 void 2999 configure_rxtx_dump_callbacks(uint16_t verbose) 3000 { 3001 portid_t portid; 3002 3003 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3004 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 3005 return; 3006 #endif 3007 3008 RTE_ETH_FOREACH_DEV(portid) 3009 { 3010 if (verbose == 1 || verbose > 2) 3011 add_rx_dump_callbacks(portid); 3012 else 3013 remove_rx_dump_callbacks(portid); 3014 if (verbose >= 2) 3015 add_tx_dump_callbacks(portid); 3016 else 3017 remove_tx_dump_callbacks(portid); 3018 } 3019 } 3020 3021 void 3022 set_verbose_level(uint16_t vb_level) 3023 { 3024 printf("Change verbose level from %u to %u\n", 3025 (unsigned int) verbose_level, (unsigned int) vb_level); 3026 verbose_level = vb_level; 3027 configure_rxtx_dump_callbacks(verbose_level); 3028 } 3029 3030 void 3031 vlan_extend_set(portid_t port_id, int on) 3032 { 3033 int diag; 3034 int vlan_offload; 3035 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3036 3037 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3038 return; 3039 3040 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3041 3042 if (on) { 3043 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 3044 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3045 } else { 3046 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 3047 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3048 } 3049 3050 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3051 if (diag < 0) 3052 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 3053 "diag=%d\n", port_id, on, diag); 3054 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3055 } 3056 3057 void 3058 rx_vlan_strip_set(portid_t port_id, int on) 3059 { 3060 int diag; 3061 int vlan_offload; 3062 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3063 3064 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3065 return; 3066 3067 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3068 3069 if (on) { 3070 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 3071 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3072 } else { 3073 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 3074 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3075 } 3076 3077 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3078 if (diag < 0) 3079 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 3080 "diag=%d\n", port_id, on, diag); 3081 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3082 } 3083 3084 void 3085 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 3086 { 3087 int diag; 3088 3089 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3090 return; 3091 3092 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 3093 if (diag < 0) 3094 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 3095 "diag=%d\n", port_id, queue_id, on, diag); 3096 } 3097 3098 void 3099 rx_vlan_filter_set(portid_t port_id, int on) 3100 { 3101 int diag; 3102 int vlan_offload; 3103 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3104 3105 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3106 return; 3107 3108 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3109 3110 if (on) { 3111 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 3112 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3113 } else { 3114 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 3115 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3116 } 3117 3118 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3119 if (diag < 0) 3120 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 3121 "diag=%d\n", port_id, on, diag); 3122 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3123 } 3124 3125 void 3126 rx_vlan_qinq_strip_set(portid_t port_id, int on) 3127 { 3128 int diag; 3129 int vlan_offload; 3130 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3131 3132 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3133 return; 3134 3135 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3136 3137 if (on) { 3138 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 3139 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3140 } else { 3141 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 3142 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3143 } 3144 3145 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3146 if (diag < 0) 3147 printf("%s(port_pi=%d, on=%d) failed " 3148 "diag=%d\n", __func__, port_id, on, diag); 3149 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3150 } 3151 3152 int 3153 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 3154 { 3155 int diag; 3156 3157 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3158 return 1; 3159 if (vlan_id_is_invalid(vlan_id)) 3160 return 1; 3161 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 3162 if (diag == 0) 3163 return 0; 3164 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 3165 "diag=%d\n", 3166 port_id, vlan_id, on, diag); 3167 return -1; 3168 } 3169 3170 void 3171 rx_vlan_all_filter_set(portid_t port_id, int on) 3172 { 3173 uint16_t vlan_id; 3174 3175 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3176 return; 3177 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 3178 if (rx_vft_set(port_id, vlan_id, on)) 3179 break; 3180 } 3181 } 3182 3183 void 3184 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 3185 { 3186 int diag; 3187 3188 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3189 return; 3190 3191 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 3192 if (diag == 0) 3193 return; 3194 3195 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 3196 "diag=%d\n", 3197 port_id, vlan_type, tp_id, diag); 3198 } 3199 3200 void 3201 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3202 { 3203 struct rte_eth_dev_info dev_info; 3204 int ret; 3205 3206 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3207 return; 3208 if (vlan_id_is_invalid(vlan_id)) 3209 return; 3210 3211 if (ports[port_id].dev_conf.txmode.offloads & 3212 DEV_TX_OFFLOAD_QINQ_INSERT) { 3213 printf("Error, as QinQ has been enabled.\n"); 3214 return; 3215 } 3216 3217 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3218 if (ret != 0) 3219 return; 3220 3221 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3222 printf("Error: vlan insert is not supported by port %d\n", 3223 port_id); 3224 return; 3225 } 3226 3227 tx_vlan_reset(port_id); 3228 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3229 ports[port_id].tx_vlan_id = vlan_id; 3230 } 3231 3232 void 3233 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3234 { 3235 struct rte_eth_dev_info dev_info; 3236 int ret; 3237 3238 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3239 return; 3240 if (vlan_id_is_invalid(vlan_id)) 3241 return; 3242 if (vlan_id_is_invalid(vlan_id_outer)) 3243 return; 3244 3245 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3246 if (ret != 0) 3247 return; 3248 3249 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3250 printf("Error: qinq insert not supported by port %d\n", 3251 port_id); 3252 return; 3253 } 3254 3255 tx_vlan_reset(port_id); 3256 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 3257 DEV_TX_OFFLOAD_QINQ_INSERT); 3258 ports[port_id].tx_vlan_id = vlan_id; 3259 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3260 } 3261 3262 void 3263 tx_vlan_reset(portid_t port_id) 3264 { 3265 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3266 return; 3267 ports[port_id].dev_conf.txmode.offloads &= 3268 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3269 DEV_TX_OFFLOAD_QINQ_INSERT); 3270 ports[port_id].tx_vlan_id = 0; 3271 ports[port_id].tx_vlan_id_outer = 0; 3272 } 3273 3274 void 3275 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3276 { 3277 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3278 return; 3279 3280 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3281 } 3282 3283 void 3284 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3285 { 3286 uint16_t i; 3287 uint8_t existing_mapping_found = 0; 3288 3289 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3290 return; 3291 3292 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3293 return; 3294 3295 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3296 printf("map_value not in required range 0..%d\n", 3297 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3298 return; 3299 } 3300 3301 if (!is_rx) { /*then tx*/ 3302 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3303 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3304 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3305 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3306 existing_mapping_found = 1; 3307 break; 3308 } 3309 } 3310 if (!existing_mapping_found) { /* A new additional mapping... */ 3311 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3312 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3313 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3314 nb_tx_queue_stats_mappings++; 3315 } 3316 } 3317 else { /*rx*/ 3318 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3319 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3320 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3321 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3322 existing_mapping_found = 1; 3323 break; 3324 } 3325 } 3326 if (!existing_mapping_found) { /* A new additional mapping... */ 3327 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3328 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3329 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3330 nb_rx_queue_stats_mappings++; 3331 } 3332 } 3333 } 3334 3335 void 3336 set_xstats_hide_zero(uint8_t on_off) 3337 { 3338 xstats_hide_zero = on_off; 3339 } 3340 3341 static inline void 3342 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3343 { 3344 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3345 3346 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3347 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3348 " tunnel_id: 0x%08x", 3349 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3350 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3351 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3352 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3353 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3354 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3355 3356 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3357 rte_be_to_cpu_16(mask->src_port_mask), 3358 rte_be_to_cpu_16(mask->dst_port_mask)); 3359 3360 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3361 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3362 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3363 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3364 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3365 3366 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3367 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3368 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3369 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3370 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3371 } 3372 3373 printf("\n"); 3374 } 3375 3376 static inline void 3377 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3378 { 3379 struct rte_eth_flex_payload_cfg *cfg; 3380 uint32_t i, j; 3381 3382 for (i = 0; i < flex_conf->nb_payloads; i++) { 3383 cfg = &flex_conf->flex_set[i]; 3384 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3385 printf("\n RAW: "); 3386 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3387 printf("\n L2_PAYLOAD: "); 3388 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3389 printf("\n L3_PAYLOAD: "); 3390 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3391 printf("\n L4_PAYLOAD: "); 3392 else 3393 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3394 for (j = 0; j < num; j++) 3395 printf(" %-5u", cfg->src_offset[j]); 3396 } 3397 printf("\n"); 3398 } 3399 3400 static char * 3401 flowtype_to_str(uint16_t flow_type) 3402 { 3403 struct flow_type_info { 3404 char str[32]; 3405 uint16_t ftype; 3406 }; 3407 3408 uint8_t i; 3409 static struct flow_type_info flowtype_str_table[] = { 3410 {"raw", RTE_ETH_FLOW_RAW}, 3411 {"ipv4", RTE_ETH_FLOW_IPV4}, 3412 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3413 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3414 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3415 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3416 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3417 {"ipv6", RTE_ETH_FLOW_IPV6}, 3418 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3419 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3420 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3421 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3422 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3423 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3424 {"port", RTE_ETH_FLOW_PORT}, 3425 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3426 {"geneve", RTE_ETH_FLOW_GENEVE}, 3427 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3428 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3429 }; 3430 3431 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3432 if (flowtype_str_table[i].ftype == flow_type) 3433 return flowtype_str_table[i].str; 3434 } 3435 3436 return NULL; 3437 } 3438 3439 static inline void 3440 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3441 { 3442 struct rte_eth_fdir_flex_mask *mask; 3443 uint32_t i, j; 3444 char *p; 3445 3446 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3447 mask = &flex_conf->flex_mask[i]; 3448 p = flowtype_to_str(mask->flow_type); 3449 printf("\n %s:\t", p ? p : "unknown"); 3450 for (j = 0; j < num; j++) 3451 printf(" %02x", mask->mask[j]); 3452 } 3453 printf("\n"); 3454 } 3455 3456 static inline void 3457 print_fdir_flow_type(uint32_t flow_types_mask) 3458 { 3459 int i; 3460 char *p; 3461 3462 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3463 if (!(flow_types_mask & (1 << i))) 3464 continue; 3465 p = flowtype_to_str(i); 3466 if (p) 3467 printf(" %s", p); 3468 else 3469 printf(" unknown"); 3470 } 3471 printf("\n"); 3472 } 3473 3474 void 3475 fdir_get_infos(portid_t port_id) 3476 { 3477 struct rte_eth_fdir_stats fdir_stat; 3478 struct rte_eth_fdir_info fdir_info; 3479 int ret; 3480 3481 static const char *fdir_stats_border = "########################"; 3482 3483 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3484 return; 3485 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3486 if (ret < 0) { 3487 printf("\n FDIR is not supported on port %-2d\n", 3488 port_id); 3489 return; 3490 } 3491 3492 memset(&fdir_info, 0, sizeof(fdir_info)); 3493 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3494 RTE_ETH_FILTER_INFO, &fdir_info); 3495 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3496 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3497 RTE_ETH_FILTER_STATS, &fdir_stat); 3498 printf("\n %s FDIR infos for port %-2d %s\n", 3499 fdir_stats_border, port_id, fdir_stats_border); 3500 printf(" MODE: "); 3501 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3502 printf(" PERFECT\n"); 3503 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3504 printf(" PERFECT-MAC-VLAN\n"); 3505 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3506 printf(" PERFECT-TUNNEL\n"); 3507 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3508 printf(" SIGNATURE\n"); 3509 else 3510 printf(" DISABLE\n"); 3511 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3512 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3513 printf(" SUPPORTED FLOW TYPE: "); 3514 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3515 } 3516 printf(" FLEX PAYLOAD INFO:\n"); 3517 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3518 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3519 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3520 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3521 fdir_info.flex_payload_unit, 3522 fdir_info.max_flex_payload_segment_num, 3523 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3524 printf(" MASK: "); 3525 print_fdir_mask(&fdir_info.mask); 3526 if (fdir_info.flex_conf.nb_payloads > 0) { 3527 printf(" FLEX PAYLOAD SRC OFFSET:"); 3528 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3529 } 3530 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3531 printf(" FLEX MASK CFG:"); 3532 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3533 } 3534 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3535 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3536 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3537 fdir_info.guarant_spc, fdir_info.best_spc); 3538 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3539 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3540 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3541 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3542 fdir_stat.collision, fdir_stat.free, 3543 fdir_stat.maxhash, fdir_stat.maxlen, 3544 fdir_stat.add, fdir_stat.remove, 3545 fdir_stat.f_add, fdir_stat.f_remove); 3546 printf(" %s############################%s\n", 3547 fdir_stats_border, fdir_stats_border); 3548 } 3549 3550 void 3551 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3552 { 3553 struct rte_port *port; 3554 struct rte_eth_fdir_flex_conf *flex_conf; 3555 int i, idx = 0; 3556 3557 port = &ports[port_id]; 3558 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3559 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3560 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3561 idx = i; 3562 break; 3563 } 3564 } 3565 if (i >= RTE_ETH_FLOW_MAX) { 3566 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3567 idx = flex_conf->nb_flexmasks; 3568 flex_conf->nb_flexmasks++; 3569 } else { 3570 printf("The flex mask table is full. Can not set flex" 3571 " mask for flow_type(%u).", cfg->flow_type); 3572 return; 3573 } 3574 } 3575 rte_memcpy(&flex_conf->flex_mask[idx], 3576 cfg, 3577 sizeof(struct rte_eth_fdir_flex_mask)); 3578 } 3579 3580 void 3581 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3582 { 3583 struct rte_port *port; 3584 struct rte_eth_fdir_flex_conf *flex_conf; 3585 int i, idx = 0; 3586 3587 port = &ports[port_id]; 3588 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3589 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3590 if (cfg->type == flex_conf->flex_set[i].type) { 3591 idx = i; 3592 break; 3593 } 3594 } 3595 if (i >= RTE_ETH_PAYLOAD_MAX) { 3596 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3597 idx = flex_conf->nb_payloads; 3598 flex_conf->nb_payloads++; 3599 } else { 3600 printf("The flex payload table is full. Can not set" 3601 " flex payload for type(%u).", cfg->type); 3602 return; 3603 } 3604 } 3605 rte_memcpy(&flex_conf->flex_set[idx], 3606 cfg, 3607 sizeof(struct rte_eth_flex_payload_cfg)); 3608 3609 } 3610 3611 void 3612 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3613 { 3614 #ifdef RTE_LIBRTE_IXGBE_PMD 3615 int diag; 3616 3617 if (is_rx) 3618 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3619 else 3620 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3621 3622 if (diag == 0) 3623 return; 3624 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3625 is_rx ? "rx" : "tx", port_id, diag); 3626 return; 3627 #endif 3628 printf("VF %s setting not supported for port %d\n", 3629 is_rx ? "Rx" : "Tx", port_id); 3630 RTE_SET_USED(vf); 3631 RTE_SET_USED(on); 3632 } 3633 3634 int 3635 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3636 { 3637 int diag; 3638 struct rte_eth_link link; 3639 int ret; 3640 3641 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3642 return 1; 3643 ret = eth_link_get_nowait_print_err(port_id, &link); 3644 if (ret < 0) 3645 return 1; 3646 if (rate > link.link_speed) { 3647 printf("Invalid rate value:%u bigger than link speed: %u\n", 3648 rate, link.link_speed); 3649 return 1; 3650 } 3651 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3652 if (diag == 0) 3653 return diag; 3654 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3655 port_id, diag); 3656 return diag; 3657 } 3658 3659 int 3660 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3661 { 3662 int diag = -ENOTSUP; 3663 3664 RTE_SET_USED(vf); 3665 RTE_SET_USED(rate); 3666 RTE_SET_USED(q_msk); 3667 3668 #ifdef RTE_LIBRTE_IXGBE_PMD 3669 if (diag == -ENOTSUP) 3670 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3671 q_msk); 3672 #endif 3673 #ifdef RTE_LIBRTE_BNXT_PMD 3674 if (diag == -ENOTSUP) 3675 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3676 #endif 3677 if (diag == 0) 3678 return diag; 3679 3680 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3681 port_id, diag); 3682 return diag; 3683 } 3684 3685 /* 3686 * Functions to manage the set of filtered Multicast MAC addresses. 3687 * 3688 * A pool of filtered multicast MAC addresses is associated with each port. 3689 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3690 * The address of the pool and the number of valid multicast MAC addresses 3691 * recorded in the pool are stored in the fields "mc_addr_pool" and 3692 * "mc_addr_nb" of the "rte_port" data structure. 3693 * 3694 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3695 * to be supplied a contiguous array of multicast MAC addresses. 3696 * To comply with this constraint, the set of multicast addresses recorded 3697 * into the pool are systematically compacted at the beginning of the pool. 3698 * Hence, when a multicast address is removed from the pool, all following 3699 * addresses, if any, are copied back to keep the set contiguous. 3700 */ 3701 #define MCAST_POOL_INC 32 3702 3703 static int 3704 mcast_addr_pool_extend(struct rte_port *port) 3705 { 3706 struct rte_ether_addr *mc_pool; 3707 size_t mc_pool_size; 3708 3709 /* 3710 * If a free entry is available at the end of the pool, just 3711 * increment the number of recorded multicast addresses. 3712 */ 3713 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3714 port->mc_addr_nb++; 3715 return 0; 3716 } 3717 3718 /* 3719 * [re]allocate a pool with MCAST_POOL_INC more entries. 3720 * The previous test guarantees that port->mc_addr_nb is a multiple 3721 * of MCAST_POOL_INC. 3722 */ 3723 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 3724 MCAST_POOL_INC); 3725 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 3726 mc_pool_size); 3727 if (mc_pool == NULL) { 3728 printf("allocation of pool of %u multicast addresses failed\n", 3729 port->mc_addr_nb + MCAST_POOL_INC); 3730 return -ENOMEM; 3731 } 3732 3733 port->mc_addr_pool = mc_pool; 3734 port->mc_addr_nb++; 3735 return 0; 3736 3737 } 3738 3739 static void 3740 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 3741 { 3742 if (mcast_addr_pool_extend(port) != 0) 3743 return; 3744 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 3745 } 3746 3747 static void 3748 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3749 { 3750 port->mc_addr_nb--; 3751 if (addr_idx == port->mc_addr_nb) { 3752 /* No need to recompact the set of multicast addressses. */ 3753 if (port->mc_addr_nb == 0) { 3754 /* free the pool of multicast addresses. */ 3755 free(port->mc_addr_pool); 3756 port->mc_addr_pool = NULL; 3757 } 3758 return; 3759 } 3760 memmove(&port->mc_addr_pool[addr_idx], 3761 &port->mc_addr_pool[addr_idx + 1], 3762 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 3763 } 3764 3765 static int 3766 eth_port_multicast_addr_list_set(portid_t port_id) 3767 { 3768 struct rte_port *port; 3769 int diag; 3770 3771 port = &ports[port_id]; 3772 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3773 port->mc_addr_nb); 3774 if (diag < 0) 3775 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3776 port_id, port->mc_addr_nb, diag); 3777 3778 return diag; 3779 } 3780 3781 void 3782 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 3783 { 3784 struct rte_port *port; 3785 uint32_t i; 3786 3787 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3788 return; 3789 3790 port = &ports[port_id]; 3791 3792 /* 3793 * Check that the added multicast MAC address is not already recorded 3794 * in the pool of multicast addresses. 3795 */ 3796 for (i = 0; i < port->mc_addr_nb; i++) { 3797 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3798 printf("multicast address already filtered by port\n"); 3799 return; 3800 } 3801 } 3802 3803 mcast_addr_pool_append(port, mc_addr); 3804 if (eth_port_multicast_addr_list_set(port_id) < 0) 3805 /* Rollback on failure, remove the address from the pool */ 3806 mcast_addr_pool_remove(port, i); 3807 } 3808 3809 void 3810 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 3811 { 3812 struct rte_port *port; 3813 uint32_t i; 3814 3815 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3816 return; 3817 3818 port = &ports[port_id]; 3819 3820 /* 3821 * Search the pool of multicast MAC addresses for the removed address. 3822 */ 3823 for (i = 0; i < port->mc_addr_nb; i++) { 3824 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3825 break; 3826 } 3827 if (i == port->mc_addr_nb) { 3828 printf("multicast address not filtered by port %d\n", port_id); 3829 return; 3830 } 3831 3832 mcast_addr_pool_remove(port, i); 3833 if (eth_port_multicast_addr_list_set(port_id) < 0) 3834 /* Rollback on failure, add the address back into the pool */ 3835 mcast_addr_pool_append(port, mc_addr); 3836 } 3837 3838 void 3839 port_dcb_info_display(portid_t port_id) 3840 { 3841 struct rte_eth_dcb_info dcb_info; 3842 uint16_t i; 3843 int ret; 3844 static const char *border = "================"; 3845 3846 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3847 return; 3848 3849 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3850 if (ret) { 3851 printf("\n Failed to get dcb infos on port %-2d\n", 3852 port_id); 3853 return; 3854 } 3855 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3856 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3857 printf("\n TC : "); 3858 for (i = 0; i < dcb_info.nb_tcs; i++) 3859 printf("\t%4d", i); 3860 printf("\n Priority : "); 3861 for (i = 0; i < dcb_info.nb_tcs; i++) 3862 printf("\t%4d", dcb_info.prio_tc[i]); 3863 printf("\n BW percent :"); 3864 for (i = 0; i < dcb_info.nb_tcs; i++) 3865 printf("\t%4d%%", dcb_info.tc_bws[i]); 3866 printf("\n RXQ base : "); 3867 for (i = 0; i < dcb_info.nb_tcs; i++) 3868 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3869 printf("\n RXQ number :"); 3870 for (i = 0; i < dcb_info.nb_tcs; i++) 3871 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3872 printf("\n TXQ base : "); 3873 for (i = 0; i < dcb_info.nb_tcs; i++) 3874 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3875 printf("\n TXQ number :"); 3876 for (i = 0; i < dcb_info.nb_tcs; i++) 3877 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3878 printf("\n"); 3879 } 3880 3881 uint8_t * 3882 open_file(const char *file_path, uint32_t *size) 3883 { 3884 int fd = open(file_path, O_RDONLY); 3885 off_t pkg_size; 3886 uint8_t *buf = NULL; 3887 int ret = 0; 3888 struct stat st_buf; 3889 3890 if (size) 3891 *size = 0; 3892 3893 if (fd == -1) { 3894 printf("%s: Failed to open %s\n", __func__, file_path); 3895 return buf; 3896 } 3897 3898 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3899 close(fd); 3900 printf("%s: File operations failed\n", __func__); 3901 return buf; 3902 } 3903 3904 pkg_size = st_buf.st_size; 3905 if (pkg_size < 0) { 3906 close(fd); 3907 printf("%s: File operations failed\n", __func__); 3908 return buf; 3909 } 3910 3911 buf = (uint8_t *)malloc(pkg_size); 3912 if (!buf) { 3913 close(fd); 3914 printf("%s: Failed to malloc memory\n", __func__); 3915 return buf; 3916 } 3917 3918 ret = read(fd, buf, pkg_size); 3919 if (ret < 0) { 3920 close(fd); 3921 printf("%s: File read operation failed\n", __func__); 3922 close_file(buf); 3923 return NULL; 3924 } 3925 3926 if (size) 3927 *size = pkg_size; 3928 3929 close(fd); 3930 3931 return buf; 3932 } 3933 3934 int 3935 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3936 { 3937 FILE *fh = fopen(file_path, "wb"); 3938 3939 if (fh == NULL) { 3940 printf("%s: Failed to open %s\n", __func__, file_path); 3941 return -1; 3942 } 3943 3944 if (fwrite(buf, 1, size, fh) != size) { 3945 fclose(fh); 3946 printf("%s: File write operation failed\n", __func__); 3947 return -1; 3948 } 3949 3950 fclose(fh); 3951 3952 return 0; 3953 } 3954 3955 int 3956 close_file(uint8_t *buf) 3957 { 3958 if (buf) { 3959 free((void *)buf); 3960 return 0; 3961 } 3962 3963 return -1; 3964 } 3965 3966 void 3967 port_queue_region_info_display(portid_t port_id, void *buf) 3968 { 3969 #ifdef RTE_LIBRTE_I40E_PMD 3970 uint16_t i, j; 3971 struct rte_pmd_i40e_queue_regions *info = 3972 (struct rte_pmd_i40e_queue_regions *)buf; 3973 static const char *queue_region_info_stats_border = "-------"; 3974 3975 if (!info->queue_region_number) 3976 printf("there is no region has been set before"); 3977 3978 printf("\n %s All queue region info for port=%2d %s", 3979 queue_region_info_stats_border, port_id, 3980 queue_region_info_stats_border); 3981 printf("\n queue_region_number: %-14u \n", 3982 info->queue_region_number); 3983 3984 for (i = 0; i < info->queue_region_number; i++) { 3985 printf("\n region_id: %-14u queue_number: %-14u " 3986 "queue_start_index: %-14u \n", 3987 info->region[i].region_id, 3988 info->region[i].queue_num, 3989 info->region[i].queue_start_index); 3990 3991 printf(" user_priority_num is %-14u :", 3992 info->region[i].user_priority_num); 3993 for (j = 0; j < info->region[i].user_priority_num; j++) 3994 printf(" %-14u ", info->region[i].user_priority[j]); 3995 3996 printf("\n flowtype_num is %-14u :", 3997 info->region[i].flowtype_num); 3998 for (j = 0; j < info->region[i].flowtype_num; j++) 3999 printf(" %-14u ", info->region[i].hw_flowtype[j]); 4000 } 4001 #else 4002 RTE_SET_USED(port_id); 4003 RTE_SET_USED(buf); 4004 #endif 4005 4006 printf("\n\n"); 4007 } 4008 4009 void 4010 show_macs(portid_t port_id) 4011 { 4012 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 4013 struct rte_eth_dev_info dev_info; 4014 struct rte_ether_addr *addr; 4015 uint32_t i, num_macs = 0; 4016 struct rte_eth_dev *dev; 4017 4018 dev = &rte_eth_devices[port_id]; 4019 4020 rte_eth_dev_info_get(port_id, &dev_info); 4021 4022 for (i = 0; i < dev_info.max_mac_addrs; i++) { 4023 addr = &dev->data->mac_addrs[i]; 4024 4025 /* skip zero address */ 4026 if (rte_is_zero_ether_addr(addr)) 4027 continue; 4028 4029 num_macs++; 4030 } 4031 4032 printf("Number of MAC address added: %d\n", num_macs); 4033 4034 for (i = 0; i < dev_info.max_mac_addrs; i++) { 4035 addr = &dev->data->mac_addrs[i]; 4036 4037 /* skip zero address */ 4038 if (rte_is_zero_ether_addr(addr)) 4039 continue; 4040 4041 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 4042 printf(" %s\n", buf); 4043 } 4044 } 4045 4046 void 4047 show_mcast_macs(portid_t port_id) 4048 { 4049 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 4050 struct rte_ether_addr *addr; 4051 struct rte_port *port; 4052 uint32_t i; 4053 4054 port = &ports[port_id]; 4055 4056 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 4057 4058 for (i = 0; i < port->mc_addr_nb; i++) { 4059 addr = &port->mc_addr_pool[i]; 4060 4061 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 4062 printf(" %s\n", buf); 4063 } 4064 } 4065