/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>

#include "testpmd.h"

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "all", ETH_RSS_IP | ETH_RSS_TCP |
		ETH_RSS_UDP | ETH_RSS_SCTP |
		ETH_RSS_L2_PAYLOAD },
	{ "none", 0 },
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
	{ NULL, 0 },
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx,
		diff_bytes_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}
	else {
		printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i],
			       stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_cycles > 0 ?
		diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mbps_tx = diff_cycles > 0 ?
		diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		/* rte_eth_xstats_reset() returns a negative errno value. */
		printf("%s: Error: failed to reset xstats (port %u): %s\n",
		       __func__, port_id, strerror(-ret));
	}
}
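
/*
 * Usage sketch (illustrative, not part of the original flow): the xstats
 * helpers above back the interactive commands
 *	show port xstats 0	-> nic_xstats_display(0)
 *	clear port xstats 0	-> nic_xstats_clear(0)
 * and nic_xstats_display() skips zero-valued counters once
 * "set xstats-hide-zero on" has been issued at the testpmd prompt.
 */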

void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf(" RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf(" TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf(" %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "RX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "TX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
433 " (per queue)" : ""); 434 435 printf("\n"); 436 } 437 438 static int bus_match_all(const struct rte_bus *bus, const void *data) 439 { 440 RTE_SET_USED(bus); 441 RTE_SET_USED(data); 442 return 0; 443 } 444 445 void 446 device_infos_display(const char *identifier) 447 { 448 static const char *info_border = "*********************"; 449 struct rte_bus *start = NULL, *next; 450 struct rte_dev_iterator dev_iter; 451 char name[RTE_ETH_NAME_MAX_LEN]; 452 struct rte_ether_addr mac_addr; 453 struct rte_device *dev; 454 struct rte_devargs da; 455 portid_t port_id; 456 char devstr[128]; 457 458 memset(&da, 0, sizeof(da)); 459 if (!identifier) 460 goto skip_parse; 461 462 if (rte_devargs_parsef(&da, "%s", identifier)) { 463 printf("cannot parse identifier\n"); 464 if (da.args) 465 free(da.args); 466 return; 467 } 468 469 skip_parse: 470 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 471 472 start = next; 473 if (identifier && da.bus != next) 474 continue; 475 476 /* Skip buses that don't have iterate method */ 477 if (!next->dev_iterate) 478 continue; 479 480 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 481 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 482 483 if (!dev->driver) 484 continue; 485 /* Check for matching device if identifier is present */ 486 if (identifier && 487 strncmp(da.name, dev->name, strlen(dev->name))) 488 continue; 489 printf("\n%s Infos for device %s %s\n", 490 info_border, dev->name, info_border); 491 printf("Bus name: %s", dev->bus->name); 492 printf("\nDriver name: %s", dev->driver->name); 493 printf("\nDevargs: %s", 494 dev->devargs ? dev->devargs->args : ""); 495 printf("\nConnect to socket: %d", dev->numa_node); 496 printf("\n"); 497 498 /* List ports with matching device name */ 499 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 500 printf("\n\tPort id: %-2d", port_id); 501 if (eth_macaddr_get_print_err(port_id, 502 &mac_addr) == 0) 503 print_ethaddr("\n\tMAC address: ", 504 &mac_addr); 505 rte_eth_dev_get_name_by_port(port_id, name); 506 printf("\n\tDevice name: %s", name); 507 printf("\n"); 508 } 509 } 510 }; 511 } 512 513 void 514 port_infos_display(portid_t port_id) 515 { 516 struct rte_port *port; 517 struct rte_ether_addr mac_addr; 518 struct rte_eth_link link; 519 struct rte_eth_dev_info dev_info; 520 int vlan_offload; 521 struct rte_mempool * mp; 522 static const char *info_border = "*********************"; 523 uint16_t mtu; 524 char name[RTE_ETH_NAME_MAX_LEN]; 525 int ret; 526 527 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 528 print_valid_ports(); 529 return; 530 } 531 port = &ports[port_id]; 532 ret = eth_link_get_nowait_print_err(port_id, &link); 533 if (ret < 0) 534 return; 535 536 ret = eth_dev_info_get_print_err(port_id, &dev_info); 537 if (ret != 0) 538 return; 539 540 printf("\n%s Infos for port %-2d %s\n", 541 info_border, port_id, info_border); 542 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 543 print_ethaddr("MAC address: ", &mac_addr); 544 rte_eth_dev_get_name_by_port(port_id, name); 545 printf("\nDevice name: %s", name); 546 printf("\nDriver name: %s", dev_info.driver_name); 547 if (dev_info.device->devargs && dev_info.device->devargs->args) 548 printf("\nDevargs: %s", dev_info.device->devargs->args); 549 printf("\nConnect to socket: %u", port->socket_id); 550 551 if (port_numa[port_id] != NUMA_NO_CONFIG) { 552 mp = mbuf_pool_find(port_numa[port_id]); 553 if (mp) 554 printf("\nmemory allocation on the socket: %d", 555 port_numa[port_id]); 556 } else 557 printf("\nmemory allocation on the socket: 
%u",port->socket_id); 558 559 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 560 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 561 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 562 ("full-duplex") : ("half-duplex")); 563 564 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 565 printf("MTU: %u\n", mtu); 566 567 printf("Promiscuous mode: %s\n", 568 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 569 printf("Allmulticast mode: %s\n", 570 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 571 printf("Maximum number of MAC addresses: %u\n", 572 (unsigned int)(port->dev_info.max_mac_addrs)); 573 printf("Maximum number of MAC addresses of hash filtering: %u\n", 574 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 575 576 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 577 if (vlan_offload >= 0){ 578 printf("VLAN offload: \n"); 579 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 580 printf(" strip on, "); 581 else 582 printf(" strip off, "); 583 584 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 585 printf("filter on, "); 586 else 587 printf("filter off, "); 588 589 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 590 printf("extend on, "); 591 else 592 printf("extend off, "); 593 594 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 595 printf("qinq strip on\n"); 596 else 597 printf("qinq strip off\n"); 598 } 599 600 if (dev_info.hash_key_size > 0) 601 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 602 if (dev_info.reta_size > 0) 603 printf("Redirection table size: %u\n", dev_info.reta_size); 604 if (!dev_info.flow_type_rss_offloads) 605 printf("No RSS offload flow type is supported.\n"); 606 else { 607 uint16_t i; 608 char *p; 609 610 printf("Supported RSS offload flow types:\n"); 611 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 612 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 613 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 614 continue; 615 p = flowtype_to_str(i); 616 if (p) 617 printf(" %s\n", p); 618 else 619 printf(" user defined %d\n", i); 620 } 621 } 622 623 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 624 printf("Maximum configurable length of RX packet: %u\n", 625 dev_info.max_rx_pktlen); 626 printf("Maximum configurable size of LRO aggregated packet: %u\n", 627 dev_info.max_lro_pkt_size); 628 if (dev_info.max_vfs) 629 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 630 if (dev_info.max_vmdq_pools) 631 printf("Maximum number of VMDq pools: %u\n", 632 dev_info.max_vmdq_pools); 633 634 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 635 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 636 printf("Max possible number of RXDs per queue: %hu\n", 637 dev_info.rx_desc_lim.nb_max); 638 printf("Min possible number of RXDs per queue: %hu\n", 639 dev_info.rx_desc_lim.nb_min); 640 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 641 642 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 643 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 644 printf("Max possible number of TXDs per queue: %hu\n", 645 dev_info.tx_desc_lim.nb_max); 646 printf("Min possible number of TXDs per queue: %hu\n", 647 dev_info.tx_desc_lim.nb_min); 648 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 649 printf("Max segment number per packet: %hu\n", 650 dev_info.tx_desc_lim.nb_seg_max); 651 printf("Max segment number per MTU/TSO: %hu\n", 652 dev_info.tx_desc_lim.nb_mtu_seg_max); 653 

void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
	       "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
	       port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
	       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
	       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
	       dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
	       (unsigned int) link.link_speed);
}

void
port_offload_cap_display(portid_t port_id)
{
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Port %d supported offload features: %s\n",
	       info_border, port_id, info_border);

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_QINQ_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
		printf("RX UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
		printf("RX TCP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
		printf("RX SCTP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("RX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
		printf("RX Outer UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
		printf("Large receive offload: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_LRO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
		printf("HW timestamp: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TIMESTAMP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
		printf("Rx Keep CRC: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_KEEP_CRC)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
		printf("RX offload security: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_SECURITY)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
		printf("VLAN insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VLAN_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Double VLANs insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_QINQ_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
		printf("TX IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
		printf("TX UDP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
		printf("TX TCP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
		printf("TX SCTP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("TX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
		printf("TX TCP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
		printf("TX UDP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
& 913 DEV_TX_OFFLOAD_UDP_TSO) 914 printf("on\n"); 915 else 916 printf("off\n"); 917 } 918 919 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 920 printf("TSO for VXLAN tunnel packet: "); 921 if (ports[port_id].dev_conf.txmode.offloads & 922 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 923 printf("on\n"); 924 else 925 printf("off\n"); 926 } 927 928 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 929 printf("TSO for GRE tunnel packet: "); 930 if (ports[port_id].dev_conf.txmode.offloads & 931 DEV_TX_OFFLOAD_GRE_TNL_TSO) 932 printf("on\n"); 933 else 934 printf("off\n"); 935 } 936 937 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 938 printf("TSO for IPIP tunnel packet: "); 939 if (ports[port_id].dev_conf.txmode.offloads & 940 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 941 printf("on\n"); 942 else 943 printf("off\n"); 944 } 945 946 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 947 printf("TSO for GENEVE tunnel packet: "); 948 if (ports[port_id].dev_conf.txmode.offloads & 949 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 950 printf("on\n"); 951 else 952 printf("off\n"); 953 } 954 955 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 956 printf("IP tunnel TSO: "); 957 if (ports[port_id].dev_conf.txmode.offloads & 958 DEV_TX_OFFLOAD_IP_TNL_TSO) 959 printf("on\n"); 960 else 961 printf("off\n"); 962 } 963 964 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 965 printf("UDP tunnel TSO: "); 966 if (ports[port_id].dev_conf.txmode.offloads & 967 DEV_TX_OFFLOAD_UDP_TNL_TSO) 968 printf("on\n"); 969 else 970 printf("off\n"); 971 } 972 973 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 974 printf("TX Outer UDP checksum: "); 975 if (ports[port_id].dev_conf.txmode.offloads & 976 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 977 printf("on\n"); 978 else 979 printf("off\n"); 980 } 981 982 } 983 984 int 985 port_id_is_invalid(portid_t port_id, enum print_warning warning) 986 { 987 uint16_t pid; 988 989 if (port_id == (portid_t)RTE_PORT_ALL) 990 return 0; 991 992 RTE_ETH_FOREACH_DEV(pid) 993 if (port_id == pid) 994 return 0; 995 996 if (warning == ENABLED_WARN) 997 printf("Invalid port %d\n", port_id); 998 999 return 1; 1000 } 1001 1002 void print_valid_ports(void) 1003 { 1004 portid_t pid; 1005 1006 printf("The valid ports array is ["); 1007 RTE_ETH_FOREACH_DEV(pid) { 1008 printf(" %d", pid); 1009 } 1010 printf(" ]\n"); 1011 } 1012 1013 static int 1014 vlan_id_is_invalid(uint16_t vlan_id) 1015 { 1016 if (vlan_id < 4096) 1017 return 0; 1018 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1019 return 1; 1020 } 1021 1022 static int 1023 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1024 { 1025 const struct rte_pci_device *pci_dev; 1026 const struct rte_bus *bus; 1027 uint64_t pci_len; 1028 1029 if (reg_off & 0x3) { 1030 printf("Port register offset 0x%X not aligned on a 4-byte " 1031 "boundary\n", 1032 (unsigned)reg_off); 1033 return 1; 1034 } 1035 1036 if (!ports[port_id].dev_info.device) { 1037 printf("Invalid device\n"); 1038 return 0; 1039 } 1040 1041 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1042 if (bus && !strcmp(bus->name, "pci")) { 1043 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1044 } else { 1045 printf("Not a PCI device\n"); 1046 return 1; 1047 } 1048 1049 pci_len = pci_dev->mem_resource[0].len; 1050 if (reg_off >= pci_len) { 1051 printf("Port %d: register offset %u (0x%X) out of port PCI " 1052 "resource (length=%"PRIu64")\n", 1053 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 1054 return 1; 
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
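
/*
 * Usage sketch (illustrative, the offset is hypothetical): from the testpmd
 * prompt,
 *	read reg 0 0x10
 *	write regbit 0 0x10 3 1
 * reach port_reg_display(0, 0x10) and port_reg_bit_set(0, 0x10, 3, 1)
 * respectively. Both go through port_reg_off_is_invalid() first, so the
 * offset must be 4-byte aligned and fall inside BAR 0 of the PCI device.
 */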

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;
	struct rte_eth_dev_info dev_info;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
		printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
		       mtu, dev_info.min_mtu, dev_info.max_mtu);
		return;
	}
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}

/* Generic flow management functions. */

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)",
	       rte_strerror(err));
	return -err;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}

/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id;
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		return port_flow_complain(&error);
	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first");
			rte_flow_destroy(port_id, flow, NULL);
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	} else
		id = 0;
	pf = port_flow_new(attr, pattern, actions, &error);
	if (!pf) {
		rte_flow_destroy(port_id, flow, NULL);
		return port_flow_complain(&error);
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}
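
/*
 * Illustrative sketch (the queue index is hypothetical): the minimal
 * attr/pattern/actions triple that port_flow_validate() and
 * port_flow_create() expect, matching all Ethernet frames and steering
 * them to RX queue 0:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	if (port_flow_validate(port_id, &attr, pattern, actions) == 0)
 *		port_flow_create(port_id, &attr, pattern, actions);
 *
 * The equivalent interactive command would be:
 *	flow create 0 ingress pattern eth / end actions queue index 0 / end
 */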

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		ret = port_flow_complain(&error);
		if (port_id_is_invalid(port_id, DISABLED_WARN) ||
		    port_id == (portid_t)RTE_PORT_ALL)
			return ret;
	}
	port = &ports[port_id];
	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

/** Dump all flow rules. */
int
port_flow_dump(portid_t port_id, const char *file_name)
{
	int ret = 0;
	FILE *file = stdout;
	struct rte_flow_error error;

	if (file_name && strlen(file_name)) {
		file = fopen(file_name, "w");
		if (!file) {
			printf("Failed to create file %s: %s\n", file_name,
			       strerror(errno));
			return -errno;
		}
	}
	ret = rte_flow_dev_dump(port_id, file, &error);
	if (ret) {
		port_flow_complain(&error);
		printf("Failed to dump flow: %s\n", strerror(-ret));
	} else
		printf("Flow dump finished\n");
	if (file_name && strlen(file_name))
		fclose(file);
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
	} query;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
			    &name, sizeof(name),
			    (void *)(uintptr_t)action->type, &error);
	if (ret < 0)
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		printf("Cannot query action type %d (%s)\n",
		       action->type, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action->type, name);
		break;
	}
	return 0;
}

/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;
		const struct rte_flow_attr *curr = pf->rule.attr;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (curr->group == group[i])
					break;
			if (i == n)
				continue;
		}
		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
			const struct rte_flow_attr *comp = (*tmp)->rule.attr;

			if (curr->group > comp->group ||
			    (curr->group == comp->group &&
			     curr->priority > comp->priority) ||
			    (curr->group == comp->group &&
			     curr->priority == comp->priority &&
			     pf->id > (*tmp)->id))
				continue;
			break;
		}
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->rule.pattern;
		const struct rte_flow_action *action = pf->rule.actions;
		const char *name;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
		       pf->id,
		       pf->rule.attr->group,
		       pf->rule.attr->priority,
		       pf->rule.attr->ingress ? 'i' : '-',
		       pf->rule.attr->egress ? 'e' : '-',
		       pf->rule.attr->transfer ? 't' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
					  &name, sizeof(name),
					  (void *)(uintptr_t)item->type,
					  NULL) <= 0)
				name = "[UNKNOWN]";
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
					  &name, sizeof(name),
					  (void *)(uintptr_t)action->type,
					  NULL) <= 0)
				name = "[UNKNOWN]";
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", name);
			++action;
		}
		printf("\n");
	}
}
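
/*
 * Usage sketch (illustrative): a typical inspection sequence from the
 * testpmd prompt,
 *	flow list 0		-> port_flow_list(0, 0, NULL)
 *	flow query 0 0 count	-> port_flow_query() with a COUNT action
 * Only RTE_FLOW_ACTION_TYPE_COUNT can be queried here; any other action
 * type makes port_flow_query() return -ENOTSUP.
 */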
"now restricted" : "not restricted anymore"); 1628 return 0; 1629 } 1630 1631 /* 1632 * RX/TX ring descriptors display functions. 1633 */ 1634 int 1635 rx_queue_id_is_invalid(queueid_t rxq_id) 1636 { 1637 if (rxq_id < nb_rxq) 1638 return 0; 1639 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1640 return 1; 1641 } 1642 1643 int 1644 tx_queue_id_is_invalid(queueid_t txq_id) 1645 { 1646 if (txq_id < nb_txq) 1647 return 0; 1648 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1649 return 1; 1650 } 1651 1652 static int 1653 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1654 { 1655 if (rxdesc_id < nb_rxd) 1656 return 0; 1657 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1658 rxdesc_id, nb_rxd); 1659 return 1; 1660 } 1661 1662 static int 1663 tx_desc_id_is_invalid(uint16_t txdesc_id) 1664 { 1665 if (txdesc_id < nb_txd) 1666 return 0; 1667 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1668 txdesc_id, nb_txd); 1669 return 1; 1670 } 1671 1672 static const struct rte_memzone * 1673 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1674 { 1675 char mz_name[RTE_MEMZONE_NAMESIZE]; 1676 const struct rte_memzone *mz; 1677 1678 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 1679 port_id, q_id, ring_name); 1680 mz = rte_memzone_lookup(mz_name); 1681 if (mz == NULL) 1682 printf("%s ring memory zoneof (port %d, queue %d) not" 1683 "found (zone name = %s\n", 1684 ring_name, port_id, q_id, mz_name); 1685 return mz; 1686 } 1687 1688 union igb_ring_dword { 1689 uint64_t dword; 1690 struct { 1691 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1692 uint32_t lo; 1693 uint32_t hi; 1694 #else 1695 uint32_t hi; 1696 uint32_t lo; 1697 #endif 1698 } words; 1699 }; 1700 1701 struct igb_ring_desc_32_bytes { 1702 union igb_ring_dword lo_dword; 1703 union igb_ring_dword hi_dword; 1704 union igb_ring_dword resv1; 1705 union igb_ring_dword resv2; 1706 }; 1707 1708 struct igb_ring_desc_16_bytes { 1709 union igb_ring_dword lo_dword; 1710 union igb_ring_dword hi_dword; 1711 }; 1712 1713 static void 1714 ring_rxd_display_dword(union igb_ring_dword dword) 1715 { 1716 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1717 (unsigned)dword.words.hi); 1718 } 1719 1720 static void 1721 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1722 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1723 portid_t port_id, 1724 #else 1725 __rte_unused portid_t port_id, 1726 #endif 1727 uint16_t desc_id) 1728 { 1729 struct igb_ring_desc_16_bytes *ring = 1730 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1731 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1732 int ret; 1733 struct rte_eth_dev_info dev_info; 1734 1735 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1736 if (ret != 0) 1737 return; 1738 1739 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1740 /* 32 bytes RX descriptor, i40e only */ 1741 struct igb_ring_desc_32_bytes *ring = 1742 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1743 ring[desc_id].lo_dword.dword = 1744 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1745 ring_rxd_display_dword(ring[desc_id].lo_dword); 1746 ring[desc_id].hi_dword.dword = 1747 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1748 ring_rxd_display_dword(ring[desc_id].hi_dword); 1749 ring[desc_id].resv1.dword = 1750 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1751 ring_rxd_display_dword(ring[desc_id].resv1); 1752 ring[desc_id].resv2.dword = 1753 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1754 ring_rxd_display_dword(ring[desc_id].resv2); 1755 1756 return; 
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}
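
/*
 * Usage sketch (illustrative): "read rxd 0 0 0" at the testpmd prompt calls
 * rx_ring_desc_display(0, 0, 0), which looks up the "rx_ring" memzone of
 * port 0, queue 0 and dumps descriptor 0 as raw 32-bit words; "read txd"
 * does the same for the TX ring.
 */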
"" : " with retry", 1837 nb_pkt_per_burst); 1838 1839 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1840 printf(" packet len=%u - nb packet segments=%d\n", 1841 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1842 1843 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1844 nb_fwd_lcores, nb_fwd_ports); 1845 1846 RTE_ETH_FOREACH_DEV(pid) { 1847 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1848 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1849 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1850 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1851 uint16_t nb_rx_desc_tmp; 1852 uint16_t nb_tx_desc_tmp; 1853 struct rte_eth_rxq_info rx_qinfo; 1854 struct rte_eth_txq_info tx_qinfo; 1855 int32_t rc; 1856 1857 /* per port config */ 1858 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1859 (unsigned int)pid, nb_rxq, nb_txq); 1860 1861 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1862 ports[pid].dev_conf.rxmode.offloads, 1863 ports[pid].dev_conf.txmode.offloads); 1864 1865 /* per rx queue config only for first queue to be less verbose */ 1866 for (qid = 0; qid < 1; qid++) { 1867 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1868 if (rc) 1869 nb_rx_desc_tmp = nb_rx_desc[qid]; 1870 else 1871 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1872 1873 printf(" RX queue: %d\n", qid); 1874 printf(" RX desc=%d - RX free threshold=%d\n", 1875 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1876 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1877 " wthresh=%d\n", 1878 rx_conf[qid].rx_thresh.pthresh, 1879 rx_conf[qid].rx_thresh.hthresh, 1880 rx_conf[qid].rx_thresh.wthresh); 1881 printf(" RX Offloads=0x%"PRIx64"\n", 1882 rx_conf[qid].offloads); 1883 } 1884 1885 /* per tx queue config only for first queue to be less verbose */ 1886 for (qid = 0; qid < 1; qid++) { 1887 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1888 if (rc) 1889 nb_tx_desc_tmp = nb_tx_desc[qid]; 1890 else 1891 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1892 1893 printf(" TX queue: %d\n", qid); 1894 printf(" TX desc=%d - TX free threshold=%d\n", 1895 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1896 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1897 " wthresh=%d\n", 1898 tx_conf[qid].tx_thresh.pthresh, 1899 tx_conf[qid].tx_thresh.hthresh, 1900 tx_conf[qid].tx_thresh.wthresh); 1901 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1902 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1903 } 1904 } 1905 } 1906 1907 void 1908 port_rss_reta_info(portid_t port_id, 1909 struct rte_eth_rss_reta_entry64 *reta_conf, 1910 uint16_t nb_entries) 1911 { 1912 uint16_t i, idx, shift; 1913 int ret; 1914 1915 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1916 return; 1917 1918 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1919 if (ret != 0) { 1920 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1921 return; 1922 } 1923 1924 for (i = 0; i < nb_entries; i++) { 1925 idx = i / RTE_RETA_GROUP_SIZE; 1926 shift = i % RTE_RETA_GROUP_SIZE; 1927 if (!(reta_conf[idx].mask & (1ULL << shift))) 1928 continue; 1929 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1930 i, reta_conf[idx].reta[shift]); 1931 } 1932 } 1933 1934 /* 1935 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1936 * key of the port. 
1937 */ 1938 void 1939 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 1940 { 1941 struct rte_eth_rss_conf rss_conf = {0}; 1942 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1943 uint64_t rss_hf; 1944 uint8_t i; 1945 int diag; 1946 struct rte_eth_dev_info dev_info; 1947 uint8_t hash_key_size; 1948 int ret; 1949 1950 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1951 return; 1952 1953 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1954 if (ret != 0) 1955 return; 1956 1957 if (dev_info.hash_key_size > 0 && 1958 dev_info.hash_key_size <= sizeof(rss_key)) 1959 hash_key_size = dev_info.hash_key_size; 1960 else { 1961 printf("dev_info did not provide a valid hash key size\n"); 1962 return; 1963 } 1964 1965 /* Get RSS hash key if asked to display it */ 1966 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1967 rss_conf.rss_key_len = hash_key_size; 1968 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1969 if (diag != 0) { 1970 switch (diag) { 1971 case -ENODEV: 1972 printf("port index %d invalid\n", port_id); 1973 break; 1974 case -ENOTSUP: 1975 printf("operation not supported by device\n"); 1976 break; 1977 default: 1978 printf("operation failed - diag=%d\n", diag); 1979 break; 1980 } 1981 return; 1982 } 1983 rss_hf = rss_conf.rss_hf; 1984 if (rss_hf == 0) { 1985 printf("RSS disabled\n"); 1986 return; 1987 } 1988 printf("RSS functions:\n "); 1989 for (i = 0; rss_type_table[i].str; i++) { 1990 if (rss_hf & rss_type_table[i].rss_type) 1991 printf("%s ", rss_type_table[i].str); 1992 } 1993 printf("\n"); 1994 if (!show_rss_key) 1995 return; 1996 printf("RSS key:\n"); 1997 for (i = 0; i < hash_key_size; i++) 1998 printf("%02X", rss_key[i]); 1999 printf("\n"); 2000 } 2001 2002 void 2003 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2004 uint hash_key_len) 2005 { 2006 struct rte_eth_rss_conf rss_conf; 2007 int diag; 2008 unsigned int i; 2009 2010 rss_conf.rss_key = NULL; 2011 rss_conf.rss_key_len = hash_key_len; 2012 rss_conf.rss_hf = 0; 2013 for (i = 0; rss_type_table[i].str; i++) { 2014 if (!strcmp(rss_type_table[i].str, rss_type)) 2015 rss_conf.rss_hf = rss_type_table[i].rss_type; 2016 } 2017 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2018 if (diag == 0) { 2019 rss_conf.rss_key = hash_key; 2020 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2021 } 2022 if (diag == 0) 2023 return; 2024 2025 switch (diag) { 2026 case -ENODEV: 2027 printf("port index %d invalid\n", port_id); 2028 break; 2029 case -ENOTSUP: 2030 printf("operation not supported by device\n"); 2031 break; 2032 default: 2033 printf("operation failed - diag=%d\n", diag); 2034 break; 2035 } 2036 } 2037 2038 /* 2039 * Setup forwarding configuration for each logical core. 
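 *
 * Worked example (illustrative): with nb_fwd_streams = 10 and
 * nb_fwd_lcores = 4, nb_fs_per_lcore = 2 and nb_extra = 2, so the first
 * two lcores are assigned 2 streams each and the last two 3 streams each
 * (2 + 2 + 3 + 3 = 10).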
2040 */ 2041 static void 2042 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2043 { 2044 streamid_t nb_fs_per_lcore; 2045 streamid_t nb_fs; 2046 streamid_t sm_id; 2047 lcoreid_t nb_extra; 2048 lcoreid_t nb_fc; 2049 lcoreid_t nb_lc; 2050 lcoreid_t lc_id; 2051 2052 nb_fs = cfg->nb_fwd_streams; 2053 nb_fc = cfg->nb_fwd_lcores; 2054 if (nb_fs <= nb_fc) { 2055 nb_fs_per_lcore = 1; 2056 nb_extra = 0; 2057 } else { 2058 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2059 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2060 } 2061 2062 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2063 sm_id = 0; 2064 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2065 fwd_lcores[lc_id]->stream_idx = sm_id; 2066 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2067 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2068 } 2069 2070 /* 2071 * Assign extra remaining streams, if any. 2072 */ 2073 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2074 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2075 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2076 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2077 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2078 } 2079 } 2080 2081 static portid_t 2082 fwd_topology_tx_port_get(portid_t rxp) 2083 { 2084 static int warning_once = 1; 2085 2086 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2087 2088 switch (port_topology) { 2089 default: 2090 case PORT_TOPOLOGY_PAIRED: 2091 if ((rxp & 0x1) == 0) { 2092 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2093 return rxp + 1; 2094 if (warning_once) { 2095 printf("\nWarning! port-topology=paired" 2096 " and an odd number of forwarding" 2097 " ports; the last port will pair" 2098 " with itself.\n\n"); 2099 warning_once = 0; 2100 } 2101 return rxp; 2102 } 2103 return rxp - 1; 2104 case PORT_TOPOLOGY_CHAINED: 2105 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2106 case PORT_TOPOLOGY_LOOP: 2107 return rxp; 2108 } 2109 } 2110 2111 static void 2112 simple_fwd_config_setup(void) 2113 { 2114 portid_t i; 2115 2116 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2117 cur_fwd_config.nb_fwd_streams = 2118 (streamid_t) cur_fwd_config.nb_fwd_ports; 2119 2120 /* reinitialize forwarding streams */ 2121 init_fwd_streams(); 2122 2123 /* 2124 * In the simple forwarding test, the number of forwarding cores 2125 * must be lower than or equal to the number of forwarding ports. 2126 */ 2127 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2128 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2129 cur_fwd_config.nb_fwd_lcores = 2130 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2131 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2132 2133 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2134 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2135 fwd_streams[i]->rx_queue = 0; 2136 fwd_streams[i]->tx_port = 2137 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2138 fwd_streams[i]->tx_queue = 0; 2139 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2140 fwd_streams[i]->retry_enabled = retry_enabled; 2141 } 2142 } 2143 2144 /** 2145 * For the RSS forwarding test, all streams are distributed across lcores. 2146 * Each stream is composed of an RX queue to poll on an RX port for input 2147 * packets, associated with a TX queue of a TX port to which forwarded packets are sent.
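 *
 * Example (illustrative): with two forwarding ports, nb_q = 2 and the
 * default paired topology, four streams are created, filling ports first
 * and queues second:
 *   stream 0: (port 0, rxq 0) -> (port 1, txq 0)
 *   stream 1: (port 1, rxq 0) -> (port 0, txq 0)
 *   stream 2: (port 0, rxq 1) -> (port 1, txq 1)
 *   stream 3: (port 1, rxq 1) -> (port 0, txq 1)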
2148 */ 2149 static void 2150 rss_fwd_config_setup(void) 2151 { 2152 portid_t rxp; 2153 portid_t txp; 2154 queueid_t rxq; 2155 queueid_t nb_q; 2156 streamid_t sm_id; 2157 2158 nb_q = nb_rxq; 2159 if (nb_q > nb_txq) 2160 nb_q = nb_txq; 2161 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2162 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2163 cur_fwd_config.nb_fwd_streams = 2164 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2165 2166 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2167 cur_fwd_config.nb_fwd_lcores = 2168 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2169 2170 /* reinitialize forwarding streams */ 2171 init_fwd_streams(); 2172 2173 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2174 rxp = 0; rxq = 0; 2175 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2176 struct fwd_stream *fs; 2177 2178 fs = fwd_streams[sm_id]; 2179 txp = fwd_topology_tx_port_get(rxp); 2180 fs->rx_port = fwd_ports_ids[rxp]; 2181 fs->rx_queue = rxq; 2182 fs->tx_port = fwd_ports_ids[txp]; 2183 fs->tx_queue = rxq; 2184 fs->peer_addr = fs->tx_port; 2185 fs->retry_enabled = retry_enabled; 2186 rxp++; 2187 if (rxp < nb_fwd_ports) 2188 continue; 2189 rxp = 0; 2190 rxq++; 2191 } 2192 } 2193 2194 /** 2195 * For the DCB forwarding test, each core is assigned to a traffic class. 2196 * 2197 * Each core is assigned multiple streams, each stream being composed of 2198 * an RX queue to poll on an RX port for input packets, associated with 2199 * a TX queue of a TX port to which forwarded packets are sent. All RX and 2200 * TX queues are mapped to the same traffic class. 2201 * If VMDQ and DCB co-exist, each traffic class on different pools shares 2202 * the same core. 2203 */ 2204 static void 2205 dcb_fwd_config_setup(void) 2206 { 2207 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2208 portid_t txp, rxp = 0; 2209 queueid_t txq, rxq = 0; 2210 lcoreid_t lc_id; 2211 uint16_t nb_rx_queue, nb_tx_queue; 2212 uint16_t i, j, k, sm_id = 0; 2213 uint8_t tc = 0; 2214 2215 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2216 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2217 cur_fwd_config.nb_fwd_streams = 2218 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2219 2220 /* reinitialize forwarding streams */ 2221 init_fwd_streams(); 2222 sm_id = 0; 2223 txp = 1; 2224 /* get the dcb info on the first RX and TX ports */ 2225 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2226 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2227 2228 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2229 fwd_lcores[lc_id]->stream_nb = 0; 2230 fwd_lcores[lc_id]->stream_idx = sm_id; 2231 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2232 /* if nb_queue is zero, this TC is 2233 * not enabled on the pool 2234 */ 2235 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2236 break; 2237 k = fwd_lcores[lc_id]->stream_nb + 2238 fwd_lcores[lc_id]->stream_idx; 2239 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2240 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2241 nb_rx_queue = rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2242 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2243 for (j = 0; j < nb_rx_queue; j++) { 2244 struct fwd_stream *fs; 2245 2246 fs = fwd_streams[k + j]; 2247 fs->rx_port = fwd_ports_ids[rxp]; 2248 fs->rx_queue = rxq + j; 2249 fs->tx_port = fwd_ports_ids[txp]; 2250 fs->tx_queue = txq + j % nb_tx_queue; 2251 fs->peer_addr = fs->tx_port; 2252 fs->retry_enabled = retry_enabled; 2253 } 2254
fwd_lcores[lc_id]->stream_nb += 2255 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2256 } 2257 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2258 2259 tc++; 2260 if (tc < rxp_dcb_info.nb_tcs) 2261 continue; 2262 /* Restart from TC 0 on next RX port */ 2263 tc = 0; 2264 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2265 rxp = (portid_t) 2266 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2267 else 2268 rxp++; 2269 if (rxp >= nb_fwd_ports) 2270 return; 2271 /* get the dcb information on next RX and TX ports */ 2272 if ((rxp & 0x1) == 0) 2273 txp = (portid_t) (rxp + 1); 2274 else 2275 txp = (portid_t) (rxp - 1); 2276 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2277 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2278 } 2279 } 2280 2281 static void 2282 icmp_echo_config_setup(void) 2283 { 2284 portid_t rxp; 2285 queueid_t rxq; 2286 lcoreid_t lc_id; 2287 uint16_t sm_id; 2288 2289 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2290 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2291 (nb_txq * nb_fwd_ports); 2292 else 2293 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2294 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2295 cur_fwd_config.nb_fwd_streams = 2296 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2297 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2298 cur_fwd_config.nb_fwd_lcores = 2299 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2300 if (verbose_level > 0) { 2301 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2302 __FUNCTION__, 2303 cur_fwd_config.nb_fwd_lcores, 2304 cur_fwd_config.nb_fwd_ports, 2305 cur_fwd_config.nb_fwd_streams); 2306 } 2307 2308 /* reinitialize forwarding streams */ 2309 init_fwd_streams(); 2310 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2311 rxp = 0; rxq = 0; 2312 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2313 if (verbose_level > 0) 2314 printf(" core=%d: \n", lc_id); 2315 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2316 struct fwd_stream *fs; 2317 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2318 fs->rx_port = fwd_ports_ids[rxp]; 2319 fs->rx_queue = rxq; 2320 fs->tx_port = fs->rx_port; 2321 fs->tx_queue = rxq; 2322 fs->peer_addr = fs->tx_port; 2323 fs->retry_enabled = retry_enabled; 2324 if (verbose_level > 0) 2325 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2326 sm_id, fs->rx_port, fs->rx_queue, 2327 fs->tx_queue); 2328 rxq = (queueid_t) (rxq + 1); 2329 if (rxq == nb_rxq) { 2330 rxq = 0; 2331 rxp = (portid_t) (rxp + 1); 2332 } 2333 } 2334 } 2335 } 2336 2337 #if defined RTE_LIBRTE_PMD_SOFTNIC 2338 static void 2339 softnic_fwd_config_setup(void) 2340 { 2341 struct rte_port *port; 2342 portid_t pid, softnic_portid; 2343 queueid_t i; 2344 uint8_t softnic_enable = 0; 2345 2346 RTE_ETH_FOREACH_DEV(pid) { 2347 port = &ports[pid]; 2348 const char *driver = port->dev_info.driver_name; 2349 2350 if (strcmp(driver, "net_softnic") == 0) { 2351 softnic_portid = pid; 2352 softnic_enable = 1; 2353 break; 2354 } 2355 } 2356 2357 if (softnic_enable == 0) { 2358 printf("Softnic mode not configured(%s)!\n", __func__); 2359 return; 2360 } 2361 2362 cur_fwd_config.nb_fwd_ports = 1; 2363 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; 2364 2365 /* Re-initialize forwarding streams */ 2366 init_fwd_streams(); 2367 2368 /* 2369 * In the softnic forwarding test, the number of forwarding cores 2370 * is set to one and remaining are used for softnic packet processing. 
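 * For example (illustrative), when testpmd is started with three lcores,
 * a single lcore runs this forwarding loop while the two remaining ones
 * are left to the softnic for pipeline processing.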
2371 */ 2372 cur_fwd_config.nb_fwd_lcores = 1; 2373 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2374 2375 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { 2376 fwd_streams[i]->rx_port = softnic_portid; 2377 fwd_streams[i]->rx_queue = i; 2378 fwd_streams[i]->tx_port = softnic_portid; 2379 fwd_streams[i]->tx_queue = i; 2380 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2381 fwd_streams[i]->retry_enabled = retry_enabled; 2382 } 2383 } 2384 #endif 2385 2386 void 2387 fwd_config_setup(void) 2388 { 2389 cur_fwd_config.fwd_eng = cur_fwd_eng; 2390 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2391 icmp_echo_config_setup(); 2392 return; 2393 } 2394 2395 #if defined RTE_LIBRTE_PMD_SOFTNIC 2396 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 2397 softnic_fwd_config_setup(); 2398 return; 2399 } 2400 #endif 2401 2402 if ((nb_rxq > 1) && (nb_txq > 1)){ 2403 if (dcb_config) 2404 dcb_fwd_config_setup(); 2405 else 2406 rss_fwd_config_setup(); 2407 } 2408 else 2409 simple_fwd_config_setup(); 2410 } 2411 2412 static const char * 2413 mp_alloc_to_str(uint8_t mode) 2414 { 2415 switch (mode) { 2416 case MP_ALLOC_NATIVE: 2417 return "native"; 2418 case MP_ALLOC_ANON: 2419 return "anon"; 2420 case MP_ALLOC_XMEM: 2421 return "xmem"; 2422 case MP_ALLOC_XMEM_HUGE: 2423 return "xmemhuge"; 2424 case MP_ALLOC_XBUF: 2425 return "xbuf"; 2426 default: 2427 return "invalid"; 2428 } 2429 } 2430 2431 void 2432 pkt_fwd_config_display(struct fwd_config *cfg) 2433 { 2434 struct fwd_stream *fs; 2435 lcoreid_t lc_id; 2436 streamid_t sm_id; 2437 2438 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2439 "NUMA support %s, MP allocation mode: %s\n", 2440 cfg->fwd_eng->fwd_mode_name, 2441 retry_enabled == 0 ? "" : " with retry", 2442 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2443 numa_support == 1 ? "enabled" : "disabled", 2444 mp_alloc_to_str(mp_alloc_type)); 2445 2446 if (retry_enabled) 2447 printf("TX retry num: %u, delay between TX retries: %uus\n", 2448 burst_tx_retry_num, burst_tx_delay_time); 2449 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2450 printf("Logical Core %u (socket %u) forwards packets on " 2451 "%d streams:", 2452 fwd_lcores_cpuids[lc_id], 2453 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2454 fwd_lcores[lc_id]->stream_nb); 2455 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2456 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2457 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2458 "P=%d/Q=%d (socket %u) ", 2459 fs->rx_port, fs->rx_queue, 2460 ports[fs->rx_port].socket_id, 2461 fs->tx_port, fs->tx_queue, 2462 ports[fs->tx_port].socket_id); 2463 print_ethaddr("peer=", 2464 &peer_eth_addrs[fs->peer_addr]); 2465 } 2466 printf("\n"); 2467 } 2468 printf("\n"); 2469 } 2470 2471 void 2472 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2473 { 2474 struct rte_ether_addr new_peer_addr; 2475 if (!rte_eth_dev_is_valid_port(port_id)) { 2476 printf("Error: Invalid port number %i\n", port_id); 2477 return; 2478 } 2479 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 2480 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2481 return; 2482 } 2483 peer_eth_addrs[port_id] = new_peer_addr; 2484 } 2485 2486 int 2487 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2488 { 2489 unsigned int i; 2490 unsigned int lcore_cpuid; 2491 int record_now; 2492 2493 record_now = 0; 2494 again: 2495 for (i = 0; i < nb_lc; i++) { 2496 lcore_cpuid = lcorelist[i]; 2497 if (! 
rte_lcore_is_enabled(lcore_cpuid)) { 2498 printf("lcore %u not enabled\n", lcore_cpuid); 2499 return -1; 2500 } 2501 if (lcore_cpuid == rte_get_master_lcore()) { 2502 printf("lcore %u cannot be used for packet " 2503 "forwarding: it is the master lcore, " 2504 "reserved for command line parsing only\n", 2505 lcore_cpuid); 2506 return -1; 2507 } 2508 if (record_now) 2509 fwd_lcores_cpuids[i] = lcore_cpuid; 2510 } 2511 if (record_now == 0) { 2512 record_now = 1; 2513 goto again; 2514 } 2515 nb_cfg_lcores = (lcoreid_t) nb_lc; 2516 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2517 printf("previous number of forwarding cores %u - changed to " 2518 "number of configured cores %u\n", 2519 (unsigned int) nb_fwd_lcores, nb_lc); 2520 nb_fwd_lcores = (lcoreid_t) nb_lc; 2521 } 2522 2523 return 0; 2524 } 2525 2526 int 2527 set_fwd_lcores_mask(uint64_t lcoremask) 2528 { 2529 unsigned int lcorelist[64]; 2530 unsigned int nb_lc; 2531 unsigned int i; 2532 2533 if (lcoremask == 0) { 2534 printf("Invalid zero mask of cores\n"); 2535 return -1; 2536 } 2537 nb_lc = 0; 2538 for (i = 0; i < 64; i++) { 2539 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2540 continue; 2541 lcorelist[nb_lc++] = i; 2542 } 2543 return set_fwd_lcores_list(lcorelist, nb_lc); 2544 } 2545 2546 void 2547 set_fwd_lcores_number(uint16_t nb_lc) 2548 { 2549 if (nb_lc > nb_cfg_lcores) { 2550 printf("nb fwd cores %u > %u (max. number of configured " 2551 "lcores) - ignored\n", 2552 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2553 return; 2554 } 2555 nb_fwd_lcores = (lcoreid_t) nb_lc; 2556 printf("Number of forwarding cores set to %u\n", 2557 (unsigned int) nb_fwd_lcores); 2558 } 2559 2560 void 2561 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2562 { 2563 unsigned int i; 2564 portid_t port_id; 2565 int record_now; 2566 2567 record_now = 0; 2568 again: 2569 for (i = 0; i < nb_pt; i++) { 2570 port_id = (portid_t) portlist[i]; 2571 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2572 return; 2573 if (record_now) 2574 fwd_ports_ids[i] = port_id; 2575 } 2576 if (record_now == 0) { 2577 record_now = 1; 2578 goto again; 2579 } 2580 nb_cfg_ports = (portid_t) nb_pt; 2581 if (nb_fwd_ports != (portid_t) nb_pt) { 2582 printf("previous number of forwarding ports %u - changed to " 2583 "number of configured ports %u\n", 2584 (unsigned int) nb_fwd_ports, nb_pt); 2585 nb_fwd_ports = (portid_t) nb_pt; 2586 } 2587 } 2588 2589 void 2590 set_fwd_ports_mask(uint64_t portmask) 2591 { 2592 unsigned int portlist[64]; 2593 unsigned int nb_pt; 2594 unsigned int i; 2595 2596 if (portmask == 0) { 2597 printf("Invalid zero mask of ports\n"); 2598 return; 2599 } 2600 nb_pt = 0; 2601 RTE_ETH_FOREACH_DEV(i) { 2602 if (!
((uint64_t)(1ULL << i) & portmask)) 2603 continue; 2604 portlist[nb_pt++] = i; 2605 } 2606 set_fwd_ports_list(portlist, nb_pt); 2607 } 2608 2609 void 2610 set_fwd_ports_number(uint16_t nb_pt) 2611 { 2612 if (nb_pt > nb_cfg_ports) { 2613 printf("nb fwd ports %u > %u (number of configured " 2614 "ports) - ignored\n", 2615 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2616 return; 2617 } 2618 nb_fwd_ports = (portid_t) nb_pt; 2619 printf("Number of forwarding ports set to %u\n", 2620 (unsigned int) nb_fwd_ports); 2621 } 2622 2623 int 2624 port_is_forwarding(portid_t port_id) 2625 { 2626 unsigned int i; 2627 2628 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2629 return -1; 2630 2631 for (i = 0; i < nb_fwd_ports; i++) { 2632 if (fwd_ports_ids[i] == port_id) 2633 return 1; 2634 } 2635 2636 return 0; 2637 } 2638 2639 void 2640 set_nb_pkt_per_burst(uint16_t nb) 2641 { 2642 if (nb > MAX_PKT_BURST) { 2643 printf("nb pkt per burst: %u > %u (maximum number of packets " 2644 "per burst) - ignored\n", 2645 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2646 return; 2647 } 2648 nb_pkt_per_burst = nb; 2649 printf("Number of packets per burst set to %u\n", 2650 (unsigned int) nb_pkt_per_burst); 2651 } 2652 2653 static const char * 2654 tx_split_get_name(enum tx_pkt_split split) 2655 { 2656 uint32_t i; 2657 2658 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2659 if (tx_split_name[i].split == split) 2660 return tx_split_name[i].name; 2661 } 2662 return NULL; 2663 } 2664 2665 void 2666 set_tx_pkt_split(const char *name) 2667 { 2668 uint32_t i; 2669 2670 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2671 if (strcmp(tx_split_name[i].name, name) == 0) { 2672 tx_pkt_split = tx_split_name[i].split; 2673 return; 2674 } 2675 } 2676 printf("unknown value: \"%s\"\n", name); 2677 } 2678 2679 void 2680 show_tx_pkt_segments(void) 2681 { 2682 uint32_t i, n; 2683 const char *split; 2684 2685 n = tx_pkt_nb_segs; 2686 split = tx_split_get_name(tx_pkt_split); 2687 2688 printf("Number of segments: %u\n", n); 2689 printf("Segment sizes: "); 2690 for (i = 0; i != n - 1; i++) 2691 printf("%hu,", tx_pkt_seg_lengths[i]); 2692 printf("%hu\n", tx_pkt_seg_lengths[i]); 2693 printf("Split packet: %s\n", split); 2694 } 2695 2696 void 2697 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2698 { 2699 uint16_t tx_pkt_len; 2700 unsigned i; 2701 2702 if (nb_segs >= (unsigned) nb_txd) { 2703 printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n", 2704 nb_segs, (unsigned int) nb_txd); 2705 return; 2706 } 2707 2708 /* 2709 * Check that each segment length is not greater than 2710 * the mbuf data size. 2711 * Check also that the total packet length is greater than or equal to 2712 * the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 2713 * 20 + 8).
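 * E.g. for IPv4/UDP that minimum is sizeof(struct rte_ether_hdr) +
 * 20 + 8 = 14 + 20 + 8 = 42 bytes.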
2714 */ 2715 tx_pkt_len = 0; 2716 for (i = 0; i < nb_segs; i++) { 2717 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2718 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2719 i, seg_lengths[i], (unsigned) mbuf_data_size); 2720 return; 2721 } 2722 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2723 } 2724 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 2725 printf("total packet length=%u < %d - give up\n", 2726 (unsigned) tx_pkt_len, 2727 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 2728 return; 2729 } 2730 2731 for (i = 0; i < nb_segs; i++) 2732 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2733 2734 tx_pkt_length = tx_pkt_len; 2735 tx_pkt_nb_segs = (uint8_t) nb_segs; 2736 } 2737 2738 void 2739 setup_gro(const char *onoff, portid_t port_id) 2740 { 2741 if (!rte_eth_dev_is_valid_port(port_id)) { 2742 printf("invalid port id %u\n", port_id); 2743 return; 2744 } 2745 if (test_done == 0) { 2746 printf("Before enabling/disabling GRO," 2747 " please stop forwarding first\n"); 2748 return; 2749 } 2750 if (strcmp(onoff, "on") == 0) { 2751 if (gro_ports[port_id].enable != 0) { 2752 printf("GRO is already enabled on port %u. Please" 2753 " disable it first\n", port_id); 2754 return; 2755 } 2756 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2757 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2758 gro_ports[port_id].param.max_flow_num = 2759 GRO_DEFAULT_FLOW_NUM; 2760 gro_ports[port_id].param.max_item_per_flow = 2761 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2762 } 2763 gro_ports[port_id].enable = 1; 2764 } else { 2765 if (gro_ports[port_id].enable == 0) { 2766 printf("GRO is not enabled on port %u\n", port_id); 2767 return; 2768 } 2769 gro_ports[port_id].enable = 0; 2770 } 2771 } 2772 2773 void 2774 setup_gro_flush_cycles(uint8_t cycles) 2775 { 2776 if (test_done == 0) { 2777 printf("Before changing the GRO flush interval," 2778 " please stop forwarding first.\n"); 2779 return; 2780 } 2781 2782 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2783 GRO_DEFAULT_FLUSH_CYCLES) { 2784 printf("The flush cycle must be in the range" 2785 " of 1 to %u. 
Revert to the default" 2786 " value %u.\n", 2787 GRO_MAX_FLUSH_CYCLES, 2788 GRO_DEFAULT_FLUSH_CYCLES); 2789 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2790 } 2791 2792 gro_flush_cycles = cycles; 2793 } 2794 2795 void 2796 show_gro(portid_t port_id) 2797 { 2798 struct rte_gro_param *param; 2799 uint32_t max_pkts_num; 2800 2801 if (!rte_eth_dev_is_valid_port(port_id)) { 2802 printf("Invalid port id %u.\n", port_id); 2803 return; 2804 } 2805 2806 param = &gro_ports[port_id].param; 2807 if (gro_ports[port_id].enable) { 2808 printf("GRO type: TCP/IPv4\n"); 2809 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2810 max_pkts_num = param->max_flow_num * 2811 param->max_item_per_flow; 2812 } else 2813 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2814 printf("Max number of packets to perform GRO: %u\n", 2815 max_pkts_num); 2816 printf("Flushing cycles: %u\n", gro_flush_cycles); 2817 } else 2818 printf("GRO is not enabled on port %u.\n", port_id); 2819 } 2820 2821 void 2822 setup_gso(const char *mode, portid_t port_id) 2823 { 2824 if (!rte_eth_dev_is_valid_port(port_id)) { 2825 printf("invalid port id %u\n", port_id); 2826 return; 2827 } 2828 if (strcmp(mode, "on") == 0) { 2829 if (test_done == 0) { 2830 printf("before enabling GSO," 2831 " please stop forwarding first\n"); 2832 return; 2833 } 2834 gso_ports[port_id].enable = 1; 2835 } else if (strcmp(mode, "off") == 0) { 2836 if (test_done == 0) { 2837 printf("before disabling GSO," 2838 " please stop forwarding first\n"); 2839 return; 2840 } 2841 gso_ports[port_id].enable = 0; 2842 } 2843 } 2844 2845 char* 2846 list_pkt_forwarding_modes(void) 2847 { 2848 static char fwd_modes[128] = ""; 2849 const char *separator = "|"; 2850 struct fwd_engine *fwd_eng; 2851 unsigned i = 0; 2852 2853 if (strlen(fwd_modes) == 0) { 2854 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2855 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2856 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2857 strncat(fwd_modes, separator, 2858 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2859 } 2860 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2861 } 2862 2863 return fwd_modes; 2864 } 2865 2866 char* 2867 list_pkt_forwarding_retry_modes(void) 2868 { 2869 static char fwd_modes[128] = ""; 2870 const char *separator = "|"; 2871 struct fwd_engine *fwd_eng; 2872 unsigned i = 0; 2873 2874 if (strlen(fwd_modes) == 0) { 2875 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2876 if (fwd_eng == &rx_only_engine) 2877 continue; 2878 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2879 sizeof(fwd_modes) - 2880 strlen(fwd_modes) - 1); 2881 strncat(fwd_modes, separator, 2882 sizeof(fwd_modes) - 2883 strlen(fwd_modes) - 1); 2884 } 2885 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2886 } 2887 2888 return fwd_modes; 2889 } 2890 2891 void 2892 set_pkt_forwarding_mode(const char *fwd_mode_name) 2893 { 2894 struct fwd_engine *fwd_eng; 2895 unsigned i; 2896 2897 i = 0; 2898 while ((fwd_eng = fwd_engines[i]) != NULL) { 2899 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2900 printf("Set %s packet forwarding mode%s\n", 2901 fwd_mode_name, 2902 retry_enabled == 0 ? 
"" : " with retry"); 2903 cur_fwd_eng = fwd_eng; 2904 return; 2905 } 2906 i++; 2907 } 2908 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2909 } 2910 2911 void 2912 add_rx_dump_callbacks(portid_t portid) 2913 { 2914 struct rte_eth_dev_info dev_info; 2915 uint16_t queue; 2916 int ret; 2917 2918 if (port_id_is_invalid(portid, ENABLED_WARN)) 2919 return; 2920 2921 ret = eth_dev_info_get_print_err(portid, &dev_info); 2922 if (ret != 0) 2923 return; 2924 2925 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2926 if (!ports[portid].rx_dump_cb[queue]) 2927 ports[portid].rx_dump_cb[queue] = 2928 rte_eth_add_rx_callback(portid, queue, 2929 dump_rx_pkts, NULL); 2930 } 2931 2932 void 2933 add_tx_dump_callbacks(portid_t portid) 2934 { 2935 struct rte_eth_dev_info dev_info; 2936 uint16_t queue; 2937 int ret; 2938 2939 if (port_id_is_invalid(portid, ENABLED_WARN)) 2940 return; 2941 2942 ret = eth_dev_info_get_print_err(portid, &dev_info); 2943 if (ret != 0) 2944 return; 2945 2946 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2947 if (!ports[portid].tx_dump_cb[queue]) 2948 ports[portid].tx_dump_cb[queue] = 2949 rte_eth_add_tx_callback(portid, queue, 2950 dump_tx_pkts, NULL); 2951 } 2952 2953 void 2954 remove_rx_dump_callbacks(portid_t portid) 2955 { 2956 struct rte_eth_dev_info dev_info; 2957 uint16_t queue; 2958 int ret; 2959 2960 if (port_id_is_invalid(portid, ENABLED_WARN)) 2961 return; 2962 2963 ret = eth_dev_info_get_print_err(portid, &dev_info); 2964 if (ret != 0) 2965 return; 2966 2967 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2968 if (ports[portid].rx_dump_cb[queue]) { 2969 rte_eth_remove_rx_callback(portid, queue, 2970 ports[portid].rx_dump_cb[queue]); 2971 ports[portid].rx_dump_cb[queue] = NULL; 2972 } 2973 } 2974 2975 void 2976 remove_tx_dump_callbacks(portid_t portid) 2977 { 2978 struct rte_eth_dev_info dev_info; 2979 uint16_t queue; 2980 int ret; 2981 2982 if (port_id_is_invalid(portid, ENABLED_WARN)) 2983 return; 2984 2985 ret = eth_dev_info_get_print_err(portid, &dev_info); 2986 if (ret != 0) 2987 return; 2988 2989 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2990 if (ports[portid].tx_dump_cb[queue]) { 2991 rte_eth_remove_tx_callback(portid, queue, 2992 ports[portid].tx_dump_cb[queue]); 2993 ports[portid].tx_dump_cb[queue] = NULL; 2994 } 2995 } 2996 2997 void 2998 configure_rxtx_dump_callbacks(uint16_t verbose) 2999 { 3000 portid_t portid; 3001 3002 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3003 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 3004 return; 3005 #endif 3006 3007 RTE_ETH_FOREACH_DEV(portid) 3008 { 3009 if (verbose == 1 || verbose > 2) 3010 add_rx_dump_callbacks(portid); 3011 else 3012 remove_rx_dump_callbacks(portid); 3013 if (verbose >= 2) 3014 add_tx_dump_callbacks(portid); 3015 else 3016 remove_tx_dump_callbacks(portid); 3017 } 3018 } 3019 3020 void 3021 set_verbose_level(uint16_t vb_level) 3022 { 3023 printf("Change verbose level from %u to %u\n", 3024 (unsigned int) verbose_level, (unsigned int) vb_level); 3025 verbose_level = vb_level; 3026 configure_rxtx_dump_callbacks(verbose_level); 3027 } 3028 3029 void 3030 vlan_extend_set(portid_t port_id, int on) 3031 { 3032 int diag; 3033 int vlan_offload; 3034 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3035 3036 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3037 return; 3038 3039 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3040 3041 if (on) { 3042 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 3043 port_rx_offloads |= 
DEV_RX_OFFLOAD_VLAN_EXTEND; 3044 } else { 3045 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 3046 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3047 } 3048 3049 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3050 if (diag < 0) 3051 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed " 3052 "diag=%d\n", port_id, on, diag); 3053 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3054 } 3055 3056 void 3057 rx_vlan_strip_set(portid_t port_id, int on) 3058 { 3059 int diag; 3060 int vlan_offload; 3061 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3062 3063 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3064 return; 3065 3066 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3067 3068 if (on) { 3069 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 3070 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3071 } else { 3072 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 3073 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3074 } 3075 3076 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3077 if (diag < 0) 3078 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed " 3079 "diag=%d\n", port_id, on, diag); 3080 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3081 } 3082 3083 void 3084 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 3085 { 3086 int diag; 3087 3088 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3089 return; 3090 3091 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 3092 if (diag < 0) 3093 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed " 3094 "diag=%d\n", port_id, queue_id, on, diag); 3095 } 3096 3097 void 3098 rx_vlan_filter_set(portid_t port_id, int on) 3099 { 3100 int diag; 3101 int vlan_offload; 3102 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3103 3104 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3105 return; 3106 3107 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3108 3109 if (on) { 3110 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 3111 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3112 } else { 3113 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 3114 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3115 } 3116 3117 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3118 if (diag < 0) 3119 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed " 3120 "diag=%d\n", port_id, on, diag); 3121 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3122 } 3123 3124 void 3125 rx_vlan_qinq_strip_set(portid_t port_id, int on) 3126 { 3127 int diag; 3128 int vlan_offload; 3129 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3130 3131 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3132 return; 3133 3134 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3135 3136 if (on) { 3137 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 3138 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3139 } else { 3140 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 3141 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3142 } 3143 3144 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3145 if (diag < 0) 3146 printf("%s(port_id=%d, on=%d) failed " 3147 "diag=%d\n", __func__, port_id, on, diag); 3148 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3149 } 3150 3151 int 3152 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 3153 { 3154 int diag; 3155 3156 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3157 return 1; 3158 if (vlan_id_is_invalid(vlan_id)) 3159 return 1; 3160 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 3161 if 
(diag == 0) 3162 return 0; 3163 printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed " 3164 "diag=%d\n", 3165 port_id, vlan_id, on, diag); 3166 return -1; 3167 } 3168 3169 void 3170 rx_vlan_all_filter_set(portid_t port_id, int on) 3171 { 3172 uint16_t vlan_id; 3173 3174 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3175 return; 3176 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 3177 if (rx_vft_set(port_id, vlan_id, on)) 3178 break; 3179 } 3180 } 3181 3182 void 3183 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 3184 { 3185 int diag; 3186 3187 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3188 return; 3189 3190 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 3191 if (diag == 0) 3192 return; 3193 3194 printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed " 3195 "diag=%d\n", 3196 port_id, vlan_type, tp_id, diag); 3197 } 3198 3199 void 3200 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3201 { 3202 struct rte_eth_dev_info dev_info; 3203 int ret; 3204 3205 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3206 return; 3207 if (vlan_id_is_invalid(vlan_id)) 3208 return; 3209 3210 if (ports[port_id].dev_conf.txmode.offloads & 3211 DEV_TX_OFFLOAD_QINQ_INSERT) { 3212 printf("Error: cannot set VLAN insert while QinQ insert is enabled\n"); 3213 return; 3214 } 3215 3216 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3217 if (ret != 0) 3218 return; 3219 3220 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3221 printf("Error: VLAN insert is not supported by port %d\n", 3222 port_id); 3223 return; 3224 } 3225 3226 tx_vlan_reset(port_id); 3227 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3228 ports[port_id].tx_vlan_id = vlan_id; 3229 } 3230 3231 void 3232 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3233 { 3234 struct rte_eth_dev_info dev_info; 3235 int ret; 3236 3237 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3238 return; 3239 if (vlan_id_is_invalid(vlan_id)) 3240 return; 3241 if (vlan_id_is_invalid(vlan_id_outer)) 3242 return; 3243 3244 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3245 if (ret != 0) 3246 return; 3247 3248 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3249 printf("Error: QinQ insert not supported by port %d\n", 3250 port_id); 3251 return; 3252 } 3253 3254 tx_vlan_reset(port_id); 3255 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 3256 DEV_TX_OFFLOAD_QINQ_INSERT); 3257 ports[port_id].tx_vlan_id = vlan_id; 3258 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3259 } 3260 3261 void 3262 tx_vlan_reset(portid_t port_id) 3263 { 3264 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3265 return; 3266 ports[port_id].dev_conf.txmode.offloads &= 3267 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3268 DEV_TX_OFFLOAD_QINQ_INSERT); 3269 ports[port_id].tx_vlan_id = 0; 3270 ports[port_id].tx_vlan_id_outer = 0; 3271 } 3272 3273 void 3274 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3275 { 3276 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3277 return; 3278 3279 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3280 } 3281 3282 void 3283 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3284 { 3285 uint16_t i; 3286 uint8_t existing_mapping_found = 0; 3287 3288 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3289 return; 3290 3291 if (is_rx ? 
(rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3292 return; 3293 3294 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3295 printf("map_value not in required range 0..%d\n", 3296 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3297 return; 3298 } 3299 3300 if (!is_rx) { /*then tx*/ 3301 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3302 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3303 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3304 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3305 existing_mapping_found = 1; 3306 break; 3307 } 3308 } 3309 if (!existing_mapping_found) { /* A new additional mapping... */ 3310 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3311 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3312 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3313 nb_tx_queue_stats_mappings++; 3314 } 3315 } 3316 else { /*rx*/ 3317 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3318 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3319 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3320 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3321 existing_mapping_found = 1; 3322 break; 3323 } 3324 } 3325 if (!existing_mapping_found) { /* A new additional mapping... */ 3326 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3327 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3328 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3329 nb_rx_queue_stats_mappings++; 3330 } 3331 } 3332 } 3333 3334 void 3335 set_xstats_hide_zero(uint8_t on_off) 3336 { 3337 xstats_hide_zero = on_off; 3338 } 3339 3340 static inline void 3341 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3342 { 3343 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3344 3345 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3346 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3347 " tunnel_id: 0x%08x", 3348 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3349 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3350 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3351 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3352 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3353 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3354 3355 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3356 rte_be_to_cpu_16(mask->src_port_mask), 3357 rte_be_to_cpu_16(mask->dst_port_mask)); 3358 3359 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3360 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3361 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3362 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3363 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3364 3365 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3366 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3367 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3368 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3369 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3370 } 3371 3372 printf("\n"); 3373 } 3374 3375 static inline void 3376 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3377 { 3378 struct rte_eth_flex_payload_cfg *cfg; 3379 uint32_t i, j; 3380 3381 for (i = 0; i < flex_conf->nb_payloads; i++) { 3382 cfg = &flex_conf->flex_set[i]; 3383 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3384 printf("\n RAW: "); 3385 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3386 printf("\n L2_PAYLOAD: "); 3387 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3388 printf("\n 
L3_PAYLOAD: "); 3389 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3390 printf("\n L4_PAYLOAD: "); 3391 else 3392 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3393 for (j = 0; j < num; j++) 3394 printf(" %-5u", cfg->src_offset[j]); 3395 } 3396 printf("\n"); 3397 } 3398 3399 static char * 3400 flowtype_to_str(uint16_t flow_type) 3401 { 3402 struct flow_type_info { 3403 char str[32]; 3404 uint16_t ftype; 3405 }; 3406 3407 uint8_t i; 3408 static struct flow_type_info flowtype_str_table[] = { 3409 {"raw", RTE_ETH_FLOW_RAW}, 3410 {"ipv4", RTE_ETH_FLOW_IPV4}, 3411 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3412 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3413 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3414 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3415 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3416 {"ipv6", RTE_ETH_FLOW_IPV6}, 3417 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3418 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3419 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3420 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3421 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3422 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3423 {"port", RTE_ETH_FLOW_PORT}, 3424 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3425 {"geneve", RTE_ETH_FLOW_GENEVE}, 3426 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3427 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3428 }; 3429 3430 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3431 if (flowtype_str_table[i].ftype == flow_type) 3432 return flowtype_str_table[i].str; 3433 } 3434 3435 return NULL; 3436 } 3437 3438 static inline void 3439 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3440 { 3441 struct rte_eth_fdir_flex_mask *mask; 3442 uint32_t i, j; 3443 char *p; 3444 3445 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3446 mask = &flex_conf->flex_mask[i]; 3447 p = flowtype_to_str(mask->flow_type); 3448 printf("\n %s:\t", p ? 
p : "unknown"); 3449 for (j = 0; j < num; j++) 3450 printf(" %02x", mask->mask[j]); 3451 } 3452 printf("\n"); 3453 } 3454 3455 static inline void 3456 print_fdir_flow_type(uint32_t flow_types_mask) 3457 { 3458 int i; 3459 char *p; 3460 3461 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3462 if (!(flow_types_mask & (1 << i))) 3463 continue; 3464 p = flowtype_to_str(i); 3465 if (p) 3466 printf(" %s", p); 3467 else 3468 printf(" unknown"); 3469 } 3470 printf("\n"); 3471 } 3472 3473 void 3474 fdir_get_infos(portid_t port_id) 3475 { 3476 struct rte_eth_fdir_stats fdir_stat; 3477 struct rte_eth_fdir_info fdir_info; 3478 int ret; 3479 3480 static const char *fdir_stats_border = "########################"; 3481 3482 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3483 return; 3484 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3485 if (ret < 0) { 3486 printf("\n FDIR is not supported on port %-2d\n", 3487 port_id); 3488 return; 3489 } 3490 3491 memset(&fdir_info, 0, sizeof(fdir_info)); 3492 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3493 RTE_ETH_FILTER_INFO, &fdir_info); 3494 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3495 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3496 RTE_ETH_FILTER_STATS, &fdir_stat); 3497 printf("\n %s FDIR infos for port %-2d %s\n", 3498 fdir_stats_border, port_id, fdir_stats_border); 3499 printf(" MODE: "); 3500 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3501 printf(" PERFECT\n"); 3502 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3503 printf(" PERFECT-MAC-VLAN\n"); 3504 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3505 printf(" PERFECT-TUNNEL\n"); 3506 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3507 printf(" SIGNATURE\n"); 3508 else 3509 printf(" DISABLE\n"); 3510 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3511 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3512 printf(" SUPPORTED FLOW TYPE: "); 3513 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3514 } 3515 printf(" FLEX PAYLOAD INFO:\n"); 3516 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3517 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3518 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3519 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3520 fdir_info.flex_payload_unit, 3521 fdir_info.max_flex_payload_segment_num, 3522 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3523 printf(" MASK: "); 3524 print_fdir_mask(&fdir_info.mask); 3525 if (fdir_info.flex_conf.nb_payloads > 0) { 3526 printf(" FLEX PAYLOAD SRC OFFSET:"); 3527 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3528 } 3529 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3530 printf(" FLEX MASK CFG:"); 3531 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3532 } 3533 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3534 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3535 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3536 fdir_info.guarant_spc, fdir_info.best_spc); 3537 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3538 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3539 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3540 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3541 fdir_stat.collision, fdir_stat.free, 3542 fdir_stat.maxhash, fdir_stat.maxlen, 3543 fdir_stat.add, fdir_stat.remove, 3544 fdir_stat.f_add, fdir_stat.f_remove); 3545 printf(" %s############################%s\n", 3546 fdir_stats_border, fdir_stats_border); 
3547 } 3548 3549 void 3550 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3551 { 3552 struct rte_port *port; 3553 struct rte_eth_fdir_flex_conf *flex_conf; 3554 int i, idx = 0; 3555 3556 port = &ports[port_id]; 3557 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3558 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3559 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3560 idx = i; 3561 break; 3562 } 3563 } 3564 if (i >= RTE_ETH_FLOW_MAX) { 3565 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3566 idx = flex_conf->nb_flexmasks; 3567 flex_conf->nb_flexmasks++; 3568 } else { 3569 printf("The flex mask table is full. Cannot set flex" 3570 " mask for flow_type (%u).\n", cfg->flow_type); 3571 return; 3572 } 3573 } 3574 rte_memcpy(&flex_conf->flex_mask[idx], 3575 cfg, 3576 sizeof(struct rte_eth_fdir_flex_mask)); 3577 } 3578 3579 void 3580 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3581 { 3582 struct rte_port *port; 3583 struct rte_eth_fdir_flex_conf *flex_conf; 3584 int i, idx = 0; 3585 3586 port = &ports[port_id]; 3587 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3588 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3589 if (cfg->type == flex_conf->flex_set[i].type) { 3590 idx = i; 3591 break; 3592 } 3593 } 3594 if (i >= RTE_ETH_PAYLOAD_MAX) { 3595 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3596 idx = flex_conf->nb_payloads; 3597 flex_conf->nb_payloads++; 3598 } else { 3599 printf("The flex payload table is full. Cannot set" 3600 " flex payload for type (%u).\n", cfg->type); 3601 return; 3602 } 3603 } 3604 rte_memcpy(&flex_conf->flex_set[idx], 3605 cfg, 3606 sizeof(struct rte_eth_flex_payload_cfg)); 3607 3608 } 3609 3610 void 3611 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3612 { 3613 #ifdef RTE_LIBRTE_IXGBE_PMD 3614 int diag; 3615 3616 if (is_rx) 3617 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3618 else 3619 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3620 3621 if (diag == 0) 3622 return; 3623 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3624 is_rx ? "rx" : "tx", port_id, diag); 3625 return; 3626 #endif 3627 printf("VF %s setting not supported for port %d\n", 3628 is_rx ? 
"Rx" : "Tx", port_id); 3629 RTE_SET_USED(vf); 3630 RTE_SET_USED(on); 3631 } 3632 3633 int 3634 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3635 { 3636 int diag; 3637 struct rte_eth_link link; 3638 int ret; 3639 3640 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3641 return 1; 3642 ret = eth_link_get_nowait_print_err(port_id, &link); 3643 if (ret < 0) 3644 return 1; 3645 if (rate > link.link_speed) { 3646 printf("Invalid rate value:%u bigger than link speed: %u\n", 3647 rate, link.link_speed); 3648 return 1; 3649 } 3650 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3651 if (diag == 0) 3652 return diag; 3653 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3654 port_id, diag); 3655 return diag; 3656 } 3657 3658 int 3659 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3660 { 3661 int diag = -ENOTSUP; 3662 3663 RTE_SET_USED(vf); 3664 RTE_SET_USED(rate); 3665 RTE_SET_USED(q_msk); 3666 3667 #ifdef RTE_LIBRTE_IXGBE_PMD 3668 if (diag == -ENOTSUP) 3669 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3670 q_msk); 3671 #endif 3672 #ifdef RTE_LIBRTE_BNXT_PMD 3673 if (diag == -ENOTSUP) 3674 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3675 #endif 3676 if (diag == 0) 3677 return diag; 3678 3679 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3680 port_id, diag); 3681 return diag; 3682 } 3683 3684 /* 3685 * Functions to manage the set of filtered Multicast MAC addresses. 3686 * 3687 * A pool of filtered multicast MAC addresses is associated with each port. 3688 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3689 * The address of the pool and the number of valid multicast MAC addresses 3690 * recorded in the pool are stored in the fields "mc_addr_pool" and 3691 * "mc_addr_nb" of the "rte_port" data structure. 3692 * 3693 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3694 * to be supplied a contiguous array of multicast MAC addresses. 3695 * To comply with this constraint, the set of multicast addresses recorded 3696 * into the pool are systematically compacted at the beginning of the pool. 3697 * Hence, when a multicast address is removed from the pool, all following 3698 * addresses, if any, are copied back to keep the set contiguous. 3699 */ 3700 #define MCAST_POOL_INC 32 3701 3702 static int 3703 mcast_addr_pool_extend(struct rte_port *port) 3704 { 3705 struct rte_ether_addr *mc_pool; 3706 size_t mc_pool_size; 3707 3708 /* 3709 * If a free entry is available at the end of the pool, just 3710 * increment the number of recorded multicast addresses. 3711 */ 3712 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3713 port->mc_addr_nb++; 3714 return 0; 3715 } 3716 3717 /* 3718 * [re]allocate a pool with MCAST_POOL_INC more entries. 3719 * The previous test guarantees that port->mc_addr_nb is a multiple 3720 * of MCAST_POOL_INC. 
3721 */ 3722 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 3723 MCAST_POOL_INC); 3724 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 3725 mc_pool_size); 3726 if (mc_pool == NULL) { 3727 printf("allocation of pool of %u multicast addresses failed\n", 3728 port->mc_addr_nb + MCAST_POOL_INC); 3729 return -ENOMEM; 3730 } 3731 3732 port->mc_addr_pool = mc_pool; 3733 port->mc_addr_nb++; 3734 return 0; 3735 3736 } 3737 3738 static void 3739 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 3740 { 3741 if (mcast_addr_pool_extend(port) != 0) 3742 return; 3743 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 3744 } 3745 3746 static void 3747 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3748 { 3749 port->mc_addr_nb--; 3750 if (addr_idx == port->mc_addr_nb) { 3751 /* No need to recompact the set of multicast addresses. */ 3752 if (port->mc_addr_nb == 0) { 3753 /* free the pool of multicast addresses. */ 3754 free(port->mc_addr_pool); 3755 port->mc_addr_pool = NULL; 3756 } 3757 return; 3758 } 3759 memmove(&port->mc_addr_pool[addr_idx], 3760 &port->mc_addr_pool[addr_idx + 1], 3761 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 3762 } 3763 3764 static int 3765 eth_port_multicast_addr_list_set(portid_t port_id) 3766 { 3767 struct rte_port *port; 3768 int diag; 3769 3770 port = &ports[port_id]; 3771 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3772 port->mc_addr_nb); 3773 if (diag < 0) 3774 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3775 port_id, port->mc_addr_nb, diag); 3776 3777 return diag; 3778 } 3779 3780 void 3781 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 3782 { 3783 struct rte_port *port; 3784 uint32_t i; 3785 3786 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3787 return; 3788 3789 port = &ports[port_id]; 3790 3791 /* 3792 * Check that the added multicast MAC address is not already recorded 3793 * in the pool of multicast addresses. 3794 */ 3795 for (i = 0; i < port->mc_addr_nb; i++) { 3796 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3797 printf("multicast address already filtered by port\n"); 3798 return; 3799 } 3800 } 3801 3802 mcast_addr_pool_append(port, mc_addr); 3803 if (eth_port_multicast_addr_list_set(port_id) < 0) 3804 /* Rollback on failure, remove the address from the pool */ 3805 mcast_addr_pool_remove(port, i); 3806 } 3807 3808 void 3809 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 3810 { 3811 struct rte_port *port; 3812 uint32_t i; 3813 3814 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3815 return; 3816 3817 port = &ports[port_id]; 3818 3819 /* 3820 * Search the pool of multicast MAC addresses for the removed address.
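 * The pool is kept contiguous: on a hit, mcast_addr_pool_remove() shifts
 * any following entries down by one slot.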
3821 */ 3822 for (i = 0; i < port->mc_addr_nb; i++) { 3823 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3824 break; 3825 } 3826 if (i == port->mc_addr_nb) { 3827 printf("multicast address not filtered by port %d\n", port_id); 3828 return; 3829 } 3830 3831 mcast_addr_pool_remove(port, i); 3832 if (eth_port_multicast_addr_list_set(port_id) < 0) 3833 /* Rollback on failure, add the address back into the pool */ 3834 mcast_addr_pool_append(port, mc_addr); 3835 } 3836 3837 void 3838 port_dcb_info_display(portid_t port_id) 3839 { 3840 struct rte_eth_dcb_info dcb_info; 3841 uint16_t i; 3842 int ret; 3843 static const char *border = "================"; 3844 3845 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3846 return; 3847 3848 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3849 if (ret) { 3850 printf("\n Failed to get dcb infos on port %-2d\n", 3851 port_id); 3852 return; 3853 } 3854 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3855 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3856 printf("\n TC : "); 3857 for (i = 0; i < dcb_info.nb_tcs; i++) 3858 printf("\t%4d", i); 3859 printf("\n Priority : "); 3860 for (i = 0; i < dcb_info.nb_tcs; i++) 3861 printf("\t%4d", dcb_info.prio_tc[i]); 3862 printf("\n BW percent :"); 3863 for (i = 0; i < dcb_info.nb_tcs; i++) 3864 printf("\t%4d%%", dcb_info.tc_bws[i]); 3865 printf("\n RXQ base : "); 3866 for (i = 0; i < dcb_info.nb_tcs; i++) 3867 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3868 printf("\n RXQ number :"); 3869 for (i = 0; i < dcb_info.nb_tcs; i++) 3870 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3871 printf("\n TXQ base : "); 3872 for (i = 0; i < dcb_info.nb_tcs; i++) 3873 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3874 printf("\n TXQ number :"); 3875 for (i = 0; i < dcb_info.nb_tcs; i++) 3876 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3877 printf("\n"); 3878 } 3879 3880 uint8_t * 3881 open_file(const char *file_path, uint32_t *size) 3882 { 3883 int fd = open(file_path, O_RDONLY); 3884 off_t pkg_size; 3885 uint8_t *buf = NULL; 3886 int ret = 0; 3887 struct stat st_buf; 3888 3889 if (size) 3890 *size = 0; 3891 3892 if (fd == -1) { 3893 printf("%s: Failed to open %s\n", __func__, file_path); 3894 return buf; 3895 } 3896 3897 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3898 close(fd); 3899 printf("%s: File operations failed\n", __func__); 3900 return buf; 3901 } 3902 3903 pkg_size = st_buf.st_size; 3904 if (pkg_size < 0) { 3905 close(fd); 3906 printf("%s: File operations failed\n", __func__); 3907 return buf; 3908 } 3909 3910 buf = (uint8_t *)malloc(pkg_size); 3911 if (!buf) { 3912 close(fd); 3913 printf("%s: Failed to malloc memory\n", __func__); 3914 return buf; 3915 } 3916 3917 ret = read(fd, buf, pkg_size); 3918 if (ret < 0) { 3919 close(fd); 3920 printf("%s: File read operation failed\n", __func__); 3921 close_file(buf); 3922 return NULL; 3923 } 3924 3925 if (size) 3926 *size = pkg_size; 3927 3928 close(fd); 3929 3930 return buf; 3931 } 3932 3933 int 3934 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3935 { 3936 FILE *fh = fopen(file_path, "wb"); 3937 3938 if (fh == NULL) { 3939 printf("%s: Failed to open %s\n", __func__, file_path); 3940 return -1; 3941 } 3942 3943 if (fwrite(buf, 1, size, fh) != size) { 3944 fclose(fh); 3945 printf("%s: File write operation failed\n", __func__); 3946 return -1; 3947 } 3948 3949 fclose(fh); 3950 3951 return 0; 3952 } 3953 3954 int 3955 close_file(uint8_t *buf) 3956 { 3957 if (buf) 
{ 3958 free((void *)buf); 3959 return 0; 3960 } 3961 3962 return -1; 3963 } 3964 3965 void 3966 port_queue_region_info_display(portid_t port_id, void *buf) 3967 { 3968 #ifdef RTE_LIBRTE_I40E_PMD 3969 uint16_t i, j; 3970 struct rte_pmd_i40e_queue_regions *info = 3971 (struct rte_pmd_i40e_queue_regions *)buf; 3972 static const char *queue_region_info_stats_border = "-------"; 3973 3974 if (!info->queue_region_number) 3975 printf("no queue region has been set before\n"); 3976 3977 printf("\n %s All queue region info for port=%2d %s", 3978 queue_region_info_stats_border, port_id, 3979 queue_region_info_stats_border); 3980 printf("\n queue_region_number: %-14u\n", 3981 info->queue_region_number); 3982 3983 for (i = 0; i < info->queue_region_number; i++) { 3984 printf("\n region_id: %-14u queue_number: %-14u " 3985 "queue_start_index: %-14u\n", 3986 info->region[i].region_id, 3987 info->region[i].queue_num, 3988 info->region[i].queue_start_index); 3989 3990 printf(" user_priority_num is %-14u :", 3991 info->region[i].user_priority_num); 3992 for (j = 0; j < info->region[i].user_priority_num; j++) 3993 printf(" %-14u ", info->region[i].user_priority[j]); 3994 3995 printf("\n flowtype_num is %-14u :", 3996 info->region[i].flowtype_num); 3997 for (j = 0; j < info->region[i].flowtype_num; j++) 3998 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3999 } 4000 #else 4001 RTE_SET_USED(port_id); 4002 RTE_SET_USED(buf); 4003 #endif 4004 4005 printf("\n\n"); 4006 } 4007 4008 void 4009 show_macs(portid_t port_id) 4010 { 4011 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 4012 struct rte_eth_dev_info dev_info; 4013 struct rte_ether_addr *addr; 4014 uint32_t i, num_macs = 0; 4015 struct rte_eth_dev *dev; 4016 4017 dev = &rte_eth_devices[port_id]; 4018 4019 rte_eth_dev_info_get(port_id, &dev_info); 4020 4021 for (i = 0; i < dev_info.max_mac_addrs; i++) { 4022 addr = &dev->data->mac_addrs[i]; 4023 4024 /* skip zero address */ 4025 if (rte_is_zero_ether_addr(addr)) 4026 continue; 4027 4028 num_macs++; 4029 } 4030 4031 printf("Number of MAC addresses added: %d\n", num_macs); 4032 4033 for (i = 0; i < dev_info.max_mac_addrs; i++) { 4034 addr = &dev->data->mac_addrs[i]; 4035 4036 /* skip zero address */ 4037 if (rte_is_zero_ether_addr(addr)) 4038 continue; 4039 4040 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 4041 printf(" %s\n", buf); 4042 } 4043 } 4044 4045 void 4046 show_mcast_macs(portid_t port_id) 4047 { 4048 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 4049 struct rte_ether_addr *addr; 4050 struct rte_port *port; 4051 uint32_t i; 4052 4053 port = &ports[port_id]; 4054 4055 printf("Number of multicast MAC addresses added: %d\n", port->mc_addr_nb); 4056 4057 for (i = 0; i < port->mc_addr_nb; i++) { 4058 addr = &port->mc_addr_pool[i]; 4059 4060 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 4061 printf(" %s\n", buf); 4062 } 4063 } 4064
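/*
 * Illustrative sketch (not part of testpmd): how the file helpers above
 * compose for a read-modify-write round trip. The path "pkg.bin" and the
 * guard macro TESTPMD_CONFIG_EXAMPLES are hypothetical.
 */
#ifdef TESTPMD_CONFIG_EXAMPLES
static int
example_file_roundtrip(void)
{
	uint32_t size = 0;
	uint8_t *buf = open_file("pkg.bin", &size);

	if (buf == NULL)
		return -1;
	/* ... inspect or patch buf[0 .. size - 1] here ... */
	if (save_file("pkg.out", buf, size) != 0) {
		close_file(buf);
		return -1;
	}
	return close_file(buf);
}
#endif /* TESTPMD_CONFIG_EXAMPLES */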