1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <rte_config.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 const struct rss_type_info rss_type_table[] = { 77 { "all", ETH_RSS_IP | ETH_RSS_TCP | 78 ETH_RSS_UDP | ETH_RSS_SCTP | 79 ETH_RSS_L2_PAYLOAD }, 80 { "none", 0 }, 81 { "ipv4", ETH_RSS_IPV4 }, 82 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 83 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 84 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 85 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 86 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 87 { "ipv6", ETH_RSS_IPV6 }, 88 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 89 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 90 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 91 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 92 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 93 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 94 { "ipv6-ex", ETH_RSS_IPV6_EX }, 95 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 96 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 97 { "port", ETH_RSS_PORT }, 98 { "vxlan", ETH_RSS_VXLAN }, 99 { "geneve", ETH_RSS_GENEVE }, 100 { "nvgre", ETH_RSS_NVGRE }, 101 { "ip", ETH_RSS_IP }, 102 { "udp", ETH_RSS_UDP }, 103 { "tcp", ETH_RSS_TCP }, 104 { "sctp", ETH_RSS_SCTP }, 105 { "tunnel", ETH_RSS_TUNNEL }, 106 { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, 107 { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, 108 { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, 109 { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, 110 { NULL, 0 }, 111 }; 112 113 static void 114 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 115 { 116 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 117 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 118 printf("%s%s", name, buf); 119 } 120 121 void 122 nic_stats_display(portid_t port_id) 123 { 124 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 125 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 126 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 127 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 128 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 129 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 130 diff_cycles; 131 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 132 struct rte_eth_stats stats; 133 struct rte_port *port = &ports[port_id]; 134 uint8_t i; 135 136 static const char *nic_stats_border = "########################"; 137 138 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 139 print_valid_ports(); 140 return; 141 } 142 rte_eth_stats_get(port_id, &stats); 143 printf("\n %s NIC statistics for port %-2d %s\n", 144 nic_stats_border, port_id, nic_stats_border); 145 146 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 147 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 148 "%-"PRIu64"\n", 149 stats.ipackets, stats.imissed, stats.ibytes); 150 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 151 printf(" RX-nombuf: %-10"PRIu64"\n", 152 stats.rx_nombuf); 153 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 154 "%-"PRIu64"\n", 155 stats.opackets, stats.oerrors, stats.obytes); 156 } 157 else { 158 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 159 " RX-bytes: %10"PRIu64"\n", 160 stats.ipackets, stats.ierrors, stats.ibytes); 161 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 162 printf(" RX-nombuf: %10"PRIu64"\n", 163 stats.rx_nombuf); 164 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 165 " TX-bytes: %10"PRIu64"\n", 166 stats.opackets, stats.oerrors, stats.obytes); 167 } 168 169 if (port->rx_queue_stats_mapping_enabled) { 170 printf("\n"); 171 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 172 printf(" Stats reg %2d RX-packets: %10"PRIu64 173 " RX-errors: %10"PRIu64 174 " RX-bytes: %10"PRIu64"\n", 175 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 176 } 177 } 178 if (port->tx_queue_stats_mapping_enabled) { 179 printf("\n"); 180 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 181 printf(" Stats reg %2d TX-packets: %10"PRIu64 182 " TX-bytes: %10"PRIu64"\n", 183 i, stats.q_opackets[i], stats.q_obytes[i]); 184 } 185 } 186 187 diff_cycles = prev_cycles[port_id]; 188 prev_cycles[port_id] = rte_rdtsc(); 189 if (diff_cycles > 0) 190 diff_cycles = prev_cycles[port_id] - diff_cycles; 191 192 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 193 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 194 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 195 (stats.opackets - prev_pkts_tx[port_id]) : 0; 196 prev_pkts_rx[port_id] = stats.ipackets; 197 prev_pkts_tx[port_id] = stats.opackets; 198 mpps_rx = diff_cycles > 0 ? 199 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 200 mpps_tx = diff_cycles > 0 ? 201 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 202 203 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 204 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 205 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 206 (stats.obytes - prev_bytes_tx[port_id]) : 0; 207 prev_bytes_rx[port_id] = stats.ibytes; 208 prev_bytes_tx[port_id] = stats.obytes; 209 mbps_rx = diff_cycles > 0 ? 210 diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0; 211 mbps_tx = diff_cycles > 0 ? 212 diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0; 213 214 printf("\n Throughput (since last show)\n"); 215 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 216 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 217 mpps_tx, mbps_tx * 8); 218 219 printf(" %s############################%s\n", 220 nic_stats_border, nic_stats_border); 221 } 222 223 void 224 nic_stats_clear(portid_t port_id) 225 { 226 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 227 print_valid_ports(); 228 return; 229 } 230 rte_eth_stats_reset(port_id); 231 printf("\n NIC statistics for port %d cleared\n", port_id); 232 } 233 234 void 235 nic_xstats_display(portid_t port_id) 236 { 237 struct rte_eth_xstat *xstats; 238 int cnt_xstats, idx_xstat; 239 struct rte_eth_xstat_name *xstats_names; 240 241 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 242 print_valid_ports(); 243 return; 244 } 245 printf("###### NIC extended statistics for port %-2d\n", port_id); 246 if (!rte_eth_dev_is_valid_port(port_id)) { 247 printf("Error: Invalid port number %i\n", port_id); 248 return; 249 } 250 251 /* Get count */ 252 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 253 if (cnt_xstats < 0) { 254 printf("Error: Cannot get count of xstats\n"); 255 return; 256 } 257 258 /* Get id-name lookup table */ 259 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 260 if (xstats_names == NULL) { 261 printf("Cannot allocate memory for xstats lookup\n"); 262 return; 263 } 264 if (cnt_xstats != rte_eth_xstats_get_names( 265 port_id, xstats_names, cnt_xstats)) { 266 printf("Error: Cannot get xstats lookup\n"); 267 free(xstats_names); 268 return; 269 } 270 271 /* Get stats themselves */ 272 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 273 if (xstats == NULL) { 274 printf("Cannot allocate memory for xstats\n"); 275 free(xstats_names); 276 return; 277 } 278 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 279 printf("Error: Unable to get xstats\n"); 280 free(xstats_names); 281 free(xstats); 282 return; 283 } 284 285 /* Display xstats */ 286 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 287 if (xstats_hide_zero && !xstats[idx_xstat].value) 288 continue; 289 printf("%s: %"PRIu64"\n", 290 xstats_names[idx_xstat].name, 291 xstats[idx_xstat].value); 292 } 293 free(xstats_names); 294 free(xstats); 295 } 296 297 void 298 nic_xstats_clear(portid_t port_id) 299 { 300 int ret; 301 302 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 303 print_valid_ports(); 304 return; 305 } 306 ret = rte_eth_xstats_reset(port_id); 307 if (ret != 0) { 308 printf("%s: Error: failed to reset xstats (port %u): %s", 309 __func__, port_id, strerror(ret)); 310 } 311 } 312 313 void 314 nic_stats_mapping_display(portid_t port_id) 315 { 316 struct rte_port *port = &ports[port_id]; 317 uint16_t i; 318 319 static const char *nic_stats_mapping_border = "########################"; 320 321 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 322 print_valid_ports(); 323 return; 324 } 325 326 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 327 printf("Port id %d - either does not support queue statistic mapping or" 328 " no queue statistic mapping set\n", port_id); 329 return; 330 } 331 332 printf("\n %s NIC statistics mapping for port %-2d %s\n", 333 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 334 335 if (port->rx_queue_stats_mapping_enabled) { 336 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 337 if (rx_queue_stats_mappings[i].port_id == port_id) { 338 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 339 rx_queue_stats_mappings[i].queue_id, 340 rx_queue_stats_mappings[i].stats_counter_id); 341 } 342 } 343 printf("\n"); 344 } 345 346 347 if (port->tx_queue_stats_mapping_enabled) { 348 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 349 if (tx_queue_stats_mappings[i].port_id == port_id) { 350 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 351 tx_queue_stats_mappings[i].queue_id, 352 tx_queue_stats_mappings[i].stats_counter_id); 353 } 354 } 355 } 356 357 printf(" %s####################################%s\n", 358 nic_stats_mapping_border, nic_stats_mapping_border); 359 } 360 361 void 362 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 363 { 364 struct rte_eth_burst_mode mode; 365 struct rte_eth_rxq_info qinfo; 366 int32_t rc; 367 static const char *info_border = "*********************"; 368 369 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 370 if (rc != 0) { 371 printf("Failed to retrieve information for port: %u, " 372 "RX queue: %hu\nerror desc: %s(%d)\n", 373 port_id, queue_id, strerror(-rc), rc); 374 return; 375 } 376 377 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 378 info_border, port_id, queue_id, info_border); 379 380 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 381 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 382 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 383 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 384 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 385 printf("\nRX drop packets: %s", 386 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 387 printf("\nRX deferred start: %s", 388 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 389 printf("\nRX scattered packets: %s", 390 (qinfo.scattered_rx != 0) ? "on" : "off"); 391 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 392 393 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 394 printf("\nBurst mode: %s%s", 395 mode.info, 396 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 397 " (per queue)" : ""); 398 399 printf("\n"); 400 } 401 402 void 403 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 404 { 405 struct rte_eth_burst_mode mode; 406 struct rte_eth_txq_info qinfo; 407 int32_t rc; 408 static const char *info_border = "*********************"; 409 410 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 411 if (rc != 0) { 412 printf("Failed to retrieve information for port: %u, " 413 "TX queue: %hu\nerror desc: %s(%d)\n", 414 port_id, queue_id, strerror(-rc), rc); 415 return; 416 } 417 418 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 419 info_border, port_id, queue_id, info_border); 420 421 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 422 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 423 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 424 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 425 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 426 printf("\nTX deferred start: %s", 427 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 428 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 429 430 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 431 printf("\nBurst mode: %s%s", 432 mode.info, 433 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 434 " (per queue)" : ""); 435 436 printf("\n"); 437 } 438 439 static int bus_match_all(const struct rte_bus *bus, const void *data) 440 { 441 RTE_SET_USED(bus); 442 RTE_SET_USED(data); 443 return 0; 444 } 445 446 void 447 device_infos_display(const char *identifier) 448 { 449 static const char *info_border = "*********************"; 450 struct rte_bus *start = NULL, *next; 451 struct rte_dev_iterator dev_iter; 452 char name[RTE_ETH_NAME_MAX_LEN]; 453 struct rte_ether_addr mac_addr; 454 struct rte_device *dev; 455 struct rte_devargs da; 456 portid_t port_id; 457 char devstr[128]; 458 459 memset(&da, 0, sizeof(da)); 460 if (!identifier) 461 goto skip_parse; 462 463 if (rte_devargs_parsef(&da, "%s", identifier)) { 464 printf("cannot parse identifier\n"); 465 if (da.args) 466 free(da.args); 467 return; 468 } 469 470 skip_parse: 471 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 472 473 start = next; 474 if (identifier && da.bus != next) 475 continue; 476 477 /* Skip buses that don't have iterate method */ 478 if (!next->dev_iterate) 479 continue; 480 481 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 482 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 483 484 if (!dev->driver) 485 continue; 486 /* Check for matching device if identifier is present */ 487 if (identifier && 488 strncmp(da.name, dev->name, strlen(dev->name))) 489 continue; 490 printf("\n%s Infos for device %s %s\n", 491 info_border, dev->name, info_border); 492 printf("Bus name: %s", dev->bus->name); 493 printf("\nDriver name: %s", dev->driver->name); 494 printf("\nDevargs: %s", 495 dev->devargs ? dev->devargs->args : ""); 496 printf("\nConnect to socket: %d", dev->numa_node); 497 printf("\n"); 498 499 /* List ports with matching device name */ 500 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 501 printf("\n\tPort id: %-2d", port_id); 502 if (eth_macaddr_get_print_err(port_id, 503 &mac_addr) == 0) 504 print_ethaddr("\n\tMAC address: ", 505 &mac_addr); 506 rte_eth_dev_get_name_by_port(port_id, name); 507 printf("\n\tDevice name: %s", name); 508 printf("\n"); 509 } 510 } 511 }; 512 } 513 514 void 515 port_infos_display(portid_t port_id) 516 { 517 struct rte_port *port; 518 struct rte_ether_addr mac_addr; 519 struct rte_eth_link link; 520 struct rte_eth_dev_info dev_info; 521 int vlan_offload; 522 struct rte_mempool * mp; 523 static const char *info_border = "*********************"; 524 uint16_t mtu; 525 char name[RTE_ETH_NAME_MAX_LEN]; 526 int ret; 527 528 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 529 print_valid_ports(); 530 return; 531 } 532 port = &ports[port_id]; 533 ret = eth_link_get_nowait_print_err(port_id, &link); 534 if (ret < 0) 535 return; 536 537 ret = eth_dev_info_get_print_err(port_id, &dev_info); 538 if (ret != 0) 539 return; 540 541 printf("\n%s Infos for port %-2d %s\n", 542 info_border, port_id, info_border); 543 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 544 print_ethaddr("MAC address: ", &mac_addr); 545 rte_eth_dev_get_name_by_port(port_id, name); 546 printf("\nDevice name: %s", name); 547 printf("\nDriver name: %s", dev_info.driver_name); 548 if (dev_info.device->devargs && dev_info.device->devargs->args) 549 printf("\nDevargs: %s", dev_info.device->devargs->args); 550 printf("\nConnect to socket: %u", port->socket_id); 551 552 if (port_numa[port_id] != NUMA_NO_CONFIG) { 553 mp = mbuf_pool_find(port_numa[port_id]); 554 if (mp) 555 printf("\nmemory allocation on the socket: %d", 556 port_numa[port_id]); 557 } else 558 printf("\nmemory allocation on the socket: %u",port->socket_id); 559 560 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 561 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 562 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 563 ("full-duplex") : ("half-duplex")); 564 565 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 566 printf("MTU: %u\n", mtu); 567 568 printf("Promiscuous mode: %s\n", 569 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 570 printf("Allmulticast mode: %s\n", 571 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 572 printf("Maximum number of MAC addresses: %u\n", 573 (unsigned int)(port->dev_info.max_mac_addrs)); 574 printf("Maximum number of MAC addresses of hash filtering: %u\n", 575 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 576 577 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 578 if (vlan_offload >= 0){ 579 printf("VLAN offload: \n"); 580 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 581 printf(" strip on, "); 582 else 583 printf(" strip off, "); 584 585 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 586 printf("filter on, "); 587 else 588 printf("filter off, "); 589 590 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 591 printf("extend on, "); 592 else 593 printf("extend off, "); 594 595 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 596 printf("qinq strip on\n"); 597 else 598 printf("qinq strip off\n"); 599 } 600 601 if (dev_info.hash_key_size > 0) 602 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 603 if (dev_info.reta_size > 0) 604 printf("Redirection table size: %u\n", dev_info.reta_size); 605 if (!dev_info.flow_type_rss_offloads) 606 printf("No RSS offload flow type is supported.\n"); 607 else { 608 uint16_t i; 609 char *p; 610 611 printf("Supported RSS offload flow types:\n"); 612 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 613 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 614 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 615 continue; 616 p = flowtype_to_str(i); 617 if (p) 618 printf(" %s\n", p); 619 else 620 printf(" user defined %d\n", i); 621 } 622 } 623 624 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 625 printf("Maximum configurable length of RX packet: %u\n", 626 dev_info.max_rx_pktlen); 627 printf("Maximum configurable size of LRO aggregated packet: %u\n", 628 dev_info.max_lro_pkt_size); 629 if (dev_info.max_vfs) 630 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 631 if (dev_info.max_vmdq_pools) 632 printf("Maximum number of VMDq pools: %u\n", 633 dev_info.max_vmdq_pools); 634 635 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 636 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 637 printf("Max possible number of RXDs per queue: %hu\n", 638 dev_info.rx_desc_lim.nb_max); 639 printf("Min possible number of RXDs per queue: %hu\n", 640 dev_info.rx_desc_lim.nb_min); 641 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 642 643 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 644 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 645 printf("Max possible number of TXDs per queue: %hu\n", 646 dev_info.tx_desc_lim.nb_max); 647 printf("Min possible number of TXDs per queue: %hu\n", 648 dev_info.tx_desc_lim.nb_min); 649 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 650 printf("Max segment number per packet: %hu\n", 651 dev_info.tx_desc_lim.nb_seg_max); 652 printf("Max segment number per MTU/TSO: %hu\n", 653 dev_info.tx_desc_lim.nb_mtu_seg_max); 654 655 /* Show switch info only if valid switch domain and port id is set */ 656 if (dev_info.switch_info.domain_id != 657 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 658 if (dev_info.switch_info.name) 659 printf("Switch name: %s\n", dev_info.switch_info.name); 660 661 printf("Switch domain Id: %u\n", 662 dev_info.switch_info.domain_id); 663 printf("Switch Port Id: %u\n", 664 dev_info.switch_info.port_id); 665 } 666 } 667 668 void 669 port_summary_header_display(void) 670 { 671 uint16_t port_number; 672 673 port_number = rte_eth_dev_count_avail(); 674 printf("Number of available ports: %i\n", port_number); 675 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 676 "Driver", "Status", "Link"); 677 } 678 679 void 680 port_summary_display(portid_t port_id) 681 { 682 struct rte_ether_addr mac_addr; 683 struct rte_eth_link link; 684 struct rte_eth_dev_info dev_info; 685 char name[RTE_ETH_NAME_MAX_LEN]; 686 int ret; 687 688 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 689 print_valid_ports(); 690 return; 691 } 692 693 ret = eth_link_get_nowait_print_err(port_id, &link); 694 if (ret < 0) 695 return; 696 697 ret = eth_dev_info_get_print_err(port_id, &dev_info); 698 if (ret != 0) 699 return; 700 701 rte_eth_dev_get_name_by_port(port_id, name); 702 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 703 if (ret != 0) 704 return; 705 706 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n", 707 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 708 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 709 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 710 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 711 (unsigned int) link.link_speed); 712 } 713 714 void 715 port_offload_cap_display(portid_t port_id) 716 { 717 struct rte_eth_dev_info dev_info; 718 static const char *info_border = "************"; 719 int ret; 720 721 if (port_id_is_invalid(port_id, ENABLED_WARN)) 722 return; 723 724 ret = eth_dev_info_get_print_err(port_id, &dev_info); 725 if (ret != 0) 726 return; 727 728 printf("\n%s Port %d supported offload features: %s\n", 729 info_border, port_id, info_border); 730 731 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 732 printf("VLAN stripped: "); 733 if (ports[port_id].dev_conf.rxmode.offloads & 734 DEV_RX_OFFLOAD_VLAN_STRIP) 735 printf("on\n"); 736 else 737 printf("off\n"); 738 } 739 740 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 741 printf("Double VLANs stripped: "); 742 if (ports[port_id].dev_conf.rxmode.offloads & 743 DEV_RX_OFFLOAD_QINQ_STRIP) 744 printf("on\n"); 745 else 746 printf("off\n"); 747 } 748 749 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 750 printf("RX IPv4 checksum: "); 751 if (ports[port_id].dev_conf.rxmode.offloads & 752 DEV_RX_OFFLOAD_IPV4_CKSUM) 753 printf("on\n"); 754 else 755 printf("off\n"); 756 } 757 758 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 759 printf("RX UDP checksum: "); 760 if (ports[port_id].dev_conf.rxmode.offloads & 761 DEV_RX_OFFLOAD_UDP_CKSUM) 762 printf("on\n"); 763 else 764 printf("off\n"); 765 } 766 767 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 768 printf("RX TCP checksum: "); 769 if (ports[port_id].dev_conf.rxmode.offloads & 770 DEV_RX_OFFLOAD_TCP_CKSUM) 771 printf("on\n"); 772 else 773 printf("off\n"); 774 } 775 776 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 777 printf("RX SCTP checksum: "); 778 if (ports[port_id].dev_conf.rxmode.offloads & 779 DEV_RX_OFFLOAD_SCTP_CKSUM) 780 printf("on\n"); 781 else 782 printf("off\n"); 783 } 784 785 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 786 printf("RX Outer IPv4 checksum: "); 787 if (ports[port_id].dev_conf.rxmode.offloads & 788 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 789 printf("on\n"); 790 else 791 printf("off\n"); 792 } 793 794 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 795 printf("RX Outer UDP checksum: "); 796 if (ports[port_id].dev_conf.rxmode.offloads & 797 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 798 printf("on\n"); 799 else 800 printf("off\n"); 801 } 802 803 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 804 printf("Large receive offload: "); 805 if (ports[port_id].dev_conf.rxmode.offloads & 806 DEV_RX_OFFLOAD_TCP_LRO) 807 printf("on\n"); 808 else 809 printf("off\n"); 810 } 811 812 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 813 printf("HW timestamp: "); 814 if (ports[port_id].dev_conf.rxmode.offloads & 815 DEV_RX_OFFLOAD_TIMESTAMP) 816 printf("on\n"); 817 else 818 printf("off\n"); 819 } 820 821 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 822 printf("Rx Keep CRC: "); 823 if (ports[port_id].dev_conf.rxmode.offloads & 824 DEV_RX_OFFLOAD_KEEP_CRC) 825 printf("on\n"); 826 else 827 printf("off\n"); 828 } 829 830 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 831 printf("RX offload security: "); 832 if (ports[port_id].dev_conf.rxmode.offloads & 833 DEV_RX_OFFLOAD_SECURITY) 834 printf("on\n"); 835 else 836 printf("off\n"); 837 } 838 839 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 840 printf("VLAN insert: "); 841 if (ports[port_id].dev_conf.txmode.offloads & 842 DEV_TX_OFFLOAD_VLAN_INSERT) 843 printf("on\n"); 844 else 845 printf("off\n"); 846 } 847 848 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 849 printf("Double VLANs insert: "); 850 if (ports[port_id].dev_conf.txmode.offloads & 851 DEV_TX_OFFLOAD_QINQ_INSERT) 852 printf("on\n"); 853 else 854 printf("off\n"); 855 } 856 857 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 858 printf("TX IPv4 checksum: "); 859 if (ports[port_id].dev_conf.txmode.offloads & 860 DEV_TX_OFFLOAD_IPV4_CKSUM) 861 printf("on\n"); 862 else 863 printf("off\n"); 864 } 865 866 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 867 printf("TX UDP checksum: "); 868 if (ports[port_id].dev_conf.txmode.offloads & 869 DEV_TX_OFFLOAD_UDP_CKSUM) 870 printf("on\n"); 871 else 872 printf("off\n"); 873 } 874 875 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 876 printf("TX TCP checksum: "); 877 if (ports[port_id].dev_conf.txmode.offloads & 878 DEV_TX_OFFLOAD_TCP_CKSUM) 879 printf("on\n"); 880 else 881 printf("off\n"); 882 } 883 884 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 885 printf("TX SCTP checksum: "); 886 if (ports[port_id].dev_conf.txmode.offloads & 887 DEV_TX_OFFLOAD_SCTP_CKSUM) 888 printf("on\n"); 889 else 890 printf("off\n"); 891 } 892 893 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 894 printf("TX Outer IPv4 checksum: "); 895 if (ports[port_id].dev_conf.txmode.offloads & 896 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 897 printf("on\n"); 898 else 899 printf("off\n"); 900 } 901 902 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 903 printf("TX TCP segmentation: "); 904 if (ports[port_id].dev_conf.txmode.offloads & 905 DEV_TX_OFFLOAD_TCP_TSO) 906 printf("on\n"); 907 else 908 printf("off\n"); 909 } 910 911 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 912 printf("TX UDP segmentation: "); 913 if (ports[port_id].dev_conf.txmode.offloads & 914 DEV_TX_OFFLOAD_UDP_TSO) 915 printf("on\n"); 916 else 917 printf("off\n"); 918 } 919 920 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 921 printf("TSO for VXLAN tunnel packet: "); 922 if (ports[port_id].dev_conf.txmode.offloads & 923 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 924 printf("on\n"); 925 else 926 printf("off\n"); 927 } 928 929 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 930 printf("TSO for GRE tunnel packet: "); 931 if (ports[port_id].dev_conf.txmode.offloads & 932 DEV_TX_OFFLOAD_GRE_TNL_TSO) 933 printf("on\n"); 934 else 935 printf("off\n"); 936 } 937 938 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 939 printf("TSO for IPIP tunnel packet: "); 940 if (ports[port_id].dev_conf.txmode.offloads & 941 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 942 printf("on\n"); 943 else 944 printf("off\n"); 945 } 946 947 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 948 printf("TSO for GENEVE tunnel packet: "); 949 if (ports[port_id].dev_conf.txmode.offloads & 950 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 951 printf("on\n"); 952 else 953 printf("off\n"); 954 } 955 956 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 957 printf("IP tunnel TSO: "); 958 if (ports[port_id].dev_conf.txmode.offloads & 959 DEV_TX_OFFLOAD_IP_TNL_TSO) 960 printf("on\n"); 961 else 962 printf("off\n"); 963 } 964 965 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 966 printf("UDP tunnel TSO: "); 967 if (ports[port_id].dev_conf.txmode.offloads & 968 DEV_TX_OFFLOAD_UDP_TNL_TSO) 969 printf("on\n"); 970 else 971 printf("off\n"); 972 } 973 974 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 975 printf("TX Outer UDP checksum: "); 976 if (ports[port_id].dev_conf.txmode.offloads & 977 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 978 printf("on\n"); 979 else 980 printf("off\n"); 981 } 982 983 } 984 985 int 986 port_id_is_invalid(portid_t port_id, enum print_warning warning) 987 { 988 uint16_t pid; 989 990 if (port_id == (portid_t)RTE_PORT_ALL) 991 return 0; 992 993 RTE_ETH_FOREACH_DEV(pid) 994 if (port_id == pid) 995 return 0; 996 997 if (warning == ENABLED_WARN) 998 printf("Invalid port %d\n", port_id); 999 1000 return 1; 1001 } 1002 1003 void print_valid_ports(void) 1004 { 1005 portid_t pid; 1006 1007 printf("The valid ports array is ["); 1008 RTE_ETH_FOREACH_DEV(pid) { 1009 printf(" %d", pid); 1010 } 1011 printf(" ]\n"); 1012 } 1013 1014 static int 1015 vlan_id_is_invalid(uint16_t vlan_id) 1016 { 1017 if (vlan_id < 4096) 1018 return 0; 1019 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1020 return 1; 1021 } 1022 1023 static int 1024 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1025 { 1026 const struct rte_pci_device *pci_dev; 1027 const struct rte_bus *bus; 1028 uint64_t pci_len; 1029 1030 if (reg_off & 0x3) { 1031 printf("Port register offset 0x%X not aligned on a 4-byte " 1032 "boundary\n", 1033 (unsigned)reg_off); 1034 return 1; 1035 } 1036 1037 if (!ports[port_id].dev_info.device) { 1038 printf("Invalid device\n"); 1039 return 0; 1040 } 1041 1042 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1043 if (bus && !strcmp(bus->name, "pci")) { 1044 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1045 } else { 1046 printf("Not a PCI device\n"); 1047 return 1; 1048 } 1049 1050 pci_len = pci_dev->mem_resource[0].len; 1051 if (reg_off >= pci_len) { 1052 printf("Port %d: register offset %u (0x%X) out of port PCI " 1053 "resource (length=%"PRIu64")\n", 1054 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 1055 return 1; 1056 } 1057 return 0; 1058 } 1059 1060 static int 1061 reg_bit_pos_is_invalid(uint8_t bit_pos) 1062 { 1063 if (bit_pos <= 31) 1064 return 0; 1065 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 1066 return 1; 1067 } 1068 1069 #define display_port_and_reg_off(port_id, reg_off) \ 1070 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1071 1072 static inline void 1073 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1074 { 1075 display_port_and_reg_off(port_id, (unsigned)reg_off); 1076 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1077 } 1078 1079 void 1080 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1081 { 1082 uint32_t reg_v; 1083 1084 1085 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1086 return; 1087 if (port_reg_off_is_invalid(port_id, reg_off)) 1088 return; 1089 if (reg_bit_pos_is_invalid(bit_x)) 1090 return; 1091 reg_v = port_id_pci_reg_read(port_id, reg_off); 1092 display_port_and_reg_off(port_id, (unsigned)reg_off); 1093 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1094 } 1095 1096 void 1097 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1098 uint8_t bit1_pos, uint8_t bit2_pos) 1099 { 1100 uint32_t reg_v; 1101 uint8_t l_bit; 1102 uint8_t h_bit; 1103 1104 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1105 return; 1106 if (port_reg_off_is_invalid(port_id, reg_off)) 1107 return; 1108 if (reg_bit_pos_is_invalid(bit1_pos)) 1109 return; 1110 if (reg_bit_pos_is_invalid(bit2_pos)) 1111 return; 1112 if (bit1_pos > bit2_pos) 1113 l_bit = bit2_pos, h_bit = bit1_pos; 1114 else 1115 l_bit = bit1_pos, h_bit = bit2_pos; 1116 1117 reg_v = port_id_pci_reg_read(port_id, reg_off); 1118 reg_v >>= l_bit; 1119 if (h_bit < 31) 1120 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1121 display_port_and_reg_off(port_id, (unsigned)reg_off); 1122 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1123 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1124 } 1125 1126 void 1127 port_reg_display(portid_t port_id, uint32_t reg_off) 1128 { 1129 uint32_t reg_v; 1130 1131 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1132 return; 1133 if (port_reg_off_is_invalid(port_id, reg_off)) 1134 return; 1135 reg_v = port_id_pci_reg_read(port_id, reg_off); 1136 display_port_reg_value(port_id, reg_off, reg_v); 1137 } 1138 1139 void 1140 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1141 uint8_t bit_v) 1142 { 1143 uint32_t reg_v; 1144 1145 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1146 return; 1147 if (port_reg_off_is_invalid(port_id, reg_off)) 1148 return; 1149 if (reg_bit_pos_is_invalid(bit_pos)) 1150 return; 1151 if (bit_v > 1) { 1152 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1153 return; 1154 } 1155 reg_v = port_id_pci_reg_read(port_id, reg_off); 1156 if (bit_v == 0) 1157 reg_v &= ~(1 << bit_pos); 1158 else 1159 reg_v |= (1 << bit_pos); 1160 port_id_pci_reg_write(port_id, reg_off, reg_v); 1161 display_port_reg_value(port_id, reg_off, reg_v); 1162 } 1163 1164 void 1165 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1166 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1167 { 1168 uint32_t max_v; 1169 uint32_t reg_v; 1170 uint8_t l_bit; 1171 uint8_t h_bit; 1172 1173 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1174 return; 1175 if (port_reg_off_is_invalid(port_id, reg_off)) 1176 return; 1177 if (reg_bit_pos_is_invalid(bit1_pos)) 1178 return; 1179 if (reg_bit_pos_is_invalid(bit2_pos)) 1180 return; 1181 if (bit1_pos > bit2_pos) 1182 l_bit = bit2_pos, h_bit = bit1_pos; 1183 else 1184 l_bit = bit1_pos, h_bit = bit2_pos; 1185 1186 if ((h_bit - l_bit) < 31) 1187 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1188 else 1189 max_v = 0xFFFFFFFF; 1190 1191 if (value > max_v) { 1192 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1193 (unsigned)value, (unsigned)value, 1194 (unsigned)max_v, (unsigned)max_v); 1195 return; 1196 } 1197 reg_v = port_id_pci_reg_read(port_id, reg_off); 1198 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1199 reg_v |= (value << l_bit); /* Set changed bits */ 1200 port_id_pci_reg_write(port_id, reg_off, reg_v); 1201 display_port_reg_value(port_id, reg_off, reg_v); 1202 } 1203 1204 void 1205 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1206 { 1207 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1208 return; 1209 if (port_reg_off_is_invalid(port_id, reg_off)) 1210 return; 1211 port_id_pci_reg_write(port_id, reg_off, reg_v); 1212 display_port_reg_value(port_id, reg_off, reg_v); 1213 } 1214 1215 void 1216 port_mtu_set(portid_t port_id, uint16_t mtu) 1217 { 1218 int diag; 1219 struct rte_eth_dev_info dev_info; 1220 int ret; 1221 1222 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1223 return; 1224 1225 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1226 if (ret != 0) 1227 return; 1228 1229 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1230 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1231 mtu, dev_info.min_mtu, dev_info.max_mtu); 1232 return; 1233 } 1234 diag = rte_eth_dev_set_mtu(port_id, mtu); 1235 if (diag == 0) 1236 return; 1237 printf("Set MTU failed. diag=%d\n", diag); 1238 } 1239 1240 /* Generic flow management functions. */ 1241 1242 /** Generate a port_flow entry from attributes/pattern/actions. */ 1243 static struct port_flow * 1244 port_flow_new(const struct rte_flow_attr *attr, 1245 const struct rte_flow_item *pattern, 1246 const struct rte_flow_action *actions, 1247 struct rte_flow_error *error) 1248 { 1249 const struct rte_flow_conv_rule rule = { 1250 .attr_ro = attr, 1251 .pattern_ro = pattern, 1252 .actions_ro = actions, 1253 }; 1254 struct port_flow *pf; 1255 int ret; 1256 1257 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1258 if (ret < 0) 1259 return NULL; 1260 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1261 if (!pf) { 1262 rte_flow_error_set 1263 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1264 "calloc() failed"); 1265 return NULL; 1266 } 1267 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1268 error) >= 0) 1269 return pf; 1270 free(pf); 1271 return NULL; 1272 } 1273 1274 /** Print a message out of a flow error. */ 1275 static int 1276 port_flow_complain(struct rte_flow_error *error) 1277 { 1278 static const char *const errstrlist[] = { 1279 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1280 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1281 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1282 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1283 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1284 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1285 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1286 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1287 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1288 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1289 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1290 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1291 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1292 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1293 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1294 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1295 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1296 }; 1297 const char *errstr; 1298 char buf[32]; 1299 int err = rte_errno; 1300 1301 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1302 !errstrlist[error->type]) 1303 errstr = "unknown type"; 1304 else 1305 errstr = errstrlist[error->type]; 1306 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__, 1307 error->type, errstr, 1308 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1309 error->cause), buf) : "", 1310 error->message ? error->message : "(no stated reason)", 1311 rte_strerror(err)); 1312 return -err; 1313 } 1314 1315 /** Validate flow rule. */ 1316 int 1317 port_flow_validate(portid_t port_id, 1318 const struct rte_flow_attr *attr, 1319 const struct rte_flow_item *pattern, 1320 const struct rte_flow_action *actions) 1321 { 1322 struct rte_flow_error error; 1323 1324 /* Poisoning to make sure PMDs update it in case of error. */ 1325 memset(&error, 0x11, sizeof(error)); 1326 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1327 return port_flow_complain(&error); 1328 printf("Flow rule validated\n"); 1329 return 0; 1330 } 1331 1332 /** Create flow rule. */ 1333 int 1334 port_flow_create(portid_t port_id, 1335 const struct rte_flow_attr *attr, 1336 const struct rte_flow_item *pattern, 1337 const struct rte_flow_action *actions) 1338 { 1339 struct rte_flow *flow; 1340 struct rte_port *port; 1341 struct port_flow *pf; 1342 uint32_t id; 1343 struct rte_flow_error error; 1344 1345 /* Poisoning to make sure PMDs update it in case of error. */ 1346 memset(&error, 0x22, sizeof(error)); 1347 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1348 if (!flow) 1349 return port_flow_complain(&error); 1350 port = &ports[port_id]; 1351 if (port->flow_list) { 1352 if (port->flow_list->id == UINT32_MAX) { 1353 printf("Highest rule ID is already assigned, delete" 1354 " it first"); 1355 rte_flow_destroy(port_id, flow, NULL); 1356 return -ENOMEM; 1357 } 1358 id = port->flow_list->id + 1; 1359 } else 1360 id = 0; 1361 pf = port_flow_new(attr, pattern, actions, &error); 1362 if (!pf) { 1363 rte_flow_destroy(port_id, flow, NULL); 1364 return port_flow_complain(&error); 1365 } 1366 pf->next = port->flow_list; 1367 pf->id = id; 1368 pf->flow = flow; 1369 port->flow_list = pf; 1370 printf("Flow rule #%u created\n", pf->id); 1371 return 0; 1372 } 1373 1374 /** Destroy a number of flow rules. */ 1375 int 1376 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1377 { 1378 struct rte_port *port; 1379 struct port_flow **tmp; 1380 uint32_t c = 0; 1381 int ret = 0; 1382 1383 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1384 port_id == (portid_t)RTE_PORT_ALL) 1385 return -EINVAL; 1386 port = &ports[port_id]; 1387 tmp = &port->flow_list; 1388 while (*tmp) { 1389 uint32_t i; 1390 1391 for (i = 0; i != n; ++i) { 1392 struct rte_flow_error error; 1393 struct port_flow *pf = *tmp; 1394 1395 if (rule[i] != pf->id) 1396 continue; 1397 /* 1398 * Poisoning to make sure PMDs update it in case 1399 * of error. 1400 */ 1401 memset(&error, 0x33, sizeof(error)); 1402 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1403 ret = port_flow_complain(&error); 1404 continue; 1405 } 1406 printf("Flow rule #%u destroyed\n", pf->id); 1407 *tmp = pf->next; 1408 free(pf); 1409 break; 1410 } 1411 if (i == n) 1412 tmp = &(*tmp)->next; 1413 ++c; 1414 } 1415 return ret; 1416 } 1417 1418 /** Remove all flow rules. */ 1419 int 1420 port_flow_flush(portid_t port_id) 1421 { 1422 struct rte_flow_error error; 1423 struct rte_port *port; 1424 int ret = 0; 1425 1426 /* Poisoning to make sure PMDs update it in case of error. */ 1427 memset(&error, 0x44, sizeof(error)); 1428 if (rte_flow_flush(port_id, &error)) { 1429 ret = port_flow_complain(&error); 1430 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1431 port_id == (portid_t)RTE_PORT_ALL) 1432 return ret; 1433 } 1434 port = &ports[port_id]; 1435 while (port->flow_list) { 1436 struct port_flow *pf = port->flow_list->next; 1437 1438 free(port->flow_list); 1439 port->flow_list = pf; 1440 } 1441 return ret; 1442 } 1443 1444 /** Dump all flow rules. */ 1445 int 1446 port_flow_dump(portid_t port_id, const char *file_name) 1447 { 1448 int ret = 0; 1449 FILE *file = stdout; 1450 struct rte_flow_error error; 1451 1452 if (file_name && strlen(file_name)) { 1453 file = fopen(file_name, "w"); 1454 if (!file) { 1455 printf("Failed to create file %s: %s\n", file_name, 1456 strerror(errno)); 1457 return -errno; 1458 } 1459 } 1460 ret = rte_flow_dev_dump(port_id, file, &error); 1461 if (ret) { 1462 port_flow_complain(&error); 1463 printf("Failed to dump flow: %s\n", strerror(-ret)); 1464 } else 1465 printf("Flow dump finished\n"); 1466 if (file_name && strlen(file_name)) 1467 fclose(file); 1468 return ret; 1469 } 1470 1471 /** Query a flow rule. */ 1472 int 1473 port_flow_query(portid_t port_id, uint32_t rule, 1474 const struct rte_flow_action *action) 1475 { 1476 struct rte_flow_error error; 1477 struct rte_port *port; 1478 struct port_flow *pf; 1479 const char *name; 1480 union { 1481 struct rte_flow_query_count count; 1482 } query; 1483 int ret; 1484 1485 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1486 port_id == (portid_t)RTE_PORT_ALL) 1487 return -EINVAL; 1488 port = &ports[port_id]; 1489 for (pf = port->flow_list; pf; pf = pf->next) 1490 if (pf->id == rule) 1491 break; 1492 if (!pf) { 1493 printf("Flow rule #%u not found\n", rule); 1494 return -ENOENT; 1495 } 1496 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1497 &name, sizeof(name), 1498 (void *)(uintptr_t)action->type, &error); 1499 if (ret < 0) 1500 return port_flow_complain(&error); 1501 switch (action->type) { 1502 case RTE_FLOW_ACTION_TYPE_COUNT: 1503 break; 1504 default: 1505 printf("Cannot query action type %d (%s)\n", 1506 action->type, name); 1507 return -ENOTSUP; 1508 } 1509 /* Poisoning to make sure PMDs update it in case of error. */ 1510 memset(&error, 0x55, sizeof(error)); 1511 memset(&query, 0, sizeof(query)); 1512 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1513 return port_flow_complain(&error); 1514 switch (action->type) { 1515 case RTE_FLOW_ACTION_TYPE_COUNT: 1516 printf("%s:\n" 1517 " hits_set: %u\n" 1518 " bytes_set: %u\n" 1519 " hits: %" PRIu64 "\n" 1520 " bytes: %" PRIu64 "\n", 1521 name, 1522 query.count.hits_set, 1523 query.count.bytes_set, 1524 query.count.hits, 1525 query.count.bytes); 1526 break; 1527 default: 1528 printf("Cannot display result for action type %d (%s)\n", 1529 action->type, name); 1530 break; 1531 } 1532 return 0; 1533 } 1534 1535 /** List flow rules. */ 1536 void 1537 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1538 { 1539 struct rte_port *port; 1540 struct port_flow *pf; 1541 struct port_flow *list = NULL; 1542 uint32_t i; 1543 1544 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1545 port_id == (portid_t)RTE_PORT_ALL) 1546 return; 1547 port = &ports[port_id]; 1548 if (!port->flow_list) 1549 return; 1550 /* Sort flows by group, priority and ID. */ 1551 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1552 struct port_flow **tmp; 1553 const struct rte_flow_attr *curr = pf->rule.attr; 1554 1555 if (n) { 1556 /* Filter out unwanted groups. */ 1557 for (i = 0; i != n; ++i) 1558 if (curr->group == group[i]) 1559 break; 1560 if (i == n) 1561 continue; 1562 } 1563 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 1564 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 1565 1566 if (curr->group > comp->group || 1567 (curr->group == comp->group && 1568 curr->priority > comp->priority) || 1569 (curr->group == comp->group && 1570 curr->priority == comp->priority && 1571 pf->id > (*tmp)->id)) 1572 continue; 1573 break; 1574 } 1575 pf->tmp = *tmp; 1576 *tmp = pf; 1577 } 1578 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1579 for (pf = list; pf != NULL; pf = pf->tmp) { 1580 const struct rte_flow_item *item = pf->rule.pattern; 1581 const struct rte_flow_action *action = pf->rule.actions; 1582 const char *name; 1583 1584 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1585 pf->id, 1586 pf->rule.attr->group, 1587 pf->rule.attr->priority, 1588 pf->rule.attr->ingress ? 'i' : '-', 1589 pf->rule.attr->egress ? 'e' : '-', 1590 pf->rule.attr->transfer ? 't' : '-'); 1591 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1592 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 1593 &name, sizeof(name), 1594 (void *)(uintptr_t)item->type, 1595 NULL) <= 0) 1596 name = "[UNKNOWN]"; 1597 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1598 printf("%s ", name); 1599 ++item; 1600 } 1601 printf("=>"); 1602 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1603 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1604 &name, sizeof(name), 1605 (void *)(uintptr_t)action->type, 1606 NULL) <= 0) 1607 name = "[UNKNOWN]"; 1608 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1609 printf(" %s", name); 1610 ++action; 1611 } 1612 printf("\n"); 1613 } 1614 } 1615 1616 /** Restrict ingress traffic to the defined flow rules. */ 1617 int 1618 port_flow_isolate(portid_t port_id, int set) 1619 { 1620 struct rte_flow_error error; 1621 1622 /* Poisoning to make sure PMDs update it in case of error. */ 1623 memset(&error, 0x66, sizeof(error)); 1624 if (rte_flow_isolate(port_id, set, &error)) 1625 return port_flow_complain(&error); 1626 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1627 port_id, 1628 set ? "now restricted" : "not restricted anymore"); 1629 return 0; 1630 } 1631 1632 /* 1633 * RX/TX ring descriptors display functions. 1634 */ 1635 int 1636 rx_queue_id_is_invalid(queueid_t rxq_id) 1637 { 1638 if (rxq_id < nb_rxq) 1639 return 0; 1640 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1641 return 1; 1642 } 1643 1644 int 1645 tx_queue_id_is_invalid(queueid_t txq_id) 1646 { 1647 if (txq_id < nb_txq) 1648 return 0; 1649 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1650 return 1; 1651 } 1652 1653 static int 1654 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1655 { 1656 if (rxdesc_id < nb_rxd) 1657 return 0; 1658 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1659 rxdesc_id, nb_rxd); 1660 return 1; 1661 } 1662 1663 static int 1664 tx_desc_id_is_invalid(uint16_t txdesc_id) 1665 { 1666 if (txdesc_id < nb_txd) 1667 return 0; 1668 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1669 txdesc_id, nb_txd); 1670 return 1; 1671 } 1672 1673 static const struct rte_memzone * 1674 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1675 { 1676 char mz_name[RTE_MEMZONE_NAMESIZE]; 1677 const struct rte_memzone *mz; 1678 1679 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 1680 port_id, q_id, ring_name); 1681 mz = rte_memzone_lookup(mz_name); 1682 if (mz == NULL) 1683 printf("%s ring memory zoneof (port %d, queue %d) not" 1684 "found (zone name = %s\n", 1685 ring_name, port_id, q_id, mz_name); 1686 return mz; 1687 } 1688 1689 union igb_ring_dword { 1690 uint64_t dword; 1691 struct { 1692 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1693 uint32_t lo; 1694 uint32_t hi; 1695 #else 1696 uint32_t hi; 1697 uint32_t lo; 1698 #endif 1699 } words; 1700 }; 1701 1702 struct igb_ring_desc_32_bytes { 1703 union igb_ring_dword lo_dword; 1704 union igb_ring_dword hi_dword; 1705 union igb_ring_dword resv1; 1706 union igb_ring_dword resv2; 1707 }; 1708 1709 struct igb_ring_desc_16_bytes { 1710 union igb_ring_dword lo_dword; 1711 union igb_ring_dword hi_dword; 1712 }; 1713 1714 static void 1715 ring_rxd_display_dword(union igb_ring_dword dword) 1716 { 1717 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1718 (unsigned)dword.words.hi); 1719 } 1720 1721 static void 1722 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1723 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1724 portid_t port_id, 1725 #else 1726 __rte_unused portid_t port_id, 1727 #endif 1728 uint16_t desc_id) 1729 { 1730 struct igb_ring_desc_16_bytes *ring = 1731 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1732 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1733 int ret; 1734 struct rte_eth_dev_info dev_info; 1735 1736 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1737 if (ret != 0) 1738 return; 1739 1740 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1741 /* 32 bytes RX descriptor, i40e only */ 1742 struct igb_ring_desc_32_bytes *ring = 1743 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1744 ring[desc_id].lo_dword.dword = 1745 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1746 ring_rxd_display_dword(ring[desc_id].lo_dword); 1747 ring[desc_id].hi_dword.dword = 1748 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1749 ring_rxd_display_dword(ring[desc_id].hi_dword); 1750 ring[desc_id].resv1.dword = 1751 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1752 ring_rxd_display_dword(ring[desc_id].resv1); 1753 ring[desc_id].resv2.dword = 1754 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1755 ring_rxd_display_dword(ring[desc_id].resv2); 1756 1757 return; 1758 } 1759 #endif 1760 /* 16 bytes RX descriptor */ 1761 ring[desc_id].lo_dword.dword = 1762 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1763 ring_rxd_display_dword(ring[desc_id].lo_dword); 1764 ring[desc_id].hi_dword.dword = 1765 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1766 ring_rxd_display_dword(ring[desc_id].hi_dword); 1767 } 1768 1769 static void 1770 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1771 { 1772 struct igb_ring_desc_16_bytes *ring; 1773 struct igb_ring_desc_16_bytes txd; 1774 1775 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1776 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1777 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1778 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1779 (unsigned)txd.lo_dword.words.lo, 1780 (unsigned)txd.lo_dword.words.hi, 1781 (unsigned)txd.hi_dword.words.lo, 1782 (unsigned)txd.hi_dword.words.hi); 1783 } 1784 1785 void 1786 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1787 { 1788 const struct rte_memzone *rx_mz; 1789 1790 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1791 return; 1792 if (rx_queue_id_is_invalid(rxq_id)) 1793 return; 1794 if (rx_desc_id_is_invalid(rxd_id)) 1795 return; 1796 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1797 if (rx_mz == NULL) 1798 return; 1799 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1800 } 1801 1802 void 1803 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1804 { 1805 const struct rte_memzone *tx_mz; 1806 1807 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1808 return; 1809 if (tx_queue_id_is_invalid(txq_id)) 1810 return; 1811 if (tx_desc_id_is_invalid(txd_id)) 1812 return; 1813 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1814 if (tx_mz == NULL) 1815 return; 1816 ring_tx_descriptor_display(tx_mz, txd_id); 1817 } 1818 1819 void 1820 fwd_lcores_config_display(void) 1821 { 1822 lcoreid_t lc_id; 1823 1824 printf("List of forwarding lcores:"); 1825 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1826 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1827 printf("\n"); 1828 } 1829 void 1830 rxtx_config_display(void) 1831 { 1832 portid_t pid; 1833 queueid_t qid; 1834 1835 printf(" %s packet forwarding%s packets/burst=%d\n", 1836 cur_fwd_eng->fwd_mode_name, 1837 retry_enabled == 0 ? "" : " with retry", 1838 nb_pkt_per_burst); 1839 1840 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1841 printf(" packet len=%u - nb packet segments=%d\n", 1842 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1843 1844 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1845 nb_fwd_lcores, nb_fwd_ports); 1846 1847 RTE_ETH_FOREACH_DEV(pid) { 1848 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1849 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1850 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1851 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1852 uint16_t nb_rx_desc_tmp; 1853 uint16_t nb_tx_desc_tmp; 1854 struct rte_eth_rxq_info rx_qinfo; 1855 struct rte_eth_txq_info tx_qinfo; 1856 int32_t rc; 1857 1858 /* per port config */ 1859 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1860 (unsigned int)pid, nb_rxq, nb_txq); 1861 1862 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1863 ports[pid].dev_conf.rxmode.offloads, 1864 ports[pid].dev_conf.txmode.offloads); 1865 1866 /* per rx queue config only for first queue to be less verbose */ 1867 for (qid = 0; qid < 1; qid++) { 1868 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1869 if (rc) 1870 nb_rx_desc_tmp = nb_rx_desc[qid]; 1871 else 1872 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1873 1874 printf(" RX queue: %d\n", qid); 1875 printf(" RX desc=%d - RX free threshold=%d\n", 1876 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1877 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1878 " wthresh=%d\n", 1879 rx_conf[qid].rx_thresh.pthresh, 1880 rx_conf[qid].rx_thresh.hthresh, 1881 rx_conf[qid].rx_thresh.wthresh); 1882 printf(" RX Offloads=0x%"PRIx64"\n", 1883 rx_conf[qid].offloads); 1884 } 1885 1886 /* per tx queue config only for first queue to be less verbose */ 1887 for (qid = 0; qid < 1; qid++) { 1888 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1889 if (rc) 1890 nb_tx_desc_tmp = nb_tx_desc[qid]; 1891 else 1892 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1893 1894 printf(" TX queue: %d\n", qid); 1895 printf(" TX desc=%d - TX free threshold=%d\n", 1896 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1897 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1898 " wthresh=%d\n", 1899 tx_conf[qid].tx_thresh.pthresh, 1900 tx_conf[qid].tx_thresh.hthresh, 1901 tx_conf[qid].tx_thresh.wthresh); 1902 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1903 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1904 } 1905 } 1906 } 1907 1908 void 1909 port_rss_reta_info(portid_t port_id, 1910 struct rte_eth_rss_reta_entry64 *reta_conf, 1911 uint16_t nb_entries) 1912 { 1913 uint16_t i, idx, shift; 1914 int ret; 1915 1916 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1917 return; 1918 1919 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1920 if (ret != 0) { 1921 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1922 return; 1923 } 1924 1925 for (i = 0; i < nb_entries; i++) { 1926 idx = i / RTE_RETA_GROUP_SIZE; 1927 shift = i % RTE_RETA_GROUP_SIZE; 1928 if (!(reta_conf[idx].mask & (1ULL << shift))) 1929 continue; 1930 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1931 i, reta_conf[idx].reta[shift]); 1932 } 1933 } 1934 1935 /* 1936 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1937 * key of the port. 1938 */ 1939 void 1940 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 1941 { 1942 struct rte_eth_rss_conf rss_conf = {0}; 1943 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1944 uint64_t rss_hf; 1945 uint8_t i; 1946 int diag; 1947 struct rte_eth_dev_info dev_info; 1948 uint8_t hash_key_size; 1949 int ret; 1950 1951 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1952 return; 1953 1954 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1955 if (ret != 0) 1956 return; 1957 1958 if (dev_info.hash_key_size > 0 && 1959 dev_info.hash_key_size <= sizeof(rss_key)) 1960 hash_key_size = dev_info.hash_key_size; 1961 else { 1962 printf("dev_info did not provide a valid hash key size\n"); 1963 return; 1964 } 1965 1966 /* Get RSS hash key if asked to display it */ 1967 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1968 rss_conf.rss_key_len = hash_key_size; 1969 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1970 if (diag != 0) { 1971 switch (diag) { 1972 case -ENODEV: 1973 printf("port index %d invalid\n", port_id); 1974 break; 1975 case -ENOTSUP: 1976 printf("operation not supported by device\n"); 1977 break; 1978 default: 1979 printf("operation failed - diag=%d\n", diag); 1980 break; 1981 } 1982 return; 1983 } 1984 rss_hf = rss_conf.rss_hf; 1985 if (rss_hf == 0) { 1986 printf("RSS disabled\n"); 1987 return; 1988 } 1989 printf("RSS functions:\n "); 1990 for (i = 0; rss_type_table[i].str; i++) { 1991 if (rss_hf & rss_type_table[i].rss_type) 1992 printf("%s ", rss_type_table[i].str); 1993 } 1994 printf("\n"); 1995 if (!show_rss_key) 1996 return; 1997 printf("RSS key:\n"); 1998 for (i = 0; i < hash_key_size; i++) 1999 printf("%02X", rss_key[i]); 2000 printf("\n"); 2001 } 2002 2003 void 2004 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2005 uint hash_key_len) 2006 { 2007 struct rte_eth_rss_conf rss_conf; 2008 int diag; 2009 unsigned int i; 2010 2011 rss_conf.rss_key = NULL; 2012 rss_conf.rss_key_len = hash_key_len; 2013 rss_conf.rss_hf = 0; 2014 for (i = 0; rss_type_table[i].str; i++) { 2015 if (!strcmp(rss_type_table[i].str, rss_type)) 2016 rss_conf.rss_hf = rss_type_table[i].rss_type; 2017 } 2018 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2019 if (diag == 0) { 2020 rss_conf.rss_key = hash_key; 2021 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2022 } 2023 if (diag == 0) 2024 return; 2025 2026 switch (diag) { 2027 case -ENODEV: 2028 printf("port index %d invalid\n", port_id); 2029 break; 2030 case -ENOTSUP: 2031 printf("operation not supported by device\n"); 2032 break; 2033 default: 2034 printf("operation failed - diag=%d\n", diag); 2035 break; 2036 } 2037 } 2038 2039 /* 2040 * Setup forwarding configuration for each logical core. 2041 */ 2042 static void 2043 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2044 { 2045 streamid_t nb_fs_per_lcore; 2046 streamid_t nb_fs; 2047 streamid_t sm_id; 2048 lcoreid_t nb_extra; 2049 lcoreid_t nb_fc; 2050 lcoreid_t nb_lc; 2051 lcoreid_t lc_id; 2052 2053 nb_fs = cfg->nb_fwd_streams; 2054 nb_fc = cfg->nb_fwd_lcores; 2055 if (nb_fs <= nb_fc) { 2056 nb_fs_per_lcore = 1; 2057 nb_extra = 0; 2058 } else { 2059 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2060 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2061 } 2062 2063 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2064 sm_id = 0; 2065 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2066 fwd_lcores[lc_id]->stream_idx = sm_id; 2067 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2068 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2069 } 2070 2071 /* 2072 * Assign extra remaining streams, if any. 2073 */ 2074 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2075 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2076 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2077 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2078 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2079 } 2080 } 2081 2082 static portid_t 2083 fwd_topology_tx_port_get(portid_t rxp) 2084 { 2085 static int warning_once = 1; 2086 2087 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2088 2089 switch (port_topology) { 2090 default: 2091 case PORT_TOPOLOGY_PAIRED: 2092 if ((rxp & 0x1) == 0) { 2093 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2094 return rxp + 1; 2095 if (warning_once) { 2096 printf("\nWarning! port-topology=paired" 2097 " and odd forward ports number," 2098 " the last port will pair with" 2099 " itself.\n\n"); 2100 warning_once = 0; 2101 } 2102 return rxp; 2103 } 2104 return rxp - 1; 2105 case PORT_TOPOLOGY_CHAINED: 2106 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2107 case PORT_TOPOLOGY_LOOP: 2108 return rxp; 2109 } 2110 } 2111 2112 static void 2113 simple_fwd_config_setup(void) 2114 { 2115 portid_t i; 2116 2117 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2118 cur_fwd_config.nb_fwd_streams = 2119 (streamid_t) cur_fwd_config.nb_fwd_ports; 2120 2121 /* reinitialize forwarding streams */ 2122 init_fwd_streams(); 2123 2124 /* 2125 * In the simple forwarding test, the number of forwarding cores 2126 * must be lower or equal to the number of forwarding ports. 2127 */ 2128 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2129 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2130 cur_fwd_config.nb_fwd_lcores = 2131 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2132 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2133 2134 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2135 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2136 fwd_streams[i]->rx_queue = 0; 2137 fwd_streams[i]->tx_port = 2138 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2139 fwd_streams[i]->tx_queue = 0; 2140 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2141 fwd_streams[i]->retry_enabled = retry_enabled; 2142 } 2143 } 2144 2145 /** 2146 * For the RSS forwarding test all streams distributed over lcores. Each stream 2147 * being composed of a RX queue to poll on a RX port for input messages, 2148 * associated with a TX queue of a TX port where to send forwarded packets. 2149 */ 2150 static void 2151 rss_fwd_config_setup(void) 2152 { 2153 portid_t rxp; 2154 portid_t txp; 2155 queueid_t rxq; 2156 queueid_t nb_q; 2157 streamid_t sm_id; 2158 2159 nb_q = nb_rxq; 2160 if (nb_q > nb_txq) 2161 nb_q = nb_txq; 2162 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2163 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2164 cur_fwd_config.nb_fwd_streams = 2165 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2166 2167 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2168 cur_fwd_config.nb_fwd_lcores = 2169 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2170 2171 /* reinitialize forwarding streams */ 2172 init_fwd_streams(); 2173 2174 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2175 rxp = 0; rxq = 0; 2176 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2177 struct fwd_stream *fs; 2178 2179 fs = fwd_streams[sm_id]; 2180 txp = fwd_topology_tx_port_get(rxp); 2181 fs->rx_port = fwd_ports_ids[rxp]; 2182 fs->rx_queue = rxq; 2183 fs->tx_port = fwd_ports_ids[txp]; 2184 fs->tx_queue = rxq; 2185 fs->peer_addr = fs->tx_port; 2186 fs->retry_enabled = retry_enabled; 2187 rxp++; 2188 if (rxp < nb_fwd_ports) 2189 continue; 2190 rxp = 0; 2191 rxq++; 2192 } 2193 } 2194 2195 /** 2196 * For the DCB forwarding test, each core is assigned on each traffic class. 2197 * 2198 * Each core is assigned a multi-stream, each stream being composed of 2199 * a RX queue to poll on a RX port for input messages, associated with 2200 * a TX queue of a TX port where to send forwarded packets. All RX and 2201 * TX queues are mapping to the same traffic class. 2202 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2203 * the same core 2204 */ 2205 static void 2206 dcb_fwd_config_setup(void) 2207 { 2208 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2209 portid_t txp, rxp = 0; 2210 queueid_t txq, rxq = 0; 2211 lcoreid_t lc_id; 2212 uint16_t nb_rx_queue, nb_tx_queue; 2213 uint16_t i, j, k, sm_id = 0; 2214 uint8_t tc = 0; 2215 2216 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2217 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2218 cur_fwd_config.nb_fwd_streams = 2219 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2220 2221 /* reinitialize forwarding streams */ 2222 init_fwd_streams(); 2223 sm_id = 0; 2224 txp = 1; 2225 /* get the dcb info on the first RX and TX ports */ 2226 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2227 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2228 2229 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2230 fwd_lcores[lc_id]->stream_nb = 0; 2231 fwd_lcores[lc_id]->stream_idx = sm_id; 2232 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2233 /* if the nb_queue is zero, means this tc is 2234 * not enabled on the POOL 2235 */ 2236 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2237 break; 2238 k = fwd_lcores[lc_id]->stream_nb + 2239 fwd_lcores[lc_id]->stream_idx; 2240 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2241 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2242 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2243 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2244 for (j = 0; j < nb_rx_queue; j++) { 2245 struct fwd_stream *fs; 2246 2247 fs = fwd_streams[k + j]; 2248 fs->rx_port = fwd_ports_ids[rxp]; 2249 fs->rx_queue = rxq + j; 2250 fs->tx_port = fwd_ports_ids[txp]; 2251 fs->tx_queue = txq + j % nb_tx_queue; 2252 fs->peer_addr = fs->tx_port; 2253 fs->retry_enabled = retry_enabled; 2254 } 2255 fwd_lcores[lc_id]->stream_nb += 2256 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2257 } 2258 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2259 2260 tc++; 2261 if (tc < rxp_dcb_info.nb_tcs) 2262 continue; 2263 /* Restart from TC 0 on next RX port */ 2264 tc = 0; 2265 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2266 rxp = (portid_t) 2267 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2268 else 2269 rxp++; 2270 if (rxp >= nb_fwd_ports) 2271 return; 2272 /* get the dcb information on next RX and TX ports */ 2273 if ((rxp & 0x1) == 0) 2274 txp = (portid_t) (rxp + 1); 2275 else 2276 txp = (portid_t) (rxp - 1); 2277 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2278 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2279 } 2280 } 2281 2282 static void 2283 icmp_echo_config_setup(void) 2284 { 2285 portid_t rxp; 2286 queueid_t rxq; 2287 lcoreid_t lc_id; 2288 uint16_t sm_id; 2289 2290 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2291 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2292 (nb_txq * nb_fwd_ports); 2293 else 2294 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2295 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2296 cur_fwd_config.nb_fwd_streams = 2297 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2298 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2299 cur_fwd_config.nb_fwd_lcores = 2300 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2301 if (verbose_level > 0) { 2302 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2303 __FUNCTION__, 2304 cur_fwd_config.nb_fwd_lcores, 2305 cur_fwd_config.nb_fwd_ports, 2306 cur_fwd_config.nb_fwd_streams); 2307 } 2308 2309 /* reinitialize forwarding streams */ 2310 init_fwd_streams(); 2311 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2312 rxp = 0; rxq = 0; 2313 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2314 if (verbose_level > 0) 2315 printf(" core=%d: \n", lc_id); 2316 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2317 struct fwd_stream *fs; 2318 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2319 fs->rx_port = fwd_ports_ids[rxp]; 2320 fs->rx_queue = rxq; 2321 fs->tx_port = fs->rx_port; 2322 fs->tx_queue = rxq; 2323 fs->peer_addr = fs->tx_port; 2324 fs->retry_enabled = retry_enabled; 2325 if (verbose_level > 0) 2326 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2327 sm_id, fs->rx_port, fs->rx_queue, 2328 fs->tx_queue); 2329 rxq = (queueid_t) (rxq + 1); 2330 if (rxq == nb_rxq) { 2331 rxq = 0; 2332 rxp = (portid_t) (rxp + 1); 2333 } 2334 } 2335 } 2336 } 2337 2338 #if defined RTE_LIBRTE_PMD_SOFTNIC 2339 static void 2340 softnic_fwd_config_setup(void) 2341 { 2342 struct rte_port *port; 2343 portid_t pid, softnic_portid; 2344 queueid_t i; 2345 uint8_t softnic_enable = 0; 2346 2347 RTE_ETH_FOREACH_DEV(pid) { 2348 port = &ports[pid]; 2349 const char *driver = port->dev_info.driver_name; 2350 2351 if (strcmp(driver, "net_softnic") == 0) { 2352 softnic_portid = pid; 2353 softnic_enable = 1; 2354 break; 2355 } 2356 } 2357 2358 if (softnic_enable == 0) { 2359 printf("Softnic mode not configured(%s)!\n", __func__); 2360 return; 2361 } 2362 2363 cur_fwd_config.nb_fwd_ports = 1; 2364 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; 2365 2366 /* Re-initialize forwarding streams */ 2367 init_fwd_streams(); 2368 2369 /* 2370 * In the softnic forwarding test, the number of forwarding cores 2371 * is set to one and remaining are used for softnic packet processing. 2372 */ 2373 cur_fwd_config.nb_fwd_lcores = 1; 2374 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2375 2376 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { 2377 fwd_streams[i]->rx_port = softnic_portid; 2378 fwd_streams[i]->rx_queue = i; 2379 fwd_streams[i]->tx_port = softnic_portid; 2380 fwd_streams[i]->tx_queue = i; 2381 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2382 fwd_streams[i]->retry_enabled = retry_enabled; 2383 } 2384 } 2385 #endif 2386 2387 void 2388 fwd_config_setup(void) 2389 { 2390 cur_fwd_config.fwd_eng = cur_fwd_eng; 2391 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2392 icmp_echo_config_setup(); 2393 return; 2394 } 2395 2396 #if defined RTE_LIBRTE_PMD_SOFTNIC 2397 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 2398 softnic_fwd_config_setup(); 2399 return; 2400 } 2401 #endif 2402 2403 if ((nb_rxq > 1) && (nb_txq > 1)){ 2404 if (dcb_config) 2405 dcb_fwd_config_setup(); 2406 else 2407 rss_fwd_config_setup(); 2408 } 2409 else 2410 simple_fwd_config_setup(); 2411 } 2412 2413 static const char * 2414 mp_alloc_to_str(uint8_t mode) 2415 { 2416 switch (mode) { 2417 case MP_ALLOC_NATIVE: 2418 return "native"; 2419 case MP_ALLOC_ANON: 2420 return "anon"; 2421 case MP_ALLOC_XMEM: 2422 return "xmem"; 2423 case MP_ALLOC_XMEM_HUGE: 2424 return "xmemhuge"; 2425 default: 2426 return "invalid"; 2427 } 2428 } 2429 2430 void 2431 pkt_fwd_config_display(struct fwd_config *cfg) 2432 { 2433 struct fwd_stream *fs; 2434 lcoreid_t lc_id; 2435 streamid_t sm_id; 2436 2437 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2438 "NUMA support %s, MP allocation mode: %s\n", 2439 cfg->fwd_eng->fwd_mode_name, 2440 retry_enabled == 0 ? "" : " with retry", 2441 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2442 numa_support == 1 ? "enabled" : "disabled", 2443 mp_alloc_to_str(mp_alloc_type)); 2444 2445 if (retry_enabled) 2446 printf("TX retry num: %u, delay between TX retries: %uus\n", 2447 burst_tx_retry_num, burst_tx_delay_time); 2448 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2449 printf("Logical Core %u (socket %u) forwards packets on " 2450 "%d streams:", 2451 fwd_lcores_cpuids[lc_id], 2452 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2453 fwd_lcores[lc_id]->stream_nb); 2454 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2455 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2456 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2457 "P=%d/Q=%d (socket %u) ", 2458 fs->rx_port, fs->rx_queue, 2459 ports[fs->rx_port].socket_id, 2460 fs->tx_port, fs->tx_queue, 2461 ports[fs->tx_port].socket_id); 2462 print_ethaddr("peer=", 2463 &peer_eth_addrs[fs->peer_addr]); 2464 } 2465 printf("\n"); 2466 } 2467 printf("\n"); 2468 } 2469 2470 void 2471 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2472 { 2473 struct rte_ether_addr new_peer_addr; 2474 if (!rte_eth_dev_is_valid_port(port_id)) { 2475 printf("Error: Invalid port number %i\n", port_id); 2476 return; 2477 } 2478 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 2479 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2480 return; 2481 } 2482 peer_eth_addrs[port_id] = new_peer_addr; 2483 } 2484 2485 int 2486 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2487 { 2488 unsigned int i; 2489 unsigned int lcore_cpuid; 2490 int record_now; 2491 2492 record_now = 0; 2493 again: 2494 for (i = 0; i < nb_lc; i++) { 2495 lcore_cpuid = lcorelist[i]; 2496 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2497 printf("lcore %u not enabled\n", lcore_cpuid); 2498 return -1; 2499 } 2500 if (lcore_cpuid == rte_get_master_lcore()) { 2501 printf("lcore %u cannot be masked on for running " 2502 "packet forwarding, which is the master lcore " 2503 "and reserved for command line parsing only\n", 2504 lcore_cpuid); 2505 return -1; 2506 } 2507 if (record_now) 2508 fwd_lcores_cpuids[i] = lcore_cpuid; 2509 } 2510 if (record_now == 0) { 2511 record_now = 1; 2512 goto again; 2513 } 2514 nb_cfg_lcores = (lcoreid_t) nb_lc; 2515 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2516 printf("previous number of forwarding cores %u - changed to " 2517 "number of configured cores %u\n", 2518 (unsigned int) nb_fwd_lcores, nb_lc); 2519 nb_fwd_lcores = (lcoreid_t) nb_lc; 2520 } 2521 2522 return 0; 2523 } 2524 2525 int 2526 set_fwd_lcores_mask(uint64_t lcoremask) 2527 { 2528 unsigned int lcorelist[64]; 2529 unsigned int nb_lc; 2530 unsigned int i; 2531 2532 if (lcoremask == 0) { 2533 printf("Invalid NULL mask of cores\n"); 2534 return -1; 2535 } 2536 nb_lc = 0; 2537 for (i = 0; i < 64; i++) { 2538 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2539 continue; 2540 lcorelist[nb_lc++] = i; 2541 } 2542 return set_fwd_lcores_list(lcorelist, nb_lc); 2543 } 2544 2545 void 2546 set_fwd_lcores_number(uint16_t nb_lc) 2547 { 2548 if (nb_lc > nb_cfg_lcores) { 2549 printf("nb fwd cores %u > %u (max. number of configured " 2550 "lcores) - ignored\n", 2551 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2552 return; 2553 } 2554 nb_fwd_lcores = (lcoreid_t) nb_lc; 2555 printf("Number of forwarding cores set to %u\n", 2556 (unsigned int) nb_fwd_lcores); 2557 } 2558 2559 void 2560 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2561 { 2562 unsigned int i; 2563 portid_t port_id; 2564 int record_now; 2565 2566 record_now = 0; 2567 again: 2568 for (i = 0; i < nb_pt; i++) { 2569 port_id = (portid_t) portlist[i]; 2570 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2571 return; 2572 if (record_now) 2573 fwd_ports_ids[i] = port_id; 2574 } 2575 if (record_now == 0) { 2576 record_now = 1; 2577 goto again; 2578 } 2579 nb_cfg_ports = (portid_t) nb_pt; 2580 if (nb_fwd_ports != (portid_t) nb_pt) { 2581 printf("previous number of forwarding ports %u - changed to " 2582 "number of configured ports %u\n", 2583 (unsigned int) nb_fwd_ports, nb_pt); 2584 nb_fwd_ports = (portid_t) nb_pt; 2585 } 2586 } 2587 2588 void 2589 set_fwd_ports_mask(uint64_t portmask) 2590 { 2591 unsigned int portlist[64]; 2592 unsigned int nb_pt; 2593 unsigned int i; 2594 2595 if (portmask == 0) { 2596 printf("Invalid NULL mask of ports\n"); 2597 return; 2598 } 2599 nb_pt = 0; 2600 RTE_ETH_FOREACH_DEV(i) { 2601 if (! ((uint64_t)(1ULL << i) & portmask)) 2602 continue; 2603 portlist[nb_pt++] = i; 2604 } 2605 set_fwd_ports_list(portlist, nb_pt); 2606 } 2607 2608 void 2609 set_fwd_ports_number(uint16_t nb_pt) 2610 { 2611 if (nb_pt > nb_cfg_ports) { 2612 printf("nb fwd ports %u > %u (number of configured " 2613 "ports) - ignored\n", 2614 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2615 return; 2616 } 2617 nb_fwd_ports = (portid_t) nb_pt; 2618 printf("Number of forwarding ports set to %u\n", 2619 (unsigned int) nb_fwd_ports); 2620 } 2621 2622 int 2623 port_is_forwarding(portid_t port_id) 2624 { 2625 unsigned int i; 2626 2627 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2628 return -1; 2629 2630 for (i = 0; i < nb_fwd_ports; i++) { 2631 if (fwd_ports_ids[i] == port_id) 2632 return 1; 2633 } 2634 2635 return 0; 2636 } 2637 2638 void 2639 set_nb_pkt_per_burst(uint16_t nb) 2640 { 2641 if (nb > MAX_PKT_BURST) { 2642 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2643 " ignored\n", 2644 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2645 return; 2646 } 2647 nb_pkt_per_burst = nb; 2648 printf("Number of packets per burst set to %u\n", 2649 (unsigned int) nb_pkt_per_burst); 2650 } 2651 2652 static const char * 2653 tx_split_get_name(enum tx_pkt_split split) 2654 { 2655 uint32_t i; 2656 2657 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2658 if (tx_split_name[i].split == split) 2659 return tx_split_name[i].name; 2660 } 2661 return NULL; 2662 } 2663 2664 void 2665 set_tx_pkt_split(const char *name) 2666 { 2667 uint32_t i; 2668 2669 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2670 if (strcmp(tx_split_name[i].name, name) == 0) { 2671 tx_pkt_split = tx_split_name[i].split; 2672 return; 2673 } 2674 } 2675 printf("unknown value: \"%s\"\n", name); 2676 } 2677 2678 void 2679 show_tx_pkt_segments(void) 2680 { 2681 uint32_t i, n; 2682 const char *split; 2683 2684 n = tx_pkt_nb_segs; 2685 split = tx_split_get_name(tx_pkt_split); 2686 2687 printf("Number of segments: %u\n", n); 2688 printf("Segment sizes: "); 2689 for (i = 0; i != n - 1; i++) 2690 printf("%hu,", tx_pkt_seg_lengths[i]); 2691 printf("%hu\n", tx_pkt_seg_lengths[i]); 2692 printf("Split packet: %s\n", split); 2693 } 2694 2695 void 2696 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2697 { 2698 uint16_t tx_pkt_len; 2699 unsigned i; 2700 2701 if (nb_segs >= (unsigned) nb_txd) { 2702 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2703 nb_segs, (unsigned int) nb_txd); 2704 return; 2705 } 2706 2707 /* 2708 * Check that each segment length is greater or equal than 2709 * the mbuf data sise. 2710 * Check also that the total packet length is greater or equal than the 2711 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 2712 * 20 + 8). 2713 */ 2714 tx_pkt_len = 0; 2715 for (i = 0; i < nb_segs; i++) { 2716 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2717 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2718 i, seg_lengths[i], (unsigned) mbuf_data_size); 2719 return; 2720 } 2721 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2722 } 2723 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 2724 printf("total packet length=%u < %d - give up\n", 2725 (unsigned) tx_pkt_len, 2726 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 2727 return; 2728 } 2729 2730 for (i = 0; i < nb_segs; i++) 2731 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2732 2733 tx_pkt_length = tx_pkt_len; 2734 tx_pkt_nb_segs = (uint8_t) nb_segs; 2735 } 2736 2737 void 2738 setup_gro(const char *onoff, portid_t port_id) 2739 { 2740 if (!rte_eth_dev_is_valid_port(port_id)) { 2741 printf("invalid port id %u\n", port_id); 2742 return; 2743 } 2744 if (test_done == 0) { 2745 printf("Before enable/disable GRO," 2746 " please stop forwarding first\n"); 2747 return; 2748 } 2749 if (strcmp(onoff, "on") == 0) { 2750 if (gro_ports[port_id].enable != 0) { 2751 printf("Port %u has enabled GRO. Please" 2752 " disable GRO first\n", port_id); 2753 return; 2754 } 2755 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2756 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2757 gro_ports[port_id].param.max_flow_num = 2758 GRO_DEFAULT_FLOW_NUM; 2759 gro_ports[port_id].param.max_item_per_flow = 2760 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2761 } 2762 gro_ports[port_id].enable = 1; 2763 } else { 2764 if (gro_ports[port_id].enable == 0) { 2765 printf("Port %u has disabled GRO\n", port_id); 2766 return; 2767 } 2768 gro_ports[port_id].enable = 0; 2769 } 2770 } 2771 2772 void 2773 setup_gro_flush_cycles(uint8_t cycles) 2774 { 2775 if (test_done == 0) { 2776 printf("Before change flush interval for GRO," 2777 " please stop forwarding first.\n"); 2778 return; 2779 } 2780 2781 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2782 GRO_DEFAULT_FLUSH_CYCLES) { 2783 printf("The flushing cycle be in the range" 2784 " of 1 to %u. Revert to the default" 2785 " value %u.\n", 2786 GRO_MAX_FLUSH_CYCLES, 2787 GRO_DEFAULT_FLUSH_CYCLES); 2788 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2789 } 2790 2791 gro_flush_cycles = cycles; 2792 } 2793 2794 void 2795 show_gro(portid_t port_id) 2796 { 2797 struct rte_gro_param *param; 2798 uint32_t max_pkts_num; 2799 2800 param = &gro_ports[port_id].param; 2801 2802 if (!rte_eth_dev_is_valid_port(port_id)) { 2803 printf("Invalid port id %u.\n", port_id); 2804 return; 2805 } 2806 if (gro_ports[port_id].enable) { 2807 printf("GRO type: TCP/IPv4\n"); 2808 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2809 max_pkts_num = param->max_flow_num * 2810 param->max_item_per_flow; 2811 } else 2812 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2813 printf("Max number of packets to perform GRO: %u\n", 2814 max_pkts_num); 2815 printf("Flushing cycles: %u\n", gro_flush_cycles); 2816 } else 2817 printf("Port %u doesn't enable GRO.\n", port_id); 2818 } 2819 2820 void 2821 setup_gso(const char *mode, portid_t port_id) 2822 { 2823 if (!rte_eth_dev_is_valid_port(port_id)) { 2824 printf("invalid port id %u\n", port_id); 2825 return; 2826 } 2827 if (strcmp(mode, "on") == 0) { 2828 if (test_done == 0) { 2829 printf("before enabling GSO," 2830 " please stop forwarding first\n"); 2831 return; 2832 } 2833 gso_ports[port_id].enable = 1; 2834 } else if (strcmp(mode, "off") == 0) { 2835 if (test_done == 0) { 2836 printf("before disabling GSO," 2837 " please stop forwarding first\n"); 2838 return; 2839 } 2840 gso_ports[port_id].enable = 0; 2841 } 2842 } 2843 2844 char* 2845 list_pkt_forwarding_modes(void) 2846 { 2847 static char fwd_modes[128] = ""; 2848 const char *separator = "|"; 2849 struct fwd_engine *fwd_eng; 2850 unsigned i = 0; 2851 2852 if (strlen (fwd_modes) == 0) { 2853 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2854 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2855 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2856 strncat(fwd_modes, separator, 2857 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2858 } 2859 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2860 } 2861 2862 return fwd_modes; 2863 } 2864 2865 char* 2866 list_pkt_forwarding_retry_modes(void) 2867 { 2868 static char fwd_modes[128] = ""; 2869 const char *separator = "|"; 2870 struct fwd_engine *fwd_eng; 2871 unsigned i = 0; 2872 2873 if (strlen(fwd_modes) == 0) { 2874 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2875 if (fwd_eng == &rx_only_engine) 2876 continue; 2877 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2878 sizeof(fwd_modes) - 2879 strlen(fwd_modes) - 1); 2880 strncat(fwd_modes, separator, 2881 sizeof(fwd_modes) - 2882 strlen(fwd_modes) - 1); 2883 } 2884 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2885 } 2886 2887 return fwd_modes; 2888 } 2889 2890 void 2891 set_pkt_forwarding_mode(const char *fwd_mode_name) 2892 { 2893 struct fwd_engine *fwd_eng; 2894 unsigned i; 2895 2896 i = 0; 2897 while ((fwd_eng = fwd_engines[i]) != NULL) { 2898 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2899 printf("Set %s packet forwarding mode%s\n", 2900 fwd_mode_name, 2901 retry_enabled == 0 ? "" : " with retry"); 2902 cur_fwd_eng = fwd_eng; 2903 return; 2904 } 2905 i++; 2906 } 2907 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2908 } 2909 2910 void 2911 add_rx_dump_callbacks(portid_t portid) 2912 { 2913 struct rte_eth_dev_info dev_info; 2914 uint16_t queue; 2915 int ret; 2916 2917 if (port_id_is_invalid(portid, ENABLED_WARN)) 2918 return; 2919 2920 ret = eth_dev_info_get_print_err(portid, &dev_info); 2921 if (ret != 0) 2922 return; 2923 2924 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2925 if (!ports[portid].rx_dump_cb[queue]) 2926 ports[portid].rx_dump_cb[queue] = 2927 rte_eth_add_rx_callback(portid, queue, 2928 dump_rx_pkts, NULL); 2929 } 2930 2931 void 2932 add_tx_dump_callbacks(portid_t portid) 2933 { 2934 struct rte_eth_dev_info dev_info; 2935 uint16_t queue; 2936 int ret; 2937 2938 if (port_id_is_invalid(portid, ENABLED_WARN)) 2939 return; 2940 2941 ret = eth_dev_info_get_print_err(portid, &dev_info); 2942 if (ret != 0) 2943 return; 2944 2945 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2946 if (!ports[portid].tx_dump_cb[queue]) 2947 ports[portid].tx_dump_cb[queue] = 2948 rte_eth_add_tx_callback(portid, queue, 2949 dump_tx_pkts, NULL); 2950 } 2951 2952 void 2953 remove_rx_dump_callbacks(portid_t portid) 2954 { 2955 struct rte_eth_dev_info dev_info; 2956 uint16_t queue; 2957 int ret; 2958 2959 if (port_id_is_invalid(portid, ENABLED_WARN)) 2960 return; 2961 2962 ret = eth_dev_info_get_print_err(portid, &dev_info); 2963 if (ret != 0) 2964 return; 2965 2966 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2967 if (ports[portid].rx_dump_cb[queue]) { 2968 rte_eth_remove_rx_callback(portid, queue, 2969 ports[portid].rx_dump_cb[queue]); 2970 ports[portid].rx_dump_cb[queue] = NULL; 2971 } 2972 } 2973 2974 void 2975 remove_tx_dump_callbacks(portid_t portid) 2976 { 2977 struct rte_eth_dev_info dev_info; 2978 uint16_t queue; 2979 int ret; 2980 2981 if (port_id_is_invalid(portid, ENABLED_WARN)) 2982 return; 2983 2984 ret = eth_dev_info_get_print_err(portid, &dev_info); 2985 if (ret != 0) 2986 return; 2987 2988 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2989 if (ports[portid].tx_dump_cb[queue]) { 2990 rte_eth_remove_tx_callback(portid, queue, 2991 ports[portid].tx_dump_cb[queue]); 2992 ports[portid].tx_dump_cb[queue] = NULL; 2993 } 2994 } 2995 2996 void 2997 configure_rxtx_dump_callbacks(uint16_t verbose) 2998 { 2999 portid_t portid; 3000 3001 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3002 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 3003 return; 3004 #endif 3005 3006 RTE_ETH_FOREACH_DEV(portid) 3007 { 3008 if (verbose == 1 || verbose > 2) 3009 add_rx_dump_callbacks(portid); 3010 else 3011 remove_rx_dump_callbacks(portid); 3012 if (verbose >= 2) 3013 add_tx_dump_callbacks(portid); 3014 else 3015 remove_tx_dump_callbacks(portid); 3016 } 3017 } 3018 3019 void 3020 set_verbose_level(uint16_t vb_level) 3021 { 3022 printf("Change verbose level from %u to %u\n", 3023 (unsigned int) verbose_level, (unsigned int) vb_level); 3024 verbose_level = vb_level; 3025 configure_rxtx_dump_callbacks(verbose_level); 3026 } 3027 3028 void 3029 vlan_extend_set(portid_t port_id, int on) 3030 { 3031 int diag; 3032 int vlan_offload; 3033 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3034 3035 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3036 return; 3037 3038 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3039 3040 if (on) { 3041 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 3042 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3043 } else { 3044 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 3045 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3046 } 3047 3048 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3049 if (diag < 0) 3050 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 3051 "diag=%d\n", port_id, on, diag); 3052 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3053 } 3054 3055 void 3056 rx_vlan_strip_set(portid_t port_id, int on) 3057 { 3058 int diag; 3059 int vlan_offload; 3060 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3061 3062 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3063 return; 3064 3065 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3066 3067 if (on) { 3068 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 3069 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3070 } else { 3071 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 3072 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3073 } 3074 3075 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3076 if (diag < 0) 3077 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 3078 "diag=%d\n", port_id, on, diag); 3079 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3080 } 3081 3082 void 3083 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 3084 { 3085 int diag; 3086 3087 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3088 return; 3089 3090 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 3091 if (diag < 0) 3092 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 3093 "diag=%d\n", port_id, queue_id, on, diag); 3094 } 3095 3096 void 3097 rx_vlan_filter_set(portid_t port_id, int on) 3098 { 3099 int diag; 3100 int vlan_offload; 3101 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3102 3103 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3104 return; 3105 3106 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3107 3108 if (on) { 3109 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 3110 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3111 } else { 3112 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 3113 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3114 } 3115 3116 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3117 if (diag < 0) 3118 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 3119 "diag=%d\n", port_id, on, diag); 3120 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3121 } 3122 3123 void 3124 rx_vlan_qinq_strip_set(portid_t port_id, int on) 3125 { 3126 int diag; 3127 int vlan_offload; 3128 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3129 3130 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3131 return; 3132 3133 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3134 3135 if (on) { 3136 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 3137 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3138 } else { 3139 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 3140 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3141 } 3142 3143 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 3144 if (diag < 0) 3145 printf("%s(port_pi=%d, on=%d) failed " 3146 "diag=%d\n", __func__, port_id, on, diag); 3147 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 3148 } 3149 3150 int 3151 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 3152 { 3153 int diag; 3154 3155 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3156 return 1; 3157 if (vlan_id_is_invalid(vlan_id)) 3158 return 1; 3159 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 3160 if (diag == 0) 3161 return 0; 3162 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 3163 "diag=%d\n", 3164 port_id, vlan_id, on, diag); 3165 return -1; 3166 } 3167 3168 void 3169 rx_vlan_all_filter_set(portid_t port_id, int on) 3170 { 3171 uint16_t vlan_id; 3172 3173 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3174 return; 3175 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 3176 if (rx_vft_set(port_id, vlan_id, on)) 3177 break; 3178 } 3179 } 3180 3181 void 3182 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 3183 { 3184 int diag; 3185 3186 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3187 return; 3188 3189 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 3190 if (diag == 0) 3191 return; 3192 3193 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 3194 "diag=%d\n", 3195 port_id, vlan_type, tp_id, diag); 3196 } 3197 3198 void 3199 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3200 { 3201 struct rte_eth_dev_info dev_info; 3202 int ret; 3203 3204 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3205 return; 3206 if (vlan_id_is_invalid(vlan_id)) 3207 return; 3208 3209 if (ports[port_id].dev_conf.txmode.offloads & 3210 DEV_TX_OFFLOAD_QINQ_INSERT) { 3211 printf("Error, as QinQ has been enabled.\n"); 3212 return; 3213 } 3214 3215 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3216 if (ret != 0) 3217 return; 3218 3219 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3220 printf("Error: vlan insert is not supported by port %d\n", 3221 port_id); 3222 return; 3223 } 3224 3225 tx_vlan_reset(port_id); 3226 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3227 ports[port_id].tx_vlan_id = vlan_id; 3228 } 3229 3230 void 3231 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3232 { 3233 struct rte_eth_dev_info dev_info; 3234 int ret; 3235 3236 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3237 return; 3238 if (vlan_id_is_invalid(vlan_id)) 3239 return; 3240 if (vlan_id_is_invalid(vlan_id_outer)) 3241 return; 3242 3243 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3244 if (ret != 0) 3245 return; 3246 3247 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3248 printf("Error: qinq insert not supported by port %d\n", 3249 port_id); 3250 return; 3251 } 3252 3253 tx_vlan_reset(port_id); 3254 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 3255 DEV_TX_OFFLOAD_QINQ_INSERT); 3256 ports[port_id].tx_vlan_id = vlan_id; 3257 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3258 } 3259 3260 void 3261 tx_vlan_reset(portid_t port_id) 3262 { 3263 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3264 return; 3265 ports[port_id].dev_conf.txmode.offloads &= 3266 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3267 DEV_TX_OFFLOAD_QINQ_INSERT); 3268 ports[port_id].tx_vlan_id = 0; 3269 ports[port_id].tx_vlan_id_outer = 0; 3270 } 3271 3272 void 3273 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3274 { 3275 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3276 return; 3277 3278 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3279 } 3280 3281 void 3282 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3283 { 3284 uint16_t i; 3285 uint8_t existing_mapping_found = 0; 3286 3287 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3288 return; 3289 3290 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3291 return; 3292 3293 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3294 printf("map_value not in required range 0..%d\n", 3295 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3296 return; 3297 } 3298 3299 if (!is_rx) { /*then tx*/ 3300 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3301 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3302 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3303 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3304 existing_mapping_found = 1; 3305 break; 3306 } 3307 } 3308 if (!existing_mapping_found) { /* A new additional mapping... */ 3309 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3310 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3311 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3312 nb_tx_queue_stats_mappings++; 3313 } 3314 } 3315 else { /*rx*/ 3316 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3317 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3318 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3319 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3320 existing_mapping_found = 1; 3321 break; 3322 } 3323 } 3324 if (!existing_mapping_found) { /* A new additional mapping... */ 3325 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3326 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3327 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3328 nb_rx_queue_stats_mappings++; 3329 } 3330 } 3331 } 3332 3333 void 3334 set_xstats_hide_zero(uint8_t on_off) 3335 { 3336 xstats_hide_zero = on_off; 3337 } 3338 3339 static inline void 3340 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3341 { 3342 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3343 3344 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3345 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3346 " tunnel_id: 0x%08x", 3347 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3348 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3349 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3350 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3351 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3352 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3353 3354 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3355 rte_be_to_cpu_16(mask->src_port_mask), 3356 rte_be_to_cpu_16(mask->dst_port_mask)); 3357 3358 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3359 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3360 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3361 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3362 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3363 3364 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3365 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3366 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3367 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3368 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3369 } 3370 3371 printf("\n"); 3372 } 3373 3374 static inline void 3375 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3376 { 3377 struct rte_eth_flex_payload_cfg *cfg; 3378 uint32_t i, j; 3379 3380 for (i = 0; i < flex_conf->nb_payloads; i++) { 3381 cfg = &flex_conf->flex_set[i]; 3382 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3383 printf("\n RAW: "); 3384 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3385 printf("\n L2_PAYLOAD: "); 3386 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3387 printf("\n L3_PAYLOAD: "); 3388 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3389 printf("\n L4_PAYLOAD: "); 3390 else 3391 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3392 for (j = 0; j < num; j++) 3393 printf(" %-5u", cfg->src_offset[j]); 3394 } 3395 printf("\n"); 3396 } 3397 3398 static char * 3399 flowtype_to_str(uint16_t flow_type) 3400 { 3401 struct flow_type_info { 3402 char str[32]; 3403 uint16_t ftype; 3404 }; 3405 3406 uint8_t i; 3407 static struct flow_type_info flowtype_str_table[] = { 3408 {"raw", RTE_ETH_FLOW_RAW}, 3409 {"ipv4", RTE_ETH_FLOW_IPV4}, 3410 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3411 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3412 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3413 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3414 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3415 {"ipv6", RTE_ETH_FLOW_IPV6}, 3416 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3417 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3418 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3419 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3420 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3421 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3422 {"port", RTE_ETH_FLOW_PORT}, 3423 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3424 {"geneve", RTE_ETH_FLOW_GENEVE}, 3425 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3426 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3427 }; 3428 3429 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3430 if (flowtype_str_table[i].ftype == flow_type) 3431 return flowtype_str_table[i].str; 3432 } 3433 3434 return NULL; 3435 } 3436 3437 static inline void 3438 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3439 { 3440 struct rte_eth_fdir_flex_mask *mask; 3441 uint32_t i, j; 3442 char *p; 3443 3444 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3445 mask = &flex_conf->flex_mask[i]; 3446 p = flowtype_to_str(mask->flow_type); 3447 printf("\n %s:\t", p ? p : "unknown"); 3448 for (j = 0; j < num; j++) 3449 printf(" %02x", mask->mask[j]); 3450 } 3451 printf("\n"); 3452 } 3453 3454 static inline void 3455 print_fdir_flow_type(uint32_t flow_types_mask) 3456 { 3457 int i; 3458 char *p; 3459 3460 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3461 if (!(flow_types_mask & (1 << i))) 3462 continue; 3463 p = flowtype_to_str(i); 3464 if (p) 3465 printf(" %s", p); 3466 else 3467 printf(" unknown"); 3468 } 3469 printf("\n"); 3470 } 3471 3472 void 3473 fdir_get_infos(portid_t port_id) 3474 { 3475 struct rte_eth_fdir_stats fdir_stat; 3476 struct rte_eth_fdir_info fdir_info; 3477 int ret; 3478 3479 static const char *fdir_stats_border = "########################"; 3480 3481 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3482 return; 3483 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3484 if (ret < 0) { 3485 printf("\n FDIR is not supported on port %-2d\n", 3486 port_id); 3487 return; 3488 } 3489 3490 memset(&fdir_info, 0, sizeof(fdir_info)); 3491 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3492 RTE_ETH_FILTER_INFO, &fdir_info); 3493 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3494 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3495 RTE_ETH_FILTER_STATS, &fdir_stat); 3496 printf("\n %s FDIR infos for port %-2d %s\n", 3497 fdir_stats_border, port_id, fdir_stats_border); 3498 printf(" MODE: "); 3499 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3500 printf(" PERFECT\n"); 3501 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3502 printf(" PERFECT-MAC-VLAN\n"); 3503 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3504 printf(" PERFECT-TUNNEL\n"); 3505 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3506 printf(" SIGNATURE\n"); 3507 else 3508 printf(" DISABLE\n"); 3509 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3510 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3511 printf(" SUPPORTED FLOW TYPE: "); 3512 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3513 } 3514 printf(" FLEX PAYLOAD INFO:\n"); 3515 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3516 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3517 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3518 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3519 fdir_info.flex_payload_unit, 3520 fdir_info.max_flex_payload_segment_num, 3521 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3522 printf(" MASK: "); 3523 print_fdir_mask(&fdir_info.mask); 3524 if (fdir_info.flex_conf.nb_payloads > 0) { 3525 printf(" FLEX PAYLOAD SRC OFFSET:"); 3526 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3527 } 3528 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3529 printf(" FLEX MASK CFG:"); 3530 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3531 } 3532 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3533 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3534 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3535 fdir_info.guarant_spc, fdir_info.best_spc); 3536 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3537 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3538 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3539 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3540 fdir_stat.collision, fdir_stat.free, 3541 fdir_stat.maxhash, fdir_stat.maxlen, 3542 fdir_stat.add, fdir_stat.remove, 3543 fdir_stat.f_add, fdir_stat.f_remove); 3544 printf(" %s############################%s\n", 3545 fdir_stats_border, fdir_stats_border); 3546 } 3547 3548 void 3549 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3550 { 3551 struct rte_port *port; 3552 struct rte_eth_fdir_flex_conf *flex_conf; 3553 int i, idx = 0; 3554 3555 port = &ports[port_id]; 3556 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3557 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3558 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3559 idx = i; 3560 break; 3561 } 3562 } 3563 if (i >= RTE_ETH_FLOW_MAX) { 3564 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3565 idx = flex_conf->nb_flexmasks; 3566 flex_conf->nb_flexmasks++; 3567 } else { 3568 printf("The flex mask table is full. Can not set flex" 3569 " mask for flow_type(%u).", cfg->flow_type); 3570 return; 3571 } 3572 } 3573 rte_memcpy(&flex_conf->flex_mask[idx], 3574 cfg, 3575 sizeof(struct rte_eth_fdir_flex_mask)); 3576 } 3577 3578 void 3579 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3580 { 3581 struct rte_port *port; 3582 struct rte_eth_fdir_flex_conf *flex_conf; 3583 int i, idx = 0; 3584 3585 port = &ports[port_id]; 3586 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3587 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3588 if (cfg->type == flex_conf->flex_set[i].type) { 3589 idx = i; 3590 break; 3591 } 3592 } 3593 if (i >= RTE_ETH_PAYLOAD_MAX) { 3594 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3595 idx = flex_conf->nb_payloads; 3596 flex_conf->nb_payloads++; 3597 } else { 3598 printf("The flex payload table is full. Can not set" 3599 " flex payload for type(%u).", cfg->type); 3600 return; 3601 } 3602 } 3603 rte_memcpy(&flex_conf->flex_set[idx], 3604 cfg, 3605 sizeof(struct rte_eth_flex_payload_cfg)); 3606 3607 } 3608 3609 void 3610 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3611 { 3612 #ifdef RTE_LIBRTE_IXGBE_PMD 3613 int diag; 3614 3615 if (is_rx) 3616 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3617 else 3618 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3619 3620 if (diag == 0) 3621 return; 3622 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3623 is_rx ? "rx" : "tx", port_id, diag); 3624 return; 3625 #endif 3626 printf("VF %s setting not supported for port %d\n", 3627 is_rx ? "Rx" : "Tx", port_id); 3628 RTE_SET_USED(vf); 3629 RTE_SET_USED(on); 3630 } 3631 3632 int 3633 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3634 { 3635 int diag; 3636 struct rte_eth_link link; 3637 int ret; 3638 3639 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3640 return 1; 3641 ret = eth_link_get_nowait_print_err(port_id, &link); 3642 if (ret < 0) 3643 return 1; 3644 if (rate > link.link_speed) { 3645 printf("Invalid rate value:%u bigger than link speed: %u\n", 3646 rate, link.link_speed); 3647 return 1; 3648 } 3649 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3650 if (diag == 0) 3651 return diag; 3652 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3653 port_id, diag); 3654 return diag; 3655 } 3656 3657 int 3658 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3659 { 3660 int diag = -ENOTSUP; 3661 3662 RTE_SET_USED(vf); 3663 RTE_SET_USED(rate); 3664 RTE_SET_USED(q_msk); 3665 3666 #ifdef RTE_LIBRTE_IXGBE_PMD 3667 if (diag == -ENOTSUP) 3668 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3669 q_msk); 3670 #endif 3671 #ifdef RTE_LIBRTE_BNXT_PMD 3672 if (diag == -ENOTSUP) 3673 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3674 #endif 3675 if (diag == 0) 3676 return diag; 3677 3678 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3679 port_id, diag); 3680 return diag; 3681 } 3682 3683 /* 3684 * Functions to manage the set of filtered Multicast MAC addresses. 3685 * 3686 * A pool of filtered multicast MAC addresses is associated with each port. 3687 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3688 * The address of the pool and the number of valid multicast MAC addresses 3689 * recorded in the pool are stored in the fields "mc_addr_pool" and 3690 * "mc_addr_nb" of the "rte_port" data structure. 3691 * 3692 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3693 * to be supplied a contiguous array of multicast MAC addresses. 3694 * To comply with this constraint, the set of multicast addresses recorded 3695 * into the pool are systematically compacted at the beginning of the pool. 3696 * Hence, when a multicast address is removed from the pool, all following 3697 * addresses, if any, are copied back to keep the set contiguous. 3698 */ 3699 #define MCAST_POOL_INC 32 3700 3701 static int 3702 mcast_addr_pool_extend(struct rte_port *port) 3703 { 3704 struct rte_ether_addr *mc_pool; 3705 size_t mc_pool_size; 3706 3707 /* 3708 * If a free entry is available at the end of the pool, just 3709 * increment the number of recorded multicast addresses. 3710 */ 3711 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3712 port->mc_addr_nb++; 3713 return 0; 3714 } 3715 3716 /* 3717 * [re]allocate a pool with MCAST_POOL_INC more entries. 3718 * The previous test guarantees that port->mc_addr_nb is a multiple 3719 * of MCAST_POOL_INC. 3720 */ 3721 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 3722 MCAST_POOL_INC); 3723 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 3724 mc_pool_size); 3725 if (mc_pool == NULL) { 3726 printf("allocation of pool of %u multicast addresses failed\n", 3727 port->mc_addr_nb + MCAST_POOL_INC); 3728 return -ENOMEM; 3729 } 3730 3731 port->mc_addr_pool = mc_pool; 3732 port->mc_addr_nb++; 3733 return 0; 3734 3735 } 3736 3737 static void 3738 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 3739 { 3740 if (mcast_addr_pool_extend(port) != 0) 3741 return; 3742 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 3743 } 3744 3745 static void 3746 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3747 { 3748 port->mc_addr_nb--; 3749 if (addr_idx == port->mc_addr_nb) { 3750 /* No need to recompact the set of multicast addressses. */ 3751 if (port->mc_addr_nb == 0) { 3752 /* free the pool of multicast addresses. */ 3753 free(port->mc_addr_pool); 3754 port->mc_addr_pool = NULL; 3755 } 3756 return; 3757 } 3758 memmove(&port->mc_addr_pool[addr_idx], 3759 &port->mc_addr_pool[addr_idx + 1], 3760 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 3761 } 3762 3763 static int 3764 eth_port_multicast_addr_list_set(portid_t port_id) 3765 { 3766 struct rte_port *port; 3767 int diag; 3768 3769 port = &ports[port_id]; 3770 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3771 port->mc_addr_nb); 3772 if (diag < 0) 3773 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3774 port_id, port->mc_addr_nb, diag); 3775 3776 return diag; 3777 } 3778 3779 void 3780 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 3781 { 3782 struct rte_port *port; 3783 uint32_t i; 3784 3785 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3786 return; 3787 3788 port = &ports[port_id]; 3789 3790 /* 3791 * Check that the added multicast MAC address is not already recorded 3792 * in the pool of multicast addresses. 3793 */ 3794 for (i = 0; i < port->mc_addr_nb; i++) { 3795 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3796 printf("multicast address already filtered by port\n"); 3797 return; 3798 } 3799 } 3800 3801 mcast_addr_pool_append(port, mc_addr); 3802 if (eth_port_multicast_addr_list_set(port_id) < 0) 3803 /* Rollback on failure, remove the address from the pool */ 3804 mcast_addr_pool_remove(port, i); 3805 } 3806 3807 void 3808 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 3809 { 3810 struct rte_port *port; 3811 uint32_t i; 3812 3813 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3814 return; 3815 3816 port = &ports[port_id]; 3817 3818 /* 3819 * Search the pool of multicast MAC addresses for the removed address. 3820 */ 3821 for (i = 0; i < port->mc_addr_nb; i++) { 3822 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3823 break; 3824 } 3825 if (i == port->mc_addr_nb) { 3826 printf("multicast address not filtered by port %d\n", port_id); 3827 return; 3828 } 3829 3830 mcast_addr_pool_remove(port, i); 3831 if (eth_port_multicast_addr_list_set(port_id) < 0) 3832 /* Rollback on failure, add the address back into the pool */ 3833 mcast_addr_pool_append(port, mc_addr); 3834 } 3835 3836 void 3837 port_dcb_info_display(portid_t port_id) 3838 { 3839 struct rte_eth_dcb_info dcb_info; 3840 uint16_t i; 3841 int ret; 3842 static const char *border = "================"; 3843 3844 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3845 return; 3846 3847 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3848 if (ret) { 3849 printf("\n Failed to get dcb infos on port %-2d\n", 3850 port_id); 3851 return; 3852 } 3853 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3854 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3855 printf("\n TC : "); 3856 for (i = 0; i < dcb_info.nb_tcs; i++) 3857 printf("\t%4d", i); 3858 printf("\n Priority : "); 3859 for (i = 0; i < dcb_info.nb_tcs; i++) 3860 printf("\t%4d", dcb_info.prio_tc[i]); 3861 printf("\n BW percent :"); 3862 for (i = 0; i < dcb_info.nb_tcs; i++) 3863 printf("\t%4d%%", dcb_info.tc_bws[i]); 3864 printf("\n RXQ base : "); 3865 for (i = 0; i < dcb_info.nb_tcs; i++) 3866 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3867 printf("\n RXQ number :"); 3868 for (i = 0; i < dcb_info.nb_tcs; i++) 3869 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3870 printf("\n TXQ base : "); 3871 for (i = 0; i < dcb_info.nb_tcs; i++) 3872 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3873 printf("\n TXQ number :"); 3874 for (i = 0; i < dcb_info.nb_tcs; i++) 3875 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3876 printf("\n"); 3877 } 3878 3879 uint8_t * 3880 open_file(const char *file_path, uint32_t *size) 3881 { 3882 int fd = open(file_path, O_RDONLY); 3883 off_t pkg_size; 3884 uint8_t *buf = NULL; 3885 int ret = 0; 3886 struct stat st_buf; 3887 3888 if (size) 3889 *size = 0; 3890 3891 if (fd == -1) { 3892 printf("%s: Failed to open %s\n", __func__, file_path); 3893 return buf; 3894 } 3895 3896 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3897 close(fd); 3898 printf("%s: File operations failed\n", __func__); 3899 return buf; 3900 } 3901 3902 pkg_size = st_buf.st_size; 3903 if (pkg_size < 0) { 3904 close(fd); 3905 printf("%s: File operations failed\n", __func__); 3906 return buf; 3907 } 3908 3909 buf = (uint8_t *)malloc(pkg_size); 3910 if (!buf) { 3911 close(fd); 3912 printf("%s: Failed to malloc memory\n", __func__); 3913 return buf; 3914 } 3915 3916 ret = read(fd, buf, pkg_size); 3917 if (ret < 0) { 3918 close(fd); 3919 printf("%s: File read operation failed\n", __func__); 3920 close_file(buf); 3921 return NULL; 3922 } 3923 3924 if (size) 3925 *size = pkg_size; 3926 3927 close(fd); 3928 3929 return buf; 3930 } 3931 3932 int 3933 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3934 { 3935 FILE *fh = fopen(file_path, "wb"); 3936 3937 if (fh == NULL) { 3938 printf("%s: Failed to open %s\n", __func__, file_path); 3939 return -1; 3940 } 3941 3942 if (fwrite(buf, 1, size, fh) != size) { 3943 fclose(fh); 3944 printf("%s: File write operation failed\n", __func__); 3945 return -1; 3946 } 3947 3948 fclose(fh); 3949 3950 return 0; 3951 } 3952 3953 int 3954 close_file(uint8_t *buf) 3955 { 3956 if (buf) { 3957 free((void *)buf); 3958 return 0; 3959 } 3960 3961 return -1; 3962 } 3963 3964 void 3965 port_queue_region_info_display(portid_t port_id, void *buf) 3966 { 3967 #ifdef RTE_LIBRTE_I40E_PMD 3968 uint16_t i, j; 3969 struct rte_pmd_i40e_queue_regions *info = 3970 (struct rte_pmd_i40e_queue_regions *)buf; 3971 static const char *queue_region_info_stats_border = "-------"; 3972 3973 if (!info->queue_region_number) 3974 printf("there is no region has been set before"); 3975 3976 printf("\n %s All queue region info for port=%2d %s", 3977 queue_region_info_stats_border, port_id, 3978 queue_region_info_stats_border); 3979 printf("\n queue_region_number: %-14u \n", 3980 info->queue_region_number); 3981 3982 for (i = 0; i < info->queue_region_number; i++) { 3983 printf("\n region_id: %-14u queue_number: %-14u " 3984 "queue_start_index: %-14u \n", 3985 info->region[i].region_id, 3986 info->region[i].queue_num, 3987 info->region[i].queue_start_index); 3988 3989 printf(" user_priority_num is %-14u :", 3990 info->region[i].user_priority_num); 3991 for (j = 0; j < info->region[i].user_priority_num; j++) 3992 printf(" %-14u ", info->region[i].user_priority[j]); 3993 3994 printf("\n flowtype_num is %-14u :", 3995 info->region[i].flowtype_num); 3996 for (j = 0; j < info->region[i].flowtype_num; j++) 3997 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3998 } 3999 #else 4000 RTE_SET_USED(port_id); 4001 RTE_SET_USED(buf); 4002 #endif 4003 4004 printf("\n\n"); 4005 } 4006 4007 void 4008 show_macs(portid_t port_id) 4009 { 4010 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 4011 struct rte_eth_dev_info dev_info; 4012 struct rte_ether_addr *addr; 4013 uint32_t i, num_macs = 0; 4014 struct rte_eth_dev *dev; 4015 4016 dev = &rte_eth_devices[port_id]; 4017 4018 rte_eth_dev_info_get(port_id, &dev_info); 4019 4020 for (i = 0; i < dev_info.max_mac_addrs; i++) { 4021 addr = &dev->data->mac_addrs[i]; 4022 4023 /* skip zero address */ 4024 if (rte_is_zero_ether_addr(addr)) 4025 continue; 4026 4027 num_macs++; 4028 } 4029 4030 printf("Number of MAC address added: %d\n", num_macs); 4031 4032 for (i = 0; i < dev_info.max_mac_addrs; i++) { 4033 addr = &dev->data->mac_addrs[i]; 4034 4035 /* skip zero address */ 4036 if (rte_is_zero_ether_addr(addr)) 4037 continue; 4038 4039 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 4040 printf(" %s\n", buf); 4041 } 4042 } 4043 4044 void 4045 show_mcast_macs(portid_t port_id) 4046 { 4047 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 4048 struct rte_ether_addr *addr; 4049 struct rte_port *port; 4050 uint32_t i; 4051 4052 port = &ports[port_id]; 4053 4054 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 4055 4056 for (i = 0; i < port->mc_addr_nb; i++) { 4057 addr = &port->mc_addr_pool[i]; 4058 4059 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 4060 printf(" %s\n", buf); 4061 } 4062 } 4063