1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 53 #include "testpmd.h" 54 55 #define ETHDEV_FWVERS_LEN 32 56 57 static char *flowtype_to_str(uint16_t flow_type); 58 59 static const struct { 60 enum tx_pkt_split split; 61 const char *name; 62 } tx_split_name[] = { 63 { 64 .split = TX_PKT_SPLIT_OFF, 65 .name = "off", 66 }, 67 { 68 .split = TX_PKT_SPLIT_ON, 69 .name = "on", 70 }, 71 { 72 .split = TX_PKT_SPLIT_RND, 73 .name = "rand", 74 }, 75 }; 76 77 const struct rss_type_info rss_type_table[] = { 78 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP | 79 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD | 80 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP}, 81 { "none", 0 }, 82 { "eth", ETH_RSS_ETH }, 83 { "l2-src-only", ETH_RSS_L2_SRC_ONLY }, 84 { "l2-dst-only", ETH_RSS_L2_DST_ONLY }, 85 { "vlan", ETH_RSS_VLAN }, 86 { "s-vlan", ETH_RSS_S_VLAN }, 87 { "c-vlan", ETH_RSS_C_VLAN }, 88 { "ipv4", ETH_RSS_IPV4 }, 89 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 90 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 91 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 92 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 93 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 94 { "ipv6", ETH_RSS_IPV6 }, 95 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 96 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 97 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 98 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 99 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 100 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 101 { "ipv6-ex", ETH_RSS_IPV6_EX }, 102 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 103 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 104 { "port", ETH_RSS_PORT }, 105 { "vxlan", ETH_RSS_VXLAN }, 106 { "geneve", ETH_RSS_GENEVE }, 107 { "nvgre", ETH_RSS_NVGRE }, 108 { "ip", ETH_RSS_IP }, 109 { "udp", ETH_RSS_UDP }, 110 { "tcp", ETH_RSS_TCP }, 111 { "sctp", ETH_RSS_SCTP }, 112 { "tunnel", ETH_RSS_TUNNEL }, 113 { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, 114 { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, 115 { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, 116 { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, 117 { "esp", ETH_RSS_ESP }, 118 { "ah", ETH_RSS_AH }, 119 { "l2tpv3", ETH_RSS_L2TPV3 }, 120 { "pfcp", ETH_RSS_PFCP }, 121 { NULL, 0 }, 122 }; 123 124 static void 125 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 126 { 127 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 128 rte_ether_format_addr(buf, 
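/* buf is sized by RTE_ETHER_ADDR_FMT_SIZE (18), enough for the canonical XX:XX:XX:XX:XX:XX form plus the NUL terminator */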
RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 129 printf("%s%s", name, buf); 130 } 131 132 void 133 nic_stats_display(portid_t port_id) 134 { 135 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 136 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 137 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 138 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 139 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 140 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 141 diff_cycles; 142 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 143 struct rte_eth_stats stats; 144 struct rte_port *port = &ports[port_id]; 145 uint8_t i; 146 147 static const char *nic_stats_border = "########################"; 148 149 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 150 print_valid_ports(); 151 return; 152 } 153 rte_eth_stats_get(port_id, &stats); 154 printf("\n %s NIC statistics for port %-2d %s\n", 155 nic_stats_border, port_id, nic_stats_border); 156 157 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 158 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 159 "%-"PRIu64"\n", 160 stats.ipackets, stats.imissed, stats.ibytes); 161 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 162 printf(" RX-nombuf: %-10"PRIu64"\n", 163 stats.rx_nombuf); 164 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 165 "%-"PRIu64"\n", 166 stats.opackets, stats.oerrors, stats.obytes); 167 } 168 else { 169 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 170 " RX-bytes: %10"PRIu64"\n", 171 stats.ipackets, stats.ierrors, stats.ibytes); 172 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 173 printf(" RX-nombuf: %10"PRIu64"\n", 174 stats.rx_nombuf); 175 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 176 " TX-bytes: %10"PRIu64"\n", 177 stats.opackets, stats.oerrors, stats.obytes); 178 } 179 180 if (port->rx_queue_stats_mapping_enabled) { 181 printf("\n"); 182 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 183 printf(" Stats reg %2d RX-packets: %10"PRIu64 184 " RX-errors: %10"PRIu64 185 " RX-bytes: %10"PRIu64"\n", 186 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 187 } 188 } 189 if (port->tx_queue_stats_mapping_enabled) { 190 printf("\n"); 191 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 192 printf(" Stats reg %2d TX-packets: %10"PRIu64 193 " TX-bytes: %10"PRIu64"\n", 194 i, stats.q_opackets[i], stats.q_obytes[i]); 195 } 196 } 197 198 diff_cycles = prev_cycles[port_id]; 199 prev_cycles[port_id] = rte_rdtsc(); 200 if (diff_cycles > 0) 201 diff_cycles = prev_cycles[port_id] - diff_cycles; 202 203 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 204 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 205 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 206 (stats.opackets - prev_pkts_tx[port_id]) : 0; 207 prev_pkts_rx[port_id] = stats.ipackets; 208 prev_pkts_tx[port_id] = stats.opackets; 209 mpps_rx = diff_cycles > 0 ? 210 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 211 mpps_tx = diff_cycles > 0 ? 212 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 213 214 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 215 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 216 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 217 (stats.obytes - prev_bytes_tx[port_id]) : 0; 218 prev_bytes_rx[port_id] = stats.ibytes; 219 prev_bytes_tx[port_id] = stats.obytes; 220 mbps_rx = diff_cycles > 0 ? 221 diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0; 222 mbps_tx = diff_cycles > 0 ? 
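/* average bytes/s over the sampling window, derived from the TSC delta; the printout below multiplies by 8 to report bits/s */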
diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0;

    printf("\n Throughput (since last show)\n");
    printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
           PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
           mpps_tx, mbps_tx * 8);

    printf(" %s############################%s\n",
           nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }
    rte_eth_stats_reset(port_id);
    printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
    struct rte_eth_xstat *xstats;
    int cnt_xstats, idx_xstat;
    struct rte_eth_xstat_name *xstats_names;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }
    printf("###### NIC extended statistics for port %-2d\n", port_id);
    if (!rte_eth_dev_is_valid_port(port_id)) {
        printf("Error: Invalid port number %i\n", port_id);
        return;
    }

    /* Get count */
    cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
    if (cnt_xstats < 0) {
        printf("Error: Cannot get count of xstats\n");
        return;
    }

    /* Get id-name lookup table */
    xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
    if (xstats_names == NULL) {
        printf("Cannot allocate memory for xstats lookup\n");
        return;
    }
    if (cnt_xstats != rte_eth_xstats_get_names(
            port_id, xstats_names, cnt_xstats)) {
        printf("Error: Cannot get xstats lookup\n");
        free(xstats_names);
        return;
    }

    /* Get stats themselves */
    xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
    if (xstats == NULL) {
        printf("Cannot allocate memory for xstats\n");
        free(xstats_names);
        return;
    }
    if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
        printf("Error: Unable to get xstats\n");
        free(xstats_names);
        free(xstats);
        return;
    }

    /* Display xstats */
    for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
        if (xstats_hide_zero && !xstats[idx_xstat].value)
            continue;
        printf("%s: %"PRIu64"\n",
               xstats_names[idx_xstat].name,
               xstats[idx_xstat].value);
    }
    free(xstats_names);
    free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }
    ret = rte_eth_xstats_reset(port_id);
    if (ret != 0) {
        /* ret is a negative errno value on failure */
        printf("%s: Error: failed to reset xstats (port %u): %s\n",
               __func__, port_id, strerror(-ret));
    }
}

void
nic_stats_mapping_display(portid_t port_id)
{
    struct rte_port *port = &ports[port_id];
    uint16_t i;

    static const char *nic_stats_mapping_border = "########################";

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    if ((!port->rx_queue_stats_mapping_enabled) &&
        (!port->tx_queue_stats_mapping_enabled)) {
        printf("Port id %d - either does not support queue statistic mapping or"
               " no queue statistic mapping set\n", port_id);
        return;
    }

    printf("\n %s NIC statistics mapping for port %-2d %s\n",
           nic_stats_mapping_border, port_id, nic_stats_mapping_border);

    if (port->rx_queue_stats_mapping_enabled) {
        for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
            if (rx_queue_stats_mappings[i].port_id ==
port_id) { 349 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 350 rx_queue_stats_mappings[i].queue_id, 351 rx_queue_stats_mappings[i].stats_counter_id); 352 } 353 } 354 printf("\n"); 355 } 356 357 358 if (port->tx_queue_stats_mapping_enabled) { 359 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 360 if (tx_queue_stats_mappings[i].port_id == port_id) { 361 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 362 tx_queue_stats_mappings[i].queue_id, 363 tx_queue_stats_mappings[i].stats_counter_id); 364 } 365 } 366 } 367 368 printf(" %s####################################%s\n", 369 nic_stats_mapping_border, nic_stats_mapping_border); 370 } 371 372 void 373 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 374 { 375 struct rte_eth_burst_mode mode; 376 struct rte_eth_rxq_info qinfo; 377 int32_t rc; 378 static const char *info_border = "*********************"; 379 380 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 381 if (rc != 0) { 382 printf("Failed to retrieve information for port: %u, " 383 "RX queue: %hu\nerror desc: %s(%d)\n", 384 port_id, queue_id, strerror(-rc), rc); 385 return; 386 } 387 388 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 389 info_border, port_id, queue_id, info_border); 390 391 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 392 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 393 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 394 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 395 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 396 printf("\nRX drop packets: %s", 397 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 398 printf("\nRX deferred start: %s", 399 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 400 printf("\nRX scattered packets: %s", 401 (qinfo.scattered_rx != 0) ? "on" : "off"); 402 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 403 404 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 405 printf("\nBurst mode: %s%s", 406 mode.info, 407 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 408 " (per queue)" : ""); 409 410 printf("\n"); 411 } 412 413 void 414 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 415 { 416 struct rte_eth_burst_mode mode; 417 struct rte_eth_txq_info qinfo; 418 int32_t rc; 419 static const char *info_border = "*********************"; 420 421 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 422 if (rc != 0) { 423 printf("Failed to retrieve information for port: %u, " 424 "TX queue: %hu\nerror desc: %s(%d)\n", 425 port_id, queue_id, strerror(-rc), rc); 426 return; 427 } 428 429 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 430 info_border, port_id, queue_id, info_border); 431 432 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 433 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 434 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 435 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 436 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 437 printf("\nTX deferred start: %s", 438 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 439 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 440 441 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 442 printf("\nBurst mode: %s%s", 443 mode.info, 444 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 
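/* set when the reported burst mode is specific to this queue rather than shared device-wide */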
445 " (per queue)" : ""); 446 447 printf("\n"); 448 } 449 450 static int bus_match_all(const struct rte_bus *bus, const void *data) 451 { 452 RTE_SET_USED(bus); 453 RTE_SET_USED(data); 454 return 0; 455 } 456 457 void 458 device_infos_display(const char *identifier) 459 { 460 static const char *info_border = "*********************"; 461 struct rte_bus *start = NULL, *next; 462 struct rte_dev_iterator dev_iter; 463 char name[RTE_ETH_NAME_MAX_LEN]; 464 struct rte_ether_addr mac_addr; 465 struct rte_device *dev; 466 struct rte_devargs da; 467 portid_t port_id; 468 char devstr[128]; 469 470 memset(&da, 0, sizeof(da)); 471 if (!identifier) 472 goto skip_parse; 473 474 if (rte_devargs_parsef(&da, "%s", identifier)) { 475 printf("cannot parse identifier\n"); 476 if (da.args) 477 free(da.args); 478 return; 479 } 480 481 skip_parse: 482 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 483 484 start = next; 485 if (identifier && da.bus != next) 486 continue; 487 488 /* Skip buses that don't have iterate method */ 489 if (!next->dev_iterate) 490 continue; 491 492 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 493 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 494 495 if (!dev->driver) 496 continue; 497 /* Check for matching device if identifier is present */ 498 if (identifier && 499 strncmp(da.name, dev->name, strlen(dev->name))) 500 continue; 501 printf("\n%s Infos for device %s %s\n", 502 info_border, dev->name, info_border); 503 printf("Bus name: %s", dev->bus->name); 504 printf("\nDriver name: %s", dev->driver->name); 505 printf("\nDevargs: %s", 506 dev->devargs ? dev->devargs->args : ""); 507 printf("\nConnect to socket: %d", dev->numa_node); 508 printf("\n"); 509 510 /* List ports with matching device name */ 511 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 512 printf("\n\tPort id: %-2d", port_id); 513 if (eth_macaddr_get_print_err(port_id, 514 &mac_addr) == 0) 515 print_ethaddr("\n\tMAC address: ", 516 &mac_addr); 517 rte_eth_dev_get_name_by_port(port_id, name); 518 printf("\n\tDevice name: %s", name); 519 printf("\n"); 520 } 521 } 522 }; 523 } 524 525 void 526 port_infos_display(portid_t port_id) 527 { 528 struct rte_port *port; 529 struct rte_ether_addr mac_addr; 530 struct rte_eth_link link; 531 struct rte_eth_dev_info dev_info; 532 int vlan_offload; 533 struct rte_mempool * mp; 534 static const char *info_border = "*********************"; 535 uint16_t mtu; 536 char name[RTE_ETH_NAME_MAX_LEN]; 537 int ret; 538 char fw_version[ETHDEV_FWVERS_LEN]; 539 540 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 541 print_valid_ports(); 542 return; 543 } 544 port = &ports[port_id]; 545 ret = eth_link_get_nowait_print_err(port_id, &link); 546 if (ret < 0) 547 return; 548 549 ret = eth_dev_info_get_print_err(port_id, &dev_info); 550 if (ret != 0) 551 return; 552 553 printf("\n%s Infos for port %-2d %s\n", 554 info_border, port_id, info_border); 555 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 556 print_ethaddr("MAC address: ", &mac_addr); 557 rte_eth_dev_get_name_by_port(port_id, name); 558 printf("\nDevice name: %s", name); 559 printf("\nDriver name: %s", dev_info.driver_name); 560 561 if (rte_eth_dev_fw_version_get(port_id, fw_version, 562 ETHDEV_FWVERS_LEN) == 0) 563 printf("\nFirmware-version: %s", fw_version); 564 else 565 printf("\nFirmware-version: %s", "not available"); 566 567 if (dev_info.device->devargs && dev_info.device->devargs->args) 568 printf("\nDevargs: %s", dev_info.device->devargs->args); 569 printf("\nConnect to socket: %u", port->socket_id); 570 571 if 
(port_numa[port_id] != NUMA_NO_CONFIG) { 572 mp = mbuf_pool_find(port_numa[port_id]); 573 if (mp) 574 printf("\nmemory allocation on the socket: %d", 575 port_numa[port_id]); 576 } else 577 printf("\nmemory allocation on the socket: %u",port->socket_id); 578 579 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 580 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 581 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 582 ("full-duplex") : ("half-duplex")); 583 584 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 585 printf("MTU: %u\n", mtu); 586 587 printf("Promiscuous mode: %s\n", 588 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 589 printf("Allmulticast mode: %s\n", 590 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 591 printf("Maximum number of MAC addresses: %u\n", 592 (unsigned int)(port->dev_info.max_mac_addrs)); 593 printf("Maximum number of MAC addresses of hash filtering: %u\n", 594 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 595 596 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 597 if (vlan_offload >= 0){ 598 printf("VLAN offload: \n"); 599 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 600 printf(" strip on, "); 601 else 602 printf(" strip off, "); 603 604 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 605 printf("filter on, "); 606 else 607 printf("filter off, "); 608 609 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 610 printf("extend on, "); 611 else 612 printf("extend off, "); 613 614 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 615 printf("qinq strip on\n"); 616 else 617 printf("qinq strip off\n"); 618 } 619 620 if (dev_info.hash_key_size > 0) 621 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 622 if (dev_info.reta_size > 0) 623 printf("Redirection table size: %u\n", dev_info.reta_size); 624 if (!dev_info.flow_type_rss_offloads) 625 printf("No RSS offload flow type is supported.\n"); 626 else { 627 uint16_t i; 628 char *p; 629 630 printf("Supported RSS offload flow types:\n"); 631 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 632 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 633 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 634 continue; 635 p = flowtype_to_str(i); 636 if (p) 637 printf(" %s\n", p); 638 else 639 printf(" user defined %d\n", i); 640 } 641 } 642 643 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 644 printf("Maximum configurable length of RX packet: %u\n", 645 dev_info.max_rx_pktlen); 646 printf("Maximum configurable size of LRO aggregated packet: %u\n", 647 dev_info.max_lro_pkt_size); 648 if (dev_info.max_vfs) 649 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 650 if (dev_info.max_vmdq_pools) 651 printf("Maximum number of VMDq pools: %u\n", 652 dev_info.max_vmdq_pools); 653 654 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 655 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 656 printf("Max possible number of RXDs per queue: %hu\n", 657 dev_info.rx_desc_lim.nb_max); 658 printf("Min possible number of RXDs per queue: %hu\n", 659 dev_info.rx_desc_lim.nb_min); 660 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 661 662 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 663 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 664 printf("Max possible number of TXDs per queue: %hu\n", 665 dev_info.tx_desc_lim.nb_max); 666 printf("Min possible number of TXDs per queue: %hu\n", 667 dev_info.tx_desc_lim.nb_min); 668 printf("TXDs number 
alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 669 printf("Max segment number per packet: %hu\n", 670 dev_info.tx_desc_lim.nb_seg_max); 671 printf("Max segment number per MTU/TSO: %hu\n", 672 dev_info.tx_desc_lim.nb_mtu_seg_max); 673 674 /* Show switch info only if valid switch domain and port id is set */ 675 if (dev_info.switch_info.domain_id != 676 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 677 if (dev_info.switch_info.name) 678 printf("Switch name: %s\n", dev_info.switch_info.name); 679 680 printf("Switch domain Id: %u\n", 681 dev_info.switch_info.domain_id); 682 printf("Switch Port Id: %u\n", 683 dev_info.switch_info.port_id); 684 } 685 } 686 687 void 688 port_summary_header_display(void) 689 { 690 uint16_t port_number; 691 692 port_number = rte_eth_dev_count_avail(); 693 printf("Number of available ports: %i\n", port_number); 694 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 695 "Driver", "Status", "Link"); 696 } 697 698 void 699 port_summary_display(portid_t port_id) 700 { 701 struct rte_ether_addr mac_addr; 702 struct rte_eth_link link; 703 struct rte_eth_dev_info dev_info; 704 char name[RTE_ETH_NAME_MAX_LEN]; 705 int ret; 706 707 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 708 print_valid_ports(); 709 return; 710 } 711 712 ret = eth_link_get_nowait_print_err(port_id, &link); 713 if (ret < 0) 714 return; 715 716 ret = eth_dev_info_get_print_err(port_id, &dev_info); 717 if (ret != 0) 718 return; 719 720 rte_eth_dev_get_name_by_port(port_id, name); 721 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 722 if (ret != 0) 723 return; 724 725 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n", 726 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 727 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 728 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 729 dev_info.driver_name, (link.link_status) ? 
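/* link.link_status holds ETH_LINK_UP (1) or ETH_LINK_DOWN (0) */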
("up") : ("down"), 730 (unsigned int) link.link_speed); 731 } 732 733 void 734 port_offload_cap_display(portid_t port_id) 735 { 736 struct rte_eth_dev_info dev_info; 737 static const char *info_border = "************"; 738 int ret; 739 740 if (port_id_is_invalid(port_id, ENABLED_WARN)) 741 return; 742 743 ret = eth_dev_info_get_print_err(port_id, &dev_info); 744 if (ret != 0) 745 return; 746 747 printf("\n%s Port %d supported offload features: %s\n", 748 info_border, port_id, info_border); 749 750 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 751 printf("VLAN stripped: "); 752 if (ports[port_id].dev_conf.rxmode.offloads & 753 DEV_RX_OFFLOAD_VLAN_STRIP) 754 printf("on\n"); 755 else 756 printf("off\n"); 757 } 758 759 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 760 printf("Double VLANs stripped: "); 761 if (ports[port_id].dev_conf.rxmode.offloads & 762 DEV_RX_OFFLOAD_QINQ_STRIP) 763 printf("on\n"); 764 else 765 printf("off\n"); 766 } 767 768 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 769 printf("RX IPv4 checksum: "); 770 if (ports[port_id].dev_conf.rxmode.offloads & 771 DEV_RX_OFFLOAD_IPV4_CKSUM) 772 printf("on\n"); 773 else 774 printf("off\n"); 775 } 776 777 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 778 printf("RX UDP checksum: "); 779 if (ports[port_id].dev_conf.rxmode.offloads & 780 DEV_RX_OFFLOAD_UDP_CKSUM) 781 printf("on\n"); 782 else 783 printf("off\n"); 784 } 785 786 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 787 printf("RX TCP checksum: "); 788 if (ports[port_id].dev_conf.rxmode.offloads & 789 DEV_RX_OFFLOAD_TCP_CKSUM) 790 printf("on\n"); 791 else 792 printf("off\n"); 793 } 794 795 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 796 printf("RX SCTP checksum: "); 797 if (ports[port_id].dev_conf.rxmode.offloads & 798 DEV_RX_OFFLOAD_SCTP_CKSUM) 799 printf("on\n"); 800 else 801 printf("off\n"); 802 } 803 804 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 805 printf("RX Outer IPv4 checksum: "); 806 if (ports[port_id].dev_conf.rxmode.offloads & 807 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 808 printf("on\n"); 809 else 810 printf("off\n"); 811 } 812 813 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 814 printf("RX Outer UDP checksum: "); 815 if (ports[port_id].dev_conf.rxmode.offloads & 816 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 817 printf("on\n"); 818 else 819 printf("off\n"); 820 } 821 822 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 823 printf("Large receive offload: "); 824 if (ports[port_id].dev_conf.rxmode.offloads & 825 DEV_RX_OFFLOAD_TCP_LRO) 826 printf("on\n"); 827 else 828 printf("off\n"); 829 } 830 831 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 832 printf("HW timestamp: "); 833 if (ports[port_id].dev_conf.rxmode.offloads & 834 DEV_RX_OFFLOAD_TIMESTAMP) 835 printf("on\n"); 836 else 837 printf("off\n"); 838 } 839 840 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 841 printf("Rx Keep CRC: "); 842 if (ports[port_id].dev_conf.rxmode.offloads & 843 DEV_RX_OFFLOAD_KEEP_CRC) 844 printf("on\n"); 845 else 846 printf("off\n"); 847 } 848 849 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 850 printf("RX offload security: "); 851 if (ports[port_id].dev_conf.rxmode.offloads & 852 DEV_RX_OFFLOAD_SECURITY) 853 printf("on\n"); 854 else 855 printf("off\n"); 856 } 857 858 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 859 printf("VLAN insert: "); 860 if (ports[port_id].dev_conf.txmode.offloads & 861 DEV_TX_OFFLOAD_VLAN_INSERT) 
862 printf("on\n"); 863 else 864 printf("off\n"); 865 } 866 867 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 868 printf("Double VLANs insert: "); 869 if (ports[port_id].dev_conf.txmode.offloads & 870 DEV_TX_OFFLOAD_QINQ_INSERT) 871 printf("on\n"); 872 else 873 printf("off\n"); 874 } 875 876 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 877 printf("TX IPv4 checksum: "); 878 if (ports[port_id].dev_conf.txmode.offloads & 879 DEV_TX_OFFLOAD_IPV4_CKSUM) 880 printf("on\n"); 881 else 882 printf("off\n"); 883 } 884 885 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 886 printf("TX UDP checksum: "); 887 if (ports[port_id].dev_conf.txmode.offloads & 888 DEV_TX_OFFLOAD_UDP_CKSUM) 889 printf("on\n"); 890 else 891 printf("off\n"); 892 } 893 894 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 895 printf("TX TCP checksum: "); 896 if (ports[port_id].dev_conf.txmode.offloads & 897 DEV_TX_OFFLOAD_TCP_CKSUM) 898 printf("on\n"); 899 else 900 printf("off\n"); 901 } 902 903 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 904 printf("TX SCTP checksum: "); 905 if (ports[port_id].dev_conf.txmode.offloads & 906 DEV_TX_OFFLOAD_SCTP_CKSUM) 907 printf("on\n"); 908 else 909 printf("off\n"); 910 } 911 912 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 913 printf("TX Outer IPv4 checksum: "); 914 if (ports[port_id].dev_conf.txmode.offloads & 915 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 916 printf("on\n"); 917 else 918 printf("off\n"); 919 } 920 921 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 922 printf("TX TCP segmentation: "); 923 if (ports[port_id].dev_conf.txmode.offloads & 924 DEV_TX_OFFLOAD_TCP_TSO) 925 printf("on\n"); 926 else 927 printf("off\n"); 928 } 929 930 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 931 printf("TX UDP segmentation: "); 932 if (ports[port_id].dev_conf.txmode.offloads & 933 DEV_TX_OFFLOAD_UDP_TSO) 934 printf("on\n"); 935 else 936 printf("off\n"); 937 } 938 939 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 940 printf("TSO for VXLAN tunnel packet: "); 941 if (ports[port_id].dev_conf.txmode.offloads & 942 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 943 printf("on\n"); 944 else 945 printf("off\n"); 946 } 947 948 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 949 printf("TSO for GRE tunnel packet: "); 950 if (ports[port_id].dev_conf.txmode.offloads & 951 DEV_TX_OFFLOAD_GRE_TNL_TSO) 952 printf("on\n"); 953 else 954 printf("off\n"); 955 } 956 957 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 958 printf("TSO for IPIP tunnel packet: "); 959 if (ports[port_id].dev_conf.txmode.offloads & 960 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 961 printf("on\n"); 962 else 963 printf("off\n"); 964 } 965 966 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 967 printf("TSO for GENEVE tunnel packet: "); 968 if (ports[port_id].dev_conf.txmode.offloads & 969 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 970 printf("on\n"); 971 else 972 printf("off\n"); 973 } 974 975 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 976 printf("IP tunnel TSO: "); 977 if (ports[port_id].dev_conf.txmode.offloads & 978 DEV_TX_OFFLOAD_IP_TNL_TSO) 979 printf("on\n"); 980 else 981 printf("off\n"); 982 } 983 984 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 985 printf("UDP tunnel TSO: "); 986 if (ports[port_id].dev_conf.txmode.offloads & 987 DEV_TX_OFFLOAD_UDP_TNL_TSO) 988 printf("on\n"); 989 else 990 printf("off\n"); 991 } 992 993 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 994 
printf("TX Outer UDP checksum: "); 995 if (ports[port_id].dev_conf.txmode.offloads & 996 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 997 printf("on\n"); 998 else 999 printf("off\n"); 1000 } 1001 1002 } 1003 1004 int 1005 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1006 { 1007 uint16_t pid; 1008 1009 if (port_id == (portid_t)RTE_PORT_ALL) 1010 return 0; 1011 1012 RTE_ETH_FOREACH_DEV(pid) 1013 if (port_id == pid) 1014 return 0; 1015 1016 if (warning == ENABLED_WARN) 1017 printf("Invalid port %d\n", port_id); 1018 1019 return 1; 1020 } 1021 1022 void print_valid_ports(void) 1023 { 1024 portid_t pid; 1025 1026 printf("The valid ports array is ["); 1027 RTE_ETH_FOREACH_DEV(pid) { 1028 printf(" %d", pid); 1029 } 1030 printf(" ]\n"); 1031 } 1032 1033 static int 1034 vlan_id_is_invalid(uint16_t vlan_id) 1035 { 1036 if (vlan_id < 4096) 1037 return 0; 1038 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1039 return 1; 1040 } 1041 1042 static int 1043 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1044 { 1045 const struct rte_pci_device *pci_dev; 1046 const struct rte_bus *bus; 1047 uint64_t pci_len; 1048 1049 if (reg_off & 0x3) { 1050 printf("Port register offset 0x%X not aligned on a 4-byte " 1051 "boundary\n", 1052 (unsigned)reg_off); 1053 return 1; 1054 } 1055 1056 if (!ports[port_id].dev_info.device) { 1057 printf("Invalid device\n"); 1058 return 0; 1059 } 1060 1061 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1062 if (bus && !strcmp(bus->name, "pci")) { 1063 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1064 } else { 1065 printf("Not a PCI device\n"); 1066 return 1; 1067 } 1068 1069 pci_len = pci_dev->mem_resource[0].len; 1070 if (reg_off >= pci_len) { 1071 printf("Port %d: register offset %u (0x%X) out of port PCI " 1072 "resource (length=%"PRIu64")\n", 1073 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 1074 return 1; 1075 } 1076 return 0; 1077 } 1078 1079 static int 1080 reg_bit_pos_is_invalid(uint8_t bit_pos) 1081 { 1082 if (bit_pos <= 31) 1083 return 0; 1084 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 1085 return 1; 1086 } 1087 1088 #define display_port_and_reg_off(port_id, reg_off) \ 1089 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1090 1091 static inline void 1092 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1093 { 1094 display_port_and_reg_off(port_id, (unsigned)reg_off); 1095 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1096 } 1097 1098 void 1099 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1100 { 1101 uint32_t reg_v; 1102 1103 1104 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1105 return; 1106 if (port_reg_off_is_invalid(port_id, reg_off)) 1107 return; 1108 if (reg_bit_pos_is_invalid(bit_x)) 1109 return; 1110 reg_v = port_id_pci_reg_read(port_id, reg_off); 1111 display_port_and_reg_off(port_id, (unsigned)reg_off); 1112 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1113 } 1114 1115 void 1116 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1117 uint8_t bit1_pos, uint8_t bit2_pos) 1118 { 1119 uint32_t reg_v; 1120 uint8_t l_bit; 1121 uint8_t h_bit; 1122 1123 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1124 return; 1125 if (port_reg_off_is_invalid(port_id, reg_off)) 1126 return; 1127 if (reg_bit_pos_is_invalid(bit1_pos)) 1128 return; 1129 if (reg_bit_pos_is_invalid(bit2_pos)) 1130 return; 1131 if (bit1_pos > bit2_pos) 1132 l_bit = bit2_pos, h_bit = bit1_pos; 1133 
else 1134 l_bit = bit1_pos, h_bit = bit2_pos; 1135 1136 reg_v = port_id_pci_reg_read(port_id, reg_off); 1137 reg_v >>= l_bit; 1138 if (h_bit < 31) 1139 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1140 display_port_and_reg_off(port_id, (unsigned)reg_off); 1141 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1142 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1143 } 1144 1145 void 1146 port_reg_display(portid_t port_id, uint32_t reg_off) 1147 { 1148 uint32_t reg_v; 1149 1150 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1151 return; 1152 if (port_reg_off_is_invalid(port_id, reg_off)) 1153 return; 1154 reg_v = port_id_pci_reg_read(port_id, reg_off); 1155 display_port_reg_value(port_id, reg_off, reg_v); 1156 } 1157 1158 void 1159 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1160 uint8_t bit_v) 1161 { 1162 uint32_t reg_v; 1163 1164 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1165 return; 1166 if (port_reg_off_is_invalid(port_id, reg_off)) 1167 return; 1168 if (reg_bit_pos_is_invalid(bit_pos)) 1169 return; 1170 if (bit_v > 1) { 1171 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1172 return; 1173 } 1174 reg_v = port_id_pci_reg_read(port_id, reg_off); 1175 if (bit_v == 0) 1176 reg_v &= ~(1 << bit_pos); 1177 else 1178 reg_v |= (1 << bit_pos); 1179 port_id_pci_reg_write(port_id, reg_off, reg_v); 1180 display_port_reg_value(port_id, reg_off, reg_v); 1181 } 1182 1183 void 1184 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1185 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1186 { 1187 uint32_t max_v; 1188 uint32_t reg_v; 1189 uint8_t l_bit; 1190 uint8_t h_bit; 1191 1192 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1193 return; 1194 if (port_reg_off_is_invalid(port_id, reg_off)) 1195 return; 1196 if (reg_bit_pos_is_invalid(bit1_pos)) 1197 return; 1198 if (reg_bit_pos_is_invalid(bit2_pos)) 1199 return; 1200 if (bit1_pos > bit2_pos) 1201 l_bit = bit2_pos, h_bit = bit1_pos; 1202 else 1203 l_bit = bit1_pos, h_bit = bit2_pos; 1204 1205 if ((h_bit - l_bit) < 31) 1206 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1207 else 1208 max_v = 0xFFFFFFFF; 1209 1210 if (value > max_v) { 1211 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1212 (unsigned)value, (unsigned)value, 1213 (unsigned)max_v, (unsigned)max_v); 1214 return; 1215 } 1216 reg_v = port_id_pci_reg_read(port_id, reg_off); 1217 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1218 reg_v |= (value << l_bit); /* Set changed bits */ 1219 port_id_pci_reg_write(port_id, reg_off, reg_v); 1220 display_port_reg_value(port_id, reg_off, reg_v); 1221 } 1222 1223 void 1224 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1225 { 1226 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1227 return; 1228 if (port_reg_off_is_invalid(port_id, reg_off)) 1229 return; 1230 port_id_pci_reg_write(port_id, reg_off, reg_v); 1231 display_port_reg_value(port_id, reg_off, reg_v); 1232 } 1233 1234 void 1235 port_mtu_set(portid_t port_id, uint16_t mtu) 1236 { 1237 int diag; 1238 struct rte_port *rte_port = &ports[port_id]; 1239 struct rte_eth_dev_info dev_info; 1240 uint16_t eth_overhead; 1241 int ret; 1242 1243 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1244 return; 1245 1246 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1247 if (ret != 0) 1248 return; 1249 1250 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1251 printf("Set MTU failed. 
MTU:%u is not in valid range, min:%u - max:%u\n",
               mtu, dev_info.min_mtu, dev_info.max_mtu);
        return;
    }
    diag = rte_eth_dev_set_mtu(port_id, mtu);
    if (diag != 0) {
        printf("Set MTU failed. diag=%d\n", diag);
        return;
    }
    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
        /*
         * Ether overhead in driver is equal to the difference of
         * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
         * device supports jumbo frame.
         */
        eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
        if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
            rte_port->dev_conf.rxmode.offloads |=
                    DEV_RX_OFFLOAD_JUMBO_FRAME;
            rte_port->dev_conf.rxmode.max_rx_pkt_len =
                    mtu + eth_overhead;
        } else
            rte_port->dev_conf.rxmode.offloads &=
                    ~DEV_RX_OFFLOAD_JUMBO_FRAME;
    }
}

/* Generic flow management functions. */

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
              const struct rte_flow_item *pattern,
              const struct rte_flow_action *actions,
              struct rte_flow_error *error)
{
    const struct rte_flow_conv_rule rule = {
        .attr_ro = attr,
        .pattern_ro = pattern,
        .actions_ro = actions,
    };
    struct port_flow *pf;
    int ret;

    ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
    if (ret < 0)
        return NULL;
    pf = calloc(1, offsetof(struct port_flow, rule) + ret);
    if (!pf) {
        rte_flow_error_set
            (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
             "calloc() failed");
        return NULL;
    }
    if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
                      error) >= 0)
        return pf;
    free(pf);
    return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
    static const char *const errstrlist[] = {
        [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
        [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
        [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
        [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
        [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
        [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
        [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
        [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
        [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
        [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
        [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
        [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
        [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
        [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
        [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
        [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
        [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
    };
    const char *errstr;
    char buf[32];
    int err = rte_errno;

    if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
        !errstrlist[error->type])
        errstr = "unknown type";
    else
        errstr = errstrlist[error->type];
    printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
           error->type, errstr,
           error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
                                    error->cause), buf) : "",
           error->message ?
error->message : "(no stated reason)", 1349 rte_strerror(err)); 1350 return -err; 1351 } 1352 1353 /** Validate flow rule. */ 1354 int 1355 port_flow_validate(portid_t port_id, 1356 const struct rte_flow_attr *attr, 1357 const struct rte_flow_item *pattern, 1358 const struct rte_flow_action *actions) 1359 { 1360 struct rte_flow_error error; 1361 1362 /* Poisoning to make sure PMDs update it in case of error. */ 1363 memset(&error, 0x11, sizeof(error)); 1364 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1365 return port_flow_complain(&error); 1366 printf("Flow rule validated\n"); 1367 return 0; 1368 } 1369 1370 /** Create flow rule. */ 1371 int 1372 port_flow_create(portid_t port_id, 1373 const struct rte_flow_attr *attr, 1374 const struct rte_flow_item *pattern, 1375 const struct rte_flow_action *actions) 1376 { 1377 struct rte_flow *flow; 1378 struct rte_port *port; 1379 struct port_flow *pf; 1380 uint32_t id; 1381 struct rte_flow_error error; 1382 1383 /* Poisoning to make sure PMDs update it in case of error. */ 1384 memset(&error, 0x22, sizeof(error)); 1385 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1386 if (!flow) 1387 return port_flow_complain(&error); 1388 port = &ports[port_id]; 1389 if (port->flow_list) { 1390 if (port->flow_list->id == UINT32_MAX) { 1391 printf("Highest rule ID is already assigned, delete" 1392 " it first"); 1393 rte_flow_destroy(port_id, flow, NULL); 1394 return -ENOMEM; 1395 } 1396 id = port->flow_list->id + 1; 1397 } else 1398 id = 0; 1399 pf = port_flow_new(attr, pattern, actions, &error); 1400 if (!pf) { 1401 rte_flow_destroy(port_id, flow, NULL); 1402 return port_flow_complain(&error); 1403 } 1404 pf->next = port->flow_list; 1405 pf->id = id; 1406 pf->flow = flow; 1407 port->flow_list = pf; 1408 printf("Flow rule #%u created\n", pf->id); 1409 return 0; 1410 } 1411 1412 /** Destroy a number of flow rules. */ 1413 int 1414 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1415 { 1416 struct rte_port *port; 1417 struct port_flow **tmp; 1418 uint32_t c = 0; 1419 int ret = 0; 1420 1421 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1422 port_id == (portid_t)RTE_PORT_ALL) 1423 return -EINVAL; 1424 port = &ports[port_id]; 1425 tmp = &port->flow_list; 1426 while (*tmp) { 1427 uint32_t i; 1428 1429 for (i = 0; i != n; ++i) { 1430 struct rte_flow_error error; 1431 struct port_flow *pf = *tmp; 1432 1433 if (rule[i] != pf->id) 1434 continue; 1435 /* 1436 * Poisoning to make sure PMDs update it in case 1437 * of error. 1438 */ 1439 memset(&error, 0x33, sizeof(error)); 1440 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1441 ret = port_flow_complain(&error); 1442 continue; 1443 } 1444 printf("Flow rule #%u destroyed\n", pf->id); 1445 *tmp = pf->next; 1446 free(pf); 1447 break; 1448 } 1449 if (i == n) 1450 tmp = &(*tmp)->next; 1451 ++c; 1452 } 1453 return ret; 1454 } 1455 1456 /** Remove all flow rules. */ 1457 int 1458 port_flow_flush(portid_t port_id) 1459 { 1460 struct rte_flow_error error; 1461 struct rte_port *port; 1462 int ret = 0; 1463 1464 /* Poisoning to make sure PMDs update it in case of error. 
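 * The distinctive 0x44 fill pattern makes it easy to spot in a debugger
 * when a PMD returned an error without writing to rte_flow_error.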
*/ 1465 memset(&error, 0x44, sizeof(error)); 1466 if (rte_flow_flush(port_id, &error)) { 1467 ret = port_flow_complain(&error); 1468 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1469 port_id == (portid_t)RTE_PORT_ALL) 1470 return ret; 1471 } 1472 port = &ports[port_id]; 1473 while (port->flow_list) { 1474 struct port_flow *pf = port->flow_list->next; 1475 1476 free(port->flow_list); 1477 port->flow_list = pf; 1478 } 1479 return ret; 1480 } 1481 1482 /** Dump all flow rules. */ 1483 int 1484 port_flow_dump(portid_t port_id, const char *file_name) 1485 { 1486 int ret = 0; 1487 FILE *file = stdout; 1488 struct rte_flow_error error; 1489 1490 if (file_name && strlen(file_name)) { 1491 file = fopen(file_name, "w"); 1492 if (!file) { 1493 printf("Failed to create file %s: %s\n", file_name, 1494 strerror(errno)); 1495 return -errno; 1496 } 1497 } 1498 ret = rte_flow_dev_dump(port_id, file, &error); 1499 if (ret) { 1500 port_flow_complain(&error); 1501 printf("Failed to dump flow: %s\n", strerror(-ret)); 1502 } else 1503 printf("Flow dump finished\n"); 1504 if (file_name && strlen(file_name)) 1505 fclose(file); 1506 return ret; 1507 } 1508 1509 /** Query a flow rule. */ 1510 int 1511 port_flow_query(portid_t port_id, uint32_t rule, 1512 const struct rte_flow_action *action) 1513 { 1514 struct rte_flow_error error; 1515 struct rte_port *port; 1516 struct port_flow *pf; 1517 const char *name; 1518 union { 1519 struct rte_flow_query_count count; 1520 } query; 1521 int ret; 1522 1523 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1524 port_id == (portid_t)RTE_PORT_ALL) 1525 return -EINVAL; 1526 port = &ports[port_id]; 1527 for (pf = port->flow_list; pf; pf = pf->next) 1528 if (pf->id == rule) 1529 break; 1530 if (!pf) { 1531 printf("Flow rule #%u not found\n", rule); 1532 return -ENOENT; 1533 } 1534 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1535 &name, sizeof(name), 1536 (void *)(uintptr_t)action->type, &error); 1537 if (ret < 0) 1538 return port_flow_complain(&error); 1539 switch (action->type) { 1540 case RTE_FLOW_ACTION_TYPE_COUNT: 1541 break; 1542 default: 1543 printf("Cannot query action type %d (%s)\n", 1544 action->type, name); 1545 return -ENOTSUP; 1546 } 1547 /* Poisoning to make sure PMDs update it in case of error. */ 1548 memset(&error, 0x55, sizeof(error)); 1549 memset(&query, 0, sizeof(query)); 1550 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1551 return port_flow_complain(&error); 1552 switch (action->type) { 1553 case RTE_FLOW_ACTION_TYPE_COUNT: 1554 printf("%s:\n" 1555 " hits_set: %u\n" 1556 " bytes_set: %u\n" 1557 " hits: %" PRIu64 "\n" 1558 " bytes: %" PRIu64 "\n", 1559 name, 1560 query.count.hits_set, 1561 query.count.bytes_set, 1562 query.count.hits, 1563 query.count.bytes); 1564 break; 1565 default: 1566 printf("Cannot display result for action type %d (%s)\n", 1567 action->type, name); 1568 break; 1569 } 1570 return 0; 1571 } 1572 1573 /** List flow rules. */ 1574 void 1575 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1576 { 1577 struct rte_port *port; 1578 struct port_flow *pf; 1579 struct port_flow *list = NULL; 1580 uint32_t i; 1581 1582 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1583 port_id == (portid_t)RTE_PORT_ALL) 1584 return; 1585 port = &ports[port_id]; 1586 if (!port->flow_list) 1587 return; 1588 /* Sort flows by group, priority and ID. 
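 * The sorted order is built as a separate singly linked list chained
 * through the pf->tmp pointers, leaving port->flow_list itself untouched.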
*/ 1589 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1590 struct port_flow **tmp; 1591 const struct rte_flow_attr *curr = pf->rule.attr; 1592 1593 if (n) { 1594 /* Filter out unwanted groups. */ 1595 for (i = 0; i != n; ++i) 1596 if (curr->group == group[i]) 1597 break; 1598 if (i == n) 1599 continue; 1600 } 1601 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 1602 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 1603 1604 if (curr->group > comp->group || 1605 (curr->group == comp->group && 1606 curr->priority > comp->priority) || 1607 (curr->group == comp->group && 1608 curr->priority == comp->priority && 1609 pf->id > (*tmp)->id)) 1610 continue; 1611 break; 1612 } 1613 pf->tmp = *tmp; 1614 *tmp = pf; 1615 } 1616 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1617 for (pf = list; pf != NULL; pf = pf->tmp) { 1618 const struct rte_flow_item *item = pf->rule.pattern; 1619 const struct rte_flow_action *action = pf->rule.actions; 1620 const char *name; 1621 1622 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1623 pf->id, 1624 pf->rule.attr->group, 1625 pf->rule.attr->priority, 1626 pf->rule.attr->ingress ? 'i' : '-', 1627 pf->rule.attr->egress ? 'e' : '-', 1628 pf->rule.attr->transfer ? 't' : '-'); 1629 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1630 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 1631 &name, sizeof(name), 1632 (void *)(uintptr_t)item->type, 1633 NULL) <= 0) 1634 name = "[UNKNOWN]"; 1635 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1636 printf("%s ", name); 1637 ++item; 1638 } 1639 printf("=>"); 1640 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1641 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1642 &name, sizeof(name), 1643 (void *)(uintptr_t)action->type, 1644 NULL) <= 0) 1645 name = "[UNKNOWN]"; 1646 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1647 printf(" %s", name); 1648 ++action; 1649 } 1650 printf("\n"); 1651 } 1652 } 1653 1654 /** Restrict ingress traffic to the defined flow rules. */ 1655 int 1656 port_flow_isolate(portid_t port_id, int set) 1657 { 1658 struct rte_flow_error error; 1659 1660 /* Poisoning to make sure PMDs update it in case of error. */ 1661 memset(&error, 0x66, sizeof(error)); 1662 if (rte_flow_isolate(port_id, set, &error)) 1663 return port_flow_complain(&error); 1664 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1665 port_id, 1666 set ? "now restricted" : "not restricted anymore"); 1667 return 0; 1668 } 1669 1670 /* 1671 * RX/TX ring descriptors display functions. 
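 * Descriptors are read directly out of the ring memzone, so the decoding
 * below is driver-specific: 16-byte descriptors by default, 32-byte ones
 * for i40e unless RTE_LIBRTE_I40E_16BYTE_RX_DESC is defined.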
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
    if (rxq_id < nb_rxq)
        return 0;
    printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
    return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
    if (txq_id < nb_txq)
        return 0;
    printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
    return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
    if (rxdesc_id < nb_rxd)
        return 0;
    printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
           rxdesc_id, nb_rxd);
    return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
    if (txdesc_id < nb_txd)
        return 0;
    printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
           txdesc_id, nb_txd);
    return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
    char mz_name[RTE_MEMZONE_NAMESIZE];
    const struct rte_memzone *mz;

    snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
             port_id, q_id, ring_name);
    mz = rte_memzone_lookup(mz_name);
    if (mz == NULL)
        printf("%s ring memory zone of (port %d, queue %d) not "
               "found (zone name = %s)\n",
               ring_name, port_id, q_id, mz_name);
    return mz;
}

union igb_ring_dword {
    uint64_t dword;
    struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        uint32_t lo;
        uint32_t hi;
#else
        uint32_t hi;
        uint32_t lo;
#endif
    } words;
};

struct igb_ring_desc_32_bytes {
    union igb_ring_dword lo_dword;
    union igb_ring_dword hi_dword;
    union igb_ring_dword resv1;
    union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
    union igb_ring_dword lo_dword;
    union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
    printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
           (unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
                           portid_t port_id,
#else
                           __rte_unused portid_t port_id,
#endif
                           uint16_t desc_id)
{
    struct igb_ring_desc_16_bytes *ring =
        (struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
    int ret;
    struct rte_eth_dev_info dev_info;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);
    if (ret != 0)
        return;

    if (strstr(dev_info.driver_name, "i40e") != NULL) {
        /* 32 bytes RX descriptor, i40e only */
        struct igb_ring_desc_32_bytes *ring =
            (struct igb_ring_desc_32_bytes *)ring_mz->addr;
        ring[desc_id].lo_dword.dword =
            rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
        ring_rxd_display_dword(ring[desc_id].lo_dword);
        ring[desc_id].hi_dword.dword =
            rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
        ring_rxd_display_dword(ring[desc_id].hi_dword);
        ring[desc_id].resv1.dword =
            rte_le_to_cpu_64(ring[desc_id].resv1.dword);
        ring_rxd_display_dword(ring[desc_id].resv1);
        ring[desc_id].resv2.dword =
            rte_le_to_cpu_64(ring[desc_id].resv2.dword);
        ring_rxd_display_dword(ring[desc_id].resv2);

        return;
    }
#endif
    /* 16 bytes RX descriptor */
    ring[desc_id].lo_dword.dword =
rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1801 ring_rxd_display_dword(ring[desc_id].lo_dword); 1802 ring[desc_id].hi_dword.dword = 1803 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1804 ring_rxd_display_dword(ring[desc_id].hi_dword); 1805 } 1806 1807 static void 1808 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1809 { 1810 struct igb_ring_desc_16_bytes *ring; 1811 struct igb_ring_desc_16_bytes txd; 1812 1813 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1814 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1815 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1816 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1817 (unsigned)txd.lo_dword.words.lo, 1818 (unsigned)txd.lo_dword.words.hi, 1819 (unsigned)txd.hi_dword.words.lo, 1820 (unsigned)txd.hi_dword.words.hi); 1821 } 1822 1823 void 1824 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1825 { 1826 const struct rte_memzone *rx_mz; 1827 1828 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1829 return; 1830 if (rx_queue_id_is_invalid(rxq_id)) 1831 return; 1832 if (rx_desc_id_is_invalid(rxd_id)) 1833 return; 1834 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1835 if (rx_mz == NULL) 1836 return; 1837 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1838 } 1839 1840 void 1841 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1842 { 1843 const struct rte_memzone *tx_mz; 1844 1845 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1846 return; 1847 if (tx_queue_id_is_invalid(txq_id)) 1848 return; 1849 if (tx_desc_id_is_invalid(txd_id)) 1850 return; 1851 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1852 if (tx_mz == NULL) 1853 return; 1854 ring_tx_descriptor_display(tx_mz, txd_id); 1855 } 1856 1857 void 1858 fwd_lcores_config_display(void) 1859 { 1860 lcoreid_t lc_id; 1861 1862 printf("List of forwarding lcores:"); 1863 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1864 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1865 printf("\n"); 1866 } 1867 void 1868 rxtx_config_display(void) 1869 { 1870 portid_t pid; 1871 queueid_t qid; 1872 1873 printf(" %s packet forwarding%s packets/burst=%d\n", 1874 cur_fwd_eng->fwd_mode_name, 1875 retry_enabled == 0 ? 
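/* retry mode re-attempts transmission of packets the TX queue did not accept on the first burst */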
"" : " with retry", 1876 nb_pkt_per_burst); 1877 1878 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1879 printf(" packet len=%u - nb packet segments=%d\n", 1880 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1881 1882 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1883 nb_fwd_lcores, nb_fwd_ports); 1884 1885 RTE_ETH_FOREACH_DEV(pid) { 1886 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1887 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1888 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1889 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1890 uint16_t nb_rx_desc_tmp; 1891 uint16_t nb_tx_desc_tmp; 1892 struct rte_eth_rxq_info rx_qinfo; 1893 struct rte_eth_txq_info tx_qinfo; 1894 int32_t rc; 1895 1896 /* per port config */ 1897 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1898 (unsigned int)pid, nb_rxq, nb_txq); 1899 1900 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1901 ports[pid].dev_conf.rxmode.offloads, 1902 ports[pid].dev_conf.txmode.offloads); 1903 1904 /* per rx queue config only for first queue to be less verbose */ 1905 for (qid = 0; qid < 1; qid++) { 1906 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1907 if (rc) 1908 nb_rx_desc_tmp = nb_rx_desc[qid]; 1909 else 1910 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1911 1912 printf(" RX queue: %d\n", qid); 1913 printf(" RX desc=%d - RX free threshold=%d\n", 1914 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1915 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1916 " wthresh=%d\n", 1917 rx_conf[qid].rx_thresh.pthresh, 1918 rx_conf[qid].rx_thresh.hthresh, 1919 rx_conf[qid].rx_thresh.wthresh); 1920 printf(" RX Offloads=0x%"PRIx64"\n", 1921 rx_conf[qid].offloads); 1922 } 1923 1924 /* per tx queue config only for first queue to be less verbose */ 1925 for (qid = 0; qid < 1; qid++) { 1926 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1927 if (rc) 1928 nb_tx_desc_tmp = nb_tx_desc[qid]; 1929 else 1930 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1931 1932 printf(" TX queue: %d\n", qid); 1933 printf(" TX desc=%d - TX free threshold=%d\n", 1934 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1935 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1936 " wthresh=%d\n", 1937 tx_conf[qid].tx_thresh.pthresh, 1938 tx_conf[qid].tx_thresh.hthresh, 1939 tx_conf[qid].tx_thresh.wthresh); 1940 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1941 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1942 } 1943 } 1944 } 1945 1946 void 1947 port_rss_reta_info(portid_t port_id, 1948 struct rte_eth_rss_reta_entry64 *reta_conf, 1949 uint16_t nb_entries) 1950 { 1951 uint16_t i, idx, shift; 1952 int ret; 1953 1954 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1955 return; 1956 1957 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1958 if (ret != 0) { 1959 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1960 return; 1961 } 1962 1963 for (i = 0; i < nb_entries; i++) { 1964 idx = i / RTE_RETA_GROUP_SIZE; 1965 shift = i % RTE_RETA_GROUP_SIZE; 1966 if (!(reta_conf[idx].mask & (1ULL << shift))) 1967 continue; 1968 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1969 i, reta_conf[idx].reta[shift]); 1970 } 1971 } 1972 1973 /* 1974 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1975 * key of the port. 
1976 */ 1977 void 1978 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 1979 { 1980 struct rte_eth_rss_conf rss_conf = {0}; 1981 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1982 uint64_t rss_hf; 1983 uint8_t i; 1984 int diag; 1985 struct rte_eth_dev_info dev_info; 1986 uint8_t hash_key_size; 1987 int ret; 1988 1989 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1990 return; 1991 1992 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1993 if (ret != 0) 1994 return; 1995 1996 if (dev_info.hash_key_size > 0 && 1997 dev_info.hash_key_size <= sizeof(rss_key)) 1998 hash_key_size = dev_info.hash_key_size; 1999 else { 2000 printf("dev_info did not provide a valid hash key size\n"); 2001 return; 2002 } 2003 2004 /* Get RSS hash key if asked to display it */ 2005 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 2006 rss_conf.rss_key_len = hash_key_size; 2007 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2008 if (diag != 0) { 2009 switch (diag) { 2010 case -ENODEV: 2011 printf("port index %d invalid\n", port_id); 2012 break; 2013 case -ENOTSUP: 2014 printf("operation not supported by device\n"); 2015 break; 2016 default: 2017 printf("operation failed - diag=%d\n", diag); 2018 break; 2019 } 2020 return; 2021 } 2022 rss_hf = rss_conf.rss_hf; 2023 if (rss_hf == 0) { 2024 printf("RSS disabled\n"); 2025 return; 2026 } 2027 printf("RSS functions:\n "); 2028 for (i = 0; rss_type_table[i].str; i++) { 2029 if (rss_hf & rss_type_table[i].rss_type) 2030 printf("%s ", rss_type_table[i].str); 2031 } 2032 printf("\n"); 2033 if (!show_rss_key) 2034 return; 2035 printf("RSS key:\n"); 2036 for (i = 0; i < hash_key_size; i++) 2037 printf("%02X", rss_key[i]); 2038 printf("\n"); 2039 } 2040 2041 void 2042 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2043 uint hash_key_len) 2044 { 2045 struct rte_eth_rss_conf rss_conf; 2046 int diag; 2047 unsigned int i; 2048 2049 rss_conf.rss_key = NULL; 2050 rss_conf.rss_key_len = hash_key_len; 2051 rss_conf.rss_hf = 0; 2052 for (i = 0; rss_type_table[i].str; i++) { 2053 if (!strcmp(rss_type_table[i].str, rss_type)) 2054 rss_conf.rss_hf = rss_type_table[i].rss_type; 2055 } 2056 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2057 if (diag == 0) { 2058 rss_conf.rss_key = hash_key; 2059 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2060 } 2061 if (diag == 0) 2062 return; 2063 2064 switch (diag) { 2065 case -ENODEV: 2066 printf("port index %d invalid\n", port_id); 2067 break; 2068 case -ENOTSUP: 2069 printf("operation not supported by device\n"); 2070 break; 2071 default: 2072 printf("operation failed - diag=%d\n", diag); 2073 break; 2074 } 2075 } 2076 2077 /* 2078 * Setup forwarding configuration for each logical core. 
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t nb_extra;
	lcoreid_t nb_fc;
	lcoreid_t nb_lc;
	lcoreid_t lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}

static portid_t
fwd_topology_tx_port_get(portid_t rxp)
{
	static int warning_once = 1;

	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);

	switch (port_topology) {
	default:
	case PORT_TOPOLOGY_PAIRED:
		if ((rxp & 0x1) == 0) {
			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
				return rxp + 1;
			if (warning_once) {
				printf("\nWarning! port-topology=paired"
				       " with an odd number of forwarding"
				       " ports: the last port will pair"
				       " with itself.\n\n");
				warning_once = 0;
			}
			return rxp;
		}
		return rxp - 1;
	case PORT_TOPOLOGY_CHAINED:
		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
	case PORT_TOPOLOGY_LOOP:
		return rxp;
	}
}

static void
simple_fwd_config_setup(void)
{
	portid_t i;

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower than or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		fwd_streams[i]->rx_port = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue = 0;
		fwd_streams[i]->tx_port =
			fwd_ports_ids[fwd_topology_tx_port_get(i)];
		fwd_streams[i]->tx_queue = 0;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;
	}
}
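/*
 * Example: with 4 forwarding ports, fwd_topology_tx_port_get() above maps
 * each RX port to a TX port roughly as follows, depending on
 * --port-topology:
 *   paired : 0->1, 1->0, 2->3, 3->2
 *   chained: 0->1, 1->2, 2->3, 3->0
 *   loop   : 0->0, 1->1, 2->2, 3->3
 */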
/**
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of an RX queue to poll on an RX port for input
 * packets, and of a TX queue of a TX port to which forwarded packets are
 * sent.
 */
static void
rss_fwd_config_setup(void)
{
	portid_t rxp;
	portid_t txp;
	queueid_t rxq;
	queueid_t nb_q;
	streamid_t sm_id;

	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[sm_id];
		txp = fwd_topology_tx_port_get(rxp);
		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		fs->retry_enabled = retry_enabled;
		rxp++;
		if (rxp < nb_fwd_ports)
			continue;
		rxp = 0;
		rxq++;
	}
}
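/*
 * Example: with 2 forwarding ports, 2 RX/TX queues per port and the
 * default paired topology, the loop above enumerates streams port-major,
 * then queue-major:
 *   stream 0: RX P0/Q0 -> TX P1/Q0
 *   stream 1: RX P1/Q0 -> TX P0/Q0
 *   stream 2: RX P0/Q1 -> TX P1/Q1
 *   stream 3: RX P1/Q1 -> TX P0/Q1
 */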
/**
 * For the DCB forwarding test, each core is assigned one traffic class at
 * a time.
 *
 * Each core is assigned a multi-stream, each stream being composed of
 * an RX queue to poll on an RX port for input packets, associated with
 * a TX queue of a TX port where forwarded packets are sent. All the RX
 * and TX queues of a stream map to the same traffic class.
 * If VMDQ and DCB co-exist, the traffic classes of the different pools
 * share the same core.
 */
static void
dcb_fwd_config_setup(void)
{
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;
	lcoreid_t lc_id;
	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;
	uint8_t tc = 0;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();
	sm_id = 0;
	txp = 1;
	/* get the dcb info on the first RX and TX ports */
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		fwd_lcores[lc_id]->stream_nb = 0;
		fwd_lcores[lc_id]->stream_idx = sm_id;
		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* if the nb_queue is zero, it means this tc is
			 * not enabled on the pool
			 */
			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
				break;
			k = fwd_lcores[lc_id]->stream_nb +
				fwd_lcores[lc_id]->stream_idx;
			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
			for (j = 0; j < nb_rx_queue; j++) {
				struct fwd_stream *fs;

				fs = fwd_streams[k + j];
				fs->rx_port = fwd_ports_ids[rxp];
				fs->rx_queue = rxq + j;
				fs->tx_port = fwd_ports_ids[txp];
				fs->tx_queue = txq + j % nb_tx_queue;
				fs->peer_addr = fs->tx_port;
				fs->retry_enabled = retry_enabled;
			}
			fwd_lcores[lc_id]->stream_nb +=
				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
		}
		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

		tc++;
		if (tc < rxp_dcb_info.nb_tcs)
			continue;
		/* Restart from TC 0 on next RX port */
		tc = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp++;
		if (rxp >= nb_fwd_ports)
			return;
		/* get the dcb information on next RX and TX ports */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
	}
}

static void
icmp_echo_config_setup(void)
{
	portid_t rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t sm_id;

	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __FUNCTION__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf(" core=%d: \n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fs->rx_port;
			fs->tx_queue = rxq;
			fs->peer_addr = fs->tx_port;
			fs->retry_enabled = retry_enabled;
			if (verbose_level > 0)
				printf(" stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}

#if defined RTE_LIBRTE_PMD_SOFTNIC
static void
softnic_fwd_config_setup(void)
{
	struct rte_port *port;
	portid_t pid, softnic_portid;
	queueid_t i;
	uint8_t softnic_enable = 0;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		const char *driver = port->dev_info.driver_name;

		if (strcmp(driver, "net_softnic") == 0) {
			softnic_portid = pid;
			softnic_enable = 1;
			break;
		}
	}

	if (softnic_enable == 0) {
		printf("Softnic mode not configured (%s)!\n", __func__);
		return;
	}

	cur_fwd_config.nb_fwd_ports = 1;
	cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;

	/* Re-initialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the softnic forwarding test, the number of forwarding cores
	 * is set to one; the remaining cores are used for softnic packet
	 * processing.
2410 */ 2411 cur_fwd_config.nb_fwd_lcores = 1; 2412 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2413 2414 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { 2415 fwd_streams[i]->rx_port = softnic_portid; 2416 fwd_streams[i]->rx_queue = i; 2417 fwd_streams[i]->tx_port = softnic_portid; 2418 fwd_streams[i]->tx_queue = i; 2419 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2420 fwd_streams[i]->retry_enabled = retry_enabled; 2421 } 2422 } 2423 #endif 2424 2425 void 2426 fwd_config_setup(void) 2427 { 2428 cur_fwd_config.fwd_eng = cur_fwd_eng; 2429 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2430 icmp_echo_config_setup(); 2431 return; 2432 } 2433 2434 #if defined RTE_LIBRTE_PMD_SOFTNIC 2435 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 2436 softnic_fwd_config_setup(); 2437 return; 2438 } 2439 #endif 2440 2441 if ((nb_rxq > 1) && (nb_txq > 1)){ 2442 if (dcb_config) 2443 dcb_fwd_config_setup(); 2444 else 2445 rss_fwd_config_setup(); 2446 } 2447 else 2448 simple_fwd_config_setup(); 2449 } 2450 2451 static const char * 2452 mp_alloc_to_str(uint8_t mode) 2453 { 2454 switch (mode) { 2455 case MP_ALLOC_NATIVE: 2456 return "native"; 2457 case MP_ALLOC_ANON: 2458 return "anon"; 2459 case MP_ALLOC_XMEM: 2460 return "xmem"; 2461 case MP_ALLOC_XMEM_HUGE: 2462 return "xmemhuge"; 2463 case MP_ALLOC_XBUF: 2464 return "xbuf"; 2465 default: 2466 return "invalid"; 2467 } 2468 } 2469 2470 void 2471 pkt_fwd_config_display(struct fwd_config *cfg) 2472 { 2473 struct fwd_stream *fs; 2474 lcoreid_t lc_id; 2475 streamid_t sm_id; 2476 2477 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2478 "NUMA support %s, MP allocation mode: %s\n", 2479 cfg->fwd_eng->fwd_mode_name, 2480 retry_enabled == 0 ? "" : " with retry", 2481 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2482 numa_support == 1 ? "enabled" : "disabled", 2483 mp_alloc_to_str(mp_alloc_type)); 2484 2485 if (retry_enabled) 2486 printf("TX retry num: %u, delay between TX retries: %uus\n", 2487 burst_tx_retry_num, burst_tx_delay_time); 2488 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2489 printf("Logical Core %u (socket %u) forwards packets on " 2490 "%d streams:", 2491 fwd_lcores_cpuids[lc_id], 2492 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2493 fwd_lcores[lc_id]->stream_nb); 2494 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2495 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2496 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2497 "P=%d/Q=%d (socket %u) ", 2498 fs->rx_port, fs->rx_queue, 2499 ports[fs->rx_port].socket_id, 2500 fs->tx_port, fs->tx_queue, 2501 ports[fs->tx_port].socket_id); 2502 print_ethaddr("peer=", 2503 &peer_eth_addrs[fs->peer_addr]); 2504 } 2505 printf("\n"); 2506 } 2507 printf("\n"); 2508 } 2509 2510 void 2511 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2512 { 2513 struct rte_ether_addr new_peer_addr; 2514 if (!rte_eth_dev_is_valid_port(port_id)) { 2515 printf("Error: Invalid port number %i\n", port_id); 2516 return; 2517 } 2518 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 2519 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2520 return; 2521 } 2522 peer_eth_addrs[port_id] = new_peer_addr; 2523 } 2524 2525 int 2526 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2527 { 2528 unsigned int i; 2529 unsigned int lcore_cpuid; 2530 int record_now; 2531 2532 record_now = 0; 2533 again: 2534 for (i = 0; i < nb_lc; i++) { 2535 lcore_cpuid = lcorelist[i]; 2536 if (! 
rte_lcore_is_enabled(lcore_cpuid)) {
			printf("lcore %u not enabled\n", lcore_cpuid);
			return -1;
		}
		if (lcore_cpuid == rte_get_master_lcore()) {
			printf("lcore %u cannot be used for packet "
			       "forwarding: it is the master lcore, which "
			       "is reserved for command line parsing\n",
			       lcore_cpuid);
			return -1;
		}
		if (record_now)
			fwd_lcores_cpuids[i] = lcore_cpuid;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_lcores = (lcoreid_t) nb_lc;
	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
		printf("previous number of forwarding cores %u - changed to "
		       "number of configured cores %u\n",
		       (unsigned int) nb_fwd_lcores, nb_lc);
		nb_fwd_lcores = (lcoreid_t) nb_lc;
	}

	return 0;
}

int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int lcorelist[64];
	unsigned int nb_lc;
	unsigned int i;

	if (lcoremask == 0) {
		printf("Invalid empty mask of cores\n");
		return -1;
	}
	nb_lc = 0;
	for (i = 0; i < 64; i++) {
		if (!((uint64_t)(1ULL << i) & lcoremask))
			continue;
		lcorelist[nb_lc++] = i;
	}
	return set_fwd_lcores_list(lcorelist, nb_lc);
}

void
set_fwd_lcores_number(uint16_t nb_lc)
{
	if (nb_lc > nb_cfg_lcores) {
		printf("nb fwd cores %u > %u (max. number of configured "
		       "lcores) - ignored\n",
		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
		return;
	}
	nb_fwd_lcores = (lcoreid_t) nb_lc;
	printf("Number of forwarding cores set to %u\n",
	       (unsigned int) nb_fwd_lcores);
}

void
set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
{
	unsigned int i;
	portid_t port_id;
	int record_now;

	record_now = 0;
again:
	for (i = 0; i < nb_pt; i++) {
		port_id = (portid_t) portlist[i];
		if (port_id_is_invalid(port_id, ENABLED_WARN))
			return;
		if (record_now)
			fwd_ports_ids[i] = port_id;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_ports = (portid_t) nb_pt;
	if (nb_fwd_ports != (portid_t) nb_pt) {
		printf("previous number of forwarding ports %u - changed to "
		       "number of configured ports %u\n",
		       (unsigned int) nb_fwd_ports, nb_pt);
		nb_fwd_ports = (portid_t) nb_pt;
	}
}
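/*
 * Example: set_fwd_lcores_mask(0x6) expands the mask to the list {1, 2}
 * and hands it to set_fwd_lcores_list(), so "set coremask 0x6" and
 * "set corelist 1,2" should be equivalent on the testpmd command line
 * (approximate CLI syntax).
 */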
/**
 * Parse the user input and obtain the list of forwarding ports.
 *
 * @param[in] list
 *   String containing the user input. Ports can be given as a list
 *   (1,3,5), as a range (1-3), or as a mix of both (1-2,5 or 3,5-6).
 *   For example, to use all four available ports of a system, the input
 *   can be 0-3 or 0,1,2,3; to use only ports 1 and 2, the input is 1,2.
 *   The only valid separators are '-' and ','.
 * @param[out] values
 *   This array is filled with the list of port IDs based on the user
 *   input.
 *   Note that duplicate entries are discarded; only the first "count"
 *   entries of this array are port IDs, the remaining ones keep their
 *   default values.
 * @param[in] maxsize
 *   This parameter denotes two things:
 *   1) the number of elements in the values array
 *   2) the maximum value of each element in the values array
 * @return
 *   On success, returns the total count of parsed port IDs.
 *   On failure, returns 0.
 */
static unsigned int
parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
{
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	int value, i;
	unsigned int marked[maxsize];

	if (list == NULL || values == NULL)
		return 0;

	for (i = 0; i < (int)maxsize; i++)
		marked[i] = 0;

	min = INT_MAX;

	do {
		/* Remove blank spaces, if any */
		while (isblank(*list))
			list++;
		if (*list == '\0')
			break;
		errno = 0;
		value = strtol(list, &end, 10);
		if (errno || end == NULL)
			return 0;
		if (value < 0 || value >= (int)maxsize)
			return 0;
		while (isblank(*end))
			end++;
		if (*end == '-' && min == INT_MAX) {
			min = value;
		} else if ((*end == ',') || (*end == '\0')) {
			max = value;
			if (min == INT_MAX)
				min = value;
			for (i = min; i <= max; i++) {
				if (count < maxsize) {
					if (marked[i])
						continue;
					values[count] = i;
					marked[i] = 1;
					count++;
				}
			}
			min = INT_MAX;
		} else
			return 0;
		list = end + 1;
	} while (*end != '\0');

	return count;
}

void
parse_fwd_portlist(const char *portlist)
{
	unsigned int portcount;
	unsigned int portindex[RTE_MAX_ETHPORTS];
	unsigned int i, valid_port_count = 0;

	portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
	if (!portcount)
		rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");

	/*
	 * Here we verify the validity of the ports
	 * and thereby calculate the total number of
	 * valid ports
	 */
	for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
		if (rte_eth_dev_is_valid_port(portindex[i])) {
			portindex[valid_port_count] = portindex[i];
			valid_port_count++;
		}
	}

	set_fwd_ports_list(portindex, valid_port_count);
}
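#if 0
/*
 * Illustrative sketch (not built): how parse_port_list() expands a mixed
 * range/list string. With list = "0-2,5", values[] receives {0, 1, 2, 5}
 * and the function returns 4; duplicates such as "1,1-2" are recorded
 * only once.
 */
static void
parse_port_list_example(void)
{
	unsigned int values[RTE_MAX_ETHPORTS];
	unsigned int count;

	count = parse_port_list("0-2,5", values, RTE_MAX_ETHPORTS);
	printf("parsed %u port ids\n", count);
}
#endif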
void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int portlist[64];
	unsigned int nb_pt;
	unsigned int i;

	if (portmask == 0) {
		printf("Invalid empty mask of ports\n");
		return;
	}
	nb_pt = 0;
	RTE_ETH_FOREACH_DEV(i) {
		if (!((uint64_t)(1ULL << i) & portmask))
			continue;
		portlist[nb_pt++] = i;
	}
	set_fwd_ports_list(portlist, nb_pt);
}

void
set_fwd_ports_number(uint16_t nb_pt)
{
	if (nb_pt > nb_cfg_ports) {
		printf("nb fwd ports %u > %u (number of configured "
		       "ports) - ignored\n",
		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
		return;
	}
	nb_fwd_ports = (portid_t) nb_pt;
	printf("Number of forwarding ports set to %u\n",
	       (unsigned int) nb_fwd_ports);
}

int
port_is_forwarding(portid_t port_id)
{
	unsigned int i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return -1;

	for (i = 0; i < nb_fwd_ports; i++) {
		if (fwd_ports_ids[i] == port_id)
			return 1;
	}

	return 0;
}

void
set_nb_pkt_per_burst(uint16_t nb)
{
	if (nb > MAX_PKT_BURST) {
		printf("nb pkt per burst: %u > %u (maximum number of "
		       "packets per burst) - ignored\n",
		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
		return;
	}
	nb_pkt_per_burst = nb;
	printf("Number of packets per burst set to %u\n",
	       (unsigned int) nb_pkt_per_burst);
}

static const char *
tx_split_get_name(enum tx_pkt_split split)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (tx_split_name[i].split == split)
			return tx_split_name[i].name;
	}
	return NULL;
}

void
set_tx_pkt_split(const char *name)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (strcmp(tx_split_name[i].name, name) == 0) {
			tx_pkt_split = tx_split_name[i].split;
			return;
		}
	}
	printf("unknown value: \"%s\"\n", name);
}

void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}
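/*
 * Example (approximate testpmd CLI syntax): the TX segment layout shown
 * by show_tx_pkt_segments() is typically configured with:
 *   testpmd> set txpkts 64,64      (two 64-byte segments per packet)
 *   testpmd> set txsplit rand
 *   testpmd> show config txpkts
 */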
void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length does not exceed the mbuf data size.
	 * Check also that the total packet length is greater than or equal
	 * to the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr)
	 * + 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
		       (unsigned) tx_pkt_len,
		       (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}

void
setup_gro(const char *onoff, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("invalid port id %u\n", port_id);
		return;
	}
	if (test_done == 0) {
		printf("Before enabling/disabling GRO,"
		       " please stop forwarding first\n");
		return;
	}
	if (strcmp(onoff, "on") == 0) {
		if (gro_ports[port_id].enable != 0) {
			printf("Port %u has GRO enabled already. Please"
			       " disable GRO first\n", port_id);
			return;
		}
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
			gro_ports[port_id].param.max_flow_num =
				GRO_DEFAULT_FLOW_NUM;
			gro_ports[port_id].param.max_item_per_flow =
				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
		}
		gro_ports[port_id].enable = 1;
	} else {
		if (gro_ports[port_id].enable == 0) {
			printf("Port %u has GRO disabled already\n", port_id);
			return;
		}
		gro_ports[port_id].enable = 0;
	}
}
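/*
 * Example (approximate testpmd CLI syntax): forwarding must be stopped
 * before GRO is reconfigured, e.g.:
 *   testpmd> stop
 *   testpmd> set port 0 gro on
 *   testpmd> set gro flush 2
 *   testpmd> start
 */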
void
setup_gro_flush_cycles(uint8_t cycles)
{
	if (test_done == 0) {
		printf("Before changing the flush interval for GRO,"
		       " please stop forwarding first.\n");
		return;
	}

	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
			GRO_DEFAULT_FLUSH_CYCLES) {
		printf("The flushing cycle must be in the range"
		       " of 1 to %u. Reverting to the default"
		       " value %u.\n",
		       GRO_MAX_FLUSH_CYCLES,
		       GRO_DEFAULT_FLUSH_CYCLES);
		cycles = GRO_DEFAULT_FLUSH_CYCLES;
	}

	gro_flush_cycles = cycles;
}

void
show_gro(portid_t port_id)
{
	struct rte_gro_param *param;
	uint32_t max_pkts_num;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Invalid port id %u.\n", port_id);
		return;
	}

	param = &gro_ports[port_id].param;

	if (gro_ports[port_id].enable) {
		printf("GRO type: TCP/IPv4\n");
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			max_pkts_num = param->max_flow_num *
				param->max_item_per_flow;
		} else
			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
		printf("Max number of packets to perform GRO: %u\n",
		       max_pkts_num);
		printf("Flushing cycles: %u\n", gro_flush_cycles);
	} else
		printf("GRO is not enabled on port %u.\n", port_id);
}

void
setup_gso(const char *mode, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("invalid port id %u\n", port_id);
		return;
	}
	if (strcmp(mode, "on") == 0) {
		if (test_done == 0) {
			printf("before enabling GSO,"
			       " please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 1;
	} else if (strcmp(mode, "off") == 0) {
		if (test_done == 0) {
			printf("before disabling GSO,"
			       " please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 0;
	}
}

char*
list_pkt_forwarding_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

char*
list_pkt_forwarding_retry_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			if (fwd_eng == &rx_only_engine)
				continue;
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
					sizeof(fwd_modes) -
					strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
					sizeof(fwd_modes) -
					strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
	struct fwd_engine *fwd_eng;
	unsigned i;

	i = 0;
	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
			printf("Set %s packet forwarding mode%s\n",
			       fwd_mode_name,
			       retry_enabled == 0 ?
"" : " with retry"); 3048 cur_fwd_eng = fwd_eng; 3049 return; 3050 } 3051 i++; 3052 } 3053 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 3054 } 3055 3056 void 3057 add_rx_dump_callbacks(portid_t portid) 3058 { 3059 struct rte_eth_dev_info dev_info; 3060 uint16_t queue; 3061 int ret; 3062 3063 if (port_id_is_invalid(portid, ENABLED_WARN)) 3064 return; 3065 3066 ret = eth_dev_info_get_print_err(portid, &dev_info); 3067 if (ret != 0) 3068 return; 3069 3070 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 3071 if (!ports[portid].rx_dump_cb[queue]) 3072 ports[portid].rx_dump_cb[queue] = 3073 rte_eth_add_rx_callback(portid, queue, 3074 dump_rx_pkts, NULL); 3075 } 3076 3077 void 3078 add_tx_dump_callbacks(portid_t portid) 3079 { 3080 struct rte_eth_dev_info dev_info; 3081 uint16_t queue; 3082 int ret; 3083 3084 if (port_id_is_invalid(portid, ENABLED_WARN)) 3085 return; 3086 3087 ret = eth_dev_info_get_print_err(portid, &dev_info); 3088 if (ret != 0) 3089 return; 3090 3091 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 3092 if (!ports[portid].tx_dump_cb[queue]) 3093 ports[portid].tx_dump_cb[queue] = 3094 rte_eth_add_tx_callback(portid, queue, 3095 dump_tx_pkts, NULL); 3096 } 3097 3098 void 3099 remove_rx_dump_callbacks(portid_t portid) 3100 { 3101 struct rte_eth_dev_info dev_info; 3102 uint16_t queue; 3103 int ret; 3104 3105 if (port_id_is_invalid(portid, ENABLED_WARN)) 3106 return; 3107 3108 ret = eth_dev_info_get_print_err(portid, &dev_info); 3109 if (ret != 0) 3110 return; 3111 3112 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 3113 if (ports[portid].rx_dump_cb[queue]) { 3114 rte_eth_remove_rx_callback(portid, queue, 3115 ports[portid].rx_dump_cb[queue]); 3116 ports[portid].rx_dump_cb[queue] = NULL; 3117 } 3118 } 3119 3120 void 3121 remove_tx_dump_callbacks(portid_t portid) 3122 { 3123 struct rte_eth_dev_info dev_info; 3124 uint16_t queue; 3125 int ret; 3126 3127 if (port_id_is_invalid(portid, ENABLED_WARN)) 3128 return; 3129 3130 ret = eth_dev_info_get_print_err(portid, &dev_info); 3131 if (ret != 0) 3132 return; 3133 3134 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 3135 if (ports[portid].tx_dump_cb[queue]) { 3136 rte_eth_remove_tx_callback(portid, queue, 3137 ports[portid].tx_dump_cb[queue]); 3138 ports[portid].tx_dump_cb[queue] = NULL; 3139 } 3140 } 3141 3142 void 3143 configure_rxtx_dump_callbacks(uint16_t verbose) 3144 { 3145 portid_t portid; 3146 3147 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3148 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 3149 return; 3150 #endif 3151 3152 RTE_ETH_FOREACH_DEV(portid) 3153 { 3154 if (verbose == 1 || verbose > 2) 3155 add_rx_dump_callbacks(portid); 3156 else 3157 remove_rx_dump_callbacks(portid); 3158 if (verbose >= 2) 3159 add_tx_dump_callbacks(portid); 3160 else 3161 remove_tx_dump_callbacks(portid); 3162 } 3163 } 3164 3165 void 3166 set_verbose_level(uint16_t vb_level) 3167 { 3168 printf("Change verbose level from %u to %u\n", 3169 (unsigned int) verbose_level, (unsigned int) vb_level); 3170 verbose_level = vb_level; 3171 configure_rxtx_dump_callbacks(verbose_level); 3172 } 3173 3174 void 3175 vlan_extend_set(portid_t port_id, int on) 3176 { 3177 int diag; 3178 int vlan_offload; 3179 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 3180 3181 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3182 return; 3183 3184 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3185 3186 if (on) { 3187 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 3188 port_rx_offloads |= 
DEV_RX_OFFLOAD_VLAN_EXTEND;
	} else {
		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	} else {
		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_qinq_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
	} else {
		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("%s(port_id=%d, on=%d) failed "
		       "diag=%d\n", __func__, port_id, on, diag);
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
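/*
 * Note: the helpers above share one pattern - read the current VLAN
 * offload mask with rte_eth_dev_get_vlan_offload(), toggle a single
 * ETH_VLAN_*_OFFLOAD bit, write it back, and mirror the change in the
 * cached per-port rxmode.offloads. Approximate testpmd CLI triggers:
 *   testpmd> vlan set strip on 0
 *   testpmd> vlan set filter on 0
 *   testpmd> vlan set extend off 0
 */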
int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
	if (diag == 0)
		return;

	printf("vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_type, tp_id, diag);
}

void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;

	if (ports[port_id].dev_conf.txmode.offloads &
	    DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Error: QinQ insert is enabled, cannot set a single VLAN.\n");
		return;
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
		printf("Error: vlan insert is not supported by port %d\n",
		       port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;
	if (vlan_id_is_invalid(vlan_id_outer))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
		printf("Error: qinq insert not supported by port %d\n",
		       port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
						    DEV_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = vlan_id;
	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}

void
tx_vlan_reset(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ports[port_id].dev_conf.txmode.offloads &=
				~(DEV_TX_OFFLOAD_VLAN_INSERT |
				  DEV_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = 0;
	ports[port_id].tx_vlan_id_outer = 0;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}

void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ?
(rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3437 return; 3438 3439 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3440 printf("map_value not in required range 0..%d\n", 3441 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3442 return; 3443 } 3444 3445 if (!is_rx) { /*then tx*/ 3446 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3447 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3448 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3449 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3450 existing_mapping_found = 1; 3451 break; 3452 } 3453 } 3454 if (!existing_mapping_found) { /* A new additional mapping... */ 3455 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3456 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3457 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3458 nb_tx_queue_stats_mappings++; 3459 } 3460 } 3461 else { /*rx*/ 3462 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3463 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3464 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3465 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3466 existing_mapping_found = 1; 3467 break; 3468 } 3469 } 3470 if (!existing_mapping_found) { /* A new additional mapping... */ 3471 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3472 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3473 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3474 nb_rx_queue_stats_mappings++; 3475 } 3476 } 3477 } 3478 3479 void 3480 set_xstats_hide_zero(uint8_t on_off) 3481 { 3482 xstats_hide_zero = on_off; 3483 } 3484 3485 static inline void 3486 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3487 { 3488 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3489 3490 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3491 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3492 " tunnel_id: 0x%08x", 3493 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3494 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3495 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3496 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3497 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3498 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3499 3500 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3501 rte_be_to_cpu_16(mask->src_port_mask), 3502 rte_be_to_cpu_16(mask->dst_port_mask)); 3503 3504 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3505 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3506 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3507 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3508 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3509 3510 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3511 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3512 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3513 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3514 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3515 } 3516 3517 printf("\n"); 3518 } 3519 3520 static inline void 3521 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3522 { 3523 struct rte_eth_flex_payload_cfg *cfg; 3524 uint32_t i, j; 3525 3526 for (i = 0; i < flex_conf->nb_payloads; i++) { 3527 cfg = &flex_conf->flex_set[i]; 3528 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3529 printf("\n RAW: "); 3530 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3531 printf("\n L2_PAYLOAD: "); 3532 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3533 printf("\n 
L3_PAYLOAD: "); 3534 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3535 printf("\n L4_PAYLOAD: "); 3536 else 3537 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3538 for (j = 0; j < num; j++) 3539 printf(" %-5u", cfg->src_offset[j]); 3540 } 3541 printf("\n"); 3542 } 3543 3544 static char * 3545 flowtype_to_str(uint16_t flow_type) 3546 { 3547 struct flow_type_info { 3548 char str[32]; 3549 uint16_t ftype; 3550 }; 3551 3552 uint8_t i; 3553 static struct flow_type_info flowtype_str_table[] = { 3554 {"raw", RTE_ETH_FLOW_RAW}, 3555 {"ipv4", RTE_ETH_FLOW_IPV4}, 3556 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3557 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3558 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3559 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3560 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3561 {"ipv6", RTE_ETH_FLOW_IPV6}, 3562 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3563 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3564 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3565 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3566 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3567 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3568 {"port", RTE_ETH_FLOW_PORT}, 3569 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3570 {"geneve", RTE_ETH_FLOW_GENEVE}, 3571 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3572 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3573 }; 3574 3575 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3576 if (flowtype_str_table[i].ftype == flow_type) 3577 return flowtype_str_table[i].str; 3578 } 3579 3580 return NULL; 3581 } 3582 3583 static inline void 3584 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3585 { 3586 struct rte_eth_fdir_flex_mask *mask; 3587 uint32_t i, j; 3588 char *p; 3589 3590 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3591 mask = &flex_conf->flex_mask[i]; 3592 p = flowtype_to_str(mask->flow_type); 3593 printf("\n %s:\t", p ? 
p : "unknown"); 3594 for (j = 0; j < num; j++) 3595 printf(" %02x", mask->mask[j]); 3596 } 3597 printf("\n"); 3598 } 3599 3600 static inline void 3601 print_fdir_flow_type(uint32_t flow_types_mask) 3602 { 3603 int i; 3604 char *p; 3605 3606 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3607 if (!(flow_types_mask & (1 << i))) 3608 continue; 3609 p = flowtype_to_str(i); 3610 if (p) 3611 printf(" %s", p); 3612 else 3613 printf(" unknown"); 3614 } 3615 printf("\n"); 3616 } 3617 3618 void 3619 fdir_get_infos(portid_t port_id) 3620 { 3621 struct rte_eth_fdir_stats fdir_stat; 3622 struct rte_eth_fdir_info fdir_info; 3623 int ret; 3624 3625 static const char *fdir_stats_border = "########################"; 3626 3627 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3628 return; 3629 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3630 if (ret < 0) { 3631 printf("\n FDIR is not supported on port %-2d\n", 3632 port_id); 3633 return; 3634 } 3635 3636 memset(&fdir_info, 0, sizeof(fdir_info)); 3637 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3638 RTE_ETH_FILTER_INFO, &fdir_info); 3639 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3640 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3641 RTE_ETH_FILTER_STATS, &fdir_stat); 3642 printf("\n %s FDIR infos for port %-2d %s\n", 3643 fdir_stats_border, port_id, fdir_stats_border); 3644 printf(" MODE: "); 3645 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3646 printf(" PERFECT\n"); 3647 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3648 printf(" PERFECT-MAC-VLAN\n"); 3649 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3650 printf(" PERFECT-TUNNEL\n"); 3651 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3652 printf(" SIGNATURE\n"); 3653 else 3654 printf(" DISABLE\n"); 3655 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3656 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3657 printf(" SUPPORTED FLOW TYPE: "); 3658 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3659 } 3660 printf(" FLEX PAYLOAD INFO:\n"); 3661 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3662 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3663 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3664 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3665 fdir_info.flex_payload_unit, 3666 fdir_info.max_flex_payload_segment_num, 3667 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3668 printf(" MASK: "); 3669 print_fdir_mask(&fdir_info.mask); 3670 if (fdir_info.flex_conf.nb_payloads > 0) { 3671 printf(" FLEX PAYLOAD SRC OFFSET:"); 3672 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3673 } 3674 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3675 printf(" FLEX MASK CFG:"); 3676 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3677 } 3678 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3679 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3680 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3681 fdir_info.guarant_spc, fdir_info.best_spc); 3682 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3683 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3684 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3685 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3686 fdir_stat.collision, fdir_stat.free, 3687 fdir_stat.maxhash, fdir_stat.maxlen, 3688 fdir_stat.add, fdir_stat.remove, 3689 fdir_stat.f_add, fdir_stat.f_remove); 3690 printf(" %s############################%s\n", 3691 fdir_stats_border, fdir_stats_border); 
}

void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Cannot set flex"
			       " mask for flow_type(%u).", cfg->flow_type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_mask[idx],
			cfg,
			sizeof(struct rte_eth_fdir_flex_mask));
}

void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Cannot set"
			       " flex payload for type(%u).", cfg->type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_set[idx],
			cfg,
			sizeof(struct rte_eth_flex_payload_cfg));
}

void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
#ifdef RTE_LIBRTE_IXGBE_PMD
	int diag;

	if (is_rx)
		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
	else
		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);

	if (diag == 0)
		return;
	printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
	       is_rx ? "rx" : "tx", port_id, diag);
	return;
#endif
	printf("VF %s setting not supported for port %d\n",
	       is_rx ? "Rx" : "Tx", port_id);
	RTE_SET_USED(vf);
	RTE_SET_USED(on);
}

int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return 1;
	if (rate > link.link_speed) {
		printf("Invalid rate value: %u, bigger than link speed: %u\n",
		       rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}

int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag = -ENOTSUP;

	RTE_SET_USED(vf);
	RTE_SET_USED(rate);
	RTE_SET_USED(q_msk);

#ifdef RTE_LIBRTE_IXGBE_PMD
	if (diag == -ENOTSUP)
		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
						       q_msk);
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
	if (diag == -ENOTSUP)
		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
#endif
	if (diag == 0)
		return diag;

	printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}
/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * that a contiguous array of multicast MAC addresses be supplied.
 * To comply with this constraint, the set of multicast addresses recorded
 * into the pool is systematically compacted at the beginning of the pool.
 * Hence, when a multicast address is removed from the pool, all following
 * addresses, if any, are copied back to keep the set contiguous.
 */
#define MCAST_POOL_INC 32

static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct rte_ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
							MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
						    mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}

static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}

static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
		       port_id, port->mc_addr_nb, diag);

	return diag;
}

void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, remove the address from the pool */
		mcast_addr_pool_remove(port, i);
}

void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of filtered multicast MAC addresses for the
	 * address to remove.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, add the address back into the pool. */
		mcast_addr_pool_append(port, mc_addr);
}

void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get DCB info on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n  %s DCB info for port %-2d  %s\n", border, port_id, border);
	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n  TC :        ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n  Priority :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n  BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n  RXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n  RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n  TXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n  TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		printf("%s: Failed to malloc memory\n", __func__);
		return buf;
	}

	ret = read(fd, buf, pkg_size);
	/* Treat a short read of the regular file as an error too. */
	if (ret != pkg_size) {
		close(fd);
		printf("%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}
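/*
 * Illustrative usage sketch, not part of testpmd: round-tripping a buffer
 * through open_file()/save_file()/close_file() above. The file paths are
 * hypothetical placeholders.
 */
static inline int
demo_file_roundtrip(void)
{
	uint32_t size;
	uint8_t *buf = open_file("/tmp/pkg.bin", &size);

	if (buf == NULL)
		return -1;
	if (save_file("/tmp/pkg.copy", buf, size) != 0) {
		close_file(buf);
		return -1;
	}
	return close_file(buf);	/* 0 on success, -1 if buf was NULL */
}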
int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}

void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_LIBRTE_I40E_PMD
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("no queue region has been set on this port\n");

	printf("\n	%s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n	queue_region_number: %-14u\n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n	region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u\n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf("  user_priority_num is	%-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n	flowtype_num is  %-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}

void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	struct rte_ether_addr *addr;
	uint32_t i, num_macs = 0;
	struct rte_eth_dev *dev;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %u\n", num_macs);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}

void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of multicast MAC addresses added: %u\n",
	       port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}
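/*
 * Illustrative sketch, not part of testpmd: the counting loop in
 * show_macs() above, factored into a standalone helper. The name
 * "count_valid_macs" is hypothetical; unused slots in the device MAC
 * table read back as the all-zero address, which is why zero addresses
 * are skipped.
 */
static inline uint32_t
count_valid_macs(const struct rte_ether_addr *tbl, uint32_t nb)
{
	uint32_t i, n = 0;

	for (i = 0; i < nb; i++)
		if (!rte_is_zero_ether_addr(&tbl[i]))
			n++;
	return n;
}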