/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <rte_config.h>

#include "testpmd.h"

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "all", ETH_RSS_IP | ETH_RSS_TCP |
		ETH_RSS_UDP | ETH_RSS_SCTP |
		ETH_RSS_L2_PAYLOAD },
	{ "none", 0 },
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ NULL, 0 },
};
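/*
 * Illustrative sketch (added commentary, not part of the original file):
 * resolving an RSS type name to its hash-function mask is a linear scan of
 * rss_type_table[], the same loop port_rss_hash_key_update() performs
 * further below. The helper name rss_str_to_hf() is hypothetical.
 *
 *	static uint64_t
 *	rss_str_to_hf(const char *str)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; rss_type_table[i].str != NULL; i++)
 *			if (strcmp(rss_type_table[i].str, str) == 0)
 *				return rss_type_table[i].rss_type;
 *		return 0;
 *	}
 */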
static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
		 diff_cycles;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64
		       " RX-bytes: %-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64
		       " TX-bytes: %-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	} else {
		printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i],
			       stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_cycles > 0 ?
		diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mbps_tx = diff_cycles > 0 ?
		diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
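/*
 * Note on the throughput figures above (added commentary): rates are derived
 * from TSC deltas between consecutive "show" invocations, i.e.
 *
 *	pps = delta_pkts  * rte_get_tsc_hz() / delta_cycles
 *	bps = delta_bytes * rte_get_tsc_hz() / delta_cycles * 8
 *
 * so the first call after startup reports 0 because no previous snapshot
 * exists yet (prev_cycles[] is still zero).
 */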
void
nic_stats_clear(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		/* rte_eth_xstats_reset() returns a negative errno value. */
		printf("%s: Error: failed to reset xstats (port %u): %s\n",
		       __func__, port_id, strerror(-ret));
	}
}
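/*
 * Illustrative sketch (added commentary, not part of the original file): the
 * two-pass xstats pattern used by nic_xstats_display() above, reduced to its
 * core. Call once with NULL to size the arrays, then fetch names and values.
 * Allocation-failure and count-mismatch handling is elided here; see the full
 * function above for it.
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(sizeof(*names) * n);
 *	struct rte_eth_xstat *vals = malloc(sizeof(*vals) * n);
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 */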
void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf(" RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf(" TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf(" %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "RX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
	printf("\n");
}
"on" : "off"); 407 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 408 printf("\n"); 409 } 410 411 static int bus_match_all(const struct rte_bus *bus, const void *data) 412 { 413 RTE_SET_USED(bus); 414 RTE_SET_USED(data); 415 return 0; 416 } 417 418 void 419 device_infos_display(const char *identifier) 420 { 421 static const char *info_border = "*********************"; 422 struct rte_bus *start = NULL, *next; 423 struct rte_dev_iterator dev_iter; 424 char name[RTE_ETH_NAME_MAX_LEN]; 425 struct rte_ether_addr mac_addr; 426 struct rte_device *dev; 427 struct rte_devargs da; 428 portid_t port_id; 429 char devstr[128]; 430 431 memset(&da, 0, sizeof(da)); 432 if (!identifier) 433 goto skip_parse; 434 435 if (rte_devargs_parsef(&da, "%s", identifier)) { 436 printf("cannot parse identifier\n"); 437 if (da.args) 438 free(da.args); 439 return; 440 } 441 442 skip_parse: 443 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 444 445 start = next; 446 if (identifier && da.bus != next) 447 continue; 448 449 /* Skip buses that don't have iterate method */ 450 if (!next->dev_iterate) 451 continue; 452 453 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 454 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 455 456 if (!dev->driver) 457 continue; 458 /* Check for matching device if identifier is present */ 459 if (identifier && 460 strncmp(da.name, dev->name, strlen(dev->name))) 461 continue; 462 printf("\n%s Infos for device %s %s\n", 463 info_border, dev->name, info_border); 464 printf("Bus name: %s", dev->bus->name); 465 printf("\nDriver name: %s", dev->driver->name); 466 printf("\nDevargs: %s", 467 dev->devargs ? dev->devargs->args : ""); 468 printf("\nConnect to socket: %d", dev->numa_node); 469 printf("\n"); 470 471 /* List ports with matching device name */ 472 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 473 printf("\n\tPort id: %-2d", port_id); 474 if (eth_macaddr_get_print_err(port_id, 475 &mac_addr) == 0) 476 print_ethaddr("\n\tMAC address: ", 477 &mac_addr); 478 rte_eth_dev_get_name_by_port(port_id, name); 479 printf("\n\tDevice name: %s", name); 480 printf("\n"); 481 } 482 } 483 }; 484 } 485 486 void 487 port_infos_display(portid_t port_id) 488 { 489 struct rte_port *port; 490 struct rte_ether_addr mac_addr; 491 struct rte_eth_link link; 492 struct rte_eth_dev_info dev_info; 493 int vlan_offload; 494 struct rte_mempool * mp; 495 static const char *info_border = "*********************"; 496 uint16_t mtu; 497 char name[RTE_ETH_NAME_MAX_LEN]; 498 int ret; 499 500 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 501 print_valid_ports(); 502 return; 503 } 504 port = &ports[port_id]; 505 ret = eth_link_get_nowait_print_err(port_id, &link); 506 if (ret < 0) 507 return; 508 509 ret = eth_dev_info_get_print_err(port_id, &dev_info); 510 if (ret != 0) 511 return; 512 513 printf("\n%s Infos for port %-2d %s\n", 514 info_border, port_id, info_border); 515 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 516 print_ethaddr("MAC address: ", &mac_addr); 517 rte_eth_dev_get_name_by_port(port_id, name); 518 printf("\nDevice name: %s", name); 519 printf("\nDriver name: %s", dev_info.driver_name); 520 if (dev_info.device->devargs && dev_info.device->devargs->args) 521 printf("\nDevargs: %s", dev_info.device->devargs->args); 522 printf("\nConnect to socket: %u", port->socket_id); 523 524 if (port_numa[port_id] != NUMA_NO_CONFIG) { 525 mp = mbuf_pool_find(port_numa[port_id]); 526 if (mp) 527 printf("\nmemory allocation on the socket: %d", 528 port_numa[port_id]); 529 } else 530 printf("\nmemory 
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	uint16_t mtu;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
		print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);
	if (dev_info.device->devargs && dev_info.device->devargs->args)
		printf("\nDevargs: %s", dev_info.device->devargs->args);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u",
		       port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload: \n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf(" strip on \n");
		else
			printf(" strip off \n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf(" filter on \n");
		else
			printf(" filter off \n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf(" qinq(extend) on \n");
		else
			printf(" qinq(extend) off \n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No RSS offload flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported RSS offload flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT;
		     i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			if (p)
				printf("  %s\n", p);
			else
				printf("  user defined %d\n", i);
		}
	}

	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
	printf("Maximum configurable length of RX packet: %u\n",
	       dev_info.max_rx_pktlen);
	if (dev_info.max_vfs)
		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
	if (dev_info.max_vmdq_pools)
		printf("Maximum number of VMDq pools: %u\n",
		       dev_info.max_vmdq_pools);

	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
	printf("Max segment number per packet: %hu\n",
	       dev_info.tx_desc_lim.nb_seg_max);
	printf("Max segment number per MTU/TSO: %hu\n",
	       dev_info.tx_desc_lim.nb_mtu_seg_max);

	/* Show switch info only if valid switch domain and port id is set */
	if (dev_info.switch_info.domain_id !=
	    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		if (dev_info.switch_info.name)
			printf("Switch name: %s\n", dev_info.switch_info.name);

		printf("Switch domain Id: %u\n",
		       dev_info.switch_info.domain_id);
		printf("Switch Port Id: %u\n",
		       dev_info.switch_info.port_id);
	}
}
void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address",
	       "Name", "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
	       port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
	       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
	       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
	       dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
	       (unsigned int) link.link_speed);
}

void
port_offload_cap_display(portid_t port_id)
{
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Port %d supported offload features: %s\n",
	       info_border, port_id, info_border);

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_QINQ_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
		printf("RX UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
		printf("RX TCP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
		printf("RX SCTP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("RX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
		printf("RX Outer UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
		printf("Large receive offload: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_LRO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
		printf("HW timestamp: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TIMESTAMP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
		printf("Rx Keep CRC: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_KEEP_CRC)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
		printf("RX offload security: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_SECURITY)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
		printf("VLAN insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VLAN_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Double VLANs insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_QINQ_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
		printf("TX IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
		printf("TX UDP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
		printf("TX TCP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
		printf("TX SCTP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("TX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
		printf("TX TCP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
		printf("TX UDP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
		printf("TSO for VXLAN tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
		printf("TSO for GRE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GRE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
		printf("TSO for IPIP tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPIP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
		printf("TSO for GENEVE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
		printf("IP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
		printf("UDP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
		printf("TX Outer UDP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}
}
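/*
 * Illustrative sketch (added commentary, not part of the original file):
 * every block in port_offload_cap_display() above repeats the same
 * "supported capability vs currently enabled" test. A hypothetical helper
 * macro for the Rx side captures the pattern:
 *
 *	#define SHOW_RX_OFFLOAD(port_id, flag, label)			\
 *		do {							\
 *			if (dev_info.rx_offload_capa & (flag))		\
 *				printf(label ": %s\n",			\
 *				       (ports[port_id].dev_conf.rxmode.offloads \
 *					& (flag)) ? "on" : "off");	\
 *		} while (0)
 *
 *	SHOW_RX_OFFLOAD(port_id, DEV_RX_OFFLOAD_VLAN_STRIP, "VLAN stripped");
 */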
int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}

	if (!ports[port_id].dev_info.device) {
		printf("Invalid device\n");
		return 0;
	}

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
	} else {
		printf("Not a PCI device\n");
		return 1;
	}

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}
#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}
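/*
 * Worked example (added commentary) for the bit-field math above: reading
 * bits [4, 7] of a register holding 0x12345678 shifts right by l_bit = 4
 * and masks with (1 << 4) - 1 = 0xF, yielding 0x7; the printed hex width is
 * ((7 - 4) / 4) + 1 = 1 digit.
 */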
void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;
	struct rte_eth_dev_info dev_info;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
		printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
		       mtu, dev_info.min_mtu, dev_info.max_mtu);
		return;
	}
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}
/* Generic flow management functions. */

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("Caught error type %d (%s): %s%s: %s\n",
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)",
	       rte_strerror(err));
	return -err;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}

/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id;
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		return port_flow_complain(&error);
	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first");
			rte_flow_destroy(port_id, flow, NULL);
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	} else
		id = 0;
	pf = port_flow_new(attr, pattern, actions, &error);
	if (!pf) {
		rte_flow_destroy(port_id, flow, NULL);
		return port_flow_complain(&error);
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}
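/*
 * Illustrative sketch (added commentary, not part of the original file): a
 * minimal attr/pattern/actions triple of the kind port_flow_validate() and
 * port_flow_create() expect, matching all ingress Ethernet frames and
 * dropping them. Values are examples only.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	port_flow_create(port_id, &attr, pattern, actions);
 */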
/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		ret = port_flow_complain(&error);
		if (port_id_is_invalid(port_id, DISABLED_WARN) ||
		    port_id == (portid_t)RTE_PORT_ALL)
			return ret;
	}
	port = &ports[port_id];
	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
	} query;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
			    &name, sizeof(name),
			    (void *)(uintptr_t)action->type, &error);
	if (ret < 0)
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		printf("Cannot query action type %d (%s)\n",
		       action->type, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action->type, name);
		break;
	}
	return 0;
}
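/*
 * Usage note (added commentary): port_flow_query() above only handles
 * RTE_FLOW_ACTION_TYPE_COUNT, so a typical call queries the hit counters of
 * a rule that was created with a COUNT action:
 *
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *
 *	port_flow_query(port_id, rule_id, &count);
 */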
/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;
		const struct rte_flow_attr *curr = pf->rule.attr;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (curr->group == group[i])
					break;
			if (i == n)
				continue;
		}
		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
			const struct rte_flow_attr *comp = (*tmp)->rule.attr;

			if (curr->group > comp->group ||
			    (curr->group == comp->group &&
			     curr->priority > comp->priority) ||
			    (curr->group == comp->group &&
			     curr->priority == comp->priority &&
			     pf->id > (*tmp)->id))
				continue;
			break;
		}
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->rule.pattern;
		const struct rte_flow_action *action = pf->rule.actions;
		const char *name;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
		       pf->id,
		       pf->rule.attr->group,
		       pf->rule.attr->priority,
		       pf->rule.attr->ingress ? 'i' : '-',
		       pf->rule.attr->egress ? 'e' : '-',
		       pf->rule.attr->transfer ? 't' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
					  &name, sizeof(name),
					  (void *)(uintptr_t)item->type,
					  NULL) <= 0)
				name = "[UNKNOWN]";
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
					  &name, sizeof(name),
					  (void *)(uintptr_t)action->type,
					  NULL) <= 0)
				name = "[UNKNOWN]";
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", name);
			++action;
		}
		printf("\n");
	}
}

/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
	       port_id,
	       set ? "now restricted" : "not restricted anymore");
	return 0;
}
/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
		 port_id, q_id, ring_name);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	int ret;
	struct rte_eth_dev_info dev_info;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}
"" : " with retry", 1776 nb_pkt_per_burst); 1777 1778 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1779 printf(" packet len=%u - nb packet segments=%d\n", 1780 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1781 1782 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1783 nb_fwd_lcores, nb_fwd_ports); 1784 1785 RTE_ETH_FOREACH_DEV(pid) { 1786 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1787 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1788 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1789 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1790 uint16_t nb_rx_desc_tmp; 1791 uint16_t nb_tx_desc_tmp; 1792 struct rte_eth_rxq_info rx_qinfo; 1793 struct rte_eth_txq_info tx_qinfo; 1794 int32_t rc; 1795 1796 /* per port config */ 1797 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1798 (unsigned int)pid, nb_rxq, nb_txq); 1799 1800 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1801 ports[pid].dev_conf.rxmode.offloads, 1802 ports[pid].dev_conf.txmode.offloads); 1803 1804 /* per rx queue config only for first queue to be less verbose */ 1805 for (qid = 0; qid < 1; qid++) { 1806 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1807 if (rc) 1808 nb_rx_desc_tmp = nb_rx_desc[qid]; 1809 else 1810 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1811 1812 printf(" RX queue: %d\n", qid); 1813 printf(" RX desc=%d - RX free threshold=%d\n", 1814 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1815 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1816 " wthresh=%d\n", 1817 rx_conf[qid].rx_thresh.pthresh, 1818 rx_conf[qid].rx_thresh.hthresh, 1819 rx_conf[qid].rx_thresh.wthresh); 1820 printf(" RX Offloads=0x%"PRIx64"\n", 1821 rx_conf[qid].offloads); 1822 } 1823 1824 /* per tx queue config only for first queue to be less verbose */ 1825 for (qid = 0; qid < 1; qid++) { 1826 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1827 if (rc) 1828 nb_tx_desc_tmp = nb_tx_desc[qid]; 1829 else 1830 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1831 1832 printf(" TX queue: %d\n", qid); 1833 printf(" TX desc=%d - TX free threshold=%d\n", 1834 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1835 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1836 " wthresh=%d\n", 1837 tx_conf[qid].tx_thresh.pthresh, 1838 tx_conf[qid].tx_thresh.hthresh, 1839 tx_conf[qid].tx_thresh.wthresh); 1840 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1841 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1842 } 1843 } 1844 } 1845 1846 void 1847 port_rss_reta_info(portid_t port_id, 1848 struct rte_eth_rss_reta_entry64 *reta_conf, 1849 uint16_t nb_entries) 1850 { 1851 uint16_t i, idx, shift; 1852 int ret; 1853 1854 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1855 return; 1856 1857 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1858 if (ret != 0) { 1859 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1860 return; 1861 } 1862 1863 for (i = 0; i < nb_entries; i++) { 1864 idx = i / RTE_RETA_GROUP_SIZE; 1865 shift = i % RTE_RETA_GROUP_SIZE; 1866 if (!(reta_conf[idx].mask & (1ULL << shift))) 1867 continue; 1868 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1869 i, reta_conf[idx].reta[shift]); 1870 } 1871 } 1872 1873 /* 1874 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1875 * key of the port. 
void
port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
{
	struct rte_eth_rss_conf rss_conf = {0};
	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
	uint64_t rss_hf;
	uint8_t i;
	int diag;
	struct rte_eth_dev_info dev_info;
	uint8_t hash_key_size;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (dev_info.hash_key_size > 0 &&
	    dev_info.hash_key_size <= sizeof(rss_key))
		hash_key_size = dev_info.hash_key_size;
	else {
		printf("dev_info did not provide a valid hash key size\n");
		return;
	}

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	rss_conf.rss_key_len = hash_key_size;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		switch (diag) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		default:
			printf("operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	printf("RSS functions:\n ");
	for (i = 0; rss_type_table[i].str; i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < hash_key_size; i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}

void
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
			 uint hash_key_len)
{
	struct rte_eth_rss_conf rss_conf;
	int diag;
	unsigned int i;

	rss_conf.rss_key = NULL;
	rss_conf.rss_key_len = hash_key_len;
	rss_conf.rss_hf = 0;
	for (i = 0; rss_type_table[i].str; i++) {
		if (!strcmp(rss_type_table[i].str, rss_type))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag == 0) {
		rss_conf.rss_key = hash_key;
		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
	if (diag == 0)
		return;

	switch (diag) {
	case -ENODEV:
		printf("port index %d invalid\n", port_id);
		break;
	case -ENOTSUP:
		printf("operation not supported by device\n");
		break;
	default:
		printf("operation failed - diag=%d\n", diag);
		break;
	}
}
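/*
 * Usage note (added commentary): port_rss_hash_key_update() first reads back
 * the current configuration with rte_eth_dev_rss_hash_conf_get() and then
 * installs the new key via rte_eth_dev_rss_hash_update(). A typical call
 * from the "port config" command path looks like:
 *
 *	uint8_t key[40];
 *
 *	... fill key ...
 *	port_rss_hash_key_update(port_id, "ipv4-tcp", key, sizeof(key));
 */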
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t nb_extra;
	lcoreid_t nb_fc;
	lcoreid_t nb_lc;
	lcoreid_t lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}

static portid_t
fwd_topology_tx_port_get(portid_t rxp)
{
	static int warning_once = 1;

	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);

	switch (port_topology) {
	default:
	case PORT_TOPOLOGY_PAIRED:
		if ((rxp & 0x1) == 0) {
			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
				return rxp + 1;
			if (warning_once) {
				printf("\nWarning! port-topology=paired"
				       " and an odd number of forwarding"
				       " ports; the last port will pair"
				       " with itself.\n\n");
				warning_once = 0;
			}
			return rxp;
		}
		return rxp - 1;
	case PORT_TOPOLOGY_CHAINED:
		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
	case PORT_TOPOLOGY_LOOP:
		return rxp;
	}
}

static void
simple_fwd_config_setup(void)
{
	portid_t i;

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower than or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		fwd_streams[i]->rx_port = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue = 0;
		fwd_streams[i]->tx_port =
			fwd_ports_ids[fwd_topology_tx_port_get(i)];
		fwd_streams[i]->tx_queue = 0;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;
	}
}

/**
 * In the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream consists of an RX queue to poll on an RX port for input
 * packets, associated with a TX queue of a TX port to which forwarded
 * packets are sent.
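 *
 * Streams are laid out ports-first, then queues. A sketch of the same
 * iteration order (illustrative only, with hypothetical counts):
 *
 *	unsigned int sm, rxp = 0, rxq = 0, nb_ports = 2, nb_q = 2;
 *
 *	for (sm = 0; sm < nb_q * nb_ports; sm++) {
 *		printf("stream %u: rx port %u, rx queue %u\n", sm, rxp, rxq);
 *		if (++rxp == nb_ports) {
 *			rxp = 0;
 *			rxq++;
 *		}
 *	}
 *
 * With 2 ports and 2 queues this yields (0,0), (1,0), (0,1), (1,1).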
 */
static void
rss_fwd_config_setup(void)
{
	portid_t rxp;
	portid_t txp;
	queueid_t rxq;
	queueid_t nb_q;
	streamid_t sm_id;

	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[sm_id];
		txp = fwd_topology_tx_port_get(rxp);
		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		fs->retry_enabled = retry_enabled;
		rxp++;
		if (rxp < nb_fwd_ports)
			continue;
		rxp = 0;
		rxq++;
	}
}

/**
 * For the DCB forwarding test, each core is assigned to a traffic class.
 *
 * Each core handles multiple streams, each stream consisting of an RX
 * queue to poll on an RX port for input packets, associated with a TX
 * queue of a TX port to which forwarded packets are sent. All RX and
 * TX queues map to the same traffic class.
 * If VMDQ and DCB co-exist, each traffic class on the different pools
 * shares the same core.
 */
static void
dcb_fwd_config_setup(void)
{
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;
	lcoreid_t lc_id;
	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;
	uint8_t tc = 0;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();
	sm_id = 0;
	txp = 1;
	/* get the dcb info on the first RX and TX ports */
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		fwd_lcores[lc_id]->stream_nb = 0;
		fwd_lcores[lc_id]->stream_idx = sm_id;
		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* if the nb_queue is zero, this tc is not enabled
			 * on the pool
			 */
			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
				break;
			k = fwd_lcores[lc_id]->stream_nb +
				fwd_lcores[lc_id]->stream_idx;
			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
			for (j = 0; j < nb_rx_queue; j++) {
				struct fwd_stream *fs;

				fs = fwd_streams[k + j];
				fs->rx_port = fwd_ports_ids[rxp];
				fs->rx_queue = rxq + j;
				fs->tx_port = fwd_ports_ids[txp];
				fs->tx_queue = txq + j % nb_tx_queue;
				fs->peer_addr = fs->tx_port;
				fs->retry_enabled = retry_enabled;
			}
			fwd_lcores[lc_id]->stream_nb +=
				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
		}
		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

		tc++;
		if (tc < rxp_dcb_info.nb_tcs)
			continue;
		/* Restart from TC 0 on next RX port */
		tc = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp++;
		if (rxp >= nb_fwd_ports)
			return;
		/* get the dcb information on next RX and TX ports */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
	}
}

static void
icmp_echo_config_setup(void)
{
	portid_t rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t sm_id;

	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __func__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf(" core=%d:\n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fs->rx_port;
			fs->tx_queue = rxq;
			fs->peer_addr = fs->tx_port;
			fs->retry_enabled = retry_enabled;
			if (verbose_level > 0)
				printf(" stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}

#if defined RTE_LIBRTE_PMD_SOFTNIC
static void
softnic_fwd_config_setup(void)
{
	struct rte_port *port;
	portid_t pid, softnic_portid;
	queueid_t i;
	uint8_t softnic_enable = 0;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		const char *driver = port->dev_info.driver_name;

		if (strcmp(driver, "net_softnic") == 0) {
			softnic_portid = pid;
			softnic_enable = 1;
			break;
		}
	}

	if (softnic_enable == 0) {
		printf("Softnic mode not configured(%s)!\n", __func__);
		return;
	}

	cur_fwd_config.nb_fwd_ports = 1;
	cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;

	/* Re-initialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the softnic forwarding test, the number of forwarding cores
	 * is set to one; the remaining cores are used for softnic packet
	 * processing.
	 */
	cur_fwd_config.nb_fwd_lcores = 1;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
		fwd_streams[i]->rx_port = softnic_portid;
		fwd_streams[i]->rx_queue = i;
		fwd_streams[i]->tx_port = softnic_portid;
		fwd_streams[i]->tx_queue = i;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;
	}
}
#endif

void
fwd_config_setup(void)
{
	cur_fwd_config.fwd_eng = cur_fwd_eng;
	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
		icmp_echo_config_setup();
		return;
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		softnic_fwd_config_setup();
		return;
	}
#endif

	if ((nb_rxq > 1) && (nb_txq > 1)) {
		if (dcb_config)
			dcb_fwd_config_setup();
		else
			rss_fwd_config_setup();
	} else
		simple_fwd_config_setup();
}

static const char *
mp_alloc_to_str(uint8_t mode)
{
	switch (mode) {
	case MP_ALLOC_NATIVE:
		return "native";
	case MP_ALLOC_ANON:
		return "anon";
	case MP_ALLOC_XMEM:
		return "xmem";
	case MP_ALLOC_XMEM_HUGE:
		return "xmemhuge";
	default:
		return "invalid";
	}
}

void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
		"NUMA support %s, MP allocation mode: %s\n",
		cfg->fwd_eng->fwd_mode_name,
		retry_enabled == 0 ? "" : " with retry",
		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
		numa_support == 1 ? "enabled" : "disabled",
		mp_alloc_to_str(mp_alloc_type));

	if (retry_enabled)
		printf("TX retry num: %u, delay between TX retries: %uus\n",
			burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}

void
set_fwd_eth_peer(portid_t port_id, char *peer_addr)
{
	struct rte_ether_addr new_peer_addr;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}
	if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
		printf("Error: Invalid ethernet address: %s\n", peer_addr);
		return;
	}
	peer_eth_addrs[port_id] = new_peer_addr;
}

int
set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
{
	unsigned int i;
	unsigned int lcore_cpuid;
	int record_now;

	record_now = 0;
again:
	for (i = 0; i < nb_lc; i++) {
		lcore_cpuid = lcorelist[i];
		if (!rte_lcore_is_enabled(lcore_cpuid)) {
			printf("lcore %u not enabled\n", lcore_cpuid);
			return -1;
		}
		if (lcore_cpuid == rte_get_master_lcore()) {
			printf("lcore %u is the master lcore, reserved for "
			       "command line parsing only; it cannot be used "
			       "for packet forwarding\n",
			       lcore_cpuid);
			return -1;
		}
		if (record_now)
			fwd_lcores_cpuids[i] = lcore_cpuid;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_lcores = (lcoreid_t) nb_lc;
	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
		printf("previous number of forwarding cores %u - changed to "
		       "number of configured cores %u\n",
		       (unsigned int) nb_fwd_lcores, nb_lc);
		nb_fwd_lcores = (lcoreid_t) nb_lc;
	}

	return 0;
}

int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int lcorelist[64];
	unsigned int nb_lc;
	unsigned int i;

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");
		return -1;
	}
	nb_lc = 0;
	for (i = 0; i < 64; i++) {
		if (!((uint64_t)(1ULL << i) & lcoremask))
			continue;
		lcorelist[nb_lc++] = i;
	}
	return set_fwd_lcores_list(lcorelist, nb_lc);
}

void
set_fwd_lcores_number(uint16_t nb_lc)
{
	if (nb_lc > nb_cfg_lcores) {
		printf("nb fwd cores %u > %u (max. number of configured "
		       "lcores) - ignored\n",
		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
		return;
	}
	nb_fwd_lcores = (lcoreid_t) nb_lc;
	printf("Number of forwarding cores set to %u\n",
	       (unsigned int) nb_fwd_lcores);
}

void
set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
{
	unsigned int i;
	portid_t port_id;
	int record_now;

	record_now = 0;
again:
	for (i = 0; i < nb_pt; i++) {
		port_id = (portid_t) portlist[i];
		if (port_id_is_invalid(port_id, ENABLED_WARN))
			return;
		if (record_now)
			fwd_ports_ids[i] = port_id;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_ports = (portid_t) nb_pt;
	if (nb_fwd_ports != (portid_t) nb_pt) {
		printf("previous number of forwarding ports %u - changed to "
		       "number of configured ports %u\n",
		       (unsigned int) nb_fwd_ports, nb_pt);
		nb_fwd_ports = (portid_t) nb_pt;
	}
}

void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int portlist[64];
	unsigned int nb_pt;
	unsigned int i;

	if (portmask == 0) {
		printf("Invalid NULL mask of ports\n");
		return;
	}
	nb_pt = 0;
	RTE_ETH_FOREACH_DEV(i) {
		/* a 64-bit mask cannot select ports with an id above 63 */
		if (i >= 64 || !((uint64_t)(1ULL << i) & portmask))
			continue;
		portlist[nb_pt++] = i;
	}
	set_fwd_ports_list(portlist, nb_pt);
}

void
set_fwd_ports_number(uint16_t nb_pt)
{
	if (nb_pt > nb_cfg_ports) {
		printf("nb fwd ports %u > %u (number of configured "
		       "ports) - ignored\n",
		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
		return;
	}
	nb_fwd_ports = (portid_t) nb_pt;
	printf("Number of forwarding ports set to %u\n",
	       (unsigned int) nb_fwd_ports);
}

int
port_is_forwarding(portid_t port_id)
{
	unsigned int i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return -1;

	for (i = 0; i < nb_fwd_ports; i++) {
		if (fwd_ports_ids[i] == port_id)
			return 1;
	}

	return 0;
}

void
set_nb_pkt_per_burst(uint16_t nb)
{
	if (nb > MAX_PKT_BURST) {
		printf("nb pkt per burst: %u > %u (maximum packet per burst)"
		       " - ignored\n",
		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
		return;
	}
	nb_pkt_per_burst = nb;
	printf("Number of packets per burst set to %u\n",
	       (unsigned int) nb_pkt_per_burst);
}

static const char *
tx_split_get_name(enum tx_pkt_split split)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (tx_split_name[i].split == split)
			return tx_split_name[i].name;
	}
	return NULL;
}

void
set_tx_pkt_split(const char *name)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (strcmp(tx_split_name[i].name, name) == 0) {
			tx_pkt_split = tx_split_name[i].split;
			return;
		}
	}
	printf("unknown value: \"%s\"\n", name);
}

void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}

void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length is no larger than the mbuf data
	 * size.
	 * Check also that the total packet length is greater than or equal
	 * to the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr)
	 * + 20 + 8 = 42 bytes).
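	 *
	 * For instance, splitting a 64-byte packet into three segments
	 * passes both checks with any usual mbuf data size (the values are
	 * hypothetical, shown for illustration):
	 *
	 *	unsigned int segs[] = { 42, 12, 10 };
	 *
	 *	set_tx_pkt_segments(segs, 3);
	 *
	 * total 64 >= 42, and each segment fits in one mbuf.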
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
		       (unsigned) tx_pkt_len,
		       (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}

void
setup_gro(const char *onoff, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("invalid port id %u\n", port_id);
		return;
	}
	if (test_done == 0) {
		printf("Before enabling/disabling GRO,"
		       " please stop forwarding first\n");
		return;
	}
	if (strcmp(onoff, "on") == 0) {
		if (gro_ports[port_id].enable != 0) {
			printf("Port %u has already enabled GRO. Please"
			       " disable GRO first\n", port_id);
			return;
		}
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
			gro_ports[port_id].param.max_flow_num =
				GRO_DEFAULT_FLOW_NUM;
			gro_ports[port_id].param.max_item_per_flow =
				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
		}
		gro_ports[port_id].enable = 1;
	} else {
		if (gro_ports[port_id].enable == 0) {
			printf("Port %u has already disabled GRO\n", port_id);
			return;
		}
		gro_ports[port_id].enable = 0;
	}
}

void
setup_gro_flush_cycles(uint8_t cycles)
{
	if (test_done == 0) {
		printf("Before changing the flush interval for GRO,"
		       " please stop forwarding first.\n");
		return;
	}

	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
			GRO_DEFAULT_FLUSH_CYCLES) {
		printf("The flushing cycle must be in the range"
Revert to the default" 2723 " value %u.\n", 2724 GRO_MAX_FLUSH_CYCLES, 2725 GRO_DEFAULT_FLUSH_CYCLES); 2726 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2727 } 2728 2729 gro_flush_cycles = cycles; 2730 } 2731 2732 void 2733 show_gro(portid_t port_id) 2734 { 2735 struct rte_gro_param *param; 2736 uint32_t max_pkts_num; 2737 2738 param = &gro_ports[port_id].param; 2739 2740 if (!rte_eth_dev_is_valid_port(port_id)) { 2741 printf("Invalid port id %u.\n", port_id); 2742 return; 2743 } 2744 if (gro_ports[port_id].enable) { 2745 printf("GRO type: TCP/IPv4\n"); 2746 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2747 max_pkts_num = param->max_flow_num * 2748 param->max_item_per_flow; 2749 } else 2750 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2751 printf("Max number of packets to perform GRO: %u\n", 2752 max_pkts_num); 2753 printf("Flushing cycles: %u\n", gro_flush_cycles); 2754 } else 2755 printf("Port %u doesn't enable GRO.\n", port_id); 2756 } 2757 2758 void 2759 setup_gso(const char *mode, portid_t port_id) 2760 { 2761 if (!rte_eth_dev_is_valid_port(port_id)) { 2762 printf("invalid port id %u\n", port_id); 2763 return; 2764 } 2765 if (strcmp(mode, "on") == 0) { 2766 if (test_done == 0) { 2767 printf("before enabling GSO," 2768 " please stop forwarding first\n"); 2769 return; 2770 } 2771 gso_ports[port_id].enable = 1; 2772 } else if (strcmp(mode, "off") == 0) { 2773 if (test_done == 0) { 2774 printf("before disabling GSO," 2775 " please stop forwarding first\n"); 2776 return; 2777 } 2778 gso_ports[port_id].enable = 0; 2779 } 2780 } 2781 2782 char* 2783 list_pkt_forwarding_modes(void) 2784 { 2785 static char fwd_modes[128] = ""; 2786 const char *separator = "|"; 2787 struct fwd_engine *fwd_eng; 2788 unsigned i = 0; 2789 2790 if (strlen (fwd_modes) == 0) { 2791 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2792 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2793 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2794 strncat(fwd_modes, separator, 2795 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2796 } 2797 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2798 } 2799 2800 return fwd_modes; 2801 } 2802 2803 char* 2804 list_pkt_forwarding_retry_modes(void) 2805 { 2806 static char fwd_modes[128] = ""; 2807 const char *separator = "|"; 2808 struct fwd_engine *fwd_eng; 2809 unsigned i = 0; 2810 2811 if (strlen(fwd_modes) == 0) { 2812 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2813 if (fwd_eng == &rx_only_engine) 2814 continue; 2815 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2816 sizeof(fwd_modes) - 2817 strlen(fwd_modes) - 1); 2818 strncat(fwd_modes, separator, 2819 sizeof(fwd_modes) - 2820 strlen(fwd_modes) - 1); 2821 } 2822 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2823 } 2824 2825 return fwd_modes; 2826 } 2827 2828 void 2829 set_pkt_forwarding_mode(const char *fwd_mode_name) 2830 { 2831 struct fwd_engine *fwd_eng; 2832 unsigned i; 2833 2834 i = 0; 2835 while ((fwd_eng = fwd_engines[i]) != NULL) { 2836 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2837 printf("Set %s packet forwarding mode%s\n", 2838 fwd_mode_name, 2839 retry_enabled == 0 ? 
"" : " with retry"); 2840 cur_fwd_eng = fwd_eng; 2841 return; 2842 } 2843 i++; 2844 } 2845 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2846 } 2847 2848 void 2849 add_rx_dump_callbacks(portid_t portid) 2850 { 2851 struct rte_eth_dev_info dev_info; 2852 uint16_t queue; 2853 int ret; 2854 2855 if (port_id_is_invalid(portid, ENABLED_WARN)) 2856 return; 2857 2858 ret = eth_dev_info_get_print_err(portid, &dev_info); 2859 if (ret != 0) 2860 return; 2861 2862 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2863 if (!ports[portid].rx_dump_cb[queue]) 2864 ports[portid].rx_dump_cb[queue] = 2865 rte_eth_add_rx_callback(portid, queue, 2866 dump_rx_pkts, NULL); 2867 } 2868 2869 void 2870 add_tx_dump_callbacks(portid_t portid) 2871 { 2872 struct rte_eth_dev_info dev_info; 2873 uint16_t queue; 2874 int ret; 2875 2876 if (port_id_is_invalid(portid, ENABLED_WARN)) 2877 return; 2878 2879 ret = eth_dev_info_get_print_err(portid, &dev_info); 2880 if (ret != 0) 2881 return; 2882 2883 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2884 if (!ports[portid].tx_dump_cb[queue]) 2885 ports[portid].tx_dump_cb[queue] = 2886 rte_eth_add_tx_callback(portid, queue, 2887 dump_tx_pkts, NULL); 2888 } 2889 2890 void 2891 remove_rx_dump_callbacks(portid_t portid) 2892 { 2893 struct rte_eth_dev_info dev_info; 2894 uint16_t queue; 2895 int ret; 2896 2897 if (port_id_is_invalid(portid, ENABLED_WARN)) 2898 return; 2899 2900 ret = eth_dev_info_get_print_err(portid, &dev_info); 2901 if (ret != 0) 2902 return; 2903 2904 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2905 if (ports[portid].rx_dump_cb[queue]) { 2906 rte_eth_remove_rx_callback(portid, queue, 2907 ports[portid].rx_dump_cb[queue]); 2908 ports[portid].rx_dump_cb[queue] = NULL; 2909 } 2910 } 2911 2912 void 2913 remove_tx_dump_callbacks(portid_t portid) 2914 { 2915 struct rte_eth_dev_info dev_info; 2916 uint16_t queue; 2917 int ret; 2918 2919 if (port_id_is_invalid(portid, ENABLED_WARN)) 2920 return; 2921 2922 ret = eth_dev_info_get_print_err(portid, &dev_info); 2923 if (ret != 0) 2924 return; 2925 2926 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2927 if (ports[portid].tx_dump_cb[queue]) { 2928 rte_eth_remove_tx_callback(portid, queue, 2929 ports[portid].tx_dump_cb[queue]); 2930 ports[portid].tx_dump_cb[queue] = NULL; 2931 } 2932 } 2933 2934 void 2935 configure_rxtx_dump_callbacks(uint16_t verbose) 2936 { 2937 portid_t portid; 2938 2939 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 2940 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 2941 return; 2942 #endif 2943 2944 RTE_ETH_FOREACH_DEV(portid) 2945 { 2946 if (verbose == 1 || verbose > 2) 2947 add_rx_dump_callbacks(portid); 2948 else 2949 remove_rx_dump_callbacks(portid); 2950 if (verbose >= 2) 2951 add_tx_dump_callbacks(portid); 2952 else 2953 remove_tx_dump_callbacks(portid); 2954 } 2955 } 2956 2957 void 2958 set_verbose_level(uint16_t vb_level) 2959 { 2960 printf("Change verbose level from %u to %u\n", 2961 (unsigned int) verbose_level, (unsigned int) vb_level); 2962 verbose_level = vb_level; 2963 configure_rxtx_dump_callbacks(verbose_level); 2964 } 2965 2966 void 2967 vlan_extend_set(portid_t port_id, int on) 2968 { 2969 int diag; 2970 int vlan_offload; 2971 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2972 2973 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2974 return; 2975 2976 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2977 2978 if (on) { 2979 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2980 port_rx_offloads |= 
	} else {
		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	} else {
		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
	if (diag == 0)
		return;

printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 3105 "diag=%d\n", 3106 port_id, vlan_type, tp_id, diag); 3107 } 3108 3109 void 3110 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3111 { 3112 struct rte_eth_dev_info dev_info; 3113 int ret; 3114 3115 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3116 return; 3117 if (vlan_id_is_invalid(vlan_id)) 3118 return; 3119 3120 if (ports[port_id].dev_conf.txmode.offloads & 3121 DEV_TX_OFFLOAD_QINQ_INSERT) { 3122 printf("Error, as QinQ has been enabled.\n"); 3123 return; 3124 } 3125 3126 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3127 if (ret != 0) 3128 return; 3129 3130 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3131 printf("Error: vlan insert is not supported by port %d\n", 3132 port_id); 3133 return; 3134 } 3135 3136 tx_vlan_reset(port_id); 3137 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3138 ports[port_id].tx_vlan_id = vlan_id; 3139 } 3140 3141 void 3142 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3143 { 3144 struct rte_eth_dev_info dev_info; 3145 int ret; 3146 3147 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3148 return; 3149 if (vlan_id_is_invalid(vlan_id)) 3150 return; 3151 if (vlan_id_is_invalid(vlan_id_outer)) 3152 return; 3153 3154 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3155 if (ret != 0) 3156 return; 3157 3158 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3159 printf("Error: qinq insert not supported by port %d\n", 3160 port_id); 3161 return; 3162 } 3163 3164 tx_vlan_reset(port_id); 3165 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 3166 DEV_TX_OFFLOAD_QINQ_INSERT); 3167 ports[port_id].tx_vlan_id = vlan_id; 3168 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3169 } 3170 3171 void 3172 tx_vlan_reset(portid_t port_id) 3173 { 3174 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3175 return; 3176 ports[port_id].dev_conf.txmode.offloads &= 3177 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3178 DEV_TX_OFFLOAD_QINQ_INSERT); 3179 ports[port_id].tx_vlan_id = 0; 3180 ports[port_id].tx_vlan_id_outer = 0; 3181 } 3182 3183 void 3184 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3185 { 3186 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3187 return; 3188 3189 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3190 } 3191 3192 void 3193 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3194 { 3195 uint16_t i; 3196 uint8_t existing_mapping_found = 0; 3197 3198 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3199 return; 3200 3201 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3202 return; 3203 3204 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3205 printf("map_value not in required range 0..%d\n", 3206 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3207 return; 3208 } 3209 3210 if (!is_rx) { /*then tx*/ 3211 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3212 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3213 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3214 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3215 existing_mapping_found = 1; 3216 break; 3217 } 3218 } 3219 if (!existing_mapping_found) { /* A new additional mapping... 
			 */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	} else { /* RX */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}

void
set_xstats_hide_zero(uint8_t on_off)
{
	xstats_hide_zero = on_off;
}

static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
		       " tunnel_id: 0x%08x",
		       mask->mac_addr_byte_mask, mask->tunnel_type_mask,
		       rte_be_to_cpu_32(mask->tunnel_id_mask));
	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
		       rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
		       rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

		printf("\n src_port: 0x%04x, dst_port: 0x%04x",
		       rte_be_to_cpu_16(mask->src_port_mask),
		       rte_be_to_cpu_16(mask->dst_port_mask));

		printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

		printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
	}

	printf("\n");
}

static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n L4_PAYLOAD: ");
		else
			printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf(" %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}

static char *
flowtype_to_str(uint16_t flow_type)
{
	struct flow_type_info {
		char str[32];
		uint16_t ftype;
	};

	uint8_t i;
	static struct flow_type_info flowtype_str_table[] = {
		{"raw", RTE_ETH_FLOW_RAW},
		{"ipv4", RTE_ETH_FLOW_IPV4},
		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3324 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3325 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3326 {"ipv6", RTE_ETH_FLOW_IPV6}, 3327 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3328 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3329 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3330 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3331 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3332 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3333 {"port", RTE_ETH_FLOW_PORT}, 3334 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3335 {"geneve", RTE_ETH_FLOW_GENEVE}, 3336 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3337 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3338 }; 3339 3340 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3341 if (flowtype_str_table[i].ftype == flow_type) 3342 return flowtype_str_table[i].str; 3343 } 3344 3345 return NULL; 3346 } 3347 3348 static inline void 3349 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3350 { 3351 struct rte_eth_fdir_flex_mask *mask; 3352 uint32_t i, j; 3353 char *p; 3354 3355 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3356 mask = &flex_conf->flex_mask[i]; 3357 p = flowtype_to_str(mask->flow_type); 3358 printf("\n %s:\t", p ? p : "unknown"); 3359 for (j = 0; j < num; j++) 3360 printf(" %02x", mask->mask[j]); 3361 } 3362 printf("\n"); 3363 } 3364 3365 static inline void 3366 print_fdir_flow_type(uint32_t flow_types_mask) 3367 { 3368 int i; 3369 char *p; 3370 3371 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3372 if (!(flow_types_mask & (1 << i))) 3373 continue; 3374 p = flowtype_to_str(i); 3375 if (p) 3376 printf(" %s", p); 3377 else 3378 printf(" unknown"); 3379 } 3380 printf("\n"); 3381 } 3382 3383 void 3384 fdir_get_infos(portid_t port_id) 3385 { 3386 struct rte_eth_fdir_stats fdir_stat; 3387 struct rte_eth_fdir_info fdir_info; 3388 int ret; 3389 3390 static const char *fdir_stats_border = "########################"; 3391 3392 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3393 return; 3394 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3395 if (ret < 0) { 3396 printf("\n FDIR is not supported on port %-2d\n", 3397 port_id); 3398 return; 3399 } 3400 3401 memset(&fdir_info, 0, sizeof(fdir_info)); 3402 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3403 RTE_ETH_FILTER_INFO, &fdir_info); 3404 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3405 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3406 RTE_ETH_FILTER_STATS, &fdir_stat); 3407 printf("\n %s FDIR infos for port %-2d %s\n", 3408 fdir_stats_border, port_id, fdir_stats_border); 3409 printf(" MODE: "); 3410 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3411 printf(" PERFECT\n"); 3412 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3413 printf(" PERFECT-MAC-VLAN\n"); 3414 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3415 printf(" PERFECT-TUNNEL\n"); 3416 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3417 printf(" SIGNATURE\n"); 3418 else 3419 printf(" DISABLE\n"); 3420 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3421 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3422 printf(" SUPPORTED FLOW TYPE: "); 3423 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3424 } 3425 printf(" FLEX PAYLOAD INFO:\n"); 3426 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3427 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3428 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3429 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3430 
	       fdir_info.flex_payload_unit,
	       fdir_info.max_flex_payload_segment_num,
	       fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf(" MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf(" FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf(" FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
	       " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
	       " add: %-10"PRIu64" remove: %"PRIu64"\n"
	       " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf(" %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}

void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Cannot set flex"
			       " mask for flow_type(%u).\n", cfg->flow_type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_mask[idx],
		   cfg,
		   sizeof(struct rte_eth_fdir_flex_mask));
}

void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Cannot set"
			       " flex payload for type(%u).\n", cfg->type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_set[idx],
		   cfg,
		   sizeof(struct rte_eth_flex_payload_cfg));
}

void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
#ifdef RTE_LIBRTE_IXGBE_PMD
	int diag;

	if (is_rx)
		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
	else
		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);

	if (diag == 0)
		return;
	printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
	       is_rx ? "rx" : "tx", port_id, diag);
	return;
#endif
	printf("VF %s setting not supported for port %d\n",
"Rx" : "Tx", port_id); 3539 RTE_SET_USED(vf); 3540 RTE_SET_USED(on); 3541 } 3542 3543 int 3544 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3545 { 3546 int diag; 3547 struct rte_eth_link link; 3548 int ret; 3549 3550 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3551 return 1; 3552 ret = eth_link_get_nowait_print_err(port_id, &link); 3553 if (ret < 0) 3554 return 1; 3555 if (rate > link.link_speed) { 3556 printf("Invalid rate value:%u bigger than link speed: %u\n", 3557 rate, link.link_speed); 3558 return 1; 3559 } 3560 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3561 if (diag == 0) 3562 return diag; 3563 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3564 port_id, diag); 3565 return diag; 3566 } 3567 3568 int 3569 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3570 { 3571 int diag = -ENOTSUP; 3572 3573 RTE_SET_USED(vf); 3574 RTE_SET_USED(rate); 3575 RTE_SET_USED(q_msk); 3576 3577 #ifdef RTE_LIBRTE_IXGBE_PMD 3578 if (diag == -ENOTSUP) 3579 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3580 q_msk); 3581 #endif 3582 #ifdef RTE_LIBRTE_BNXT_PMD 3583 if (diag == -ENOTSUP) 3584 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3585 #endif 3586 if (diag == 0) 3587 return diag; 3588 3589 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3590 port_id, diag); 3591 return diag; 3592 } 3593 3594 /* 3595 * Functions to manage the set of filtered Multicast MAC addresses. 3596 * 3597 * A pool of filtered multicast MAC addresses is associated with each port. 3598 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3599 * The address of the pool and the number of valid multicast MAC addresses 3600 * recorded in the pool are stored in the fields "mc_addr_pool" and 3601 * "mc_addr_nb" of the "rte_port" data structure. 3602 * 3603 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3604 * to be supplied a contiguous array of multicast MAC addresses. 3605 * To comply with this constraint, the set of multicast addresses recorded 3606 * into the pool are systematically compacted at the beginning of the pool. 3607 * Hence, when a multicast address is removed from the pool, all following 3608 * addresses, if any, are copied back to keep the set contiguous. 3609 */ 3610 #define MCAST_POOL_INC 32 3611 3612 static int 3613 mcast_addr_pool_extend(struct rte_port *port) 3614 { 3615 struct rte_ether_addr *mc_pool; 3616 size_t mc_pool_size; 3617 3618 /* 3619 * If a free entry is available at the end of the pool, just 3620 * increment the number of recorded multicast addresses. 3621 */ 3622 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3623 port->mc_addr_nb++; 3624 return 0; 3625 } 3626 3627 /* 3628 * [re]allocate a pool with MCAST_POOL_INC more entries. 3629 * The previous test guarantees that port->mc_addr_nb is a multiple 3630 * of MCAST_POOL_INC. 
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
							MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
						    mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}

static void
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag == 0)
		return;
	printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
	       port_id, port->mc_addr_nb, -diag);
}

void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
	eth_port_multicast_addr_list_set(port_id);
}

void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
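	 * If it is found at index i, mcast_addr_pool_remove() shifts every
	 * following entry one slot down; e.g. removing index 1 from a pool
	 * of 4 addresses moves entries 2 and 3 into slots 1 and 2, keeping
	 * the array contiguous for rte_eth_dev_set_mc_addr_list().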
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	eth_port_multicast_addr_list_set(port_id);
}

void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get dcb infos on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n TC : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		printf("%s: Failed to allocate memory\n", __func__);
		return buf;
	}

	/* a short or failed read leaves the buffer in an unusable state */
	ret = read(fd, buf, pkg_size);
	if (ret != pkg_size) {
		close(fd);
		printf("%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}

int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}

void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_LIBRTE_I40E_PMD
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("no queue region has been set before\n");

	printf("\n %s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u\n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u\n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}
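
/*
 * Usage sketch for the file helpers above (illustrative only; the paths
 * are hypothetical):
 *
 *	uint32_t size;
 *	uint8_t *pkg = open_file("/tmp/profile.pkgo", &size);
 *
 *	if (pkg != NULL) {
 *		save_file("/tmp/profile.bak", pkg, size);
 *		close_file(pkg);
 *	}
 */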