1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <rte_config.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 const struct rss_type_info rss_type_table[] = { 77 { "all", ETH_RSS_IP | ETH_RSS_TCP | 78 ETH_RSS_UDP | ETH_RSS_SCTP | 79 ETH_RSS_L2_PAYLOAD }, 80 { "none", 0 }, 81 { "ipv4", ETH_RSS_IPV4 }, 82 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 83 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 84 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 85 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 86 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 87 { "ipv6", ETH_RSS_IPV6 }, 88 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 89 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 90 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 91 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 92 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 93 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 94 { "ipv6-ex", ETH_RSS_IPV6_EX }, 95 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 96 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 97 { "port", ETH_RSS_PORT }, 98 { "vxlan", ETH_RSS_VXLAN }, 99 { "geneve", ETH_RSS_GENEVE }, 100 { "nvgre", ETH_RSS_NVGRE }, 101 { "ip", ETH_RSS_IP }, 102 { "udp", ETH_RSS_UDP }, 103 { "tcp", ETH_RSS_TCP }, 104 { "sctp", ETH_RSS_SCTP }, 105 { "tunnel", ETH_RSS_TUNNEL }, 106 { NULL, 0 }, 107 }; 108 109 static void 110 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 111 { 112 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 113 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 114 printf("%s%s", name, buf); 115 } 116 117 void 118 nic_stats_display(portid_t port_id) 119 { 120 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 121 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 122 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 123 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 124 uint64_t mpps_rx, mpps_tx; 125 struct rte_eth_stats stats; 126 struct rte_port *port = &ports[port_id]; 127 uint8_t i; 128 129 static const char *nic_stats_border = "########################"; 130 131 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 132 print_valid_ports(); 133 return; 134 } 135 rte_eth_stats_get(port_id, &stats); 136 printf("\n %s NIC statistics for port %-2d %s\n", 137 nic_stats_border, port_id, nic_stats_border); 138 139 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 140 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 141 "%-"PRIu64"\n", 142 stats.ipackets, stats.imissed, stats.ibytes); 143 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 144 printf(" RX-nombuf: %-10"PRIu64"\n", 145 stats.rx_nombuf); 146 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 147 "%-"PRIu64"\n", 148 stats.opackets, stats.oerrors, stats.obytes); 149 } 150 else { 151 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 152 " RX-bytes: %10"PRIu64"\n", 153 stats.ipackets, stats.ierrors, stats.ibytes); 154 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 155 printf(" RX-nombuf: %10"PRIu64"\n", 156 stats.rx_nombuf); 157 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 158 " TX-bytes: %10"PRIu64"\n", 159 stats.opackets, stats.oerrors, stats.obytes); 160 } 161 162 if (port->rx_queue_stats_mapping_enabled) { 163 printf("\n"); 164 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 165 printf(" Stats reg %2d RX-packets: %10"PRIu64 166 " RX-errors: %10"PRIu64 167 " RX-bytes: %10"PRIu64"\n", 168 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 169 } 170 } 171 if (port->tx_queue_stats_mapping_enabled) { 172 printf("\n"); 173 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 174 printf(" Stats reg %2d TX-packets: %10"PRIu64 175 " TX-bytes: %10"PRIu64"\n", 176 i, stats.q_opackets[i], stats.q_obytes[i]); 177 } 178 } 179 180 diff_cycles = prev_cycles[port_id]; 181 prev_cycles[port_id] = rte_rdtsc(); 182 if (diff_cycles > 0) 183 diff_cycles = prev_cycles[port_id] - diff_cycles; 184 185 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 186 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 187 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 188 (stats.opackets - prev_pkts_tx[port_id]) : 0; 189 prev_pkts_rx[port_id] = stats.ipackets; 190 prev_pkts_tx[port_id] = stats.opackets; 191 mpps_rx = diff_cycles > 0 ? 192 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 193 mpps_tx = diff_cycles > 0 ? 194 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 195 printf("\n Throughput (since last show)\n"); 196 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 197 mpps_rx, mpps_tx); 198 199 printf(" %s############################%s\n", 200 nic_stats_border, nic_stats_border); 201 } 202 203 void 204 nic_stats_clear(portid_t port_id) 205 { 206 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 207 print_valid_ports(); 208 return; 209 } 210 rte_eth_stats_reset(port_id); 211 printf("\n NIC statistics for port %d cleared\n", port_id); 212 } 213 214 void 215 nic_xstats_display(portid_t port_id) 216 { 217 struct rte_eth_xstat *xstats; 218 int cnt_xstats, idx_xstat; 219 struct rte_eth_xstat_name *xstats_names; 220 221 printf("###### NIC extended statistics for port %-2d\n", port_id); 222 if (!rte_eth_dev_is_valid_port(port_id)) { 223 printf("Error: Invalid port number %i\n", port_id); 224 return; 225 } 226 227 /* Get count */ 228 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 229 if (cnt_xstats < 0) { 230 printf("Error: Cannot get count of xstats\n"); 231 return; 232 } 233 234 /* Get id-name lookup table */ 235 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 236 if (xstats_names == NULL) { 237 printf("Cannot allocate memory for xstats lookup\n"); 238 return; 239 } 240 if (cnt_xstats != rte_eth_xstats_get_names( 241 port_id, xstats_names, cnt_xstats)) { 242 printf("Error: Cannot get xstats lookup\n"); 243 free(xstats_names); 244 return; 245 } 246 247 /* Get stats themselves */ 248 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 249 if (xstats == NULL) { 250 printf("Cannot allocate memory for xstats\n"); 251 free(xstats_names); 252 return; 253 } 254 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 255 printf("Error: Unable to get xstats\n"); 256 free(xstats_names); 257 free(xstats); 258 return; 259 } 260 261 /* Display xstats */ 262 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 263 if (xstats_hide_zero && !xstats[idx_xstat].value) 264 continue; 265 printf("%s: %"PRIu64"\n", 266 xstats_names[idx_xstat].name, 267 xstats[idx_xstat].value); 268 } 269 free(xstats_names); 270 free(xstats); 271 } 272 273 void 274 nic_xstats_clear(portid_t port_id) 275 { 276 rte_eth_xstats_reset(port_id); 277 } 278 279 void 280 nic_stats_mapping_display(portid_t port_id) 281 { 282 struct rte_port *port = &ports[port_id]; 283 uint16_t i; 284 285 static const char *nic_stats_mapping_border = "########################"; 286 287 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 288 print_valid_ports(); 289 return; 290 } 291 292 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 293 printf("Port id %d - either does not support queue statistic mapping or" 294 " no queue statistic mapping set\n", port_id); 295 return; 296 } 297 298 printf("\n %s NIC statistics mapping for port %-2d %s\n", 299 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 300 301 if (port->rx_queue_stats_mapping_enabled) { 302 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 303 if (rx_queue_stats_mappings[i].port_id == port_id) { 304 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 305 rx_queue_stats_mappings[i].queue_id, 306 rx_queue_stats_mappings[i].stats_counter_id); 307 } 308 } 309 printf("\n"); 310 } 311 312 313 if (port->tx_queue_stats_mapping_enabled) { 314 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 315 if (tx_queue_stats_mappings[i].port_id == port_id) { 316 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 317 tx_queue_stats_mappings[i].queue_id, 318 tx_queue_stats_mappings[i].stats_counter_id); 319 } 320 } 321 } 322 323 printf(" %s####################################%s\n", 324 nic_stats_mapping_border, nic_stats_mapping_border); 325 } 326 327 void 328 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 329 { 330 struct rte_eth_rxq_info qinfo; 331 int32_t rc; 332 static const char *info_border = "*********************"; 333 334 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 335 if (rc != 0) { 336 printf("Failed to retrieve information for port: %u, " 337 "RX queue: %hu\nerror desc: %s(%d)\n", 338 port_id, queue_id, strerror(-rc), rc); 339 return; 340 } 341 342 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 343 info_border, port_id, queue_id, info_border); 344 345 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 346 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 347 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 348 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 349 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 350 printf("\nRX drop packets: %s", 351 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 352 printf("\nRX deferred start: %s", 353 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 354 printf("\nRX scattered packets: %s", 355 (qinfo.scattered_rx != 0) ? "on" : "off"); 356 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 357 printf("\n"); 358 } 359 360 void 361 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 362 { 363 struct rte_eth_txq_info qinfo; 364 int32_t rc; 365 static const char *info_border = "*********************"; 366 367 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 368 if (rc != 0) { 369 printf("Failed to retrieve information for port: %u, " 370 "TX queue: %hu\nerror desc: %s(%d)\n", 371 port_id, queue_id, strerror(-rc), rc); 372 return; 373 } 374 375 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 376 info_border, port_id, queue_id, info_border); 377 378 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 379 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 380 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 381 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 382 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 383 printf("\nTX deferred start: %s", 384 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 385 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 386 printf("\n"); 387 } 388 389 static int bus_match_all(const struct rte_bus *bus, const void *data) 390 { 391 RTE_SET_USED(bus); 392 RTE_SET_USED(data); 393 return 0; 394 } 395 396 void 397 device_infos_display(const char *identifier) 398 { 399 static const char *info_border = "*********************"; 400 struct rte_bus *start = NULL, *next; 401 struct rte_dev_iterator dev_iter; 402 char name[RTE_ETH_NAME_MAX_LEN]; 403 struct rte_ether_addr mac_addr; 404 struct rte_device *dev; 405 struct rte_devargs da; 406 portid_t port_id; 407 char devstr[128]; 408 409 memset(&da, 0, sizeof(da)); 410 if (!identifier) 411 goto skip_parse; 412 413 if (rte_devargs_parsef(&da, "%s", identifier)) { 414 printf("cannot parse identifier\n"); 415 if (da.args) 416 free(da.args); 417 return; 418 } 419 420 skip_parse: 421 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 422 423 start = next; 424 if (identifier && da.bus != next) 425 continue; 426 427 /* Skip buses that don't have iterate method */ 428 if (!next->dev_iterate) 429 continue; 430 431 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 432 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 433 434 if (!dev->driver) 435 continue; 436 /* Check for matching device if identifier is present */ 437 if (identifier && 438 strncmp(da.name, dev->name, strlen(dev->name))) 439 continue; 440 printf("\n%s Infos for device %s %s\n", 441 info_border, dev->name, info_border); 442 printf("Bus name: %s", dev->bus->name); 443 printf("\nDriver name: %s", dev->driver->name); 444 printf("\nDevargs: %s", 445 dev->devargs ? dev->devargs->args : ""); 446 printf("\nConnect to socket: %d", dev->numa_node); 447 printf("\n"); 448 449 /* List ports with matching device name */ 450 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 451 rte_eth_macaddr_get(port_id, &mac_addr); 452 printf("\n\tPort id: %-2d", port_id); 453 print_ethaddr("\n\tMAC address: ", &mac_addr); 454 rte_eth_dev_get_name_by_port(port_id, name); 455 printf("\n\tDevice name: %s", name); 456 printf("\n"); 457 } 458 } 459 }; 460 } 461 462 void 463 port_infos_display(portid_t port_id) 464 { 465 struct rte_port *port; 466 struct rte_ether_addr mac_addr; 467 struct rte_eth_link link; 468 struct rte_eth_dev_info dev_info; 469 int vlan_offload; 470 struct rte_mempool * mp; 471 static const char *info_border = "*********************"; 472 uint16_t mtu; 473 char name[RTE_ETH_NAME_MAX_LEN]; 474 475 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 476 print_valid_ports(); 477 return; 478 } 479 port = &ports[port_id]; 480 rte_eth_link_get_nowait(port_id, &link); 481 memset(&dev_info, 0, sizeof(dev_info)); 482 rte_eth_dev_info_get(port_id, &dev_info); 483 printf("\n%s Infos for port %-2d %s\n", 484 info_border, port_id, info_border); 485 rte_eth_macaddr_get(port_id, &mac_addr); 486 print_ethaddr("MAC address: ", &mac_addr); 487 rte_eth_dev_get_name_by_port(port_id, name); 488 printf("\nDevice name: %s", name); 489 printf("\nDriver name: %s", dev_info.driver_name); 490 if (dev_info.device->devargs && dev_info.device->devargs->args) 491 printf("\nDevargs: %s", dev_info.device->devargs->args); 492 printf("\nConnect to socket: %u", port->socket_id); 493 494 if (port_numa[port_id] != NUMA_NO_CONFIG) { 495 mp = mbuf_pool_find(port_numa[port_id]); 496 if (mp) 497 printf("\nmemory allocation on the socket: %d", 498 port_numa[port_id]); 499 } else 500 printf("\nmemory allocation on the socket: %u",port->socket_id); 501 502 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 503 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 504 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 505 ("full-duplex") : ("half-duplex")); 506 507 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 508 printf("MTU: %u\n", mtu); 509 510 printf("Promiscuous mode: %s\n", 511 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 512 printf("Allmulticast mode: %s\n", 513 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 514 printf("Maximum number of MAC addresses: %u\n", 515 (unsigned int)(port->dev_info.max_mac_addrs)); 516 printf("Maximum number of MAC addresses of hash filtering: %u\n", 517 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 518 519 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 520 if (vlan_offload >= 0){ 521 printf("VLAN offload: \n"); 522 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 523 printf(" strip on \n"); 524 else 525 printf(" strip off \n"); 526 527 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 528 printf(" filter on \n"); 529 else 530 printf(" filter off \n"); 531 532 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 533 printf(" qinq(extend) on \n"); 534 else 535 printf(" qinq(extend) off \n"); 536 } 537 538 if (dev_info.hash_key_size > 0) 539 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 540 if (dev_info.reta_size > 0) 541 printf("Redirection table size: %u\n", dev_info.reta_size); 542 if (!dev_info.flow_type_rss_offloads) 543 printf("No RSS offload flow type is supported.\n"); 544 else { 545 uint16_t i; 546 char *p; 547 548 printf("Supported RSS offload flow types:\n"); 549 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 550 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 551 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 552 continue; 553 p = flowtype_to_str(i); 554 if (p) 555 printf(" %s\n", p); 556 else 557 printf(" user defined %d\n", i); 558 } 559 } 560 561 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 562 printf("Maximum configurable length of RX packet: %u\n", 563 dev_info.max_rx_pktlen); 564 if (dev_info.max_vfs) 565 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 566 if (dev_info.max_vmdq_pools) 567 printf("Maximum number of VMDq pools: %u\n", 568 dev_info.max_vmdq_pools); 569 570 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 571 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 572 printf("Max possible number of RXDs per queue: %hu\n", 573 dev_info.rx_desc_lim.nb_max); 574 printf("Min possible number of RXDs per queue: %hu\n", 575 dev_info.rx_desc_lim.nb_min); 576 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 577 578 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 579 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 580 printf("Max possible number of TXDs per queue: %hu\n", 581 dev_info.tx_desc_lim.nb_max); 582 printf("Min possible number of TXDs per queue: %hu\n", 583 dev_info.tx_desc_lim.nb_min); 584 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 585 printf("Max segment number per packet: %hu\n", 586 dev_info.tx_desc_lim.nb_seg_max); 587 printf("Max segment number per MTU/TSO: %hu\n", 588 dev_info.tx_desc_lim.nb_mtu_seg_max); 589 590 /* Show switch info only if valid switch domain and port id is set */ 591 if (dev_info.switch_info.domain_id != 592 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 593 if (dev_info.switch_info.name) 594 printf("Switch name: %s\n", dev_info.switch_info.name); 595 596 printf("Switch domain Id: %u\n", 597 dev_info.switch_info.domain_id); 598 printf("Switch Port Id: %u\n", 599 dev_info.switch_info.port_id); 600 } 601 } 602 603 void 604 port_summary_header_display(void) 605 { 606 uint16_t port_number; 607 608 port_number = rte_eth_dev_count_avail(); 609 printf("Number of available ports: %i\n", port_number); 610 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 611 "Driver", "Status", "Link"); 612 } 613 614 void 615 port_summary_display(portid_t port_id) 616 { 617 struct rte_ether_addr mac_addr; 618 struct rte_eth_link link; 619 struct rte_eth_dev_info dev_info; 620 char name[RTE_ETH_NAME_MAX_LEN]; 621 622 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 623 print_valid_ports(); 624 return; 625 } 626 627 rte_eth_link_get_nowait(port_id, &link); 628 rte_eth_dev_info_get(port_id, &dev_info); 629 rte_eth_dev_get_name_by_port(port_id, name); 630 rte_eth_macaddr_get(port_id, &mac_addr); 631 632 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n", 633 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 634 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 635 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 636 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 637 (unsigned int) link.link_speed); 638 } 639 640 void 641 port_offload_cap_display(portid_t port_id) 642 { 643 struct rte_eth_dev_info dev_info; 644 static const char *info_border = "************"; 645 646 if (port_id_is_invalid(port_id, ENABLED_WARN)) 647 return; 648 649 rte_eth_dev_info_get(port_id, &dev_info); 650 651 printf("\n%s Port %d supported offload features: %s\n", 652 info_border, port_id, info_border); 653 654 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 655 printf("VLAN stripped: "); 656 if (ports[port_id].dev_conf.rxmode.offloads & 657 DEV_RX_OFFLOAD_VLAN_STRIP) 658 printf("on\n"); 659 else 660 printf("off\n"); 661 } 662 663 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 664 printf("Double VLANs stripped: "); 665 if (ports[port_id].dev_conf.rxmode.offloads & 666 DEV_RX_OFFLOAD_QINQ_STRIP) 667 printf("on\n"); 668 else 669 printf("off\n"); 670 } 671 672 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 673 printf("RX IPv4 checksum: "); 674 if (ports[port_id].dev_conf.rxmode.offloads & 675 DEV_RX_OFFLOAD_IPV4_CKSUM) 676 printf("on\n"); 677 else 678 printf("off\n"); 679 } 680 681 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 682 printf("RX UDP checksum: "); 683 if (ports[port_id].dev_conf.rxmode.offloads & 684 DEV_RX_OFFLOAD_UDP_CKSUM) 685 printf("on\n"); 686 else 687 printf("off\n"); 688 } 689 690 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 691 printf("RX TCP checksum: "); 692 if (ports[port_id].dev_conf.rxmode.offloads & 693 DEV_RX_OFFLOAD_TCP_CKSUM) 694 printf("on\n"); 695 else 696 printf("off\n"); 697 } 698 699 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 700 printf("RX SCTP checksum: "); 701 if (ports[port_id].dev_conf.rxmode.offloads & 702 DEV_RX_OFFLOAD_SCTP_CKSUM) 703 printf("on\n"); 704 else 705 printf("off\n"); 706 } 707 708 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 709 printf("RX Outer IPv4 checksum: "); 710 if (ports[port_id].dev_conf.rxmode.offloads & 711 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 712 printf("on\n"); 713 else 714 printf("off\n"); 715 } 716 717 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 718 printf("RX Outer UDP checksum: "); 719 if (ports[port_id].dev_conf.rxmode.offloads & 720 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 721 printf("on\n"); 722 else 723 printf("off\n"); 724 } 725 726 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 727 printf("Large receive offload: "); 728 if (ports[port_id].dev_conf.rxmode.offloads & 729 DEV_RX_OFFLOAD_TCP_LRO) 730 printf("on\n"); 731 else 732 printf("off\n"); 733 } 734 735 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 736 printf("HW timestamp: "); 737 if (ports[port_id].dev_conf.rxmode.offloads & 738 DEV_RX_OFFLOAD_TIMESTAMP) 739 printf("on\n"); 740 else 741 printf("off\n"); 742 } 743 744 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 745 printf("Rx Keep CRC: "); 746 if (ports[port_id].dev_conf.rxmode.offloads & 747 DEV_RX_OFFLOAD_KEEP_CRC) 748 printf("on\n"); 749 else 750 printf("off\n"); 751 } 752 753 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 754 printf("RX offload security: "); 755 if (ports[port_id].dev_conf.rxmode.offloads & 756 DEV_RX_OFFLOAD_SECURITY) 757 printf("on\n"); 758 else 759 printf("off\n"); 760 } 761 762 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 763 printf("VLAN insert: "); 764 if (ports[port_id].dev_conf.txmode.offloads & 765 DEV_TX_OFFLOAD_VLAN_INSERT) 766 printf("on\n"); 767 else 768 printf("off\n"); 769 } 770 771 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 772 printf("Double VLANs insert: "); 773 if (ports[port_id].dev_conf.txmode.offloads & 774 DEV_TX_OFFLOAD_QINQ_INSERT) 775 printf("on\n"); 776 else 777 printf("off\n"); 778 } 779 780 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 781 printf("TX IPv4 checksum: "); 782 if (ports[port_id].dev_conf.txmode.offloads & 783 DEV_TX_OFFLOAD_IPV4_CKSUM) 784 printf("on\n"); 785 else 786 printf("off\n"); 787 } 788 789 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 790 printf("TX UDP checksum: "); 791 if (ports[port_id].dev_conf.txmode.offloads & 792 DEV_TX_OFFLOAD_UDP_CKSUM) 793 printf("on\n"); 794 else 795 printf("off\n"); 796 } 797 798 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 799 printf("TX TCP checksum: "); 800 if (ports[port_id].dev_conf.txmode.offloads & 801 DEV_TX_OFFLOAD_TCP_CKSUM) 802 printf("on\n"); 803 else 804 printf("off\n"); 805 } 806 807 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 808 printf("TX SCTP checksum: "); 809 if (ports[port_id].dev_conf.txmode.offloads & 810 DEV_TX_OFFLOAD_SCTP_CKSUM) 811 printf("on\n"); 812 else 813 printf("off\n"); 814 } 815 816 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 817 printf("TX Outer IPv4 checksum: "); 818 if (ports[port_id].dev_conf.txmode.offloads & 819 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 820 printf("on\n"); 821 else 822 printf("off\n"); 823 } 824 825 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 826 printf("TX TCP segmentation: "); 827 if (ports[port_id].dev_conf.txmode.offloads & 828 DEV_TX_OFFLOAD_TCP_TSO) 829 printf("on\n"); 830 else 831 printf("off\n"); 832 } 833 834 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 835 printf("TX UDP segmentation: "); 836 if (ports[port_id].dev_conf.txmode.offloads & 837 DEV_TX_OFFLOAD_UDP_TSO) 838 printf("on\n"); 839 else 840 printf("off\n"); 841 } 842 843 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 844 printf("TSO for VXLAN tunnel packet: "); 845 if (ports[port_id].dev_conf.txmode.offloads & 846 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 847 printf("on\n"); 848 else 849 printf("off\n"); 850 } 851 852 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 853 printf("TSO for GRE tunnel packet: "); 854 if (ports[port_id].dev_conf.txmode.offloads & 855 DEV_TX_OFFLOAD_GRE_TNL_TSO) 856 printf("on\n"); 857 else 858 printf("off\n"); 859 } 860 861 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 862 printf("TSO for IPIP tunnel packet: "); 863 if (ports[port_id].dev_conf.txmode.offloads & 864 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 865 printf("on\n"); 866 else 867 printf("off\n"); 868 } 869 870 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 871 printf("TSO for GENEVE tunnel packet: "); 872 if (ports[port_id].dev_conf.txmode.offloads & 873 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 874 printf("on\n"); 875 else 876 printf("off\n"); 877 } 878 879 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 880 printf("IP tunnel TSO: "); 881 if (ports[port_id].dev_conf.txmode.offloads & 882 DEV_TX_OFFLOAD_IP_TNL_TSO) 883 printf("on\n"); 884 else 885 printf("off\n"); 886 } 887 888 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 889 printf("UDP tunnel TSO: "); 890 if (ports[port_id].dev_conf.txmode.offloads & 891 DEV_TX_OFFLOAD_UDP_TNL_TSO) 892 printf("on\n"); 893 else 894 printf("off\n"); 895 } 896 897 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 898 printf("TX Outer UDP checksum: "); 899 if (ports[port_id].dev_conf.txmode.offloads & 900 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 901 printf("on\n"); 902 else 903 printf("off\n"); 904 } 905 906 } 907 908 int 909 port_id_is_invalid(portid_t port_id, enum print_warning warning) 910 { 911 uint16_t pid; 912 913 if (port_id == (portid_t)RTE_PORT_ALL) 914 return 0; 915 916 RTE_ETH_FOREACH_DEV(pid) 917 if (port_id == pid) 918 return 0; 919 920 if (warning == ENABLED_WARN) 921 printf("Invalid port %d\n", port_id); 922 923 return 1; 924 } 925 926 void print_valid_ports(void) 927 { 928 portid_t pid; 929 930 printf("The valid ports array is ["); 931 RTE_ETH_FOREACH_DEV(pid) { 932 printf(" %d", pid); 933 } 934 printf(" ]\n"); 935 } 936 937 static int 938 vlan_id_is_invalid(uint16_t vlan_id) 939 { 940 if (vlan_id < 4096) 941 return 0; 942 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 943 return 1; 944 } 945 946 static int 947 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 948 { 949 const struct rte_pci_device *pci_dev; 950 const struct rte_bus *bus; 951 uint64_t pci_len; 952 953 if (reg_off & 0x3) { 954 printf("Port register offset 0x%X not aligned on a 4-byte " 955 "boundary\n", 956 (unsigned)reg_off); 957 return 1; 958 } 959 960 if (!ports[port_id].dev_info.device) { 961 printf("Invalid device\n"); 962 return 0; 963 } 964 965 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 966 if (bus && !strcmp(bus->name, "pci")) { 967 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 968 } else { 969 printf("Not a PCI device\n"); 970 return 1; 971 } 972 973 pci_len = pci_dev->mem_resource[0].len; 974 if (reg_off >= pci_len) { 975 printf("Port %d: register offset %u (0x%X) out of port PCI " 976 "resource (length=%"PRIu64")\n", 977 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 978 return 1; 979 } 980 return 0; 981 } 982 983 static int 984 reg_bit_pos_is_invalid(uint8_t bit_pos) 985 { 986 if (bit_pos <= 31) 987 return 0; 988 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 989 return 1; 990 } 991 992 #define display_port_and_reg_off(port_id, reg_off) \ 993 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 994 995 static inline void 996 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 997 { 998 display_port_and_reg_off(port_id, (unsigned)reg_off); 999 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1000 } 1001 1002 void 1003 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1004 { 1005 uint32_t reg_v; 1006 1007 1008 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1009 return; 1010 if (port_reg_off_is_invalid(port_id, reg_off)) 1011 return; 1012 if (reg_bit_pos_is_invalid(bit_x)) 1013 return; 1014 reg_v = port_id_pci_reg_read(port_id, reg_off); 1015 display_port_and_reg_off(port_id, (unsigned)reg_off); 1016 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1017 } 1018 1019 void 1020 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1021 uint8_t bit1_pos, uint8_t bit2_pos) 1022 { 1023 uint32_t reg_v; 1024 uint8_t l_bit; 1025 uint8_t h_bit; 1026 1027 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1028 return; 1029 if (port_reg_off_is_invalid(port_id, reg_off)) 1030 return; 1031 if (reg_bit_pos_is_invalid(bit1_pos)) 1032 return; 1033 if (reg_bit_pos_is_invalid(bit2_pos)) 1034 return; 1035 if (bit1_pos > bit2_pos) 1036 l_bit = bit2_pos, h_bit = bit1_pos; 1037 else 1038 l_bit = bit1_pos, h_bit = bit2_pos; 1039 1040 reg_v = port_id_pci_reg_read(port_id, reg_off); 1041 reg_v >>= l_bit; 1042 if (h_bit < 31) 1043 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1044 display_port_and_reg_off(port_id, (unsigned)reg_off); 1045 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1046 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1047 } 1048 1049 void 1050 port_reg_display(portid_t port_id, uint32_t reg_off) 1051 { 1052 uint32_t reg_v; 1053 1054 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1055 return; 1056 if (port_reg_off_is_invalid(port_id, reg_off)) 1057 return; 1058 reg_v = port_id_pci_reg_read(port_id, reg_off); 1059 display_port_reg_value(port_id, reg_off, reg_v); 1060 } 1061 1062 void 1063 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1064 uint8_t bit_v) 1065 { 1066 uint32_t reg_v; 1067 1068 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1069 return; 1070 if (port_reg_off_is_invalid(port_id, reg_off)) 1071 return; 1072 if (reg_bit_pos_is_invalid(bit_pos)) 1073 return; 1074 if (bit_v > 1) { 1075 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1076 return; 1077 } 1078 reg_v = port_id_pci_reg_read(port_id, reg_off); 1079 if (bit_v == 0) 1080 reg_v &= ~(1 << bit_pos); 1081 else 1082 reg_v |= (1 << bit_pos); 1083 port_id_pci_reg_write(port_id, reg_off, reg_v); 1084 display_port_reg_value(port_id, reg_off, reg_v); 1085 } 1086 1087 void 1088 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1089 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1090 { 1091 uint32_t max_v; 1092 uint32_t reg_v; 1093 uint8_t l_bit; 1094 uint8_t h_bit; 1095 1096 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1097 return; 1098 if (port_reg_off_is_invalid(port_id, reg_off)) 1099 return; 1100 if (reg_bit_pos_is_invalid(bit1_pos)) 1101 return; 1102 if (reg_bit_pos_is_invalid(bit2_pos)) 1103 return; 1104 if (bit1_pos > bit2_pos) 1105 l_bit = bit2_pos, h_bit = bit1_pos; 1106 else 1107 l_bit = bit1_pos, h_bit = bit2_pos; 1108 1109 if ((h_bit - l_bit) < 31) 1110 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1111 else 1112 max_v = 0xFFFFFFFF; 1113 1114 if (value > max_v) { 1115 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1116 (unsigned)value, (unsigned)value, 1117 (unsigned)max_v, (unsigned)max_v); 1118 return; 1119 } 1120 reg_v = port_id_pci_reg_read(port_id, reg_off); 1121 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1122 reg_v |= (value << l_bit); /* Set changed bits */ 1123 port_id_pci_reg_write(port_id, reg_off, reg_v); 1124 display_port_reg_value(port_id, reg_off, reg_v); 1125 } 1126 1127 void 1128 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1129 { 1130 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1131 return; 1132 if (port_reg_off_is_invalid(port_id, reg_off)) 1133 return; 1134 port_id_pci_reg_write(port_id, reg_off, reg_v); 1135 display_port_reg_value(port_id, reg_off, reg_v); 1136 } 1137 1138 void 1139 port_mtu_set(portid_t port_id, uint16_t mtu) 1140 { 1141 int diag; 1142 struct rte_eth_dev_info dev_info; 1143 1144 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1145 return; 1146 rte_eth_dev_info_get(port_id, &dev_info); 1147 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1148 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1149 mtu, dev_info.min_mtu, dev_info.max_mtu); 1150 return; 1151 } 1152 diag = rte_eth_dev_set_mtu(port_id, mtu); 1153 if (diag == 0) 1154 return; 1155 printf("Set MTU failed. diag=%d\n", diag); 1156 } 1157 1158 /* Generic flow management functions. */ 1159 1160 /** Generate a port_flow entry from attributes/pattern/actions. */ 1161 static struct port_flow * 1162 port_flow_new(const struct rte_flow_attr *attr, 1163 const struct rte_flow_item *pattern, 1164 const struct rte_flow_action *actions, 1165 struct rte_flow_error *error) 1166 { 1167 const struct rte_flow_conv_rule rule = { 1168 .attr_ro = attr, 1169 .pattern_ro = pattern, 1170 .actions_ro = actions, 1171 }; 1172 struct port_flow *pf; 1173 int ret; 1174 1175 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1176 if (ret < 0) 1177 return NULL; 1178 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1179 if (!pf) { 1180 rte_flow_error_set 1181 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1182 "calloc() failed"); 1183 return NULL; 1184 } 1185 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1186 error) >= 0) 1187 return pf; 1188 free(pf); 1189 return NULL; 1190 } 1191 1192 /** Print a message out of a flow error. */ 1193 static int 1194 port_flow_complain(struct rte_flow_error *error) 1195 { 1196 static const char *const errstrlist[] = { 1197 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1198 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1199 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1200 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1201 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1202 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1203 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1204 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1205 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1206 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1207 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1208 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1209 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1210 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1211 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1212 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1213 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1214 }; 1215 const char *errstr; 1216 char buf[32]; 1217 int err = rte_errno; 1218 1219 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1220 !errstrlist[error->type]) 1221 errstr = "unknown type"; 1222 else 1223 errstr = errstrlist[error->type]; 1224 printf("Caught error type %d (%s): %s%s: %s\n", 1225 error->type, errstr, 1226 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1227 error->cause), buf) : "", 1228 error->message ? error->message : "(no stated reason)", 1229 rte_strerror(err)); 1230 return -err; 1231 } 1232 1233 /** Validate flow rule. */ 1234 int 1235 port_flow_validate(portid_t port_id, 1236 const struct rte_flow_attr *attr, 1237 const struct rte_flow_item *pattern, 1238 const struct rte_flow_action *actions) 1239 { 1240 struct rte_flow_error error; 1241 1242 /* Poisoning to make sure PMDs update it in case of error. */ 1243 memset(&error, 0x11, sizeof(error)); 1244 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1245 return port_flow_complain(&error); 1246 printf("Flow rule validated\n"); 1247 return 0; 1248 } 1249 1250 /** Create flow rule. */ 1251 int 1252 port_flow_create(portid_t port_id, 1253 const struct rte_flow_attr *attr, 1254 const struct rte_flow_item *pattern, 1255 const struct rte_flow_action *actions) 1256 { 1257 struct rte_flow *flow; 1258 struct rte_port *port; 1259 struct port_flow *pf; 1260 uint32_t id; 1261 struct rte_flow_error error; 1262 1263 /* Poisoning to make sure PMDs update it in case of error. */ 1264 memset(&error, 0x22, sizeof(error)); 1265 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1266 if (!flow) 1267 return port_flow_complain(&error); 1268 port = &ports[port_id]; 1269 if (port->flow_list) { 1270 if (port->flow_list->id == UINT32_MAX) { 1271 printf("Highest rule ID is already assigned, delete" 1272 " it first"); 1273 rte_flow_destroy(port_id, flow, NULL); 1274 return -ENOMEM; 1275 } 1276 id = port->flow_list->id + 1; 1277 } else 1278 id = 0; 1279 pf = port_flow_new(attr, pattern, actions, &error); 1280 if (!pf) { 1281 rte_flow_destroy(port_id, flow, NULL); 1282 return port_flow_complain(&error); 1283 } 1284 pf->next = port->flow_list; 1285 pf->id = id; 1286 pf->flow = flow; 1287 port->flow_list = pf; 1288 printf("Flow rule #%u created\n", pf->id); 1289 return 0; 1290 } 1291 1292 /** Destroy a number of flow rules. */ 1293 int 1294 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1295 { 1296 struct rte_port *port; 1297 struct port_flow **tmp; 1298 uint32_t c = 0; 1299 int ret = 0; 1300 1301 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1302 port_id == (portid_t)RTE_PORT_ALL) 1303 return -EINVAL; 1304 port = &ports[port_id]; 1305 tmp = &port->flow_list; 1306 while (*tmp) { 1307 uint32_t i; 1308 1309 for (i = 0; i != n; ++i) { 1310 struct rte_flow_error error; 1311 struct port_flow *pf = *tmp; 1312 1313 if (rule[i] != pf->id) 1314 continue; 1315 /* 1316 * Poisoning to make sure PMDs update it in case 1317 * of error. 1318 */ 1319 memset(&error, 0x33, sizeof(error)); 1320 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1321 ret = port_flow_complain(&error); 1322 continue; 1323 } 1324 printf("Flow rule #%u destroyed\n", pf->id); 1325 *tmp = pf->next; 1326 free(pf); 1327 break; 1328 } 1329 if (i == n) 1330 tmp = &(*tmp)->next; 1331 ++c; 1332 } 1333 return ret; 1334 } 1335 1336 /** Remove all flow rules. */ 1337 int 1338 port_flow_flush(portid_t port_id) 1339 { 1340 struct rte_flow_error error; 1341 struct rte_port *port; 1342 int ret = 0; 1343 1344 /* Poisoning to make sure PMDs update it in case of error. */ 1345 memset(&error, 0x44, sizeof(error)); 1346 if (rte_flow_flush(port_id, &error)) { 1347 ret = port_flow_complain(&error); 1348 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1349 port_id == (portid_t)RTE_PORT_ALL) 1350 return ret; 1351 } 1352 port = &ports[port_id]; 1353 while (port->flow_list) { 1354 struct port_flow *pf = port->flow_list->next; 1355 1356 free(port->flow_list); 1357 port->flow_list = pf; 1358 } 1359 return ret; 1360 } 1361 1362 /** Query a flow rule. */ 1363 int 1364 port_flow_query(portid_t port_id, uint32_t rule, 1365 const struct rte_flow_action *action) 1366 { 1367 struct rte_flow_error error; 1368 struct rte_port *port; 1369 struct port_flow *pf; 1370 const char *name; 1371 union { 1372 struct rte_flow_query_count count; 1373 } query; 1374 int ret; 1375 1376 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1377 port_id == (portid_t)RTE_PORT_ALL) 1378 return -EINVAL; 1379 port = &ports[port_id]; 1380 for (pf = port->flow_list; pf; pf = pf->next) 1381 if (pf->id == rule) 1382 break; 1383 if (!pf) { 1384 printf("Flow rule #%u not found\n", rule); 1385 return -ENOENT; 1386 } 1387 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1388 &name, sizeof(name), 1389 (void *)(uintptr_t)action->type, &error); 1390 if (ret < 0) 1391 return port_flow_complain(&error); 1392 switch (action->type) { 1393 case RTE_FLOW_ACTION_TYPE_COUNT: 1394 break; 1395 default: 1396 printf("Cannot query action type %d (%s)\n", 1397 action->type, name); 1398 return -ENOTSUP; 1399 } 1400 /* Poisoning to make sure PMDs update it in case of error. */ 1401 memset(&error, 0x55, sizeof(error)); 1402 memset(&query, 0, sizeof(query)); 1403 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1404 return port_flow_complain(&error); 1405 switch (action->type) { 1406 case RTE_FLOW_ACTION_TYPE_COUNT: 1407 printf("%s:\n" 1408 " hits_set: %u\n" 1409 " bytes_set: %u\n" 1410 " hits: %" PRIu64 "\n" 1411 " bytes: %" PRIu64 "\n", 1412 name, 1413 query.count.hits_set, 1414 query.count.bytes_set, 1415 query.count.hits, 1416 query.count.bytes); 1417 break; 1418 default: 1419 printf("Cannot display result for action type %d (%s)\n", 1420 action->type, name); 1421 break; 1422 } 1423 return 0; 1424 } 1425 1426 /** List flow rules. */ 1427 void 1428 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1429 { 1430 struct rte_port *port; 1431 struct port_flow *pf; 1432 struct port_flow *list = NULL; 1433 uint32_t i; 1434 1435 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1436 port_id == (portid_t)RTE_PORT_ALL) 1437 return; 1438 port = &ports[port_id]; 1439 if (!port->flow_list) 1440 return; 1441 /* Sort flows by group, priority and ID. */ 1442 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1443 struct port_flow **tmp; 1444 const struct rte_flow_attr *curr = pf->rule.attr; 1445 1446 if (n) { 1447 /* Filter out unwanted groups. */ 1448 for (i = 0; i != n; ++i) 1449 if (curr->group == group[i]) 1450 break; 1451 if (i == n) 1452 continue; 1453 } 1454 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 1455 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 1456 1457 if (curr->group > comp->group || 1458 (curr->group == comp->group && 1459 curr->priority > comp->priority) || 1460 (curr->group == comp->group && 1461 curr->priority == comp->priority && 1462 pf->id > (*tmp)->id)) 1463 continue; 1464 break; 1465 } 1466 pf->tmp = *tmp; 1467 *tmp = pf; 1468 } 1469 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1470 for (pf = list; pf != NULL; pf = pf->tmp) { 1471 const struct rte_flow_item *item = pf->rule.pattern; 1472 const struct rte_flow_action *action = pf->rule.actions; 1473 const char *name; 1474 1475 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1476 pf->id, 1477 pf->rule.attr->group, 1478 pf->rule.attr->priority, 1479 pf->rule.attr->ingress ? 'i' : '-', 1480 pf->rule.attr->egress ? 'e' : '-', 1481 pf->rule.attr->transfer ? 't' : '-'); 1482 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1483 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 1484 &name, sizeof(name), 1485 (void *)(uintptr_t)item->type, 1486 NULL) <= 0) 1487 name = "[UNKNOWN]"; 1488 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1489 printf("%s ", name); 1490 ++item; 1491 } 1492 printf("=>"); 1493 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1494 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1495 &name, sizeof(name), 1496 (void *)(uintptr_t)action->type, 1497 NULL) <= 0) 1498 name = "[UNKNOWN]"; 1499 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1500 printf(" %s", name); 1501 ++action; 1502 } 1503 printf("\n"); 1504 } 1505 } 1506 1507 /** Restrict ingress traffic to the defined flow rules. */ 1508 int 1509 port_flow_isolate(portid_t port_id, int set) 1510 { 1511 struct rte_flow_error error; 1512 1513 /* Poisoning to make sure PMDs update it in case of error. */ 1514 memset(&error, 0x66, sizeof(error)); 1515 if (rte_flow_isolate(port_id, set, &error)) 1516 return port_flow_complain(&error); 1517 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1518 port_id, 1519 set ? "now restricted" : "not restricted anymore"); 1520 return 0; 1521 } 1522 1523 /* 1524 * RX/TX ring descriptors display functions. 1525 */ 1526 int 1527 rx_queue_id_is_invalid(queueid_t rxq_id) 1528 { 1529 if (rxq_id < nb_rxq) 1530 return 0; 1531 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1532 return 1; 1533 } 1534 1535 int 1536 tx_queue_id_is_invalid(queueid_t txq_id) 1537 { 1538 if (txq_id < nb_txq) 1539 return 0; 1540 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1541 return 1; 1542 } 1543 1544 static int 1545 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1546 { 1547 if (rxdesc_id < nb_rxd) 1548 return 0; 1549 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1550 rxdesc_id, nb_rxd); 1551 return 1; 1552 } 1553 1554 static int 1555 tx_desc_id_is_invalid(uint16_t txdesc_id) 1556 { 1557 if (txdesc_id < nb_txd) 1558 return 0; 1559 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1560 txdesc_id, nb_txd); 1561 return 1; 1562 } 1563 1564 static const struct rte_memzone * 1565 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1566 { 1567 char mz_name[RTE_MEMZONE_NAMESIZE]; 1568 const struct rte_memzone *mz; 1569 1570 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 1571 port_id, q_id, ring_name); 1572 mz = rte_memzone_lookup(mz_name); 1573 if (mz == NULL) 1574 printf("%s ring memory zoneof (port %d, queue %d) not" 1575 "found (zone name = %s\n", 1576 ring_name, port_id, q_id, mz_name); 1577 return mz; 1578 } 1579 1580 union igb_ring_dword { 1581 uint64_t dword; 1582 struct { 1583 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1584 uint32_t lo; 1585 uint32_t hi; 1586 #else 1587 uint32_t hi; 1588 uint32_t lo; 1589 #endif 1590 } words; 1591 }; 1592 1593 struct igb_ring_desc_32_bytes { 1594 union igb_ring_dword lo_dword; 1595 union igb_ring_dword hi_dword; 1596 union igb_ring_dword resv1; 1597 union igb_ring_dword resv2; 1598 }; 1599 1600 struct igb_ring_desc_16_bytes { 1601 union igb_ring_dword lo_dword; 1602 union igb_ring_dword hi_dword; 1603 }; 1604 1605 static void 1606 ring_rxd_display_dword(union igb_ring_dword dword) 1607 { 1608 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1609 (unsigned)dword.words.hi); 1610 } 1611 1612 static void 1613 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1614 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1615 portid_t port_id, 1616 #else 1617 __rte_unused portid_t port_id, 1618 #endif 1619 uint16_t desc_id) 1620 { 1621 struct igb_ring_desc_16_bytes *ring = 1622 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1623 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1624 struct rte_eth_dev_info dev_info; 1625 1626 memset(&dev_info, 0, sizeof(dev_info)); 1627 rte_eth_dev_info_get(port_id, &dev_info); 1628 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1629 /* 32 bytes RX descriptor, i40e only */ 1630 struct igb_ring_desc_32_bytes *ring = 1631 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1632 ring[desc_id].lo_dword.dword = 1633 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1634 ring_rxd_display_dword(ring[desc_id].lo_dword); 1635 ring[desc_id].hi_dword.dword = 1636 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1637 ring_rxd_display_dword(ring[desc_id].hi_dword); 1638 ring[desc_id].resv1.dword = 1639 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1640 ring_rxd_display_dword(ring[desc_id].resv1); 1641 ring[desc_id].resv2.dword = 1642 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1643 ring_rxd_display_dword(ring[desc_id].resv2); 1644 1645 return; 1646 } 1647 #endif 1648 /* 16 bytes RX descriptor */ 1649 ring[desc_id].lo_dword.dword = 1650 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1651 ring_rxd_display_dword(ring[desc_id].lo_dword); 1652 ring[desc_id].hi_dword.dword = 1653 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1654 ring_rxd_display_dword(ring[desc_id].hi_dword); 1655 } 1656 1657 static void 1658 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1659 { 1660 struct igb_ring_desc_16_bytes *ring; 1661 struct igb_ring_desc_16_bytes txd; 1662 1663 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1664 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1665 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1666 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1667 (unsigned)txd.lo_dword.words.lo, 1668 (unsigned)txd.lo_dword.words.hi, 1669 (unsigned)txd.hi_dword.words.lo, 1670 (unsigned)txd.hi_dword.words.hi); 1671 } 1672 1673 void 1674 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1675 { 1676 const struct rte_memzone *rx_mz; 1677 1678 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1679 return; 1680 if (rx_queue_id_is_invalid(rxq_id)) 1681 return; 1682 if (rx_desc_id_is_invalid(rxd_id)) 1683 return; 1684 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1685 if (rx_mz == NULL) 1686 return; 1687 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1688 } 1689 1690 void 1691 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1692 { 1693 const struct rte_memzone *tx_mz; 1694 1695 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1696 return; 1697 if (tx_queue_id_is_invalid(txq_id)) 1698 return; 1699 if (tx_desc_id_is_invalid(txd_id)) 1700 return; 1701 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1702 if (tx_mz == NULL) 1703 return; 1704 ring_tx_descriptor_display(tx_mz, txd_id); 1705 } 1706 1707 void 1708 fwd_lcores_config_display(void) 1709 { 1710 lcoreid_t lc_id; 1711 1712 printf("List of forwarding lcores:"); 1713 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1714 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1715 printf("\n"); 1716 } 1717 void 1718 rxtx_config_display(void) 1719 { 1720 portid_t pid; 1721 queueid_t qid; 1722 1723 printf(" %s packet forwarding%s packets/burst=%d\n", 1724 cur_fwd_eng->fwd_mode_name, 1725 retry_enabled == 0 ? "" : " with retry", 1726 nb_pkt_per_burst); 1727 1728 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1729 printf(" packet len=%u - nb packet segments=%d\n", 1730 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1731 1732 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1733 nb_fwd_lcores, nb_fwd_ports); 1734 1735 RTE_ETH_FOREACH_DEV(pid) { 1736 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1737 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1738 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1739 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1740 uint16_t nb_rx_desc_tmp; 1741 uint16_t nb_tx_desc_tmp; 1742 struct rte_eth_rxq_info rx_qinfo; 1743 struct rte_eth_txq_info tx_qinfo; 1744 int32_t rc; 1745 1746 /* per port config */ 1747 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1748 (unsigned int)pid, nb_rxq, nb_txq); 1749 1750 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1751 ports[pid].dev_conf.rxmode.offloads, 1752 ports[pid].dev_conf.txmode.offloads); 1753 1754 /* per rx queue config only for first queue to be less verbose */ 1755 for (qid = 0; qid < 1; qid++) { 1756 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1757 if (rc) 1758 nb_rx_desc_tmp = nb_rx_desc[qid]; 1759 else 1760 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1761 1762 printf(" RX queue: %d\n", qid); 1763 printf(" RX desc=%d - RX free threshold=%d\n", 1764 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1765 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1766 " wthresh=%d\n", 1767 rx_conf[qid].rx_thresh.pthresh, 1768 rx_conf[qid].rx_thresh.hthresh, 1769 rx_conf[qid].rx_thresh.wthresh); 1770 printf(" RX Offloads=0x%"PRIx64"\n", 1771 rx_conf[qid].offloads); 1772 } 1773 1774 /* per tx queue config only for first queue to be less verbose */ 1775 for (qid = 0; qid < 1; qid++) { 1776 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1777 if (rc) 1778 nb_tx_desc_tmp = nb_tx_desc[qid]; 1779 else 1780 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1781 1782 printf(" TX queue: %d\n", qid); 1783 printf(" TX desc=%d - TX free threshold=%d\n", 1784 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1785 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1786 " wthresh=%d\n", 1787 tx_conf[qid].tx_thresh.pthresh, 1788 tx_conf[qid].tx_thresh.hthresh, 1789 tx_conf[qid].tx_thresh.wthresh); 1790 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1791 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1792 } 1793 } 1794 } 1795 1796 void 1797 port_rss_reta_info(portid_t port_id, 1798 struct rte_eth_rss_reta_entry64 *reta_conf, 1799 uint16_t nb_entries) 1800 { 1801 uint16_t i, idx, shift; 1802 int ret; 1803 1804 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1805 return; 1806 1807 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1808 if (ret != 0) { 1809 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1810 return; 1811 } 1812 1813 for (i = 0; i < nb_entries; i++) { 1814 idx = i / RTE_RETA_GROUP_SIZE; 1815 shift = i % RTE_RETA_GROUP_SIZE; 1816 if (!(reta_conf[idx].mask & (1ULL << shift))) 1817 continue; 1818 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1819 i, reta_conf[idx].reta[shift]); 1820 } 1821 } 1822 1823 /* 1824 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1825 * key of the port. 1826 */ 1827 void 1828 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 1829 { 1830 struct rte_eth_rss_conf rss_conf = {0}; 1831 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1832 uint64_t rss_hf; 1833 uint8_t i; 1834 int diag; 1835 struct rte_eth_dev_info dev_info; 1836 uint8_t hash_key_size; 1837 1838 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1839 return; 1840 1841 rte_eth_dev_info_get(port_id, &dev_info); 1842 if (dev_info.hash_key_size > 0 && 1843 dev_info.hash_key_size <= sizeof(rss_key)) 1844 hash_key_size = dev_info.hash_key_size; 1845 else { 1846 printf("dev_info did not provide a valid hash key size\n"); 1847 return; 1848 } 1849 1850 /* Get RSS hash key if asked to display it */ 1851 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1852 rss_conf.rss_key_len = hash_key_size; 1853 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1854 if (diag != 0) { 1855 switch (diag) { 1856 case -ENODEV: 1857 printf("port index %d invalid\n", port_id); 1858 break; 1859 case -ENOTSUP: 1860 printf("operation not supported by device\n"); 1861 break; 1862 default: 1863 printf("operation failed - diag=%d\n", diag); 1864 break; 1865 } 1866 return; 1867 } 1868 rss_hf = rss_conf.rss_hf; 1869 if (rss_hf == 0) { 1870 printf("RSS disabled\n"); 1871 return; 1872 } 1873 printf("RSS functions:\n "); 1874 for (i = 0; rss_type_table[i].str; i++) { 1875 if (rss_hf & rss_type_table[i].rss_type) 1876 printf("%s ", rss_type_table[i].str); 1877 } 1878 printf("\n"); 1879 if (!show_rss_key) 1880 return; 1881 printf("RSS key:\n"); 1882 for (i = 0; i < hash_key_size; i++) 1883 printf("%02X", rss_key[i]); 1884 printf("\n"); 1885 } 1886 1887 void 1888 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1889 uint hash_key_len) 1890 { 1891 struct rte_eth_rss_conf rss_conf; 1892 int diag; 1893 unsigned int i; 1894 1895 rss_conf.rss_key = NULL; 1896 rss_conf.rss_key_len = hash_key_len; 1897 rss_conf.rss_hf = 0; 1898 for (i = 0; rss_type_table[i].str; i++) { 1899 if (!strcmp(rss_type_table[i].str, rss_type)) 1900 rss_conf.rss_hf = rss_type_table[i].rss_type; 1901 } 1902 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1903 if (diag == 0) { 1904 rss_conf.rss_key = hash_key; 1905 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1906 } 1907 if (diag == 0) 1908 return; 1909 1910 switch (diag) { 1911 case -ENODEV: 1912 printf("port index %d invalid\n", port_id); 1913 break; 1914 case -ENOTSUP: 1915 printf("operation not supported by device\n"); 1916 break; 1917 default: 1918 printf("operation failed - diag=%d\n", diag); 1919 break; 1920 } 1921 } 1922 1923 /* 1924 * Setup forwarding configuration for each logical core. 1925 */ 1926 static void 1927 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1928 { 1929 streamid_t nb_fs_per_lcore; 1930 streamid_t nb_fs; 1931 streamid_t sm_id; 1932 lcoreid_t nb_extra; 1933 lcoreid_t nb_fc; 1934 lcoreid_t nb_lc; 1935 lcoreid_t lc_id; 1936 1937 nb_fs = cfg->nb_fwd_streams; 1938 nb_fc = cfg->nb_fwd_lcores; 1939 if (nb_fs <= nb_fc) { 1940 nb_fs_per_lcore = 1; 1941 nb_extra = 0; 1942 } else { 1943 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1944 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1945 } 1946 1947 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1948 sm_id = 0; 1949 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1950 fwd_lcores[lc_id]->stream_idx = sm_id; 1951 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1952 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1953 } 1954 1955 /* 1956 * Assign extra remaining streams, if any. 1957 */ 1958 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1959 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1960 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1961 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1962 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1963 } 1964 } 1965 1966 static portid_t 1967 fwd_topology_tx_port_get(portid_t rxp) 1968 { 1969 static int warning_once = 1; 1970 1971 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 1972 1973 switch (port_topology) { 1974 default: 1975 case PORT_TOPOLOGY_PAIRED: 1976 if ((rxp & 0x1) == 0) { 1977 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 1978 return rxp + 1; 1979 if (warning_once) { 1980 printf("\nWarning! port-topology=paired" 1981 " and odd forward ports number," 1982 " the last port will pair with" 1983 " itself.\n\n"); 1984 warning_once = 0; 1985 } 1986 return rxp; 1987 } 1988 return rxp - 1; 1989 case PORT_TOPOLOGY_CHAINED: 1990 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 1991 case PORT_TOPOLOGY_LOOP: 1992 return rxp; 1993 } 1994 } 1995 1996 static void 1997 simple_fwd_config_setup(void) 1998 { 1999 portid_t i; 2000 2001 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2002 cur_fwd_config.nb_fwd_streams = 2003 (streamid_t) cur_fwd_config.nb_fwd_ports; 2004 2005 /* reinitialize forwarding streams */ 2006 init_fwd_streams(); 2007 2008 /* 2009 * In the simple forwarding test, the number of forwarding cores 2010 * must be lower or equal to the number of forwarding ports. 2011 */ 2012 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2013 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2014 cur_fwd_config.nb_fwd_lcores = 2015 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2016 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2017 2018 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2019 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2020 fwd_streams[i]->rx_queue = 0; 2021 fwd_streams[i]->tx_port = 2022 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2023 fwd_streams[i]->tx_queue = 0; 2024 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2025 fwd_streams[i]->retry_enabled = retry_enabled; 2026 } 2027 } 2028 2029 /** 2030 * For the RSS forwarding test all streams distributed over lcores. Each stream 2031 * being composed of a RX queue to poll on a RX port for input messages, 2032 * associated with a TX queue of a TX port where to send forwarded packets. 2033 */ 2034 static void 2035 rss_fwd_config_setup(void) 2036 { 2037 portid_t rxp; 2038 portid_t txp; 2039 queueid_t rxq; 2040 queueid_t nb_q; 2041 streamid_t sm_id; 2042 2043 nb_q = nb_rxq; 2044 if (nb_q > nb_txq) 2045 nb_q = nb_txq; 2046 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2047 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2048 cur_fwd_config.nb_fwd_streams = 2049 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2050 2051 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2052 cur_fwd_config.nb_fwd_lcores = 2053 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2054 2055 /* reinitialize forwarding streams */ 2056 init_fwd_streams(); 2057 2058 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2059 rxp = 0; rxq = 0; 2060 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2061 struct fwd_stream *fs; 2062 2063 fs = fwd_streams[sm_id]; 2064 txp = fwd_topology_tx_port_get(rxp); 2065 fs->rx_port = fwd_ports_ids[rxp]; 2066 fs->rx_queue = rxq; 2067 fs->tx_port = fwd_ports_ids[txp]; 2068 fs->tx_queue = rxq; 2069 fs->peer_addr = fs->tx_port; 2070 fs->retry_enabled = retry_enabled; 2071 rxp++; 2072 if (rxp < nb_fwd_ports) 2073 continue; 2074 rxp = 0; 2075 rxq++; 2076 } 2077 } 2078 2079 /** 2080 * For the DCB forwarding test, each core is assigned on each traffic class. 2081 * 2082 * Each core is assigned a multi-stream, each stream being composed of 2083 * a RX queue to poll on a RX port for input messages, associated with 2084 * a TX queue of a TX port where to send forwarded packets. All RX and 2085 * TX queues are mapping to the same traffic class. 2086 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2087 * the same core 2088 */ 2089 static void 2090 dcb_fwd_config_setup(void) 2091 { 2092 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2093 portid_t txp, rxp = 0; 2094 queueid_t txq, rxq = 0; 2095 lcoreid_t lc_id; 2096 uint16_t nb_rx_queue, nb_tx_queue; 2097 uint16_t i, j, k, sm_id = 0; 2098 uint8_t tc = 0; 2099 2100 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2101 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2102 cur_fwd_config.nb_fwd_streams = 2103 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2104 2105 /* reinitialize forwarding streams */ 2106 init_fwd_streams(); 2107 sm_id = 0; 2108 txp = 1; 2109 /* get the dcb info on the first RX and TX ports */ 2110 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2111 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2112 2113 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2114 fwd_lcores[lc_id]->stream_nb = 0; 2115 fwd_lcores[lc_id]->stream_idx = sm_id; 2116 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2117 /* if the nb_queue is zero, means this tc is 2118 * not enabled on the POOL 2119 */ 2120 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2121 break; 2122 k = fwd_lcores[lc_id]->stream_nb + 2123 fwd_lcores[lc_id]->stream_idx; 2124 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2125 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2126 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2127 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2128 for (j = 0; j < nb_rx_queue; j++) { 2129 struct fwd_stream *fs; 2130 2131 fs = fwd_streams[k + j]; 2132 fs->rx_port = fwd_ports_ids[rxp]; 2133 fs->rx_queue = rxq + j; 2134 fs->tx_port = fwd_ports_ids[txp]; 2135 fs->tx_queue = txq + j % nb_tx_queue; 2136 fs->peer_addr = fs->tx_port; 2137 fs->retry_enabled = retry_enabled; 2138 } 2139 fwd_lcores[lc_id]->stream_nb += 2140 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2141 } 2142 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2143 2144 tc++; 2145 if (tc < rxp_dcb_info.nb_tcs) 2146 continue; 2147 /* Restart from TC 0 on next RX port */ 2148 tc = 0; 2149 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2150 rxp = (portid_t) 2151 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2152 else 2153 rxp++; 2154 if (rxp >= nb_fwd_ports) 2155 return; 2156 /* get the dcb information on next RX and TX ports */ 2157 if ((rxp & 0x1) == 0) 2158 txp = (portid_t) (rxp + 1); 2159 else 2160 txp = (portid_t) (rxp - 1); 2161 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2162 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2163 } 2164 } 2165 2166 static void 2167 icmp_echo_config_setup(void) 2168 { 2169 portid_t rxp; 2170 queueid_t rxq; 2171 lcoreid_t lc_id; 2172 uint16_t sm_id; 2173 2174 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2175 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2176 (nb_txq * nb_fwd_ports); 2177 else 2178 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2179 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2180 cur_fwd_config.nb_fwd_streams = 2181 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2182 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2183 cur_fwd_config.nb_fwd_lcores = 2184 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2185 if (verbose_level > 0) { 2186 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2187 __FUNCTION__, 2188 cur_fwd_config.nb_fwd_lcores, 2189 cur_fwd_config.nb_fwd_ports, 2190 cur_fwd_config.nb_fwd_streams); 2191 } 2192 2193 /* reinitialize forwarding streams */ 2194 init_fwd_streams(); 2195 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2196 rxp = 0; rxq = 0; 2197 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2198 if (verbose_level > 0) 2199 printf(" core=%d: \n", lc_id); 2200 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2201 struct fwd_stream *fs; 2202 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2203 fs->rx_port = fwd_ports_ids[rxp]; 2204 fs->rx_queue = rxq; 2205 fs->tx_port = fs->rx_port; 2206 fs->tx_queue = rxq; 2207 fs->peer_addr = fs->tx_port; 2208 fs->retry_enabled = retry_enabled; 2209 if (verbose_level > 0) 2210 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2211 sm_id, fs->rx_port, fs->rx_queue, 2212 fs->tx_queue); 2213 rxq = (queueid_t) (rxq + 1); 2214 if (rxq == nb_rxq) { 2215 rxq = 0; 2216 rxp = (portid_t) (rxp + 1); 2217 } 2218 } 2219 } 2220 } 2221 2222 #if defined RTE_LIBRTE_PMD_SOFTNIC 2223 static void 2224 softnic_fwd_config_setup(void) 2225 { 2226 struct rte_port *port; 2227 portid_t pid, softnic_portid; 2228 queueid_t i; 2229 uint8_t softnic_enable = 0; 2230 2231 RTE_ETH_FOREACH_DEV(pid) { 2232 port = &ports[pid]; 2233 const char *driver = port->dev_info.driver_name; 2234 2235 if (strcmp(driver, "net_softnic") == 0) { 2236 softnic_portid = pid; 2237 softnic_enable = 1; 2238 break; 2239 } 2240 } 2241 2242 if (softnic_enable == 0) { 2243 printf("Softnic mode not configured(%s)!\n", __func__); 2244 return; 2245 } 2246 2247 cur_fwd_config.nb_fwd_ports = 1; 2248 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; 2249 2250 /* Re-initialize forwarding streams */ 2251 init_fwd_streams(); 2252 2253 /* 2254 * In the softnic forwarding test, the number of forwarding cores 2255 * is set to one and remaining are used for softnic packet processing. 2256 */ 2257 cur_fwd_config.nb_fwd_lcores = 1; 2258 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2259 2260 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { 2261 fwd_streams[i]->rx_port = softnic_portid; 2262 fwd_streams[i]->rx_queue = i; 2263 fwd_streams[i]->tx_port = softnic_portid; 2264 fwd_streams[i]->tx_queue = i; 2265 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2266 fwd_streams[i]->retry_enabled = retry_enabled; 2267 } 2268 } 2269 #endif 2270 2271 void 2272 fwd_config_setup(void) 2273 { 2274 cur_fwd_config.fwd_eng = cur_fwd_eng; 2275 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2276 icmp_echo_config_setup(); 2277 return; 2278 } 2279 2280 #if defined RTE_LIBRTE_PMD_SOFTNIC 2281 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 2282 softnic_fwd_config_setup(); 2283 return; 2284 } 2285 #endif 2286 2287 if ((nb_rxq > 1) && (nb_txq > 1)){ 2288 if (dcb_config) 2289 dcb_fwd_config_setup(); 2290 else 2291 rss_fwd_config_setup(); 2292 } 2293 else 2294 simple_fwd_config_setup(); 2295 } 2296 2297 static const char * 2298 mp_alloc_to_str(uint8_t mode) 2299 { 2300 switch (mode) { 2301 case MP_ALLOC_NATIVE: 2302 return "native"; 2303 case MP_ALLOC_ANON: 2304 return "anon"; 2305 case MP_ALLOC_XMEM: 2306 return "xmem"; 2307 case MP_ALLOC_XMEM_HUGE: 2308 return "xmemhuge"; 2309 default: 2310 return "invalid"; 2311 } 2312 } 2313 2314 void 2315 pkt_fwd_config_display(struct fwd_config *cfg) 2316 { 2317 struct fwd_stream *fs; 2318 lcoreid_t lc_id; 2319 streamid_t sm_id; 2320 2321 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2322 "NUMA support %s, MP allocation mode: %s\n", 2323 cfg->fwd_eng->fwd_mode_name, 2324 retry_enabled == 0 ? "" : " with retry", 2325 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2326 numa_support == 1 ? "enabled" : "disabled", 2327 mp_alloc_to_str(mp_alloc_type)); 2328 2329 if (retry_enabled) 2330 printf("TX retry num: %u, delay between TX retries: %uus\n", 2331 burst_tx_retry_num, burst_tx_delay_time); 2332 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2333 printf("Logical Core %u (socket %u) forwards packets on " 2334 "%d streams:", 2335 fwd_lcores_cpuids[lc_id], 2336 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2337 fwd_lcores[lc_id]->stream_nb); 2338 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2339 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2340 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2341 "P=%d/Q=%d (socket %u) ", 2342 fs->rx_port, fs->rx_queue, 2343 ports[fs->rx_port].socket_id, 2344 fs->tx_port, fs->tx_queue, 2345 ports[fs->tx_port].socket_id); 2346 print_ethaddr("peer=", 2347 &peer_eth_addrs[fs->peer_addr]); 2348 } 2349 printf("\n"); 2350 } 2351 printf("\n"); 2352 } 2353 2354 void 2355 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2356 { 2357 struct rte_ether_addr new_peer_addr; 2358 if (!rte_eth_dev_is_valid_port(port_id)) { 2359 printf("Error: Invalid port number %i\n", port_id); 2360 return; 2361 } 2362 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 2363 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2364 return; 2365 } 2366 peer_eth_addrs[port_id] = new_peer_addr; 2367 } 2368 2369 int 2370 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2371 { 2372 unsigned int i; 2373 unsigned int lcore_cpuid; 2374 int record_now; 2375 2376 record_now = 0; 2377 again: 2378 for (i = 0; i < nb_lc; i++) { 2379 lcore_cpuid = lcorelist[i]; 2380 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2381 printf("lcore %u not enabled\n", lcore_cpuid); 2382 return -1; 2383 } 2384 if (lcore_cpuid == rte_get_master_lcore()) { 2385 printf("lcore %u cannot be masked on for running " 2386 "packet forwarding, which is the master lcore " 2387 "and reserved for command line parsing only\n", 2388 lcore_cpuid); 2389 return -1; 2390 } 2391 if (record_now) 2392 fwd_lcores_cpuids[i] = lcore_cpuid; 2393 } 2394 if (record_now == 0) { 2395 record_now = 1; 2396 goto again; 2397 } 2398 nb_cfg_lcores = (lcoreid_t) nb_lc; 2399 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2400 printf("previous number of forwarding cores %u - changed to " 2401 "number of configured cores %u\n", 2402 (unsigned int) nb_fwd_lcores, nb_lc); 2403 nb_fwd_lcores = (lcoreid_t) nb_lc; 2404 } 2405 2406 return 0; 2407 } 2408 2409 int 2410 set_fwd_lcores_mask(uint64_t lcoremask) 2411 { 2412 unsigned int lcorelist[64]; 2413 unsigned int nb_lc; 2414 unsigned int i; 2415 2416 if (lcoremask == 0) { 2417 printf("Invalid NULL mask of cores\n"); 2418 return -1; 2419 } 2420 nb_lc = 0; 2421 for (i = 0; i < 64; i++) { 2422 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2423 continue; 2424 lcorelist[nb_lc++] = i; 2425 } 2426 return set_fwd_lcores_list(lcorelist, nb_lc); 2427 } 2428 2429 void 2430 set_fwd_lcores_number(uint16_t nb_lc) 2431 { 2432 if (nb_lc > nb_cfg_lcores) { 2433 printf("nb fwd cores %u > %u (max. number of configured " 2434 "lcores) - ignored\n", 2435 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2436 return; 2437 } 2438 nb_fwd_lcores = (lcoreid_t) nb_lc; 2439 printf("Number of forwarding cores set to %u\n", 2440 (unsigned int) nb_fwd_lcores); 2441 } 2442 2443 void 2444 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2445 { 2446 unsigned int i; 2447 portid_t port_id; 2448 int record_now; 2449 2450 record_now = 0; 2451 again: 2452 for (i = 0; i < nb_pt; i++) { 2453 port_id = (portid_t) portlist[i]; 2454 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2455 return; 2456 if (record_now) 2457 fwd_ports_ids[i] = port_id; 2458 } 2459 if (record_now == 0) { 2460 record_now = 1; 2461 goto again; 2462 } 2463 nb_cfg_ports = (portid_t) nb_pt; 2464 if (nb_fwd_ports != (portid_t) nb_pt) { 2465 printf("previous number of forwarding ports %u - changed to " 2466 "number of configured ports %u\n", 2467 (unsigned int) nb_fwd_ports, nb_pt); 2468 nb_fwd_ports = (portid_t) nb_pt; 2469 } 2470 } 2471 2472 void 2473 set_fwd_ports_mask(uint64_t portmask) 2474 { 2475 unsigned int portlist[64]; 2476 unsigned int nb_pt; 2477 unsigned int i; 2478 2479 if (portmask == 0) { 2480 printf("Invalid NULL mask of ports\n"); 2481 return; 2482 } 2483 nb_pt = 0; 2484 RTE_ETH_FOREACH_DEV(i) { 2485 if (! ((uint64_t)(1ULL << i) & portmask)) 2486 continue; 2487 portlist[nb_pt++] = i; 2488 } 2489 set_fwd_ports_list(portlist, nb_pt); 2490 } 2491 2492 void 2493 set_fwd_ports_number(uint16_t nb_pt) 2494 { 2495 if (nb_pt > nb_cfg_ports) { 2496 printf("nb fwd ports %u > %u (number of configured " 2497 "ports) - ignored\n", 2498 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2499 return; 2500 } 2501 nb_fwd_ports = (portid_t) nb_pt; 2502 printf("Number of forwarding ports set to %u\n", 2503 (unsigned int) nb_fwd_ports); 2504 } 2505 2506 int 2507 port_is_forwarding(portid_t port_id) 2508 { 2509 unsigned int i; 2510 2511 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2512 return -1; 2513 2514 for (i = 0; i < nb_fwd_ports; i++) { 2515 if (fwd_ports_ids[i] == port_id) 2516 return 1; 2517 } 2518 2519 return 0; 2520 } 2521 2522 void 2523 set_nb_pkt_per_burst(uint16_t nb) 2524 { 2525 if (nb > MAX_PKT_BURST) { 2526 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2527 " ignored\n", 2528 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2529 return; 2530 } 2531 nb_pkt_per_burst = nb; 2532 printf("Number of packets per burst set to %u\n", 2533 (unsigned int) nb_pkt_per_burst); 2534 } 2535 2536 static const char * 2537 tx_split_get_name(enum tx_pkt_split split) 2538 { 2539 uint32_t i; 2540 2541 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2542 if (tx_split_name[i].split == split) 2543 return tx_split_name[i].name; 2544 } 2545 return NULL; 2546 } 2547 2548 void 2549 set_tx_pkt_split(const char *name) 2550 { 2551 uint32_t i; 2552 2553 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2554 if (strcmp(tx_split_name[i].name, name) == 0) { 2555 tx_pkt_split = tx_split_name[i].split; 2556 return; 2557 } 2558 } 2559 printf("unknown value: \"%s\"\n", name); 2560 } 2561 2562 void 2563 show_tx_pkt_segments(void) 2564 { 2565 uint32_t i, n; 2566 const char *split; 2567 2568 n = tx_pkt_nb_segs; 2569 split = tx_split_get_name(tx_pkt_split); 2570 2571 printf("Number of segments: %u\n", n); 2572 printf("Segment sizes: "); 2573 for (i = 0; i != n - 1; i++) 2574 printf("%hu,", tx_pkt_seg_lengths[i]); 2575 printf("%hu\n", tx_pkt_seg_lengths[i]); 2576 printf("Split packet: %s\n", split); 2577 } 2578 2579 void 2580 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2581 { 2582 uint16_t tx_pkt_len; 2583 unsigned i; 2584 2585 if (nb_segs >= (unsigned) nb_txd) { 2586 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2587 nb_segs, (unsigned int) nb_txd); 2588 return; 2589 } 2590 2591 /* 2592 * Check that each segment length is greater or equal than 2593 * the mbuf data sise. 2594 * Check also that the total packet length is greater or equal than the 2595 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 2596 * 20 + 8). 2597 */ 2598 tx_pkt_len = 0; 2599 for (i = 0; i < nb_segs; i++) { 2600 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2601 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2602 i, seg_lengths[i], (unsigned) mbuf_data_size); 2603 return; 2604 } 2605 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2606 } 2607 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 2608 printf("total packet length=%u < %d - give up\n", 2609 (unsigned) tx_pkt_len, 2610 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 2611 return; 2612 } 2613 2614 for (i = 0; i < nb_segs; i++) 2615 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2616 2617 tx_pkt_length = tx_pkt_len; 2618 tx_pkt_nb_segs = (uint8_t) nb_segs; 2619 } 2620 2621 void 2622 setup_gro(const char *onoff, portid_t port_id) 2623 { 2624 if (!rte_eth_dev_is_valid_port(port_id)) { 2625 printf("invalid port id %u\n", port_id); 2626 return; 2627 } 2628 if (test_done == 0) { 2629 printf("Before enable/disable GRO," 2630 " please stop forwarding first\n"); 2631 return; 2632 } 2633 if (strcmp(onoff, "on") == 0) { 2634 if (gro_ports[port_id].enable != 0) { 2635 printf("Port %u has enabled GRO. Please" 2636 " disable GRO first\n", port_id); 2637 return; 2638 } 2639 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2640 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2641 gro_ports[port_id].param.max_flow_num = 2642 GRO_DEFAULT_FLOW_NUM; 2643 gro_ports[port_id].param.max_item_per_flow = 2644 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2645 } 2646 gro_ports[port_id].enable = 1; 2647 } else { 2648 if (gro_ports[port_id].enable == 0) { 2649 printf("Port %u has disabled GRO\n", port_id); 2650 return; 2651 } 2652 gro_ports[port_id].enable = 0; 2653 } 2654 } 2655 2656 void 2657 setup_gro_flush_cycles(uint8_t cycles) 2658 { 2659 if (test_done == 0) { 2660 printf("Before change flush interval for GRO," 2661 " please stop forwarding first.\n"); 2662 return; 2663 } 2664 2665 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2666 GRO_DEFAULT_FLUSH_CYCLES) { 2667 printf("The flushing cycle be in the range" 2668 " of 1 to %u. Revert to the default" 2669 " value %u.\n", 2670 GRO_MAX_FLUSH_CYCLES, 2671 GRO_DEFAULT_FLUSH_CYCLES); 2672 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2673 } 2674 2675 gro_flush_cycles = cycles; 2676 } 2677 2678 void 2679 show_gro(portid_t port_id) 2680 { 2681 struct rte_gro_param *param; 2682 uint32_t max_pkts_num; 2683 2684 param = &gro_ports[port_id].param; 2685 2686 if (!rte_eth_dev_is_valid_port(port_id)) { 2687 printf("Invalid port id %u.\n", port_id); 2688 return; 2689 } 2690 if (gro_ports[port_id].enable) { 2691 printf("GRO type: TCP/IPv4\n"); 2692 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2693 max_pkts_num = param->max_flow_num * 2694 param->max_item_per_flow; 2695 } else 2696 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2697 printf("Max number of packets to perform GRO: %u\n", 2698 max_pkts_num); 2699 printf("Flushing cycles: %u\n", gro_flush_cycles); 2700 } else 2701 printf("Port %u doesn't enable GRO.\n", port_id); 2702 } 2703 2704 void 2705 setup_gso(const char *mode, portid_t port_id) 2706 { 2707 if (!rte_eth_dev_is_valid_port(port_id)) { 2708 printf("invalid port id %u\n", port_id); 2709 return; 2710 } 2711 if (strcmp(mode, "on") == 0) { 2712 if (test_done == 0) { 2713 printf("before enabling GSO," 2714 " please stop forwarding first\n"); 2715 return; 2716 } 2717 gso_ports[port_id].enable = 1; 2718 } else if (strcmp(mode, "off") == 0) { 2719 if (test_done == 0) { 2720 printf("before disabling GSO," 2721 " please stop forwarding first\n"); 2722 return; 2723 } 2724 gso_ports[port_id].enable = 0; 2725 } 2726 } 2727 2728 char* 2729 list_pkt_forwarding_modes(void) 2730 { 2731 static char fwd_modes[128] = ""; 2732 const char *separator = "|"; 2733 struct fwd_engine *fwd_eng; 2734 unsigned i = 0; 2735 2736 if (strlen (fwd_modes) == 0) { 2737 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2738 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2739 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2740 strncat(fwd_modes, separator, 2741 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2742 } 2743 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2744 } 2745 2746 return fwd_modes; 2747 } 2748 2749 char* 2750 list_pkt_forwarding_retry_modes(void) 2751 { 2752 static char fwd_modes[128] = ""; 2753 const char *separator = "|"; 2754 struct fwd_engine *fwd_eng; 2755 unsigned i = 0; 2756 2757 if (strlen(fwd_modes) == 0) { 2758 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2759 if (fwd_eng == &rx_only_engine) 2760 continue; 2761 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2762 sizeof(fwd_modes) - 2763 strlen(fwd_modes) - 1); 2764 strncat(fwd_modes, separator, 2765 sizeof(fwd_modes) - 2766 strlen(fwd_modes) - 1); 2767 } 2768 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2769 } 2770 2771 return fwd_modes; 2772 } 2773 2774 void 2775 set_pkt_forwarding_mode(const char *fwd_mode_name) 2776 { 2777 struct fwd_engine *fwd_eng; 2778 unsigned i; 2779 2780 i = 0; 2781 while ((fwd_eng = fwd_engines[i]) != NULL) { 2782 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2783 printf("Set %s packet forwarding mode%s\n", 2784 fwd_mode_name, 2785 retry_enabled == 0 ? "" : " with retry"); 2786 cur_fwd_eng = fwd_eng; 2787 return; 2788 } 2789 i++; 2790 } 2791 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2792 } 2793 2794 void 2795 add_rx_dump_callbacks(portid_t portid) 2796 { 2797 struct rte_eth_dev_info dev_info; 2798 uint16_t queue; 2799 2800 if (port_id_is_invalid(portid, ENABLED_WARN)) 2801 return; 2802 2803 rte_eth_dev_info_get(portid, &dev_info); 2804 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2805 if (!ports[portid].rx_dump_cb[queue]) 2806 ports[portid].rx_dump_cb[queue] = 2807 rte_eth_add_rx_callback(portid, queue, 2808 dump_rx_pkts, NULL); 2809 } 2810 2811 void 2812 add_tx_dump_callbacks(portid_t portid) 2813 { 2814 struct rte_eth_dev_info dev_info; 2815 uint16_t queue; 2816 2817 if (port_id_is_invalid(portid, ENABLED_WARN)) 2818 return; 2819 rte_eth_dev_info_get(portid, &dev_info); 2820 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2821 if (!ports[portid].tx_dump_cb[queue]) 2822 ports[portid].tx_dump_cb[queue] = 2823 rte_eth_add_tx_callback(portid, queue, 2824 dump_tx_pkts, NULL); 2825 } 2826 2827 void 2828 remove_rx_dump_callbacks(portid_t portid) 2829 { 2830 struct rte_eth_dev_info dev_info; 2831 uint16_t queue; 2832 2833 if (port_id_is_invalid(portid, ENABLED_WARN)) 2834 return; 2835 rte_eth_dev_info_get(portid, &dev_info); 2836 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2837 if (ports[portid].rx_dump_cb[queue]) { 2838 rte_eth_remove_rx_callback(portid, queue, 2839 ports[portid].rx_dump_cb[queue]); 2840 ports[portid].rx_dump_cb[queue] = NULL; 2841 } 2842 } 2843 2844 void 2845 remove_tx_dump_callbacks(portid_t portid) 2846 { 2847 struct rte_eth_dev_info dev_info; 2848 uint16_t queue; 2849 2850 if (port_id_is_invalid(portid, ENABLED_WARN)) 2851 return; 2852 rte_eth_dev_info_get(portid, &dev_info); 2853 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2854 if (ports[portid].tx_dump_cb[queue]) { 2855 rte_eth_remove_tx_callback(portid, queue, 2856 ports[portid].tx_dump_cb[queue]); 2857 ports[portid].tx_dump_cb[queue] = NULL; 2858 } 2859 } 2860 2861 void 2862 configure_rxtx_dump_callbacks(uint16_t verbose) 2863 { 2864 portid_t portid; 2865 2866 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 2867 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 2868 return; 2869 #endif 2870 2871 RTE_ETH_FOREACH_DEV(portid) 2872 { 2873 if (verbose == 1 || verbose > 2) 2874 add_rx_dump_callbacks(portid); 2875 else 2876 remove_rx_dump_callbacks(portid); 2877 if (verbose >= 2) 2878 add_tx_dump_callbacks(portid); 2879 else 2880 remove_tx_dump_callbacks(portid); 2881 } 2882 } 2883 2884 void 2885 set_verbose_level(uint16_t vb_level) 2886 { 2887 printf("Change verbose level from %u to %u\n", 2888 (unsigned int) verbose_level, (unsigned int) vb_level); 2889 verbose_level = vb_level; 2890 configure_rxtx_dump_callbacks(verbose_level); 2891 } 2892 2893 void 2894 vlan_extend_set(portid_t port_id, int on) 2895 { 2896 int diag; 2897 int vlan_offload; 2898 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2899 2900 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2901 return; 2902 2903 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2904 2905 if (on) { 2906 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2907 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2908 } else { 2909 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2910 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2911 } 2912 2913 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2914 if (diag < 0) 2915 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2916 "diag=%d\n", port_id, on, diag); 2917 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2918 } 2919 2920 void 2921 rx_vlan_strip_set(portid_t port_id, int on) 2922 { 2923 int diag; 2924 int vlan_offload; 2925 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2926 2927 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2928 return; 2929 2930 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2931 2932 if (on) { 2933 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2934 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2935 } else { 2936 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2937 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2938 } 2939 2940 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2941 if (diag < 0) 2942 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2943 "diag=%d\n", port_id, on, diag); 2944 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2945 } 2946 2947 void 2948 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2949 { 2950 int diag; 2951 2952 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2953 return; 2954 2955 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2956 if (diag < 0) 2957 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2958 "diag=%d\n", port_id, queue_id, on, diag); 2959 } 2960 2961 void 2962 rx_vlan_filter_set(portid_t port_id, int on) 2963 { 2964 int diag; 2965 int vlan_offload; 2966 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2967 2968 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2969 return; 2970 2971 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2972 2973 if (on) { 2974 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2975 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2976 } else { 2977 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2978 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2979 } 2980 2981 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2982 if (diag < 0) 2983 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2984 "diag=%d\n", port_id, on, diag); 2985 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2986 } 2987 2988 int 2989 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2990 { 2991 int diag; 2992 2993 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2994 return 1; 2995 if (vlan_id_is_invalid(vlan_id)) 2996 return 1; 2997 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2998 if (diag == 0) 2999 return 0; 3000 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 3001 "diag=%d\n", 3002 port_id, vlan_id, on, diag); 3003 return -1; 3004 } 3005 3006 void 3007 rx_vlan_all_filter_set(portid_t port_id, int on) 3008 { 3009 uint16_t vlan_id; 3010 3011 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3012 return; 3013 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 3014 if (rx_vft_set(port_id, vlan_id, on)) 3015 break; 3016 } 3017 } 3018 3019 void 3020 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 3021 { 3022 int diag; 3023 3024 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3025 return; 3026 3027 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 3028 if (diag == 0) 3029 return; 3030 3031 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 3032 "diag=%d\n", 3033 port_id, vlan_type, tp_id, diag); 3034 } 3035 3036 void 3037 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3038 { 3039 struct rte_eth_dev_info dev_info; 3040 3041 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3042 return; 3043 if (vlan_id_is_invalid(vlan_id)) 3044 return; 3045 3046 if (ports[port_id].dev_conf.txmode.offloads & 3047 DEV_TX_OFFLOAD_QINQ_INSERT) { 3048 printf("Error, as QinQ has been enabled.\n"); 3049 return; 3050 } 3051 rte_eth_dev_info_get(port_id, &dev_info); 3052 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3053 printf("Error: vlan insert is not supported by port %d\n", 3054 port_id); 3055 return; 3056 } 3057 3058 tx_vlan_reset(port_id); 3059 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3060 ports[port_id].tx_vlan_id = vlan_id; 3061 } 3062 3063 void 3064 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3065 { 3066 struct rte_eth_dev_info dev_info; 3067 3068 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3069 return; 3070 if (vlan_id_is_invalid(vlan_id)) 3071 return; 3072 if (vlan_id_is_invalid(vlan_id_outer)) 3073 return; 3074 3075 rte_eth_dev_info_get(port_id, &dev_info); 3076 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3077 printf("Error: qinq insert not supported by port %d\n", 3078 port_id); 3079 return; 3080 } 3081 3082 tx_vlan_reset(port_id); 3083 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 3084 DEV_TX_OFFLOAD_QINQ_INSERT); 3085 ports[port_id].tx_vlan_id = vlan_id; 3086 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3087 } 3088 3089 void 3090 tx_vlan_reset(portid_t port_id) 3091 { 3092 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3093 return; 3094 ports[port_id].dev_conf.txmode.offloads &= 3095 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3096 DEV_TX_OFFLOAD_QINQ_INSERT); 3097 ports[port_id].tx_vlan_id = 0; 3098 ports[port_id].tx_vlan_id_outer = 0; 3099 } 3100 3101 void 3102 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3103 { 3104 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3105 return; 3106 3107 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3108 } 3109 3110 void 3111 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3112 { 3113 uint16_t i; 3114 uint8_t existing_mapping_found = 0; 3115 3116 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3117 return; 3118 3119 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3120 return; 3121 3122 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3123 printf("map_value not in required range 0..%d\n", 3124 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3125 return; 3126 } 3127 3128 if (!is_rx) { /*then tx*/ 3129 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3130 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3131 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3132 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3133 existing_mapping_found = 1; 3134 break; 3135 } 3136 } 3137 if (!existing_mapping_found) { /* A new additional mapping... */ 3138 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3139 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3140 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3141 nb_tx_queue_stats_mappings++; 3142 } 3143 } 3144 else { /*rx*/ 3145 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3146 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3147 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3148 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3149 existing_mapping_found = 1; 3150 break; 3151 } 3152 } 3153 if (!existing_mapping_found) { /* A new additional mapping... */ 3154 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3155 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3156 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3157 nb_rx_queue_stats_mappings++; 3158 } 3159 } 3160 } 3161 3162 void 3163 set_xstats_hide_zero(uint8_t on_off) 3164 { 3165 xstats_hide_zero = on_off; 3166 } 3167 3168 static inline void 3169 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3170 { 3171 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3172 3173 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3174 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3175 " tunnel_id: 0x%08x", 3176 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3177 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3178 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3179 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3180 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3181 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3182 3183 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3184 rte_be_to_cpu_16(mask->src_port_mask), 3185 rte_be_to_cpu_16(mask->dst_port_mask)); 3186 3187 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3188 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3189 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3190 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3191 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3192 3193 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3194 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3195 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3196 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3197 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3198 } 3199 3200 printf("\n"); 3201 } 3202 3203 static inline void 3204 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3205 { 3206 struct rte_eth_flex_payload_cfg *cfg; 3207 uint32_t i, j; 3208 3209 for (i = 0; i < flex_conf->nb_payloads; i++) { 3210 cfg = &flex_conf->flex_set[i]; 3211 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3212 printf("\n RAW: "); 3213 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3214 printf("\n L2_PAYLOAD: "); 3215 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3216 printf("\n L3_PAYLOAD: "); 3217 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3218 printf("\n L4_PAYLOAD: "); 3219 else 3220 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3221 for (j = 0; j < num; j++) 3222 printf(" %-5u", cfg->src_offset[j]); 3223 } 3224 printf("\n"); 3225 } 3226 3227 static char * 3228 flowtype_to_str(uint16_t flow_type) 3229 { 3230 struct flow_type_info { 3231 char str[32]; 3232 uint16_t ftype; 3233 }; 3234 3235 uint8_t i; 3236 static struct flow_type_info flowtype_str_table[] = { 3237 {"raw", RTE_ETH_FLOW_RAW}, 3238 {"ipv4", RTE_ETH_FLOW_IPV4}, 3239 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3240 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3241 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3242 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3243 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3244 {"ipv6", RTE_ETH_FLOW_IPV6}, 3245 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3246 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3247 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3248 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3249 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3250 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3251 {"port", RTE_ETH_FLOW_PORT}, 3252 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3253 {"geneve", RTE_ETH_FLOW_GENEVE}, 3254 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3255 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3256 }; 3257 3258 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3259 if (flowtype_str_table[i].ftype == flow_type) 3260 return flowtype_str_table[i].str; 3261 } 3262 3263 return NULL; 3264 } 3265 3266 static inline void 3267 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3268 { 3269 struct rte_eth_fdir_flex_mask *mask; 3270 uint32_t i, j; 3271 char *p; 3272 3273 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3274 mask = &flex_conf->flex_mask[i]; 3275 p = flowtype_to_str(mask->flow_type); 3276 printf("\n %s:\t", p ? p : "unknown"); 3277 for (j = 0; j < num; j++) 3278 printf(" %02x", mask->mask[j]); 3279 } 3280 printf("\n"); 3281 } 3282 3283 static inline void 3284 print_fdir_flow_type(uint32_t flow_types_mask) 3285 { 3286 int i; 3287 char *p; 3288 3289 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3290 if (!(flow_types_mask & (1 << i))) 3291 continue; 3292 p = flowtype_to_str(i); 3293 if (p) 3294 printf(" %s", p); 3295 else 3296 printf(" unknown"); 3297 } 3298 printf("\n"); 3299 } 3300 3301 void 3302 fdir_get_infos(portid_t port_id) 3303 { 3304 struct rte_eth_fdir_stats fdir_stat; 3305 struct rte_eth_fdir_info fdir_info; 3306 int ret; 3307 3308 static const char *fdir_stats_border = "########################"; 3309 3310 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3311 return; 3312 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3313 if (ret < 0) { 3314 printf("\n FDIR is not supported on port %-2d\n", 3315 port_id); 3316 return; 3317 } 3318 3319 memset(&fdir_info, 0, sizeof(fdir_info)); 3320 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3321 RTE_ETH_FILTER_INFO, &fdir_info); 3322 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3323 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3324 RTE_ETH_FILTER_STATS, &fdir_stat); 3325 printf("\n %s FDIR infos for port %-2d %s\n", 3326 fdir_stats_border, port_id, fdir_stats_border); 3327 printf(" MODE: "); 3328 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3329 printf(" PERFECT\n"); 3330 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3331 printf(" PERFECT-MAC-VLAN\n"); 3332 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3333 printf(" PERFECT-TUNNEL\n"); 3334 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3335 printf(" SIGNATURE\n"); 3336 else 3337 printf(" DISABLE\n"); 3338 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3339 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3340 printf(" SUPPORTED FLOW TYPE: "); 3341 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3342 } 3343 printf(" FLEX PAYLOAD INFO:\n"); 3344 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3345 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3346 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3347 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3348 fdir_info.flex_payload_unit, 3349 fdir_info.max_flex_payload_segment_num, 3350 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3351 printf(" MASK: "); 3352 print_fdir_mask(&fdir_info.mask); 3353 if (fdir_info.flex_conf.nb_payloads > 0) { 3354 printf(" FLEX PAYLOAD SRC OFFSET:"); 3355 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3356 } 3357 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3358 printf(" FLEX MASK CFG:"); 3359 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3360 } 3361 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3362 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3363 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3364 fdir_info.guarant_spc, fdir_info.best_spc); 3365 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3366 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3367 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3368 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3369 fdir_stat.collision, fdir_stat.free, 3370 fdir_stat.maxhash, fdir_stat.maxlen, 3371 fdir_stat.add, fdir_stat.remove, 3372 fdir_stat.f_add, fdir_stat.f_remove); 3373 printf(" %s############################%s\n", 3374 fdir_stats_border, fdir_stats_border); 3375 } 3376 3377 void 3378 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3379 { 3380 struct rte_port *port; 3381 struct rte_eth_fdir_flex_conf *flex_conf; 3382 int i, idx = 0; 3383 3384 port = &ports[port_id]; 3385 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3386 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3387 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3388 idx = i; 3389 break; 3390 } 3391 } 3392 if (i >= RTE_ETH_FLOW_MAX) { 3393 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3394 idx = flex_conf->nb_flexmasks; 3395 flex_conf->nb_flexmasks++; 3396 } else { 3397 printf("The flex mask table is full. Can not set flex" 3398 " mask for flow_type(%u).", cfg->flow_type); 3399 return; 3400 } 3401 } 3402 rte_memcpy(&flex_conf->flex_mask[idx], 3403 cfg, 3404 sizeof(struct rte_eth_fdir_flex_mask)); 3405 } 3406 3407 void 3408 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3409 { 3410 struct rte_port *port; 3411 struct rte_eth_fdir_flex_conf *flex_conf; 3412 int i, idx = 0; 3413 3414 port = &ports[port_id]; 3415 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3416 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3417 if (cfg->type == flex_conf->flex_set[i].type) { 3418 idx = i; 3419 break; 3420 } 3421 } 3422 if (i >= RTE_ETH_PAYLOAD_MAX) { 3423 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3424 idx = flex_conf->nb_payloads; 3425 flex_conf->nb_payloads++; 3426 } else { 3427 printf("The flex payload table is full. Can not set" 3428 " flex payload for type(%u).", cfg->type); 3429 return; 3430 } 3431 } 3432 rte_memcpy(&flex_conf->flex_set[idx], 3433 cfg, 3434 sizeof(struct rte_eth_flex_payload_cfg)); 3435 3436 } 3437 3438 void 3439 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3440 { 3441 #ifdef RTE_LIBRTE_IXGBE_PMD 3442 int diag; 3443 3444 if (is_rx) 3445 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3446 else 3447 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3448 3449 if (diag == 0) 3450 return; 3451 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3452 is_rx ? "rx" : "tx", port_id, diag); 3453 return; 3454 #endif 3455 printf("VF %s setting not supported for port %d\n", 3456 is_rx ? "Rx" : "Tx", port_id); 3457 RTE_SET_USED(vf); 3458 RTE_SET_USED(on); 3459 } 3460 3461 int 3462 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3463 { 3464 int diag; 3465 struct rte_eth_link link; 3466 3467 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3468 return 1; 3469 rte_eth_link_get_nowait(port_id, &link); 3470 if (rate > link.link_speed) { 3471 printf("Invalid rate value:%u bigger than link speed: %u\n", 3472 rate, link.link_speed); 3473 return 1; 3474 } 3475 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3476 if (diag == 0) 3477 return diag; 3478 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3479 port_id, diag); 3480 return diag; 3481 } 3482 3483 int 3484 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3485 { 3486 int diag = -ENOTSUP; 3487 3488 RTE_SET_USED(vf); 3489 RTE_SET_USED(rate); 3490 RTE_SET_USED(q_msk); 3491 3492 #ifdef RTE_LIBRTE_IXGBE_PMD 3493 if (diag == -ENOTSUP) 3494 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3495 q_msk); 3496 #endif 3497 #ifdef RTE_LIBRTE_BNXT_PMD 3498 if (diag == -ENOTSUP) 3499 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3500 #endif 3501 if (diag == 0) 3502 return diag; 3503 3504 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3505 port_id, diag); 3506 return diag; 3507 } 3508 3509 /* 3510 * Functions to manage the set of filtered Multicast MAC addresses. 3511 * 3512 * A pool of filtered multicast MAC addresses is associated with each port. 3513 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3514 * The address of the pool and the number of valid multicast MAC addresses 3515 * recorded in the pool are stored in the fields "mc_addr_pool" and 3516 * "mc_addr_nb" of the "rte_port" data structure. 3517 * 3518 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3519 * to be supplied a contiguous array of multicast MAC addresses. 3520 * To comply with this constraint, the set of multicast addresses recorded 3521 * into the pool are systematically compacted at the beginning of the pool. 3522 * Hence, when a multicast address is removed from the pool, all following 3523 * addresses, if any, are copied back to keep the set contiguous. 3524 */ 3525 #define MCAST_POOL_INC 32 3526 3527 static int 3528 mcast_addr_pool_extend(struct rte_port *port) 3529 { 3530 struct rte_ether_addr *mc_pool; 3531 size_t mc_pool_size; 3532 3533 /* 3534 * If a free entry is available at the end of the pool, just 3535 * increment the number of recorded multicast addresses. 3536 */ 3537 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3538 port->mc_addr_nb++; 3539 return 0; 3540 } 3541 3542 /* 3543 * [re]allocate a pool with MCAST_POOL_INC more entries. 3544 * The previous test guarantees that port->mc_addr_nb is a multiple 3545 * of MCAST_POOL_INC. 3546 */ 3547 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 3548 MCAST_POOL_INC); 3549 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 3550 mc_pool_size); 3551 if (mc_pool == NULL) { 3552 printf("allocation of pool of %u multicast addresses failed\n", 3553 port->mc_addr_nb + MCAST_POOL_INC); 3554 return -ENOMEM; 3555 } 3556 3557 port->mc_addr_pool = mc_pool; 3558 port->mc_addr_nb++; 3559 return 0; 3560 3561 } 3562 3563 static void 3564 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3565 { 3566 port->mc_addr_nb--; 3567 if (addr_idx == port->mc_addr_nb) { 3568 /* No need to recompact the set of multicast addressses. */ 3569 if (port->mc_addr_nb == 0) { 3570 /* free the pool of multicast addresses. */ 3571 free(port->mc_addr_pool); 3572 port->mc_addr_pool = NULL; 3573 } 3574 return; 3575 } 3576 memmove(&port->mc_addr_pool[addr_idx], 3577 &port->mc_addr_pool[addr_idx + 1], 3578 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 3579 } 3580 3581 static void 3582 eth_port_multicast_addr_list_set(portid_t port_id) 3583 { 3584 struct rte_port *port; 3585 int diag; 3586 3587 port = &ports[port_id]; 3588 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3589 port->mc_addr_nb); 3590 if (diag == 0) 3591 return; 3592 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3593 port->mc_addr_nb, port_id, -diag); 3594 } 3595 3596 void 3597 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 3598 { 3599 struct rte_port *port; 3600 uint32_t i; 3601 3602 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3603 return; 3604 3605 port = &ports[port_id]; 3606 3607 /* 3608 * Check that the added multicast MAC address is not already recorded 3609 * in the pool of multicast addresses. 3610 */ 3611 for (i = 0; i < port->mc_addr_nb; i++) { 3612 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3613 printf("multicast address already filtered by port\n"); 3614 return; 3615 } 3616 } 3617 3618 if (mcast_addr_pool_extend(port) != 0) 3619 return; 3620 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3621 eth_port_multicast_addr_list_set(port_id); 3622 } 3623 3624 void 3625 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 3626 { 3627 struct rte_port *port; 3628 uint32_t i; 3629 3630 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3631 return; 3632 3633 port = &ports[port_id]; 3634 3635 /* 3636 * Search the pool of multicast MAC addresses for the removed address. 3637 */ 3638 for (i = 0; i < port->mc_addr_nb; i++) { 3639 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3640 break; 3641 } 3642 if (i == port->mc_addr_nb) { 3643 printf("multicast address not filtered by port %d\n", port_id); 3644 return; 3645 } 3646 3647 mcast_addr_pool_remove(port, i); 3648 eth_port_multicast_addr_list_set(port_id); 3649 } 3650 3651 void 3652 port_dcb_info_display(portid_t port_id) 3653 { 3654 struct rte_eth_dcb_info dcb_info; 3655 uint16_t i; 3656 int ret; 3657 static const char *border = "================"; 3658 3659 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3660 return; 3661 3662 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3663 if (ret) { 3664 printf("\n Failed to get dcb infos on port %-2d\n", 3665 port_id); 3666 return; 3667 } 3668 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3669 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3670 printf("\n TC : "); 3671 for (i = 0; i < dcb_info.nb_tcs; i++) 3672 printf("\t%4d", i); 3673 printf("\n Priority : "); 3674 for (i = 0; i < dcb_info.nb_tcs; i++) 3675 printf("\t%4d", dcb_info.prio_tc[i]); 3676 printf("\n BW percent :"); 3677 for (i = 0; i < dcb_info.nb_tcs; i++) 3678 printf("\t%4d%%", dcb_info.tc_bws[i]); 3679 printf("\n RXQ base : "); 3680 for (i = 0; i < dcb_info.nb_tcs; i++) 3681 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3682 printf("\n RXQ number :"); 3683 for (i = 0; i < dcb_info.nb_tcs; i++) 3684 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3685 printf("\n TXQ base : "); 3686 for (i = 0; i < dcb_info.nb_tcs; i++) 3687 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3688 printf("\n TXQ number :"); 3689 for (i = 0; i < dcb_info.nb_tcs; i++) 3690 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3691 printf("\n"); 3692 } 3693 3694 uint8_t * 3695 open_file(const char *file_path, uint32_t *size) 3696 { 3697 int fd = open(file_path, O_RDONLY); 3698 off_t pkg_size; 3699 uint8_t *buf = NULL; 3700 int ret = 0; 3701 struct stat st_buf; 3702 3703 if (size) 3704 *size = 0; 3705 3706 if (fd == -1) { 3707 printf("%s: Failed to open %s\n", __func__, file_path); 3708 return buf; 3709 } 3710 3711 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3712 close(fd); 3713 printf("%s: File operations failed\n", __func__); 3714 return buf; 3715 } 3716 3717 pkg_size = st_buf.st_size; 3718 if (pkg_size < 0) { 3719 close(fd); 3720 printf("%s: File operations failed\n", __func__); 3721 return buf; 3722 } 3723 3724 buf = (uint8_t *)malloc(pkg_size); 3725 if (!buf) { 3726 close(fd); 3727 printf("%s: Failed to malloc memory\n", __func__); 3728 return buf; 3729 } 3730 3731 ret = read(fd, buf, pkg_size); 3732 if (ret < 0) { 3733 close(fd); 3734 printf("%s: File read operation failed\n", __func__); 3735 close_file(buf); 3736 return NULL; 3737 } 3738 3739 if (size) 3740 *size = pkg_size; 3741 3742 close(fd); 3743 3744 return buf; 3745 } 3746 3747 int 3748 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3749 { 3750 FILE *fh = fopen(file_path, "wb"); 3751 3752 if (fh == NULL) { 3753 printf("%s: Failed to open %s\n", __func__, file_path); 3754 return -1; 3755 } 3756 3757 if (fwrite(buf, 1, size, fh) != size) { 3758 fclose(fh); 3759 printf("%s: File write operation failed\n", __func__); 3760 return -1; 3761 } 3762 3763 fclose(fh); 3764 3765 return 0; 3766 } 3767 3768 int 3769 close_file(uint8_t *buf) 3770 { 3771 if (buf) { 3772 free((void *)buf); 3773 return 0; 3774 } 3775 3776 return -1; 3777 } 3778 3779 void 3780 port_queue_region_info_display(portid_t port_id, void *buf) 3781 { 3782 #ifdef RTE_LIBRTE_I40E_PMD 3783 uint16_t i, j; 3784 struct rte_pmd_i40e_queue_regions *info = 3785 (struct rte_pmd_i40e_queue_regions *)buf; 3786 static const char *queue_region_info_stats_border = "-------"; 3787 3788 if (!info->queue_region_number) 3789 printf("there is no region has been set before"); 3790 3791 printf("\n %s All queue region info for port=%2d %s", 3792 queue_region_info_stats_border, port_id, 3793 queue_region_info_stats_border); 3794 printf("\n queue_region_number: %-14u \n", 3795 info->queue_region_number); 3796 3797 for (i = 0; i < info->queue_region_number; i++) { 3798 printf("\n region_id: %-14u queue_number: %-14u " 3799 "queue_start_index: %-14u \n", 3800 info->region[i].region_id, 3801 info->region[i].queue_num, 3802 info->region[i].queue_start_index); 3803 3804 printf(" user_priority_num is %-14u :", 3805 info->region[i].user_priority_num); 3806 for (j = 0; j < info->region[i].user_priority_num; j++) 3807 printf(" %-14u ", info->region[i].user_priority[j]); 3808 3809 printf("\n flowtype_num is %-14u :", 3810 info->region[i].flowtype_num); 3811 for (j = 0; j < info->region[i].flowtype_num; j++) 3812 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3813 } 3814 #else 3815 RTE_SET_USED(port_id); 3816 RTE_SET_USED(buf); 3817 #endif 3818 3819 printf("\n\n"); 3820 } 3821