1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * Copyright 2013-2014 6WIND S.A. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <stdarg.h> 36 #include <errno.h> 37 #include <stdio.h> 38 #include <string.h> 39 #include <stdint.h> 40 #include <inttypes.h> 41 42 #include <sys/queue.h> 43 #include <sys/types.h> 44 #include <sys/stat.h> 45 #include <fcntl.h> 46 #include <unistd.h> 47 48 #include <rte_common.h> 49 #include <rte_byteorder.h> 50 #include <rte_debug.h> 51 #include <rte_log.h> 52 #include <rte_memory.h> 53 #include <rte_memcpy.h> 54 #include <rte_memzone.h> 55 #include <rte_launch.h> 56 #include <rte_eal.h> 57 #include <rte_per_lcore.h> 58 #include <rte_lcore.h> 59 #include <rte_atomic.h> 60 #include <rte_branch_prediction.h> 61 #include <rte_mempool.h> 62 #include <rte_mbuf.h> 63 #include <rte_interrupts.h> 64 #include <rte_pci.h> 65 #include <rte_ether.h> 66 #include <rte_ethdev.h> 67 #include <rte_string_fns.h> 68 #include <rte_cycles.h> 69 #include <rte_flow.h> 70 #include <rte_errno.h> 71 #ifdef RTE_LIBRTE_IXGBE_PMD 72 #include <rte_pmd_ixgbe.h> 73 #endif 74 #ifdef RTE_LIBRTE_I40E_PMD 75 #include <rte_pmd_i40e.h> 76 #endif 77 #ifdef RTE_LIBRTE_BNXT_PMD 78 #include <rte_pmd_bnxt.h> 79 #endif 80 #include <rte_gro.h> 81 #include <cmdline_parse_etheraddr.h> 82 83 #include "testpmd.h" 84 85 static char *flowtype_to_str(uint16_t flow_type); 86 87 static const struct { 88 enum tx_pkt_split split; 89 const char *name; 90 } tx_split_name[] = { 91 { 92 .split = TX_PKT_SPLIT_OFF, 93 .name = "off", 94 }, 95 { 96 .split = TX_PKT_SPLIT_ON, 97 .name = "on", 98 }, 99 { 100 .split = TX_PKT_SPLIT_RND, 101 .name = "rand", 102 }, 103 }; 104 105 struct rss_type_info { 106 char str[32]; 107 uint64_t rss_type; 108 }; 109 110 static const struct rss_type_info rss_type_table[] = { 111 { "ipv4", ETH_RSS_IPV4 }, 112 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 113 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 114 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 115 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 116 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 117 { "ipv6", ETH_RSS_IPV6 }, 118 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 119 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 120 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 121 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 122 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 123 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 124 { "ipv6-ex", ETH_RSS_IPV6_EX }, 125 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 126 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 127 { "port", ETH_RSS_PORT }, 128 { "vxlan", ETH_RSS_VXLAN }, 129 { "geneve", ETH_RSS_GENEVE }, 130 { "nvgre", ETH_RSS_NVGRE }, 131 132 }; 133 134 static void 135 print_ethaddr(const char *name, struct ether_addr *eth_addr) 136 { 137 char buf[ETHER_ADDR_FMT_SIZE]; 138 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 139 printf("%s%s", name, buf); 140 } 141 142 void 143 nic_stats_display(portid_t port_id) 144 { 145 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 146 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 147 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 148 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 149 uint64_t mpps_rx, mpps_tx; 150 struct rte_eth_stats stats; 151 struct rte_port *port = &ports[port_id]; 152 uint8_t i; 153 portid_t pid; 154 155 static const char *nic_stats_border = "########################"; 156 157 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 158 printf("Valid port range is [0"); 159 RTE_ETH_FOREACH_DEV(pid) 160 printf(", %d", pid); 161 printf("]\n"); 162 return; 163 } 164 rte_eth_stats_get(port_id, &stats); 165 printf("\n %s NIC statistics for port %-2d %s\n", 166 nic_stats_border, port_id, nic_stats_border); 167 168 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 169 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 170 "%-"PRIu64"\n", 171 stats.ipackets, stats.imissed, stats.ibytes); 172 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 173 printf(" RX-nombuf: %-10"PRIu64"\n", 174 stats.rx_nombuf); 175 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 176 "%-"PRIu64"\n", 177 stats.opackets, stats.oerrors, stats.obytes); 178 } 179 else { 180 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 181 " RX-bytes: %10"PRIu64"\n", 182 stats.ipackets, stats.ierrors, stats.ibytes); 183 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 184 printf(" RX-nombuf: %10"PRIu64"\n", 185 stats.rx_nombuf); 186 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 187 " TX-bytes: %10"PRIu64"\n", 188 stats.opackets, stats.oerrors, stats.obytes); 189 } 190 191 if (port->rx_queue_stats_mapping_enabled) { 192 printf("\n"); 193 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 194 printf(" Stats reg %2d RX-packets: %10"PRIu64 195 " RX-errors: %10"PRIu64 196 " RX-bytes: %10"PRIu64"\n", 197 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 198 } 199 } 200 if (port->tx_queue_stats_mapping_enabled) { 201 printf("\n"); 202 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 203 printf(" Stats reg %2d TX-packets: %10"PRIu64 204 " TX-bytes: %10"PRIu64"\n", 205 i, stats.q_opackets[i], stats.q_obytes[i]); 206 } 207 } 208 209 diff_cycles = prev_cycles[port_id]; 210 prev_cycles[port_id] = rte_rdtsc(); 211 if (diff_cycles > 0) 212 diff_cycles = prev_cycles[port_id] - diff_cycles; 213 214 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 215 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 216 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 217 (stats.opackets - prev_pkts_tx[port_id]) : 0; 218 prev_pkts_rx[port_id] = stats.ipackets; 219 prev_pkts_tx[port_id] = stats.opackets; 220 mpps_rx = diff_cycles > 0 ? 221 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 222 mpps_tx = diff_cycles > 0 ? 223 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 224 printf("\n Throughput (since last show)\n"); 225 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 226 mpps_rx, mpps_tx); 227 228 printf(" %s############################%s\n", 229 nic_stats_border, nic_stats_border); 230 } 231 232 void 233 nic_stats_clear(portid_t port_id) 234 { 235 portid_t pid; 236 237 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 238 printf("Valid port range is [0"); 239 RTE_ETH_FOREACH_DEV(pid) 240 printf(", %d", pid); 241 printf("]\n"); 242 return; 243 } 244 rte_eth_stats_reset(port_id); 245 printf("\n NIC statistics for port %d cleared\n", port_id); 246 } 247 248 void 249 nic_xstats_display(portid_t port_id) 250 { 251 struct rte_eth_xstat *xstats; 252 int cnt_xstats, idx_xstat; 253 struct rte_eth_xstat_name *xstats_names; 254 255 printf("###### NIC extended statistics for port %-2d\n", port_id); 256 if (!rte_eth_dev_is_valid_port(port_id)) { 257 printf("Error: Invalid port number %i\n", port_id); 258 return; 259 } 260 261 /* Get count */ 262 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 263 if (cnt_xstats < 0) { 264 printf("Error: Cannot get count of xstats\n"); 265 return; 266 } 267 268 /* Get id-name lookup table */ 269 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 270 if (xstats_names == NULL) { 271 printf("Cannot allocate memory for xstats lookup\n"); 272 return; 273 } 274 if (cnt_xstats != rte_eth_xstats_get_names( 275 port_id, xstats_names, cnt_xstats)) { 276 printf("Error: Cannot get xstats lookup\n"); 277 free(xstats_names); 278 return; 279 } 280 281 /* Get stats themselves */ 282 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 283 if (xstats == NULL) { 284 printf("Cannot allocate memory for xstats\n"); 285 free(xstats_names); 286 return; 287 } 288 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 289 printf("Error: Unable to get xstats\n"); 290 free(xstats_names); 291 free(xstats); 292 return; 293 } 294 295 /* Display xstats */ 296 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 297 if (xstats_hide_zero && !xstats[idx_xstat].value) 298 continue; 299 printf("%s: %"PRIu64"\n", 300 xstats_names[idx_xstat].name, 301 xstats[idx_xstat].value); 302 } 303 free(xstats_names); 304 free(xstats); 305 } 306 307 void 308 nic_xstats_clear(portid_t port_id) 309 { 310 rte_eth_xstats_reset(port_id); 311 } 312 313 void 314 nic_stats_mapping_display(portid_t port_id) 315 { 316 struct rte_port *port = &ports[port_id]; 317 uint16_t i; 318 portid_t pid; 319 320 static const char *nic_stats_mapping_border = "########################"; 321 322 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 323 printf("Valid port range is [0"); 324 RTE_ETH_FOREACH_DEV(pid) 325 printf(", %d", pid); 326 printf("]\n"); 327 return; 328 } 329 330 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 331 printf("Port id %d - either does not support queue statistic mapping or" 332 " no queue statistic mapping set\n", port_id); 333 return; 334 } 335 336 printf("\n %s NIC statistics mapping for port %-2d %s\n", 337 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 338 339 if (port->rx_queue_stats_mapping_enabled) { 340 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 341 if (rx_queue_stats_mappings[i].port_id == port_id) { 342 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 343 rx_queue_stats_mappings[i].queue_id, 344 rx_queue_stats_mappings[i].stats_counter_id); 345 } 346 } 347 printf("\n"); 348 } 349 350 351 if (port->tx_queue_stats_mapping_enabled) { 352 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 353 if (tx_queue_stats_mappings[i].port_id == port_id) { 354 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 355 tx_queue_stats_mappings[i].queue_id, 356 tx_queue_stats_mappings[i].stats_counter_id); 357 } 358 } 359 } 360 361 printf(" %s####################################%s\n", 362 nic_stats_mapping_border, nic_stats_mapping_border); 363 } 364 365 void 366 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 367 { 368 struct rte_eth_rxq_info qinfo; 369 int32_t rc; 370 static const char *info_border = "*********************"; 371 372 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 373 if (rc != 0) { 374 printf("Failed to retrieve information for port: %u, " 375 "RX queue: %hu\nerror desc: %s(%d)\n", 376 port_id, queue_id, strerror(-rc), rc); 377 return; 378 } 379 380 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 381 info_border, port_id, queue_id, info_border); 382 383 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 384 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 385 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 386 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 387 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 388 printf("\nRX drop packets: %s", 389 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 390 printf("\nRX deferred start: %s", 391 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 392 printf("\nRX scattered packets: %s", 393 (qinfo.scattered_rx != 0) ? "on" : "off"); 394 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 395 printf("\n"); 396 } 397 398 void 399 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 400 { 401 struct rte_eth_txq_info qinfo; 402 int32_t rc; 403 static const char *info_border = "*********************"; 404 405 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 406 if (rc != 0) { 407 printf("Failed to retrieve information for port: %u, " 408 "TX queue: %hu\nerror desc: %s(%d)\n", 409 port_id, queue_id, strerror(-rc), rc); 410 return; 411 } 412 413 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 414 info_border, port_id, queue_id, info_border); 415 416 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 417 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 418 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 419 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 420 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 421 printf("\nTX deferred start: %s", 422 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 423 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 424 printf("\n"); 425 } 426 427 void 428 port_infos_display(portid_t port_id) 429 { 430 struct rte_port *port; 431 struct ether_addr mac_addr; 432 struct rte_eth_link link; 433 struct rte_eth_dev_info dev_info; 434 int vlan_offload; 435 struct rte_mempool * mp; 436 static const char *info_border = "*********************"; 437 portid_t pid; 438 uint16_t mtu; 439 440 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 441 printf("Valid port range is [0"); 442 RTE_ETH_FOREACH_DEV(pid) 443 printf(", %d", pid); 444 printf("]\n"); 445 return; 446 } 447 port = &ports[port_id]; 448 rte_eth_link_get_nowait(port_id, &link); 449 memset(&dev_info, 0, sizeof(dev_info)); 450 rte_eth_dev_info_get(port_id, &dev_info); 451 printf("\n%s Infos for port %-2d %s\n", 452 info_border, port_id, info_border); 453 rte_eth_macaddr_get(port_id, &mac_addr); 454 print_ethaddr("MAC address: ", &mac_addr); 455 printf("\nDriver name: %s", dev_info.driver_name); 456 printf("\nConnect to socket: %u", port->socket_id); 457 458 if (port_numa[port_id] != NUMA_NO_CONFIG) { 459 mp = mbuf_pool_find(port_numa[port_id]); 460 if (mp) 461 printf("\nmemory allocation on the socket: %d", 462 port_numa[port_id]); 463 } else 464 printf("\nmemory allocation on the socket: %u",port->socket_id); 465 466 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 467 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 468 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 469 ("full-duplex") : ("half-duplex")); 470 471 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 472 printf("MTU: %u\n", mtu); 473 474 printf("Promiscuous mode: %s\n", 475 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 476 printf("Allmulticast mode: %s\n", 477 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 478 printf("Maximum number of MAC addresses: %u\n", 479 (unsigned int)(port->dev_info.max_mac_addrs)); 480 printf("Maximum number of MAC addresses of hash filtering: %u\n", 481 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 482 483 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 484 if (vlan_offload >= 0){ 485 printf("VLAN offload: \n"); 486 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 487 printf(" strip on \n"); 488 else 489 printf(" strip off \n"); 490 491 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 492 printf(" filter on \n"); 493 else 494 printf(" filter off \n"); 495 496 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 497 printf(" qinq(extend) on \n"); 498 else 499 printf(" qinq(extend) off \n"); 500 } 501 502 if (dev_info.hash_key_size > 0) 503 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 504 if (dev_info.reta_size > 0) 505 printf("Redirection table size: %u\n", dev_info.reta_size); 506 if (!dev_info.flow_type_rss_offloads) 507 printf("No flow type is supported.\n"); 508 else { 509 uint16_t i; 510 char *p; 511 512 printf("Supported flow types:\n"); 513 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 514 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 515 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 516 continue; 517 p = flowtype_to_str(i); 518 if (p) 519 printf(" %s\n", p); 520 else 521 printf(" user defined %d\n", i); 522 } 523 } 524 525 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 526 printf("Maximum configurable length of RX packet: %u\n", 527 dev_info.max_rx_pktlen); 528 if (dev_info.max_vfs) 529 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 530 if (dev_info.max_vmdq_pools) 531 printf("Maximum number of VMDq pools: %u\n", 532 dev_info.max_vmdq_pools); 533 534 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 535 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 536 printf("Max possible number of RXDs per queue: %hu\n", 537 dev_info.rx_desc_lim.nb_max); 538 printf("Min possible number of RXDs per queue: %hu\n", 539 dev_info.rx_desc_lim.nb_min); 540 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 541 542 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 543 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 544 printf("Max possible number of TXDs per queue: %hu\n", 545 dev_info.tx_desc_lim.nb_max); 546 printf("Min possible number of TXDs per queue: %hu\n", 547 dev_info.tx_desc_lim.nb_min); 548 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 549 } 550 551 void 552 port_offload_cap_display(portid_t port_id) 553 { 554 struct rte_eth_dev_info dev_info; 555 static const char *info_border = "************"; 556 557 if (port_id_is_invalid(port_id, ENABLED_WARN)) 558 return; 559 560 rte_eth_dev_info_get(port_id, &dev_info); 561 562 printf("\n%s Port %d supported offload features: %s\n", 563 info_border, port_id, info_border); 564 565 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 566 printf("VLAN stripped: "); 567 if (ports[port_id].dev_conf.rxmode.offloads & 568 DEV_RX_OFFLOAD_VLAN_STRIP) 569 printf("on\n"); 570 else 571 printf("off\n"); 572 } 573 574 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 575 printf("Double VLANs stripped: "); 576 if (ports[port_id].dev_conf.rxmode.offloads & 577 DEV_RX_OFFLOAD_VLAN_EXTEND) 578 printf("on\n"); 579 else 580 printf("off\n"); 581 } 582 583 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 584 printf("RX IPv4 checksum: "); 585 if (ports[port_id].dev_conf.rxmode.offloads & 586 DEV_RX_OFFLOAD_IPV4_CKSUM) 587 printf("on\n"); 588 else 589 printf("off\n"); 590 } 591 592 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 593 printf("RX UDP checksum: "); 594 if (ports[port_id].dev_conf.rxmode.offloads & 595 DEV_RX_OFFLOAD_UDP_CKSUM) 596 printf("on\n"); 597 else 598 printf("off\n"); 599 } 600 601 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 602 printf("RX TCP checksum: "); 603 if (ports[port_id].dev_conf.rxmode.offloads & 604 DEV_RX_OFFLOAD_TCP_CKSUM) 605 printf("on\n"); 606 else 607 printf("off\n"); 608 } 609 610 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 611 printf("RX Outer IPv4 checksum: "); 612 if (ports[port_id].dev_conf.rxmode.offloads & 613 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 614 printf("on\n"); 615 else 616 printf("off\n"); 617 } 618 619 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 620 printf("Large receive offload: "); 621 if (ports[port_id].dev_conf.rxmode.offloads & 622 DEV_RX_OFFLOAD_TCP_LRO) 623 printf("on\n"); 624 else 625 printf("off\n"); 626 } 627 628 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 629 printf("VLAN insert: "); 630 if (ports[port_id].dev_conf.txmode.offloads & 631 DEV_TX_OFFLOAD_VLAN_INSERT) 632 printf("on\n"); 633 else 634 printf("off\n"); 635 } 636 637 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 638 printf("HW timestamp: "); 639 if (ports[port_id].dev_conf.rxmode.offloads & 640 DEV_RX_OFFLOAD_TIMESTAMP) 641 printf("on\n"); 642 else 643 printf("off\n"); 644 } 645 646 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 647 printf("Double VLANs insert: "); 648 if (ports[port_id].dev_conf.txmode.offloads & 649 DEV_TX_OFFLOAD_QINQ_INSERT) 650 printf("on\n"); 651 else 652 printf("off\n"); 653 } 654 655 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 656 printf("TX IPv4 checksum: "); 657 if (ports[port_id].dev_conf.txmode.offloads & 658 DEV_TX_OFFLOAD_IPV4_CKSUM) 659 printf("on\n"); 660 else 661 printf("off\n"); 662 } 663 664 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 665 printf("TX UDP checksum: "); 666 if (ports[port_id].dev_conf.txmode.offloads & 667 DEV_TX_OFFLOAD_UDP_CKSUM) 668 printf("on\n"); 669 else 670 printf("off\n"); 671 } 672 673 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 674 printf("TX TCP checksum: "); 675 if (ports[port_id].dev_conf.txmode.offloads & 676 DEV_TX_OFFLOAD_TCP_CKSUM) 677 printf("on\n"); 678 else 679 printf("off\n"); 680 } 681 682 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 683 printf("TX SCTP checksum: "); 684 if (ports[port_id].dev_conf.txmode.offloads & 685 DEV_TX_OFFLOAD_SCTP_CKSUM) 686 printf("on\n"); 687 else 688 printf("off\n"); 689 } 690 691 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 692 printf("TX Outer IPv4 checksum: "); 693 if (ports[port_id].dev_conf.txmode.offloads & 694 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 695 printf("on\n"); 696 else 697 printf("off\n"); 698 } 699 700 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 701 printf("TX TCP segmentation: "); 702 if (ports[port_id].dev_conf.txmode.offloads & 703 DEV_TX_OFFLOAD_TCP_TSO) 704 printf("on\n"); 705 else 706 printf("off\n"); 707 } 708 709 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 710 printf("TX UDP segmentation: "); 711 if (ports[port_id].dev_conf.txmode.offloads & 712 DEV_TX_OFFLOAD_UDP_TSO) 713 printf("on\n"); 714 else 715 printf("off\n"); 716 } 717 718 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 719 printf("TSO for VXLAN tunnel packet: "); 720 if (ports[port_id].dev_conf.txmode.offloads & 721 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 722 printf("on\n"); 723 else 724 printf("off\n"); 725 } 726 727 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 728 printf("TSO for GRE tunnel packet: "); 729 if (ports[port_id].dev_conf.txmode.offloads & 730 DEV_TX_OFFLOAD_GRE_TNL_TSO) 731 printf("on\n"); 732 else 733 printf("off\n"); 734 } 735 736 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 737 printf("TSO for IPIP tunnel packet: "); 738 if (ports[port_id].dev_conf.txmode.offloads & 739 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 740 printf("on\n"); 741 else 742 printf("off\n"); 743 } 744 745 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 746 printf("TSO for GENEVE tunnel packet: "); 747 if (ports[port_id].dev_conf.txmode.offloads & 748 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 749 printf("on\n"); 750 else 751 printf("off\n"); 752 } 753 754 } 755 756 int 757 port_id_is_invalid(portid_t port_id, enum print_warning warning) 758 { 759 uint16_t pid; 760 761 if (port_id == (portid_t)RTE_PORT_ALL) 762 return 0; 763 764 RTE_ETH_FOREACH_DEV(pid) 765 if (port_id == pid) 766 return 0; 767 768 if (warning == ENABLED_WARN) 769 printf("Invalid port %d\n", port_id); 770 771 return 1; 772 } 773 774 static int 775 vlan_id_is_invalid(uint16_t vlan_id) 776 { 777 if (vlan_id < 4096) 778 return 0; 779 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 780 return 1; 781 } 782 783 static int 784 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 785 { 786 uint64_t pci_len; 787 788 if (reg_off & 0x3) { 789 printf("Port register offset 0x%X not aligned on a 4-byte " 790 "boundary\n", 791 (unsigned)reg_off); 792 return 1; 793 } 794 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 795 if (reg_off >= pci_len) { 796 printf("Port %d: register offset %u (0x%X) out of port PCI " 797 "resource (length=%"PRIu64")\n", 798 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 799 return 1; 800 } 801 return 0; 802 } 803 804 static int 805 reg_bit_pos_is_invalid(uint8_t bit_pos) 806 { 807 if (bit_pos <= 31) 808 return 0; 809 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 810 return 1; 811 } 812 813 #define display_port_and_reg_off(port_id, reg_off) \ 814 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 815 816 static inline void 817 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 818 { 819 display_port_and_reg_off(port_id, (unsigned)reg_off); 820 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 821 } 822 823 void 824 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 825 { 826 uint32_t reg_v; 827 828 829 if (port_id_is_invalid(port_id, ENABLED_WARN)) 830 return; 831 if (port_reg_off_is_invalid(port_id, reg_off)) 832 return; 833 if (reg_bit_pos_is_invalid(bit_x)) 834 return; 835 reg_v = port_id_pci_reg_read(port_id, reg_off); 836 display_port_and_reg_off(port_id, (unsigned)reg_off); 837 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 838 } 839 840 void 841 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 842 uint8_t bit1_pos, uint8_t bit2_pos) 843 { 844 uint32_t reg_v; 845 uint8_t l_bit; 846 uint8_t h_bit; 847 848 if (port_id_is_invalid(port_id, ENABLED_WARN)) 849 return; 850 if (port_reg_off_is_invalid(port_id, reg_off)) 851 return; 852 if (reg_bit_pos_is_invalid(bit1_pos)) 853 return; 854 if (reg_bit_pos_is_invalid(bit2_pos)) 855 return; 856 if (bit1_pos > bit2_pos) 857 l_bit = bit2_pos, h_bit = bit1_pos; 858 else 859 l_bit = bit1_pos, h_bit = bit2_pos; 860 861 reg_v = port_id_pci_reg_read(port_id, reg_off); 862 reg_v >>= l_bit; 863 if (h_bit < 31) 864 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 865 display_port_and_reg_off(port_id, (unsigned)reg_off); 866 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 867 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 868 } 869 870 void 871 port_reg_display(portid_t port_id, uint32_t reg_off) 872 { 873 uint32_t reg_v; 874 875 if (port_id_is_invalid(port_id, ENABLED_WARN)) 876 return; 877 if (port_reg_off_is_invalid(port_id, reg_off)) 878 return; 879 reg_v = port_id_pci_reg_read(port_id, reg_off); 880 display_port_reg_value(port_id, reg_off, reg_v); 881 } 882 883 void 884 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 885 uint8_t bit_v) 886 { 887 uint32_t reg_v; 888 889 if (port_id_is_invalid(port_id, ENABLED_WARN)) 890 return; 891 if (port_reg_off_is_invalid(port_id, reg_off)) 892 return; 893 if (reg_bit_pos_is_invalid(bit_pos)) 894 return; 895 if (bit_v > 1) { 896 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 897 return; 898 } 899 reg_v = port_id_pci_reg_read(port_id, reg_off); 900 if (bit_v == 0) 901 reg_v &= ~(1 << bit_pos); 902 else 903 reg_v |= (1 << bit_pos); 904 port_id_pci_reg_write(port_id, reg_off, reg_v); 905 display_port_reg_value(port_id, reg_off, reg_v); 906 } 907 908 void 909 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 910 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 911 { 912 uint32_t max_v; 913 uint32_t reg_v; 914 uint8_t l_bit; 915 uint8_t h_bit; 916 917 if (port_id_is_invalid(port_id, ENABLED_WARN)) 918 return; 919 if (port_reg_off_is_invalid(port_id, reg_off)) 920 return; 921 if (reg_bit_pos_is_invalid(bit1_pos)) 922 return; 923 if (reg_bit_pos_is_invalid(bit2_pos)) 924 return; 925 if (bit1_pos > bit2_pos) 926 l_bit = bit2_pos, h_bit = bit1_pos; 927 else 928 l_bit = bit1_pos, h_bit = bit2_pos; 929 930 if ((h_bit - l_bit) < 31) 931 max_v = (1 << (h_bit - l_bit + 1)) - 1; 932 else 933 max_v = 0xFFFFFFFF; 934 935 if (value > max_v) { 936 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 937 (unsigned)value, (unsigned)value, 938 (unsigned)max_v, (unsigned)max_v); 939 return; 940 } 941 reg_v = port_id_pci_reg_read(port_id, reg_off); 942 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 943 reg_v |= (value << l_bit); /* Set changed bits */ 944 port_id_pci_reg_write(port_id, reg_off, reg_v); 945 display_port_reg_value(port_id, reg_off, reg_v); 946 } 947 948 void 949 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 950 { 951 if (port_id_is_invalid(port_id, ENABLED_WARN)) 952 return; 953 if (port_reg_off_is_invalid(port_id, reg_off)) 954 return; 955 port_id_pci_reg_write(port_id, reg_off, reg_v); 956 display_port_reg_value(port_id, reg_off, reg_v); 957 } 958 959 void 960 port_mtu_set(portid_t port_id, uint16_t mtu) 961 { 962 int diag; 963 964 if (port_id_is_invalid(port_id, ENABLED_WARN)) 965 return; 966 diag = rte_eth_dev_set_mtu(port_id, mtu); 967 if (diag == 0) 968 return; 969 printf("Set MTU failed. diag=%d\n", diag); 970 } 971 972 /* Generic flow management functions. */ 973 974 /** Generate flow_item[] entry. */ 975 #define MK_FLOW_ITEM(t, s) \ 976 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 977 .name = # t, \ 978 .size = s, \ 979 } 980 981 /** Information about known flow pattern items. */ 982 static const struct { 983 const char *name; 984 size_t size; 985 } flow_item[] = { 986 MK_FLOW_ITEM(END, 0), 987 MK_FLOW_ITEM(VOID, 0), 988 MK_FLOW_ITEM(INVERT, 0), 989 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 990 MK_FLOW_ITEM(PF, 0), 991 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 992 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)), 993 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */ 994 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 995 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 996 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 997 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 998 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 999 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 1000 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 1001 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 1002 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 1003 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), 1004 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), 1005 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 1006 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 1007 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)), 1008 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), 1009 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), 1010 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), 1011 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), 1012 }; 1013 1014 /** Compute storage space needed by item specification. */ 1015 static void 1016 flow_item_spec_size(const struct rte_flow_item *item, 1017 size_t *size, size_t *pad) 1018 { 1019 if (!item->spec) { 1020 *size = 0; 1021 goto empty; 1022 } 1023 switch (item->type) { 1024 union { 1025 const struct rte_flow_item_raw *raw; 1026 } spec; 1027 1028 case RTE_FLOW_ITEM_TYPE_RAW: 1029 spec.raw = item->spec; 1030 *size = offsetof(struct rte_flow_item_raw, pattern) + 1031 spec.raw->length * sizeof(*spec.raw->pattern); 1032 break; 1033 default: 1034 *size = flow_item[item->type].size; 1035 break; 1036 } 1037 empty: 1038 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1039 } 1040 1041 /** Generate flow_action[] entry. */ 1042 #define MK_FLOW_ACTION(t, s) \ 1043 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 1044 .name = # t, \ 1045 .size = s, \ 1046 } 1047 1048 /** Information about known flow actions. */ 1049 static const struct { 1050 const char *name; 1051 size_t size; 1052 } flow_action[] = { 1053 MK_FLOW_ACTION(END, 0), 1054 MK_FLOW_ACTION(VOID, 0), 1055 MK_FLOW_ACTION(PASSTHRU, 0), 1056 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 1057 MK_FLOW_ACTION(FLAG, 0), 1058 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 1059 MK_FLOW_ACTION(DROP, 0), 1060 MK_FLOW_ACTION(COUNT, 0), 1061 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)), 1062 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ 1063 MK_FLOW_ACTION(PF, 0), 1064 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 1065 }; 1066 1067 /** Compute storage space needed by action configuration. */ 1068 static void 1069 flow_action_conf_size(const struct rte_flow_action *action, 1070 size_t *size, size_t *pad) 1071 { 1072 if (!action->conf) { 1073 *size = 0; 1074 goto empty; 1075 } 1076 switch (action->type) { 1077 union { 1078 const struct rte_flow_action_rss *rss; 1079 } conf; 1080 1081 case RTE_FLOW_ACTION_TYPE_RSS: 1082 conf.rss = action->conf; 1083 *size = offsetof(struct rte_flow_action_rss, queue) + 1084 conf.rss->num * sizeof(*conf.rss->queue); 1085 break; 1086 default: 1087 *size = flow_action[action->type].size; 1088 break; 1089 } 1090 empty: 1091 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1092 } 1093 1094 /** Generate a port_flow entry from attributes/pattern/actions. */ 1095 static struct port_flow * 1096 port_flow_new(const struct rte_flow_attr *attr, 1097 const struct rte_flow_item *pattern, 1098 const struct rte_flow_action *actions) 1099 { 1100 const struct rte_flow_item *item; 1101 const struct rte_flow_action *action; 1102 struct port_flow *pf = NULL; 1103 size_t tmp; 1104 size_t pad; 1105 size_t off1 = 0; 1106 size_t off2 = 0; 1107 int err = ENOTSUP; 1108 1109 store: 1110 item = pattern; 1111 if (pf) 1112 pf->pattern = (void *)&pf->data[off1]; 1113 do { 1114 struct rte_flow_item *dst = NULL; 1115 1116 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 1117 !flow_item[item->type].name) 1118 goto notsup; 1119 if (pf) 1120 dst = memcpy(pf->data + off1, item, sizeof(*item)); 1121 off1 += sizeof(*item); 1122 flow_item_spec_size(item, &tmp, &pad); 1123 if (item->spec) { 1124 if (pf) 1125 dst->spec = memcpy(pf->data + off2, 1126 item->spec, tmp); 1127 off2 += tmp + pad; 1128 } 1129 if (item->last) { 1130 if (pf) 1131 dst->last = memcpy(pf->data + off2, 1132 item->last, tmp); 1133 off2 += tmp + pad; 1134 } 1135 if (item->mask) { 1136 if (pf) 1137 dst->mask = memcpy(pf->data + off2, 1138 item->mask, tmp); 1139 off2 += tmp + pad; 1140 } 1141 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1142 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 1143 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1144 action = actions; 1145 if (pf) 1146 pf->actions = (void *)&pf->data[off1]; 1147 do { 1148 struct rte_flow_action *dst = NULL; 1149 1150 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1151 !flow_action[action->type].name) 1152 goto notsup; 1153 if (pf) 1154 dst = memcpy(pf->data + off1, action, sizeof(*action)); 1155 off1 += sizeof(*action); 1156 flow_action_conf_size(action, &tmp, &pad); 1157 if (action->conf) { 1158 if (pf) 1159 dst->conf = memcpy(pf->data + off2, 1160 action->conf, tmp); 1161 off2 += tmp + pad; 1162 } 1163 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1164 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 1165 if (pf != NULL) 1166 return pf; 1167 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1168 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 1169 pf = calloc(1, tmp + off1 + off2); 1170 if (pf == NULL) 1171 err = errno; 1172 else { 1173 *pf = (const struct port_flow){ 1174 .size = tmp + off1 + off2, 1175 .attr = *attr, 1176 }; 1177 tmp -= offsetof(struct port_flow, data); 1178 off2 = tmp + off1; 1179 off1 = tmp; 1180 goto store; 1181 } 1182 notsup: 1183 rte_errno = err; 1184 return NULL; 1185 } 1186 1187 /** Print a message out of a flow error. */ 1188 static int 1189 port_flow_complain(struct rte_flow_error *error) 1190 { 1191 static const char *const errstrlist[] = { 1192 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1193 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1194 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1195 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1196 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1197 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1198 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1199 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1200 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1201 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1202 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1203 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1204 }; 1205 const char *errstr; 1206 char buf[32]; 1207 int err = rte_errno; 1208 1209 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1210 !errstrlist[error->type]) 1211 errstr = "unknown type"; 1212 else 1213 errstr = errstrlist[error->type]; 1214 printf("Caught error type %d (%s): %s%s\n", 1215 error->type, errstr, 1216 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1217 error->cause), buf) : "", 1218 error->message ? error->message : "(no stated reason)"); 1219 return -err; 1220 } 1221 1222 /** Validate flow rule. */ 1223 int 1224 port_flow_validate(portid_t port_id, 1225 const struct rte_flow_attr *attr, 1226 const struct rte_flow_item *pattern, 1227 const struct rte_flow_action *actions) 1228 { 1229 struct rte_flow_error error; 1230 1231 /* Poisoning to make sure PMDs update it in case of error. */ 1232 memset(&error, 0x11, sizeof(error)); 1233 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1234 return port_flow_complain(&error); 1235 printf("Flow rule validated\n"); 1236 return 0; 1237 } 1238 1239 /** Create flow rule. */ 1240 int 1241 port_flow_create(portid_t port_id, 1242 const struct rte_flow_attr *attr, 1243 const struct rte_flow_item *pattern, 1244 const struct rte_flow_action *actions) 1245 { 1246 struct rte_flow *flow; 1247 struct rte_port *port; 1248 struct port_flow *pf; 1249 uint32_t id; 1250 struct rte_flow_error error; 1251 1252 /* Poisoning to make sure PMDs update it in case of error. */ 1253 memset(&error, 0x22, sizeof(error)); 1254 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1255 if (!flow) 1256 return port_flow_complain(&error); 1257 port = &ports[port_id]; 1258 if (port->flow_list) { 1259 if (port->flow_list->id == UINT32_MAX) { 1260 printf("Highest rule ID is already assigned, delete" 1261 " it first"); 1262 rte_flow_destroy(port_id, flow, NULL); 1263 return -ENOMEM; 1264 } 1265 id = port->flow_list->id + 1; 1266 } else 1267 id = 0; 1268 pf = port_flow_new(attr, pattern, actions); 1269 if (!pf) { 1270 int err = rte_errno; 1271 1272 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1273 rte_flow_destroy(port_id, flow, NULL); 1274 return -err; 1275 } 1276 pf->next = port->flow_list; 1277 pf->id = id; 1278 pf->flow = flow; 1279 port->flow_list = pf; 1280 printf("Flow rule #%u created\n", pf->id); 1281 return 0; 1282 } 1283 1284 /** Destroy a number of flow rules. */ 1285 int 1286 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1287 { 1288 struct rte_port *port; 1289 struct port_flow **tmp; 1290 uint32_t c = 0; 1291 int ret = 0; 1292 1293 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1294 port_id == (portid_t)RTE_PORT_ALL) 1295 return -EINVAL; 1296 port = &ports[port_id]; 1297 tmp = &port->flow_list; 1298 while (*tmp) { 1299 uint32_t i; 1300 1301 for (i = 0; i != n; ++i) { 1302 struct rte_flow_error error; 1303 struct port_flow *pf = *tmp; 1304 1305 if (rule[i] != pf->id) 1306 continue; 1307 /* 1308 * Poisoning to make sure PMDs update it in case 1309 * of error. 1310 */ 1311 memset(&error, 0x33, sizeof(error)); 1312 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1313 ret = port_flow_complain(&error); 1314 continue; 1315 } 1316 printf("Flow rule #%u destroyed\n", pf->id); 1317 *tmp = pf->next; 1318 free(pf); 1319 break; 1320 } 1321 if (i == n) 1322 tmp = &(*tmp)->next; 1323 ++c; 1324 } 1325 return ret; 1326 } 1327 1328 /** Remove all flow rules. */ 1329 int 1330 port_flow_flush(portid_t port_id) 1331 { 1332 struct rte_flow_error error; 1333 struct rte_port *port; 1334 int ret = 0; 1335 1336 /* Poisoning to make sure PMDs update it in case of error. */ 1337 memset(&error, 0x44, sizeof(error)); 1338 if (rte_flow_flush(port_id, &error)) { 1339 ret = port_flow_complain(&error); 1340 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1341 port_id == (portid_t)RTE_PORT_ALL) 1342 return ret; 1343 } 1344 port = &ports[port_id]; 1345 while (port->flow_list) { 1346 struct port_flow *pf = port->flow_list->next; 1347 1348 free(port->flow_list); 1349 port->flow_list = pf; 1350 } 1351 return ret; 1352 } 1353 1354 /** Query a flow rule. */ 1355 int 1356 port_flow_query(portid_t port_id, uint32_t rule, 1357 enum rte_flow_action_type action) 1358 { 1359 struct rte_flow_error error; 1360 struct rte_port *port; 1361 struct port_flow *pf; 1362 const char *name; 1363 union { 1364 struct rte_flow_query_count count; 1365 } query; 1366 1367 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1368 port_id == (portid_t)RTE_PORT_ALL) 1369 return -EINVAL; 1370 port = &ports[port_id]; 1371 for (pf = port->flow_list; pf; pf = pf->next) 1372 if (pf->id == rule) 1373 break; 1374 if (!pf) { 1375 printf("Flow rule #%u not found\n", rule); 1376 return -ENOENT; 1377 } 1378 if ((unsigned int)action >= RTE_DIM(flow_action) || 1379 !flow_action[action].name) 1380 name = "unknown"; 1381 else 1382 name = flow_action[action].name; 1383 switch (action) { 1384 case RTE_FLOW_ACTION_TYPE_COUNT: 1385 break; 1386 default: 1387 printf("Cannot query action type %d (%s)\n", action, name); 1388 return -ENOTSUP; 1389 } 1390 /* Poisoning to make sure PMDs update it in case of error. */ 1391 memset(&error, 0x55, sizeof(error)); 1392 memset(&query, 0, sizeof(query)); 1393 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1394 return port_flow_complain(&error); 1395 switch (action) { 1396 case RTE_FLOW_ACTION_TYPE_COUNT: 1397 printf("%s:\n" 1398 " hits_set: %u\n" 1399 " bytes_set: %u\n" 1400 " hits: %" PRIu64 "\n" 1401 " bytes: %" PRIu64 "\n", 1402 name, 1403 query.count.hits_set, 1404 query.count.bytes_set, 1405 query.count.hits, 1406 query.count.bytes); 1407 break; 1408 default: 1409 printf("Cannot display result for action type %d (%s)\n", 1410 action, name); 1411 break; 1412 } 1413 return 0; 1414 } 1415 1416 /** List flow rules. */ 1417 void 1418 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1419 { 1420 struct rte_port *port; 1421 struct port_flow *pf; 1422 struct port_flow *list = NULL; 1423 uint32_t i; 1424 1425 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1426 port_id == (portid_t)RTE_PORT_ALL) 1427 return; 1428 port = &ports[port_id]; 1429 if (!port->flow_list) 1430 return; 1431 /* Sort flows by group, priority and ID. */ 1432 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1433 struct port_flow **tmp; 1434 1435 if (n) { 1436 /* Filter out unwanted groups. */ 1437 for (i = 0; i != n; ++i) 1438 if (pf->attr.group == group[i]) 1439 break; 1440 if (i == n) 1441 continue; 1442 } 1443 tmp = &list; 1444 while (*tmp && 1445 (pf->attr.group > (*tmp)->attr.group || 1446 (pf->attr.group == (*tmp)->attr.group && 1447 pf->attr.priority > (*tmp)->attr.priority) || 1448 (pf->attr.group == (*tmp)->attr.group && 1449 pf->attr.priority == (*tmp)->attr.priority && 1450 pf->id > (*tmp)->id))) 1451 tmp = &(*tmp)->tmp; 1452 pf->tmp = *tmp; 1453 *tmp = pf; 1454 } 1455 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1456 for (pf = list; pf != NULL; pf = pf->tmp) { 1457 const struct rte_flow_item *item = pf->pattern; 1458 const struct rte_flow_action *action = pf->actions; 1459 1460 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t", 1461 pf->id, 1462 pf->attr.group, 1463 pf->attr.priority, 1464 pf->attr.ingress ? 'i' : '-', 1465 pf->attr.egress ? 'e' : '-'); 1466 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1467 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1468 printf("%s ", flow_item[item->type].name); 1469 ++item; 1470 } 1471 printf("=>"); 1472 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1473 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1474 printf(" %s", flow_action[action->type].name); 1475 ++action; 1476 } 1477 printf("\n"); 1478 } 1479 } 1480 1481 /** Restrict ingress traffic to the defined flow rules. */ 1482 int 1483 port_flow_isolate(portid_t port_id, int set) 1484 { 1485 struct rte_flow_error error; 1486 1487 /* Poisoning to make sure PMDs update it in case of error. */ 1488 memset(&error, 0x66, sizeof(error)); 1489 if (rte_flow_isolate(port_id, set, &error)) 1490 return port_flow_complain(&error); 1491 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1492 port_id, 1493 set ? "now restricted" : "not restricted anymore"); 1494 return 0; 1495 } 1496 1497 /* 1498 * RX/TX ring descriptors display functions. 1499 */ 1500 int 1501 rx_queue_id_is_invalid(queueid_t rxq_id) 1502 { 1503 if (rxq_id < nb_rxq) 1504 return 0; 1505 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1506 return 1; 1507 } 1508 1509 int 1510 tx_queue_id_is_invalid(queueid_t txq_id) 1511 { 1512 if (txq_id < nb_txq) 1513 return 0; 1514 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1515 return 1; 1516 } 1517 1518 static int 1519 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1520 { 1521 if (rxdesc_id < nb_rxd) 1522 return 0; 1523 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1524 rxdesc_id, nb_rxd); 1525 return 1; 1526 } 1527 1528 static int 1529 tx_desc_id_is_invalid(uint16_t txdesc_id) 1530 { 1531 if (txdesc_id < nb_txd) 1532 return 0; 1533 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1534 txdesc_id, nb_txd); 1535 return 1; 1536 } 1537 1538 static const struct rte_memzone * 1539 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1540 { 1541 char mz_name[RTE_MEMZONE_NAMESIZE]; 1542 const struct rte_memzone *mz; 1543 1544 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1545 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1546 mz = rte_memzone_lookup(mz_name); 1547 if (mz == NULL) 1548 printf("%s ring memory zoneof (port %d, queue %d) not" 1549 "found (zone name = %s\n", 1550 ring_name, port_id, q_id, mz_name); 1551 return mz; 1552 } 1553 1554 union igb_ring_dword { 1555 uint64_t dword; 1556 struct { 1557 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1558 uint32_t lo; 1559 uint32_t hi; 1560 #else 1561 uint32_t hi; 1562 uint32_t lo; 1563 #endif 1564 } words; 1565 }; 1566 1567 struct igb_ring_desc_32_bytes { 1568 union igb_ring_dword lo_dword; 1569 union igb_ring_dword hi_dword; 1570 union igb_ring_dword resv1; 1571 union igb_ring_dword resv2; 1572 }; 1573 1574 struct igb_ring_desc_16_bytes { 1575 union igb_ring_dword lo_dword; 1576 union igb_ring_dword hi_dword; 1577 }; 1578 1579 static void 1580 ring_rxd_display_dword(union igb_ring_dword dword) 1581 { 1582 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1583 (unsigned)dword.words.hi); 1584 } 1585 1586 static void 1587 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1588 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1589 portid_t port_id, 1590 #else 1591 __rte_unused portid_t port_id, 1592 #endif 1593 uint16_t desc_id) 1594 { 1595 struct igb_ring_desc_16_bytes *ring = 1596 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1597 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1598 struct rte_eth_dev_info dev_info; 1599 1600 memset(&dev_info, 0, sizeof(dev_info)); 1601 rte_eth_dev_info_get(port_id, &dev_info); 1602 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1603 /* 32 bytes RX descriptor, i40e only */ 1604 struct igb_ring_desc_32_bytes *ring = 1605 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1606 ring[desc_id].lo_dword.dword = 1607 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1608 ring_rxd_display_dword(ring[desc_id].lo_dword); 1609 ring[desc_id].hi_dword.dword = 1610 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1611 ring_rxd_display_dword(ring[desc_id].hi_dword); 1612 ring[desc_id].resv1.dword = 1613 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1614 ring_rxd_display_dword(ring[desc_id].resv1); 1615 ring[desc_id].resv2.dword = 1616 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1617 ring_rxd_display_dword(ring[desc_id].resv2); 1618 1619 return; 1620 } 1621 #endif 1622 /* 16 bytes RX descriptor */ 1623 ring[desc_id].lo_dword.dword = 1624 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1625 ring_rxd_display_dword(ring[desc_id].lo_dword); 1626 ring[desc_id].hi_dword.dword = 1627 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1628 ring_rxd_display_dword(ring[desc_id].hi_dword); 1629 } 1630 1631 static void 1632 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1633 { 1634 struct igb_ring_desc_16_bytes *ring; 1635 struct igb_ring_desc_16_bytes txd; 1636 1637 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1638 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1639 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1640 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1641 (unsigned)txd.lo_dword.words.lo, 1642 (unsigned)txd.lo_dword.words.hi, 1643 (unsigned)txd.hi_dword.words.lo, 1644 (unsigned)txd.hi_dword.words.hi); 1645 } 1646 1647 void 1648 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1649 { 1650 const struct rte_memzone *rx_mz; 1651 1652 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1653 return; 1654 if (rx_queue_id_is_invalid(rxq_id)) 1655 return; 1656 if (rx_desc_id_is_invalid(rxd_id)) 1657 return; 1658 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1659 if (rx_mz == NULL) 1660 return; 1661 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1662 } 1663 1664 void 1665 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1666 { 1667 const struct rte_memzone *tx_mz; 1668 1669 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1670 return; 1671 if (tx_queue_id_is_invalid(txq_id)) 1672 return; 1673 if (tx_desc_id_is_invalid(txd_id)) 1674 return; 1675 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1676 if (tx_mz == NULL) 1677 return; 1678 ring_tx_descriptor_display(tx_mz, txd_id); 1679 } 1680 1681 void 1682 fwd_lcores_config_display(void) 1683 { 1684 lcoreid_t lc_id; 1685 1686 printf("List of forwarding lcores:"); 1687 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1688 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1689 printf("\n"); 1690 } 1691 void 1692 rxtx_config_display(void) 1693 { 1694 portid_t pid; 1695 1696 printf(" %s packet forwarding%s packets/burst=%d\n", 1697 cur_fwd_eng->fwd_mode_name, 1698 retry_enabled == 0 ? "" : " with retry", 1699 nb_pkt_per_burst); 1700 1701 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1702 printf(" packet len=%u - nb packet segments=%d\n", 1703 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1704 1705 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1706 nb_fwd_lcores, nb_fwd_ports); 1707 1708 RTE_ETH_FOREACH_DEV(pid) { 1709 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf; 1710 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf; 1711 1712 printf(" port %d:\n", (unsigned int)pid); 1713 printf(" CRC stripping %s\n", 1714 (ports[pid].dev_conf.rxmode.offloads & 1715 DEV_RX_OFFLOAD_CRC_STRIP) ? 1716 "enabled" : "disabled"); 1717 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 1718 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 1719 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1720 " wthresh=%d\n", 1721 rx_conf->rx_thresh.pthresh, 1722 rx_conf->rx_thresh.hthresh, 1723 rx_conf->rx_thresh.wthresh); 1724 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 1725 nb_txq, nb_txd, tx_conf->tx_free_thresh); 1726 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1727 " wthresh=%d\n", 1728 tx_conf->tx_thresh.pthresh, 1729 tx_conf->tx_thresh.hthresh, 1730 tx_conf->tx_thresh.wthresh); 1731 printf(" TX RS bit threshold=%d - TXQ offloads=0x%"PRIx64"\n", 1732 tx_conf->tx_rs_thresh, tx_conf->offloads); 1733 } 1734 } 1735 1736 void 1737 port_rss_reta_info(portid_t port_id, 1738 struct rte_eth_rss_reta_entry64 *reta_conf, 1739 uint16_t nb_entries) 1740 { 1741 uint16_t i, idx, shift; 1742 int ret; 1743 1744 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1745 return; 1746 1747 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1748 if (ret != 0) { 1749 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1750 return; 1751 } 1752 1753 for (i = 0; i < nb_entries; i++) { 1754 idx = i / RTE_RETA_GROUP_SIZE; 1755 shift = i % RTE_RETA_GROUP_SIZE; 1756 if (!(reta_conf[idx].mask & (1ULL << shift))) 1757 continue; 1758 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1759 i, reta_conf[idx].reta[shift]); 1760 } 1761 } 1762 1763 /* 1764 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1765 * key of the port. 1766 */ 1767 void 1768 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1769 { 1770 struct rte_eth_rss_conf rss_conf; 1771 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1772 uint64_t rss_hf; 1773 uint8_t i; 1774 int diag; 1775 struct rte_eth_dev_info dev_info; 1776 uint8_t hash_key_size; 1777 1778 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1779 return; 1780 1781 memset(&dev_info, 0, sizeof(dev_info)); 1782 rte_eth_dev_info_get(port_id, &dev_info); 1783 if (dev_info.hash_key_size > 0 && 1784 dev_info.hash_key_size <= sizeof(rss_key)) 1785 hash_key_size = dev_info.hash_key_size; 1786 else { 1787 printf("dev_info did not provide a valid hash key size\n"); 1788 return; 1789 } 1790 1791 rss_conf.rss_hf = 0; 1792 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1793 if (!strcmp(rss_info, rss_type_table[i].str)) 1794 rss_conf.rss_hf = rss_type_table[i].rss_type; 1795 } 1796 1797 /* Get RSS hash key if asked to display it */ 1798 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1799 rss_conf.rss_key_len = hash_key_size; 1800 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1801 if (diag != 0) { 1802 switch (diag) { 1803 case -ENODEV: 1804 printf("port index %d invalid\n", port_id); 1805 break; 1806 case -ENOTSUP: 1807 printf("operation not supported by device\n"); 1808 break; 1809 default: 1810 printf("operation failed - diag=%d\n", diag); 1811 break; 1812 } 1813 return; 1814 } 1815 rss_hf = rss_conf.rss_hf; 1816 if (rss_hf == 0) { 1817 printf("RSS disabled\n"); 1818 return; 1819 } 1820 printf("RSS functions:\n "); 1821 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1822 if (rss_hf & rss_type_table[i].rss_type) 1823 printf("%s ", rss_type_table[i].str); 1824 } 1825 printf("\n"); 1826 if (!show_rss_key) 1827 return; 1828 printf("RSS key:\n"); 1829 for (i = 0; i < hash_key_size; i++) 1830 printf("%02X", rss_key[i]); 1831 printf("\n"); 1832 } 1833 1834 void 1835 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1836 uint hash_key_len) 1837 { 1838 struct rte_eth_rss_conf rss_conf; 1839 int diag; 1840 unsigned int i; 1841 1842 rss_conf.rss_key = NULL; 1843 rss_conf.rss_key_len = hash_key_len; 1844 rss_conf.rss_hf = 0; 1845 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1846 if (!strcmp(rss_type_table[i].str, rss_type)) 1847 rss_conf.rss_hf = rss_type_table[i].rss_type; 1848 } 1849 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1850 if (diag == 0) { 1851 rss_conf.rss_key = hash_key; 1852 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1853 } 1854 if (diag == 0) 1855 return; 1856 1857 switch (diag) { 1858 case -ENODEV: 1859 printf("port index %d invalid\n", port_id); 1860 break; 1861 case -ENOTSUP: 1862 printf("operation not supported by device\n"); 1863 break; 1864 default: 1865 printf("operation failed - diag=%d\n", diag); 1866 break; 1867 } 1868 } 1869 1870 /* 1871 * Setup forwarding configuration for each logical core. 1872 */ 1873 static void 1874 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1875 { 1876 streamid_t nb_fs_per_lcore; 1877 streamid_t nb_fs; 1878 streamid_t sm_id; 1879 lcoreid_t nb_extra; 1880 lcoreid_t nb_fc; 1881 lcoreid_t nb_lc; 1882 lcoreid_t lc_id; 1883 1884 nb_fs = cfg->nb_fwd_streams; 1885 nb_fc = cfg->nb_fwd_lcores; 1886 if (nb_fs <= nb_fc) { 1887 nb_fs_per_lcore = 1; 1888 nb_extra = 0; 1889 } else { 1890 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1891 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1892 } 1893 1894 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1895 sm_id = 0; 1896 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1897 fwd_lcores[lc_id]->stream_idx = sm_id; 1898 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1899 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1900 } 1901 1902 /* 1903 * Assign extra remaining streams, if any. 1904 */ 1905 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1906 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1907 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1908 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1909 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1910 } 1911 } 1912 1913 static void 1914 simple_fwd_config_setup(void) 1915 { 1916 portid_t i; 1917 portid_t j; 1918 portid_t inc = 2; 1919 1920 if (port_topology == PORT_TOPOLOGY_CHAINED || 1921 port_topology == PORT_TOPOLOGY_LOOP) { 1922 inc = 1; 1923 } else if (nb_fwd_ports % 2) { 1924 printf("\nWarning! Cannot handle an odd number of ports " 1925 "with the current port topology. Configuration " 1926 "must be changed to have an even number of ports, " 1927 "or relaunch application with " 1928 "--port-topology=chained\n\n"); 1929 } 1930 1931 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1932 cur_fwd_config.nb_fwd_streams = 1933 (streamid_t) cur_fwd_config.nb_fwd_ports; 1934 1935 /* reinitialize forwarding streams */ 1936 init_fwd_streams(); 1937 1938 /* 1939 * In the simple forwarding test, the number of forwarding cores 1940 * must be lower or equal to the number of forwarding ports. 1941 */ 1942 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1943 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1944 cur_fwd_config.nb_fwd_lcores = 1945 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1946 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1947 1948 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1949 if (port_topology != PORT_TOPOLOGY_LOOP) 1950 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1951 else 1952 j = i; 1953 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1954 fwd_streams[i]->rx_queue = 0; 1955 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1956 fwd_streams[i]->tx_queue = 0; 1957 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 1958 fwd_streams[i]->retry_enabled = retry_enabled; 1959 1960 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1961 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1962 fwd_streams[j]->rx_queue = 0; 1963 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1964 fwd_streams[j]->tx_queue = 0; 1965 fwd_streams[j]->peer_addr = fwd_streams[j]->tx_port; 1966 fwd_streams[j]->retry_enabled = retry_enabled; 1967 } 1968 } 1969 } 1970 1971 /** 1972 * For the RSS forwarding test all streams distributed over lcores. Each stream 1973 * being composed of a RX queue to poll on a RX port for input messages, 1974 * associated with a TX queue of a TX port where to send forwarded packets. 1975 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1976 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1977 * following rules: 1978 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1979 * - TxQl = RxQj 1980 */ 1981 static void 1982 rss_fwd_config_setup(void) 1983 { 1984 portid_t rxp; 1985 portid_t txp; 1986 queueid_t rxq; 1987 queueid_t nb_q; 1988 streamid_t sm_id; 1989 1990 nb_q = nb_rxq; 1991 if (nb_q > nb_txq) 1992 nb_q = nb_txq; 1993 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1994 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1995 cur_fwd_config.nb_fwd_streams = 1996 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1997 1998 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1999 cur_fwd_config.nb_fwd_lcores = 2000 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2001 2002 /* reinitialize forwarding streams */ 2003 init_fwd_streams(); 2004 2005 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2006 rxp = 0; rxq = 0; 2007 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2008 struct fwd_stream *fs; 2009 2010 fs = fwd_streams[sm_id]; 2011 2012 if ((rxp & 0x1) == 0) 2013 txp = (portid_t) (rxp + 1); 2014 else 2015 txp = (portid_t) (rxp - 1); 2016 /* 2017 * if we are in loopback, simply send stuff out through the 2018 * ingress port 2019 */ 2020 if (port_topology == PORT_TOPOLOGY_LOOP) 2021 txp = rxp; 2022 2023 fs->rx_port = fwd_ports_ids[rxp]; 2024 fs->rx_queue = rxq; 2025 fs->tx_port = fwd_ports_ids[txp]; 2026 fs->tx_queue = rxq; 2027 fs->peer_addr = fs->tx_port; 2028 fs->retry_enabled = retry_enabled; 2029 rxq = (queueid_t) (rxq + 1); 2030 if (rxq < nb_q) 2031 continue; 2032 /* 2033 * rxq == nb_q 2034 * Restart from RX queue 0 on next RX port 2035 */ 2036 rxq = 0; 2037 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2038 rxp = (portid_t) 2039 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2040 else 2041 rxp = (portid_t) (rxp + 1); 2042 } 2043 } 2044 2045 /** 2046 * For the DCB forwarding test, each core is assigned on each traffic class. 2047 * 2048 * Each core is assigned a multi-stream, each stream being composed of 2049 * a RX queue to poll on a RX port for input messages, associated with 2050 * a TX queue of a TX port where to send forwarded packets. All RX and 2051 * TX queues are mapping to the same traffic class. 2052 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2053 * the same core 2054 */ 2055 static void 2056 dcb_fwd_config_setup(void) 2057 { 2058 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2059 portid_t txp, rxp = 0; 2060 queueid_t txq, rxq = 0; 2061 lcoreid_t lc_id; 2062 uint16_t nb_rx_queue, nb_tx_queue; 2063 uint16_t i, j, k, sm_id = 0; 2064 uint8_t tc = 0; 2065 2066 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2067 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2068 cur_fwd_config.nb_fwd_streams = 2069 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2070 2071 /* reinitialize forwarding streams */ 2072 init_fwd_streams(); 2073 sm_id = 0; 2074 txp = 1; 2075 /* get the dcb info on the first RX and TX ports */ 2076 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2077 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2078 2079 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2080 fwd_lcores[lc_id]->stream_nb = 0; 2081 fwd_lcores[lc_id]->stream_idx = sm_id; 2082 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2083 /* if the nb_queue is zero, means this tc is 2084 * not enabled on the POOL 2085 */ 2086 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2087 break; 2088 k = fwd_lcores[lc_id]->stream_nb + 2089 fwd_lcores[lc_id]->stream_idx; 2090 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2091 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2092 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2093 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2094 for (j = 0; j < nb_rx_queue; j++) { 2095 struct fwd_stream *fs; 2096 2097 fs = fwd_streams[k + j]; 2098 fs->rx_port = fwd_ports_ids[rxp]; 2099 fs->rx_queue = rxq + j; 2100 fs->tx_port = fwd_ports_ids[txp]; 2101 fs->tx_queue = txq + j % nb_tx_queue; 2102 fs->peer_addr = fs->tx_port; 2103 fs->retry_enabled = retry_enabled; 2104 } 2105 fwd_lcores[lc_id]->stream_nb += 2106 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2107 } 2108 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2109 2110 tc++; 2111 if (tc < rxp_dcb_info.nb_tcs) 2112 continue; 2113 /* Restart from TC 0 on next RX port */ 2114 tc = 0; 2115 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2116 rxp = (portid_t) 2117 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2118 else 2119 rxp++; 2120 if (rxp >= nb_fwd_ports) 2121 return; 2122 /* get the dcb information on next RX and TX ports */ 2123 if ((rxp & 0x1) == 0) 2124 txp = (portid_t) (rxp + 1); 2125 else 2126 txp = (portid_t) (rxp - 1); 2127 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2128 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2129 } 2130 } 2131 2132 static void 2133 icmp_echo_config_setup(void) 2134 { 2135 portid_t rxp; 2136 queueid_t rxq; 2137 lcoreid_t lc_id; 2138 uint16_t sm_id; 2139 2140 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2141 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2142 (nb_txq * nb_fwd_ports); 2143 else 2144 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2145 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2146 cur_fwd_config.nb_fwd_streams = 2147 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2148 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2149 cur_fwd_config.nb_fwd_lcores = 2150 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2151 if (verbose_level > 0) { 2152 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2153 __FUNCTION__, 2154 cur_fwd_config.nb_fwd_lcores, 2155 cur_fwd_config.nb_fwd_ports, 2156 cur_fwd_config.nb_fwd_streams); 2157 } 2158 2159 /* reinitialize forwarding streams */ 2160 init_fwd_streams(); 2161 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2162 rxp = 0; rxq = 0; 2163 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2164 if (verbose_level > 0) 2165 printf(" core=%d: \n", lc_id); 2166 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2167 struct fwd_stream *fs; 2168 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2169 fs->rx_port = fwd_ports_ids[rxp]; 2170 fs->rx_queue = rxq; 2171 fs->tx_port = fs->rx_port; 2172 fs->tx_queue = rxq; 2173 fs->peer_addr = fs->tx_port; 2174 fs->retry_enabled = retry_enabled; 2175 if (verbose_level > 0) 2176 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2177 sm_id, fs->rx_port, fs->rx_queue, 2178 fs->tx_queue); 2179 rxq = (queueid_t) (rxq + 1); 2180 if (rxq == nb_rxq) { 2181 rxq = 0; 2182 rxp = (portid_t) (rxp + 1); 2183 } 2184 } 2185 } 2186 } 2187 2188 void 2189 fwd_config_setup(void) 2190 { 2191 cur_fwd_config.fwd_eng = cur_fwd_eng; 2192 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2193 icmp_echo_config_setup(); 2194 return; 2195 } 2196 if ((nb_rxq > 1) && (nb_txq > 1)){ 2197 if (dcb_config) 2198 dcb_fwd_config_setup(); 2199 else 2200 rss_fwd_config_setup(); 2201 } 2202 else 2203 simple_fwd_config_setup(); 2204 } 2205 2206 void 2207 pkt_fwd_config_display(struct fwd_config *cfg) 2208 { 2209 struct fwd_stream *fs; 2210 lcoreid_t lc_id; 2211 streamid_t sm_id; 2212 2213 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2214 "NUMA support %s, MP over anonymous pages %s\n", 2215 cfg->fwd_eng->fwd_mode_name, 2216 retry_enabled == 0 ? "" : " with retry", 2217 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2218 numa_support == 1 ? "enabled" : "disabled", 2219 mp_anon != 0 ? "enabled" : "disabled"); 2220 2221 if (retry_enabled) 2222 printf("TX retry num: %u, delay between TX retries: %uus\n", 2223 burst_tx_retry_num, burst_tx_delay_time); 2224 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2225 printf("Logical Core %u (socket %u) forwards packets on " 2226 "%d streams:", 2227 fwd_lcores_cpuids[lc_id], 2228 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2229 fwd_lcores[lc_id]->stream_nb); 2230 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2231 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2232 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2233 "P=%d/Q=%d (socket %u) ", 2234 fs->rx_port, fs->rx_queue, 2235 ports[fs->rx_port].socket_id, 2236 fs->tx_port, fs->tx_queue, 2237 ports[fs->tx_port].socket_id); 2238 print_ethaddr("peer=", 2239 &peer_eth_addrs[fs->peer_addr]); 2240 } 2241 printf("\n"); 2242 } 2243 printf("\n"); 2244 } 2245 2246 void 2247 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2248 { 2249 uint8_t c, new_peer_addr[6]; 2250 if (!rte_eth_dev_is_valid_port(port_id)) { 2251 printf("Error: Invalid port number %i\n", port_id); 2252 return; 2253 } 2254 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2255 sizeof(new_peer_addr)) < 0) { 2256 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2257 return; 2258 } 2259 for (c = 0; c < 6; c++) 2260 peer_eth_addrs[port_id].addr_bytes[c] = 2261 new_peer_addr[c]; 2262 } 2263 2264 int 2265 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2266 { 2267 unsigned int i; 2268 unsigned int lcore_cpuid; 2269 int record_now; 2270 2271 record_now = 0; 2272 again: 2273 for (i = 0; i < nb_lc; i++) { 2274 lcore_cpuid = lcorelist[i]; 2275 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2276 printf("lcore %u not enabled\n", lcore_cpuid); 2277 return -1; 2278 } 2279 if (lcore_cpuid == rte_get_master_lcore()) { 2280 printf("lcore %u cannot be masked on for running " 2281 "packet forwarding, which is the master lcore " 2282 "and reserved for command line parsing only\n", 2283 lcore_cpuid); 2284 return -1; 2285 } 2286 if (record_now) 2287 fwd_lcores_cpuids[i] = lcore_cpuid; 2288 } 2289 if (record_now == 0) { 2290 record_now = 1; 2291 goto again; 2292 } 2293 nb_cfg_lcores = (lcoreid_t) nb_lc; 2294 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2295 printf("previous number of forwarding cores %u - changed to " 2296 "number of configured cores %u\n", 2297 (unsigned int) nb_fwd_lcores, nb_lc); 2298 nb_fwd_lcores = (lcoreid_t) nb_lc; 2299 } 2300 2301 return 0; 2302 } 2303 2304 int 2305 set_fwd_lcores_mask(uint64_t lcoremask) 2306 { 2307 unsigned int lcorelist[64]; 2308 unsigned int nb_lc; 2309 unsigned int i; 2310 2311 if (lcoremask == 0) { 2312 printf("Invalid NULL mask of cores\n"); 2313 return -1; 2314 } 2315 nb_lc = 0; 2316 for (i = 0; i < 64; i++) { 2317 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2318 continue; 2319 lcorelist[nb_lc++] = i; 2320 } 2321 return set_fwd_lcores_list(lcorelist, nb_lc); 2322 } 2323 2324 void 2325 set_fwd_lcores_number(uint16_t nb_lc) 2326 { 2327 if (nb_lc > nb_cfg_lcores) { 2328 printf("nb fwd cores %u > %u (max. number of configured " 2329 "lcores) - ignored\n", 2330 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2331 return; 2332 } 2333 nb_fwd_lcores = (lcoreid_t) nb_lc; 2334 printf("Number of forwarding cores set to %u\n", 2335 (unsigned int) nb_fwd_lcores); 2336 } 2337 2338 void 2339 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2340 { 2341 unsigned int i; 2342 portid_t port_id; 2343 int record_now; 2344 2345 record_now = 0; 2346 again: 2347 for (i = 0; i < nb_pt; i++) { 2348 port_id = (portid_t) portlist[i]; 2349 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2350 return; 2351 if (record_now) 2352 fwd_ports_ids[i] = port_id; 2353 } 2354 if (record_now == 0) { 2355 record_now = 1; 2356 goto again; 2357 } 2358 nb_cfg_ports = (portid_t) nb_pt; 2359 if (nb_fwd_ports != (portid_t) nb_pt) { 2360 printf("previous number of forwarding ports %u - changed to " 2361 "number of configured ports %u\n", 2362 (unsigned int) nb_fwd_ports, nb_pt); 2363 nb_fwd_ports = (portid_t) nb_pt; 2364 } 2365 } 2366 2367 void 2368 set_fwd_ports_mask(uint64_t portmask) 2369 { 2370 unsigned int portlist[64]; 2371 unsigned int nb_pt; 2372 unsigned int i; 2373 2374 if (portmask == 0) { 2375 printf("Invalid NULL mask of ports\n"); 2376 return; 2377 } 2378 nb_pt = 0; 2379 RTE_ETH_FOREACH_DEV(i) { 2380 if (! ((uint64_t)(1ULL << i) & portmask)) 2381 continue; 2382 portlist[nb_pt++] = i; 2383 } 2384 set_fwd_ports_list(portlist, nb_pt); 2385 } 2386 2387 void 2388 set_fwd_ports_number(uint16_t nb_pt) 2389 { 2390 if (nb_pt > nb_cfg_ports) { 2391 printf("nb fwd ports %u > %u (number of configured " 2392 "ports) - ignored\n", 2393 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2394 return; 2395 } 2396 nb_fwd_ports = (portid_t) nb_pt; 2397 printf("Number of forwarding ports set to %u\n", 2398 (unsigned int) nb_fwd_ports); 2399 } 2400 2401 int 2402 port_is_forwarding(portid_t port_id) 2403 { 2404 unsigned int i; 2405 2406 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2407 return -1; 2408 2409 for (i = 0; i < nb_fwd_ports; i++) { 2410 if (fwd_ports_ids[i] == port_id) 2411 return 1; 2412 } 2413 2414 return 0; 2415 } 2416 2417 void 2418 set_nb_pkt_per_burst(uint16_t nb) 2419 { 2420 if (nb > MAX_PKT_BURST) { 2421 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2422 " ignored\n", 2423 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2424 return; 2425 } 2426 nb_pkt_per_burst = nb; 2427 printf("Number of packets per burst set to %u\n", 2428 (unsigned int) nb_pkt_per_burst); 2429 } 2430 2431 static const char * 2432 tx_split_get_name(enum tx_pkt_split split) 2433 { 2434 uint32_t i; 2435 2436 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2437 if (tx_split_name[i].split == split) 2438 return tx_split_name[i].name; 2439 } 2440 return NULL; 2441 } 2442 2443 void 2444 set_tx_pkt_split(const char *name) 2445 { 2446 uint32_t i; 2447 2448 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2449 if (strcmp(tx_split_name[i].name, name) == 0) { 2450 tx_pkt_split = tx_split_name[i].split; 2451 return; 2452 } 2453 } 2454 printf("unknown value: \"%s\"\n", name); 2455 } 2456 2457 void 2458 show_tx_pkt_segments(void) 2459 { 2460 uint32_t i, n; 2461 const char *split; 2462 2463 n = tx_pkt_nb_segs; 2464 split = tx_split_get_name(tx_pkt_split); 2465 2466 printf("Number of segments: %u\n", n); 2467 printf("Segment sizes: "); 2468 for (i = 0; i != n - 1; i++) 2469 printf("%hu,", tx_pkt_seg_lengths[i]); 2470 printf("%hu\n", tx_pkt_seg_lengths[i]); 2471 printf("Split packet: %s\n", split); 2472 } 2473 2474 void 2475 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2476 { 2477 uint16_t tx_pkt_len; 2478 unsigned i; 2479 2480 if (nb_segs >= (unsigned) nb_txd) { 2481 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2482 nb_segs, (unsigned int) nb_txd); 2483 return; 2484 } 2485 2486 /* 2487 * Check that each segment length is greater or equal than 2488 * the mbuf data sise. 2489 * Check also that the total packet length is greater or equal than the 2490 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2491 */ 2492 tx_pkt_len = 0; 2493 for (i = 0; i < nb_segs; i++) { 2494 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2495 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2496 i, seg_lengths[i], (unsigned) mbuf_data_size); 2497 return; 2498 } 2499 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2500 } 2501 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2502 printf("total packet length=%u < %d - give up\n", 2503 (unsigned) tx_pkt_len, 2504 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2505 return; 2506 } 2507 2508 for (i = 0; i < nb_segs; i++) 2509 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2510 2511 tx_pkt_length = tx_pkt_len; 2512 tx_pkt_nb_segs = (uint8_t) nb_segs; 2513 } 2514 2515 void 2516 setup_gro(const char *onoff, portid_t port_id) 2517 { 2518 if (!rte_eth_dev_is_valid_port(port_id)) { 2519 printf("invalid port id %u\n", port_id); 2520 return; 2521 } 2522 if (test_done == 0) { 2523 printf("Before enable/disable GRO," 2524 " please stop forwarding first\n"); 2525 return; 2526 } 2527 if (strcmp(onoff, "on") == 0) { 2528 if (gro_ports[port_id].enable != 0) { 2529 printf("Port %u has enabled GRO. Please" 2530 " disable GRO first\n", port_id); 2531 return; 2532 } 2533 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2534 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2535 gro_ports[port_id].param.max_flow_num = 2536 GRO_DEFAULT_FLOW_NUM; 2537 gro_ports[port_id].param.max_item_per_flow = 2538 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2539 } 2540 gro_ports[port_id].enable = 1; 2541 } else { 2542 if (gro_ports[port_id].enable == 0) { 2543 printf("Port %u has disabled GRO\n", port_id); 2544 return; 2545 } 2546 gro_ports[port_id].enable = 0; 2547 } 2548 } 2549 2550 void 2551 setup_gro_flush_cycles(uint8_t cycles) 2552 { 2553 if (test_done == 0) { 2554 printf("Before change flush interval for GRO," 2555 " please stop forwarding first.\n"); 2556 return; 2557 } 2558 2559 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2560 GRO_DEFAULT_FLUSH_CYCLES) { 2561 printf("The flushing cycle be in the range" 2562 " of 1 to %u. Revert to the default" 2563 " value %u.\n", 2564 GRO_MAX_FLUSH_CYCLES, 2565 GRO_DEFAULT_FLUSH_CYCLES); 2566 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2567 } 2568 2569 gro_flush_cycles = cycles; 2570 } 2571 2572 void 2573 show_gro(portid_t port_id) 2574 { 2575 struct rte_gro_param *param; 2576 uint32_t max_pkts_num; 2577 2578 param = &gro_ports[port_id].param; 2579 2580 if (!rte_eth_dev_is_valid_port(port_id)) { 2581 printf("Invalid port id %u.\n", port_id); 2582 return; 2583 } 2584 if (gro_ports[port_id].enable) { 2585 printf("GRO type: TCP/IPv4\n"); 2586 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2587 max_pkts_num = param->max_flow_num * 2588 param->max_item_per_flow; 2589 } else 2590 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2591 printf("Max number of packets to perform GRO: %u\n", 2592 max_pkts_num); 2593 printf("Flushing cycles: %u\n", gro_flush_cycles); 2594 } else 2595 printf("Port %u doesn't enable GRO.\n", port_id); 2596 } 2597 2598 void 2599 setup_gso(const char *mode, portid_t port_id) 2600 { 2601 if (!rte_eth_dev_is_valid_port(port_id)) { 2602 printf("invalid port id %u\n", port_id); 2603 return; 2604 } 2605 if (strcmp(mode, "on") == 0) { 2606 if (test_done == 0) { 2607 printf("before enabling GSO," 2608 " please stop forwarding first\n"); 2609 return; 2610 } 2611 gso_ports[port_id].enable = 1; 2612 } else if (strcmp(mode, "off") == 0) { 2613 if (test_done == 0) { 2614 printf("before disabling GSO," 2615 " please stop forwarding first\n"); 2616 return; 2617 } 2618 gso_ports[port_id].enable = 0; 2619 } 2620 } 2621 2622 char* 2623 list_pkt_forwarding_modes(void) 2624 { 2625 static char fwd_modes[128] = ""; 2626 const char *separator = "|"; 2627 struct fwd_engine *fwd_eng; 2628 unsigned i = 0; 2629 2630 if (strlen (fwd_modes) == 0) { 2631 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2632 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2633 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2634 strncat(fwd_modes, separator, 2635 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2636 } 2637 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2638 } 2639 2640 return fwd_modes; 2641 } 2642 2643 char* 2644 list_pkt_forwarding_retry_modes(void) 2645 { 2646 static char fwd_modes[128] = ""; 2647 const char *separator = "|"; 2648 struct fwd_engine *fwd_eng; 2649 unsigned i = 0; 2650 2651 if (strlen(fwd_modes) == 0) { 2652 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2653 if (fwd_eng == &rx_only_engine) 2654 continue; 2655 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2656 sizeof(fwd_modes) - 2657 strlen(fwd_modes) - 1); 2658 strncat(fwd_modes, separator, 2659 sizeof(fwd_modes) - 2660 strlen(fwd_modes) - 1); 2661 } 2662 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2663 } 2664 2665 return fwd_modes; 2666 } 2667 2668 void 2669 set_pkt_forwarding_mode(const char *fwd_mode_name) 2670 { 2671 struct fwd_engine *fwd_eng; 2672 unsigned i; 2673 2674 i = 0; 2675 while ((fwd_eng = fwd_engines[i]) != NULL) { 2676 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2677 printf("Set %s packet forwarding mode%s\n", 2678 fwd_mode_name, 2679 retry_enabled == 0 ? "" : " with retry"); 2680 cur_fwd_eng = fwd_eng; 2681 return; 2682 } 2683 i++; 2684 } 2685 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2686 } 2687 2688 void 2689 set_verbose_level(uint16_t vb_level) 2690 { 2691 printf("Change verbose level from %u to %u\n", 2692 (unsigned int) verbose_level, (unsigned int) vb_level); 2693 verbose_level = vb_level; 2694 } 2695 2696 void 2697 vlan_extend_set(portid_t port_id, int on) 2698 { 2699 int diag; 2700 int vlan_offload; 2701 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2702 2703 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2704 return; 2705 2706 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2707 2708 if (on) { 2709 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2710 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2711 } else { 2712 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2713 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2714 } 2715 2716 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2717 if (diag < 0) 2718 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2719 "diag=%d\n", port_id, on, diag); 2720 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2721 } 2722 2723 void 2724 rx_vlan_strip_set(portid_t port_id, int on) 2725 { 2726 int diag; 2727 int vlan_offload; 2728 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2729 2730 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2731 return; 2732 2733 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2734 2735 if (on) { 2736 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2737 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2738 } else { 2739 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2740 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2741 } 2742 2743 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2744 if (diag < 0) 2745 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2746 "diag=%d\n", port_id, on, diag); 2747 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2748 } 2749 2750 void 2751 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2752 { 2753 int diag; 2754 2755 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2756 return; 2757 2758 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2759 if (diag < 0) 2760 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2761 "diag=%d\n", port_id, queue_id, on, diag); 2762 } 2763 2764 void 2765 rx_vlan_filter_set(portid_t port_id, int on) 2766 { 2767 int diag; 2768 int vlan_offload; 2769 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2770 2771 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2772 return; 2773 2774 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2775 2776 if (on) { 2777 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2778 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2779 } else { 2780 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2781 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2782 } 2783 2784 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2785 if (diag < 0) 2786 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2787 "diag=%d\n", port_id, on, diag); 2788 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2789 } 2790 2791 int 2792 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2793 { 2794 int diag; 2795 2796 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2797 return 1; 2798 if (vlan_id_is_invalid(vlan_id)) 2799 return 1; 2800 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2801 if (diag == 0) 2802 return 0; 2803 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2804 "diag=%d\n", 2805 port_id, vlan_id, on, diag); 2806 return -1; 2807 } 2808 2809 void 2810 rx_vlan_all_filter_set(portid_t port_id, int on) 2811 { 2812 uint16_t vlan_id; 2813 2814 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2815 return; 2816 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2817 if (rx_vft_set(port_id, vlan_id, on)) 2818 break; 2819 } 2820 } 2821 2822 void 2823 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2824 { 2825 int diag; 2826 2827 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2828 return; 2829 2830 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2831 if (diag == 0) 2832 return; 2833 2834 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2835 "diag=%d\n", 2836 port_id, vlan_type, tp_id, diag); 2837 } 2838 2839 void 2840 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2841 { 2842 int vlan_offload; 2843 struct rte_eth_dev_info dev_info; 2844 2845 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2846 return; 2847 if (vlan_id_is_invalid(vlan_id)) 2848 return; 2849 2850 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2851 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2852 printf("Error, as QinQ has been enabled.\n"); 2853 return; 2854 } 2855 rte_eth_dev_info_get(port_id, &dev_info); 2856 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 2857 printf("Error: vlan insert is not supported by port %d\n", 2858 port_id); 2859 return; 2860 } 2861 2862 tx_vlan_reset(port_id); 2863 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 2864 ports[port_id].tx_vlan_id = vlan_id; 2865 } 2866 2867 void 2868 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2869 { 2870 int vlan_offload; 2871 struct rte_eth_dev_info dev_info; 2872 2873 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2874 return; 2875 if (vlan_id_is_invalid(vlan_id)) 2876 return; 2877 if (vlan_id_is_invalid(vlan_id_outer)) 2878 return; 2879 2880 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2881 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2882 printf("Error, as QinQ hasn't been enabled.\n"); 2883 return; 2884 } 2885 rte_eth_dev_info_get(port_id, &dev_info); 2886 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 2887 printf("Error: qinq insert not supported by port %d\n", 2888 port_id); 2889 return; 2890 } 2891 2892 tx_vlan_reset(port_id); 2893 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; 2894 ports[port_id].tx_vlan_id = vlan_id; 2895 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2896 } 2897 2898 void 2899 tx_vlan_reset(portid_t port_id) 2900 { 2901 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2902 return; 2903 ports[port_id].dev_conf.txmode.offloads &= 2904 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 2905 DEV_TX_OFFLOAD_QINQ_INSERT); 2906 ports[port_id].tx_vlan_id = 0; 2907 ports[port_id].tx_vlan_id_outer = 0; 2908 } 2909 2910 void 2911 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2912 { 2913 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2914 return; 2915 2916 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2917 } 2918 2919 void 2920 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2921 { 2922 uint16_t i; 2923 uint8_t existing_mapping_found = 0; 2924 2925 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2926 return; 2927 2928 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2929 return; 2930 2931 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2932 printf("map_value not in required range 0..%d\n", 2933 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2934 return; 2935 } 2936 2937 if (!is_rx) { /*then tx*/ 2938 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2939 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2940 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2941 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2942 existing_mapping_found = 1; 2943 break; 2944 } 2945 } 2946 if (!existing_mapping_found) { /* A new additional mapping... */ 2947 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2948 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2949 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2950 nb_tx_queue_stats_mappings++; 2951 } 2952 } 2953 else { /*rx*/ 2954 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2955 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2956 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2957 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2958 existing_mapping_found = 1; 2959 break; 2960 } 2961 } 2962 if (!existing_mapping_found) { /* A new additional mapping... */ 2963 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2964 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2965 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2966 nb_rx_queue_stats_mappings++; 2967 } 2968 } 2969 } 2970 2971 void 2972 set_xstats_hide_zero(uint8_t on_off) 2973 { 2974 xstats_hide_zero = on_off; 2975 } 2976 2977 static inline void 2978 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2979 { 2980 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 2981 2982 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2983 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 2984 " tunnel_id: 0x%08x", 2985 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2986 rte_be_to_cpu_32(mask->tunnel_id_mask)); 2987 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2988 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 2989 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 2990 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 2991 2992 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 2993 rte_be_to_cpu_16(mask->src_port_mask), 2994 rte_be_to_cpu_16(mask->dst_port_mask)); 2995 2996 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2997 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 2998 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 2999 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3000 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3001 3002 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3003 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3004 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3005 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3006 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3007 } 3008 3009 printf("\n"); 3010 } 3011 3012 static inline void 3013 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3014 { 3015 struct rte_eth_flex_payload_cfg *cfg; 3016 uint32_t i, j; 3017 3018 for (i = 0; i < flex_conf->nb_payloads; i++) { 3019 cfg = &flex_conf->flex_set[i]; 3020 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3021 printf("\n RAW: "); 3022 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3023 printf("\n L2_PAYLOAD: "); 3024 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3025 printf("\n L3_PAYLOAD: "); 3026 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3027 printf("\n L4_PAYLOAD: "); 3028 else 3029 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3030 for (j = 0; j < num; j++) 3031 printf(" %-5u", cfg->src_offset[j]); 3032 } 3033 printf("\n"); 3034 } 3035 3036 static char * 3037 flowtype_to_str(uint16_t flow_type) 3038 { 3039 struct flow_type_info { 3040 char str[32]; 3041 uint16_t ftype; 3042 }; 3043 3044 uint8_t i; 3045 static struct flow_type_info flowtype_str_table[] = { 3046 {"raw", RTE_ETH_FLOW_RAW}, 3047 {"ipv4", RTE_ETH_FLOW_IPV4}, 3048 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3049 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3050 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3051 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3052 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3053 {"ipv6", RTE_ETH_FLOW_IPV6}, 3054 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3055 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3056 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3057 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3058 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3059 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3060 {"port", RTE_ETH_FLOW_PORT}, 3061 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3062 {"geneve", RTE_ETH_FLOW_GENEVE}, 3063 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3064 }; 3065 3066 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3067 if (flowtype_str_table[i].ftype == flow_type) 3068 return flowtype_str_table[i].str; 3069 } 3070 3071 return NULL; 3072 } 3073 3074 static inline void 3075 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3076 { 3077 struct rte_eth_fdir_flex_mask *mask; 3078 uint32_t i, j; 3079 char *p; 3080 3081 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3082 mask = &flex_conf->flex_mask[i]; 3083 p = flowtype_to_str(mask->flow_type); 3084 printf("\n %s:\t", p ? p : "unknown"); 3085 for (j = 0; j < num; j++) 3086 printf(" %02x", mask->mask[j]); 3087 } 3088 printf("\n"); 3089 } 3090 3091 static inline void 3092 print_fdir_flow_type(uint32_t flow_types_mask) 3093 { 3094 int i; 3095 char *p; 3096 3097 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3098 if (!(flow_types_mask & (1 << i))) 3099 continue; 3100 p = flowtype_to_str(i); 3101 if (p) 3102 printf(" %s", p); 3103 else 3104 printf(" unknown"); 3105 } 3106 printf("\n"); 3107 } 3108 3109 void 3110 fdir_get_infos(portid_t port_id) 3111 { 3112 struct rte_eth_fdir_stats fdir_stat; 3113 struct rte_eth_fdir_info fdir_info; 3114 int ret; 3115 3116 static const char *fdir_stats_border = "########################"; 3117 3118 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3119 return; 3120 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3121 if (ret < 0) { 3122 printf("\n FDIR is not supported on port %-2d\n", 3123 port_id); 3124 return; 3125 } 3126 3127 memset(&fdir_info, 0, sizeof(fdir_info)); 3128 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3129 RTE_ETH_FILTER_INFO, &fdir_info); 3130 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3131 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3132 RTE_ETH_FILTER_STATS, &fdir_stat); 3133 printf("\n %s FDIR infos for port %-2d %s\n", 3134 fdir_stats_border, port_id, fdir_stats_border); 3135 printf(" MODE: "); 3136 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3137 printf(" PERFECT\n"); 3138 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3139 printf(" PERFECT-MAC-VLAN\n"); 3140 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3141 printf(" PERFECT-TUNNEL\n"); 3142 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3143 printf(" SIGNATURE\n"); 3144 else 3145 printf(" DISABLE\n"); 3146 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3147 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3148 printf(" SUPPORTED FLOW TYPE: "); 3149 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3150 } 3151 printf(" FLEX PAYLOAD INFO:\n"); 3152 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3153 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3154 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3155 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3156 fdir_info.flex_payload_unit, 3157 fdir_info.max_flex_payload_segment_num, 3158 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3159 printf(" MASK: "); 3160 print_fdir_mask(&fdir_info.mask); 3161 if (fdir_info.flex_conf.nb_payloads > 0) { 3162 printf(" FLEX PAYLOAD SRC OFFSET:"); 3163 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3164 } 3165 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3166 printf(" FLEX MASK CFG:"); 3167 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3168 } 3169 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3170 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3171 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3172 fdir_info.guarant_spc, fdir_info.best_spc); 3173 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3174 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3175 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3176 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3177 fdir_stat.collision, fdir_stat.free, 3178 fdir_stat.maxhash, fdir_stat.maxlen, 3179 fdir_stat.add, fdir_stat.remove, 3180 fdir_stat.f_add, fdir_stat.f_remove); 3181 printf(" %s############################%s\n", 3182 fdir_stats_border, fdir_stats_border); 3183 } 3184 3185 void 3186 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3187 { 3188 struct rte_port *port; 3189 struct rte_eth_fdir_flex_conf *flex_conf; 3190 int i, idx = 0; 3191 3192 port = &ports[port_id]; 3193 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3194 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3195 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3196 idx = i; 3197 break; 3198 } 3199 } 3200 if (i >= RTE_ETH_FLOW_MAX) { 3201 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3202 idx = flex_conf->nb_flexmasks; 3203 flex_conf->nb_flexmasks++; 3204 } else { 3205 printf("The flex mask table is full. Can not set flex" 3206 " mask for flow_type(%u).", cfg->flow_type); 3207 return; 3208 } 3209 } 3210 rte_memcpy(&flex_conf->flex_mask[idx], 3211 cfg, 3212 sizeof(struct rte_eth_fdir_flex_mask)); 3213 } 3214 3215 void 3216 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3217 { 3218 struct rte_port *port; 3219 struct rte_eth_fdir_flex_conf *flex_conf; 3220 int i, idx = 0; 3221 3222 port = &ports[port_id]; 3223 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3224 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3225 if (cfg->type == flex_conf->flex_set[i].type) { 3226 idx = i; 3227 break; 3228 } 3229 } 3230 if (i >= RTE_ETH_PAYLOAD_MAX) { 3231 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3232 idx = flex_conf->nb_payloads; 3233 flex_conf->nb_payloads++; 3234 } else { 3235 printf("The flex payload table is full. Can not set" 3236 " flex payload for type(%u).", cfg->type); 3237 return; 3238 } 3239 } 3240 rte_memcpy(&flex_conf->flex_set[idx], 3241 cfg, 3242 sizeof(struct rte_eth_flex_payload_cfg)); 3243 3244 } 3245 3246 void 3247 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3248 { 3249 #ifdef RTE_LIBRTE_IXGBE_PMD 3250 int diag; 3251 3252 if (is_rx) 3253 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3254 else 3255 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3256 3257 if (diag == 0) 3258 return; 3259 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3260 is_rx ? "rx" : "tx", port_id, diag); 3261 return; 3262 #endif 3263 printf("VF %s setting not supported for port %d\n", 3264 is_rx ? "Rx" : "Tx", port_id); 3265 RTE_SET_USED(vf); 3266 RTE_SET_USED(on); 3267 } 3268 3269 int 3270 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3271 { 3272 int diag; 3273 struct rte_eth_link link; 3274 3275 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3276 return 1; 3277 rte_eth_link_get_nowait(port_id, &link); 3278 if (rate > link.link_speed) { 3279 printf("Invalid rate value:%u bigger than link speed: %u\n", 3280 rate, link.link_speed); 3281 return 1; 3282 } 3283 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3284 if (diag == 0) 3285 return diag; 3286 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3287 port_id, diag); 3288 return diag; 3289 } 3290 3291 int 3292 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3293 { 3294 int diag = -ENOTSUP; 3295 3296 RTE_SET_USED(vf); 3297 RTE_SET_USED(rate); 3298 RTE_SET_USED(q_msk); 3299 3300 #ifdef RTE_LIBRTE_IXGBE_PMD 3301 if (diag == -ENOTSUP) 3302 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3303 q_msk); 3304 #endif 3305 #ifdef RTE_LIBRTE_BNXT_PMD 3306 if (diag == -ENOTSUP) 3307 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3308 #endif 3309 if (diag == 0) 3310 return diag; 3311 3312 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3313 port_id, diag); 3314 return diag; 3315 } 3316 3317 /* 3318 * Functions to manage the set of filtered Multicast MAC addresses. 3319 * 3320 * A pool of filtered multicast MAC addresses is associated with each port. 3321 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3322 * The address of the pool and the number of valid multicast MAC addresses 3323 * recorded in the pool are stored in the fields "mc_addr_pool" and 3324 * "mc_addr_nb" of the "rte_port" data structure. 3325 * 3326 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3327 * to be supplied a contiguous array of multicast MAC addresses. 3328 * To comply with this constraint, the set of multicast addresses recorded 3329 * into the pool are systematically compacted at the beginning of the pool. 3330 * Hence, when a multicast address is removed from the pool, all following 3331 * addresses, if any, are copied back to keep the set contiguous. 3332 */ 3333 #define MCAST_POOL_INC 32 3334 3335 static int 3336 mcast_addr_pool_extend(struct rte_port *port) 3337 { 3338 struct ether_addr *mc_pool; 3339 size_t mc_pool_size; 3340 3341 /* 3342 * If a free entry is available at the end of the pool, just 3343 * increment the number of recorded multicast addresses. 3344 */ 3345 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3346 port->mc_addr_nb++; 3347 return 0; 3348 } 3349 3350 /* 3351 * [re]allocate a pool with MCAST_POOL_INC more entries. 3352 * The previous test guarantees that port->mc_addr_nb is a multiple 3353 * of MCAST_POOL_INC. 3354 */ 3355 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3356 MCAST_POOL_INC); 3357 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3358 mc_pool_size); 3359 if (mc_pool == NULL) { 3360 printf("allocation of pool of %u multicast addresses failed\n", 3361 port->mc_addr_nb + MCAST_POOL_INC); 3362 return -ENOMEM; 3363 } 3364 3365 port->mc_addr_pool = mc_pool; 3366 port->mc_addr_nb++; 3367 return 0; 3368 3369 } 3370 3371 static void 3372 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3373 { 3374 port->mc_addr_nb--; 3375 if (addr_idx == port->mc_addr_nb) { 3376 /* No need to recompact the set of multicast addressses. */ 3377 if (port->mc_addr_nb == 0) { 3378 /* free the pool of multicast addresses. */ 3379 free(port->mc_addr_pool); 3380 port->mc_addr_pool = NULL; 3381 } 3382 return; 3383 } 3384 memmove(&port->mc_addr_pool[addr_idx], 3385 &port->mc_addr_pool[addr_idx + 1], 3386 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3387 } 3388 3389 static void 3390 eth_port_multicast_addr_list_set(portid_t port_id) 3391 { 3392 struct rte_port *port; 3393 int diag; 3394 3395 port = &ports[port_id]; 3396 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3397 port->mc_addr_nb); 3398 if (diag == 0) 3399 return; 3400 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3401 port->mc_addr_nb, port_id, -diag); 3402 } 3403 3404 void 3405 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr) 3406 { 3407 struct rte_port *port; 3408 uint32_t i; 3409 3410 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3411 return; 3412 3413 port = &ports[port_id]; 3414 3415 /* 3416 * Check that the added multicast MAC address is not already recorded 3417 * in the pool of multicast addresses. 3418 */ 3419 for (i = 0; i < port->mc_addr_nb; i++) { 3420 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3421 printf("multicast address already filtered by port\n"); 3422 return; 3423 } 3424 } 3425 3426 if (mcast_addr_pool_extend(port) != 0) 3427 return; 3428 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3429 eth_port_multicast_addr_list_set(port_id); 3430 } 3431 3432 void 3433 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr) 3434 { 3435 struct rte_port *port; 3436 uint32_t i; 3437 3438 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3439 return; 3440 3441 port = &ports[port_id]; 3442 3443 /* 3444 * Search the pool of multicast MAC addresses for the removed address. 3445 */ 3446 for (i = 0; i < port->mc_addr_nb; i++) { 3447 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3448 break; 3449 } 3450 if (i == port->mc_addr_nb) { 3451 printf("multicast address not filtered by port %d\n", port_id); 3452 return; 3453 } 3454 3455 mcast_addr_pool_remove(port, i); 3456 eth_port_multicast_addr_list_set(port_id); 3457 } 3458 3459 void 3460 port_dcb_info_display(portid_t port_id) 3461 { 3462 struct rte_eth_dcb_info dcb_info; 3463 uint16_t i; 3464 int ret; 3465 static const char *border = "================"; 3466 3467 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3468 return; 3469 3470 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3471 if (ret) { 3472 printf("\n Failed to get dcb infos on port %-2d\n", 3473 port_id); 3474 return; 3475 } 3476 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3477 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3478 printf("\n TC : "); 3479 for (i = 0; i < dcb_info.nb_tcs; i++) 3480 printf("\t%4d", i); 3481 printf("\n Priority : "); 3482 for (i = 0; i < dcb_info.nb_tcs; i++) 3483 printf("\t%4d", dcb_info.prio_tc[i]); 3484 printf("\n BW percent :"); 3485 for (i = 0; i < dcb_info.nb_tcs; i++) 3486 printf("\t%4d%%", dcb_info.tc_bws[i]); 3487 printf("\n RXQ base : "); 3488 for (i = 0; i < dcb_info.nb_tcs; i++) 3489 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3490 printf("\n RXQ number :"); 3491 for (i = 0; i < dcb_info.nb_tcs; i++) 3492 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3493 printf("\n TXQ base : "); 3494 for (i = 0; i < dcb_info.nb_tcs; i++) 3495 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3496 printf("\n TXQ number :"); 3497 for (i = 0; i < dcb_info.nb_tcs; i++) 3498 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3499 printf("\n"); 3500 } 3501 3502 uint8_t * 3503 open_file(const char *file_path, uint32_t *size) 3504 { 3505 int fd = open(file_path, O_RDONLY); 3506 off_t pkg_size; 3507 uint8_t *buf = NULL; 3508 int ret = 0; 3509 struct stat st_buf; 3510 3511 if (size) 3512 *size = 0; 3513 3514 if (fd == -1) { 3515 printf("%s: Failed to open %s\n", __func__, file_path); 3516 return buf; 3517 } 3518 3519 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3520 close(fd); 3521 printf("%s: File operations failed\n", __func__); 3522 return buf; 3523 } 3524 3525 pkg_size = st_buf.st_size; 3526 if (pkg_size < 0) { 3527 close(fd); 3528 printf("%s: File operations failed\n", __func__); 3529 return buf; 3530 } 3531 3532 buf = (uint8_t *)malloc(pkg_size); 3533 if (!buf) { 3534 close(fd); 3535 printf("%s: Failed to malloc memory\n", __func__); 3536 return buf; 3537 } 3538 3539 ret = read(fd, buf, pkg_size); 3540 if (ret < 0) { 3541 close(fd); 3542 printf("%s: File read operation failed\n", __func__); 3543 close_file(buf); 3544 return NULL; 3545 } 3546 3547 if (size) 3548 *size = pkg_size; 3549 3550 close(fd); 3551 3552 return buf; 3553 } 3554 3555 int 3556 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3557 { 3558 FILE *fh = fopen(file_path, "wb"); 3559 3560 if (fh == NULL) { 3561 printf("%s: Failed to open %s\n", __func__, file_path); 3562 return -1; 3563 } 3564 3565 if (fwrite(buf, 1, size, fh) != size) { 3566 fclose(fh); 3567 printf("%s: File write operation failed\n", __func__); 3568 return -1; 3569 } 3570 3571 fclose(fh); 3572 3573 return 0; 3574 } 3575 3576 int 3577 close_file(uint8_t *buf) 3578 { 3579 if (buf) { 3580 free((void *)buf); 3581 return 0; 3582 } 3583 3584 return -1; 3585 } 3586 3587 void 3588 port_queue_region_info_display(portid_t port_id, void *buf) 3589 { 3590 #ifdef RTE_LIBRTE_I40E_PMD 3591 uint16_t i, j; 3592 struct rte_pmd_i40e_queue_regions *info = 3593 (struct rte_pmd_i40e_queue_regions *)buf; 3594 static const char *queue_region_info_stats_border = "-------"; 3595 3596 if (!info->queue_region_number) 3597 printf("there is no region has been set before"); 3598 3599 printf("\n %s All queue region info for port=%2d %s", 3600 queue_region_info_stats_border, port_id, 3601 queue_region_info_stats_border); 3602 printf("\n queue_region_number: %-14u \n", 3603 info->queue_region_number); 3604 3605 for (i = 0; i < info->queue_region_number; i++) { 3606 printf("\n region_id: %-14u queue_number: %-14u " 3607 "queue_start_index: %-14u \n", 3608 info->region[i].region_id, 3609 info->region[i].queue_num, 3610 info->region[i].queue_start_index); 3611 3612 printf(" user_priority_num is %-14u :", 3613 info->region[i].user_priority_num); 3614 for (j = 0; j < info->region[i].user_priority_num; j++) 3615 printf(" %-14u ", info->region[i].user_priority[j]); 3616 3617 printf("\n flowtype_num is %-14u :", 3618 info->region[i].flowtype_num); 3619 for (j = 0; j < info->region[i].flowtype_num; j++) 3620 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3621 } 3622 #else 3623 RTE_SET_USED(port_id); 3624 RTE_SET_USED(buf); 3625 #endif 3626 3627 printf("\n\n"); 3628 } 3629