1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * Copyright 2013-2014 6WIND S.A. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <stdarg.h> 36 #include <errno.h> 37 #include <stdio.h> 38 #include <string.h> 39 #include <stdint.h> 40 #include <inttypes.h> 41 42 #include <sys/queue.h> 43 #include <sys/types.h> 44 #include <sys/stat.h> 45 #include <fcntl.h> 46 #include <unistd.h> 47 48 #include <rte_common.h> 49 #include <rte_byteorder.h> 50 #include <rte_debug.h> 51 #include <rte_log.h> 52 #include <rte_memory.h> 53 #include <rte_memcpy.h> 54 #include <rte_memzone.h> 55 #include <rte_launch.h> 56 #include <rte_eal.h> 57 #include <rte_per_lcore.h> 58 #include <rte_lcore.h> 59 #include <rte_atomic.h> 60 #include <rte_branch_prediction.h> 61 #include <rte_mempool.h> 62 #include <rte_mbuf.h> 63 #include <rte_interrupts.h> 64 #include <rte_pci.h> 65 #include <rte_ether.h> 66 #include <rte_ethdev.h> 67 #include <rte_string_fns.h> 68 #include <rte_cycles.h> 69 #include <rte_flow.h> 70 #include <rte_errno.h> 71 #ifdef RTE_LIBRTE_IXGBE_PMD 72 #include <rte_pmd_ixgbe.h> 73 #endif 74 #ifdef RTE_LIBRTE_I40E_PMD 75 #include <rte_pmd_i40e.h> 76 #endif 77 #ifdef RTE_LIBRTE_BNXT_PMD 78 #include <rte_pmd_bnxt.h> 79 #endif 80 #include <rte_gro.h> 81 #include <cmdline_parse_etheraddr.h> 82 83 #include "testpmd.h" 84 85 static char *flowtype_to_str(uint16_t flow_type); 86 87 static const struct { 88 enum tx_pkt_split split; 89 const char *name; 90 } tx_split_name[] = { 91 { 92 .split = TX_PKT_SPLIT_OFF, 93 .name = "off", 94 }, 95 { 96 .split = TX_PKT_SPLIT_ON, 97 .name = "on", 98 }, 99 { 100 .split = TX_PKT_SPLIT_RND, 101 .name = "rand", 102 }, 103 }; 104 105 struct rss_type_info { 106 char str[32]; 107 uint64_t rss_type; 108 }; 109 110 static const struct rss_type_info rss_type_table[] = { 111 { "ipv4", ETH_RSS_IPV4 }, 112 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 113 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 114 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 115 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 116 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 117 { "ipv6", ETH_RSS_IPV6 }, 118 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 119 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 120 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 121 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 122 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 123 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 124 { "ipv6-ex", ETH_RSS_IPV6_EX }, 125 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 126 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 127 { "port", ETH_RSS_PORT }, 128 { "vxlan", ETH_RSS_VXLAN }, 129 { "geneve", ETH_RSS_GENEVE }, 130 { "nvgre", ETH_RSS_NVGRE }, 131 132 }; 133 134 static void 135 print_ethaddr(const char *name, struct ether_addr *eth_addr) 136 { 137 char buf[ETHER_ADDR_FMT_SIZE]; 138 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 139 printf("%s%s", name, buf); 140 } 141 142 void 143 nic_stats_display(portid_t port_id) 144 { 145 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 146 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 147 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 148 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 149 uint64_t mpps_rx, mpps_tx; 150 struct rte_eth_stats stats; 151 struct rte_port *port = &ports[port_id]; 152 uint8_t i; 153 portid_t pid; 154 155 static const char *nic_stats_border = "########################"; 156 157 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 158 printf("Valid port range is [0"); 159 RTE_ETH_FOREACH_DEV(pid) 160 printf(", %d", pid); 161 printf("]\n"); 162 return; 163 } 164 rte_eth_stats_get(port_id, &stats); 165 printf("\n %s NIC statistics for port %-2d %s\n", 166 nic_stats_border, port_id, nic_stats_border); 167 168 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 169 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 170 "%-"PRIu64"\n", 171 stats.ipackets, stats.imissed, stats.ibytes); 172 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 173 printf(" RX-nombuf: %-10"PRIu64"\n", 174 stats.rx_nombuf); 175 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 176 "%-"PRIu64"\n", 177 stats.opackets, stats.oerrors, stats.obytes); 178 } 179 else { 180 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 181 " RX-bytes: %10"PRIu64"\n", 182 stats.ipackets, stats.ierrors, stats.ibytes); 183 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 184 printf(" RX-nombuf: %10"PRIu64"\n", 185 stats.rx_nombuf); 186 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 187 " TX-bytes: %10"PRIu64"\n", 188 stats.opackets, stats.oerrors, stats.obytes); 189 } 190 191 if (port->rx_queue_stats_mapping_enabled) { 192 printf("\n"); 193 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 194 printf(" Stats reg %2d RX-packets: %10"PRIu64 195 " RX-errors: %10"PRIu64 196 " RX-bytes: %10"PRIu64"\n", 197 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 198 } 199 } 200 if (port->tx_queue_stats_mapping_enabled) { 201 printf("\n"); 202 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 203 printf(" Stats reg %2d TX-packets: %10"PRIu64 204 " TX-bytes: %10"PRIu64"\n", 205 i, stats.q_opackets[i], stats.q_obytes[i]); 206 } 207 } 208 209 diff_cycles = prev_cycles[port_id]; 210 prev_cycles[port_id] = rte_rdtsc(); 211 if (diff_cycles > 0) 212 diff_cycles = prev_cycles[port_id] - diff_cycles; 213 214 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 215 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 216 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 217 (stats.opackets - prev_pkts_tx[port_id]) : 0; 218 prev_pkts_rx[port_id] = stats.ipackets; 219 prev_pkts_tx[port_id] = stats.opackets; 220 mpps_rx = diff_cycles > 0 ? 221 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 222 mpps_tx = diff_cycles > 0 ? 223 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 224 printf("\n Throughput (since last show)\n"); 225 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 226 mpps_rx, mpps_tx); 227 228 printf(" %s############################%s\n", 229 nic_stats_border, nic_stats_border); 230 } 231 232 void 233 nic_stats_clear(portid_t port_id) 234 { 235 portid_t pid; 236 237 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 238 printf("Valid port range is [0"); 239 RTE_ETH_FOREACH_DEV(pid) 240 printf(", %d", pid); 241 printf("]\n"); 242 return; 243 } 244 rte_eth_stats_reset(port_id); 245 printf("\n NIC statistics for port %d cleared\n", port_id); 246 } 247 248 void 249 nic_xstats_display(portid_t port_id) 250 { 251 struct rte_eth_xstat *xstats; 252 int cnt_xstats, idx_xstat; 253 struct rte_eth_xstat_name *xstats_names; 254 255 printf("###### NIC extended statistics for port %-2d\n", port_id); 256 if (!rte_eth_dev_is_valid_port(port_id)) { 257 printf("Error: Invalid port number %i\n", port_id); 258 return; 259 } 260 261 /* Get count */ 262 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 263 if (cnt_xstats < 0) { 264 printf("Error: Cannot get count of xstats\n"); 265 return; 266 } 267 268 /* Get id-name lookup table */ 269 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 270 if (xstats_names == NULL) { 271 printf("Cannot allocate memory for xstats lookup\n"); 272 return; 273 } 274 if (cnt_xstats != rte_eth_xstats_get_names( 275 port_id, xstats_names, cnt_xstats)) { 276 printf("Error: Cannot get xstats lookup\n"); 277 free(xstats_names); 278 return; 279 } 280 281 /* Get stats themselves */ 282 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 283 if (xstats == NULL) { 284 printf("Cannot allocate memory for xstats\n"); 285 free(xstats_names); 286 return; 287 } 288 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 289 printf("Error: Unable to get xstats\n"); 290 free(xstats_names); 291 free(xstats); 292 return; 293 } 294 295 /* Display xstats */ 296 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 297 if (xstats_hide_zero && !xstats[idx_xstat].value) 298 continue; 299 printf("%s: %"PRIu64"\n", 300 xstats_names[idx_xstat].name, 301 xstats[idx_xstat].value); 302 } 303 free(xstats_names); 304 free(xstats); 305 } 306 307 void 308 nic_xstats_clear(portid_t port_id) 309 { 310 rte_eth_xstats_reset(port_id); 311 } 312 313 void 314 nic_stats_mapping_display(portid_t port_id) 315 { 316 struct rte_port *port = &ports[port_id]; 317 uint16_t i; 318 portid_t pid; 319 320 static const char *nic_stats_mapping_border = "########################"; 321 322 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 323 printf("Valid port range is [0"); 324 RTE_ETH_FOREACH_DEV(pid) 325 printf(", %d", pid); 326 printf("]\n"); 327 return; 328 } 329 330 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 331 printf("Port id %d - either does not support queue statistic mapping or" 332 " no queue statistic mapping set\n", port_id); 333 return; 334 } 335 336 printf("\n %s NIC statistics mapping for port %-2d %s\n", 337 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 338 339 if (port->rx_queue_stats_mapping_enabled) { 340 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 341 if (rx_queue_stats_mappings[i].port_id == port_id) { 342 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 343 rx_queue_stats_mappings[i].queue_id, 344 rx_queue_stats_mappings[i].stats_counter_id); 345 } 346 } 347 printf("\n"); 348 } 349 350 351 if (port->tx_queue_stats_mapping_enabled) { 352 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 353 if (tx_queue_stats_mappings[i].port_id == port_id) { 354 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 355 tx_queue_stats_mappings[i].queue_id, 356 tx_queue_stats_mappings[i].stats_counter_id); 357 } 358 } 359 } 360 361 printf(" %s####################################%s\n", 362 nic_stats_mapping_border, nic_stats_mapping_border); 363 } 364 365 void 366 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 367 { 368 struct rte_eth_rxq_info qinfo; 369 int32_t rc; 370 static const char *info_border = "*********************"; 371 372 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 373 if (rc != 0) { 374 printf("Failed to retrieve information for port: %u, " 375 "RX queue: %hu\nerror desc: %s(%d)\n", 376 port_id, queue_id, strerror(-rc), rc); 377 return; 378 } 379 380 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 381 info_border, port_id, queue_id, info_border); 382 383 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 384 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 385 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 386 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 387 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 388 printf("\nRX drop packets: %s", 389 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 390 printf("\nRX deferred start: %s", 391 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 392 printf("\nRX scattered packets: %s", 393 (qinfo.scattered_rx != 0) ? "on" : "off"); 394 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 395 printf("\n"); 396 } 397 398 void 399 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 400 { 401 struct rte_eth_txq_info qinfo; 402 int32_t rc; 403 static const char *info_border = "*********************"; 404 405 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 406 if (rc != 0) { 407 printf("Failed to retrieve information for port: %u, " 408 "TX queue: %hu\nerror desc: %s(%d)\n", 409 port_id, queue_id, strerror(-rc), rc); 410 return; 411 } 412 413 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 414 info_border, port_id, queue_id, info_border); 415 416 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 417 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 418 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 419 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 420 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 421 printf("\nTX deferred start: %s", 422 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 423 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 424 printf("\n"); 425 } 426 427 void 428 port_infos_display(portid_t port_id) 429 { 430 struct rte_port *port; 431 struct ether_addr mac_addr; 432 struct rte_eth_link link; 433 struct rte_eth_dev_info dev_info; 434 int vlan_offload; 435 struct rte_mempool * mp; 436 static const char *info_border = "*********************"; 437 portid_t pid; 438 uint16_t mtu; 439 440 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 441 printf("Valid port range is [0"); 442 RTE_ETH_FOREACH_DEV(pid) 443 printf(", %d", pid); 444 printf("]\n"); 445 return; 446 } 447 port = &ports[port_id]; 448 rte_eth_link_get_nowait(port_id, &link); 449 memset(&dev_info, 0, sizeof(dev_info)); 450 rte_eth_dev_info_get(port_id, &dev_info); 451 printf("\n%s Infos for port %-2d %s\n", 452 info_border, port_id, info_border); 453 rte_eth_macaddr_get(port_id, &mac_addr); 454 print_ethaddr("MAC address: ", &mac_addr); 455 printf("\nDriver name: %s", dev_info.driver_name); 456 printf("\nConnect to socket: %u", port->socket_id); 457 458 if (port_numa[port_id] != NUMA_NO_CONFIG) { 459 mp = mbuf_pool_find(port_numa[port_id]); 460 if (mp) 461 printf("\nmemory allocation on the socket: %d", 462 port_numa[port_id]); 463 } else 464 printf("\nmemory allocation on the socket: %u",port->socket_id); 465 466 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 467 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 468 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 469 ("full-duplex") : ("half-duplex")); 470 471 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 472 printf("MTU: %u\n", mtu); 473 474 printf("Promiscuous mode: %s\n", 475 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 476 printf("Allmulticast mode: %s\n", 477 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 478 printf("Maximum number of MAC addresses: %u\n", 479 (unsigned int)(port->dev_info.max_mac_addrs)); 480 printf("Maximum number of MAC addresses of hash filtering: %u\n", 481 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 482 483 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 484 if (vlan_offload >= 0){ 485 printf("VLAN offload: \n"); 486 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 487 printf(" strip on \n"); 488 else 489 printf(" strip off \n"); 490 491 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 492 printf(" filter on \n"); 493 else 494 printf(" filter off \n"); 495 496 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 497 printf(" qinq(extend) on \n"); 498 else 499 printf(" qinq(extend) off \n"); 500 } 501 502 if (dev_info.hash_key_size > 0) 503 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 504 if (dev_info.reta_size > 0) 505 printf("Redirection table size: %u\n", dev_info.reta_size); 506 if (!dev_info.flow_type_rss_offloads) 507 printf("No flow type is supported.\n"); 508 else { 509 uint16_t i; 510 char *p; 511 512 printf("Supported flow types:\n"); 513 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 514 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 515 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 516 continue; 517 p = flowtype_to_str(i); 518 if (p) 519 printf(" %s\n", p); 520 else 521 printf(" user defined %d\n", i); 522 } 523 } 524 525 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 526 printf("Maximum configurable length of RX packet: %u\n", 527 dev_info.max_rx_pktlen); 528 if (dev_info.max_vfs) 529 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 530 if (dev_info.max_vmdq_pools) 531 printf("Maximum number of VMDq pools: %u\n", 532 dev_info.max_vmdq_pools); 533 534 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 535 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 536 printf("Max possible number of RXDs per queue: %hu\n", 537 dev_info.rx_desc_lim.nb_max); 538 printf("Min possible number of RXDs per queue: %hu\n", 539 dev_info.rx_desc_lim.nb_min); 540 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 541 542 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 543 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 544 printf("Max possible number of TXDs per queue: %hu\n", 545 dev_info.tx_desc_lim.nb_max); 546 printf("Min possible number of TXDs per queue: %hu\n", 547 dev_info.tx_desc_lim.nb_min); 548 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 549 } 550 551 void 552 port_offload_cap_display(portid_t port_id) 553 { 554 struct rte_eth_dev_info dev_info; 555 static const char *info_border = "************"; 556 557 if (port_id_is_invalid(port_id, ENABLED_WARN)) 558 return; 559 560 rte_eth_dev_info_get(port_id, &dev_info); 561 562 printf("\n%s Port %d supported offload features: %s\n", 563 info_border, port_id, info_border); 564 565 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 566 printf("VLAN stripped: "); 567 if (ports[port_id].dev_conf.rxmode.offloads & 568 DEV_RX_OFFLOAD_VLAN_STRIP) 569 printf("on\n"); 570 else 571 printf("off\n"); 572 } 573 574 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 575 printf("Double VLANs stripped: "); 576 if (ports[port_id].dev_conf.rxmode.offloads & 577 DEV_RX_OFFLOAD_VLAN_EXTEND) 578 printf("on\n"); 579 else 580 printf("off\n"); 581 } 582 583 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 584 printf("RX IPv4 checksum: "); 585 if (ports[port_id].dev_conf.rxmode.offloads & 586 DEV_RX_OFFLOAD_IPV4_CKSUM) 587 printf("on\n"); 588 else 589 printf("off\n"); 590 } 591 592 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 593 printf("RX UDP checksum: "); 594 if (ports[port_id].dev_conf.rxmode.offloads & 595 DEV_RX_OFFLOAD_UDP_CKSUM) 596 printf("on\n"); 597 else 598 printf("off\n"); 599 } 600 601 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 602 printf("RX TCP checksum: "); 603 if (ports[port_id].dev_conf.rxmode.offloads & 604 DEV_RX_OFFLOAD_TCP_CKSUM) 605 printf("on\n"); 606 else 607 printf("off\n"); 608 } 609 610 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 611 printf("RX Outer IPv4 checksum: "); 612 if (ports[port_id].dev_conf.rxmode.offloads & 613 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 614 printf("on\n"); 615 else 616 printf("off\n"); 617 } 618 619 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 620 printf("Large receive offload: "); 621 if (ports[port_id].dev_conf.rxmode.offloads & 622 DEV_RX_OFFLOAD_TCP_LRO) 623 printf("on\n"); 624 else 625 printf("off\n"); 626 } 627 628 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 629 printf("VLAN insert: "); 630 if (ports[port_id].dev_conf.txmode.offloads & 631 DEV_TX_OFFLOAD_VLAN_INSERT) 632 printf("on\n"); 633 else 634 printf("off\n"); 635 } 636 637 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 638 printf("HW timestamp: "); 639 if (ports[port_id].dev_conf.rxmode.offloads & 640 DEV_RX_OFFLOAD_TIMESTAMP) 641 printf("on\n"); 642 else 643 printf("off\n"); 644 } 645 646 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 647 printf("Double VLANs insert: "); 648 if (ports[port_id].dev_conf.txmode.offloads & 649 DEV_TX_OFFLOAD_QINQ_INSERT) 650 printf("on\n"); 651 else 652 printf("off\n"); 653 } 654 655 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 656 printf("TX IPv4 checksum: "); 657 if (ports[port_id].dev_conf.txmode.offloads & 658 DEV_TX_OFFLOAD_IPV4_CKSUM) 659 printf("on\n"); 660 else 661 printf("off\n"); 662 } 663 664 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 665 printf("TX UDP checksum: "); 666 if (ports[port_id].dev_conf.txmode.offloads & 667 DEV_TX_OFFLOAD_UDP_CKSUM) 668 printf("on\n"); 669 else 670 printf("off\n"); 671 } 672 673 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 674 printf("TX TCP checksum: "); 675 if (ports[port_id].dev_conf.txmode.offloads & 676 DEV_TX_OFFLOAD_TCP_CKSUM) 677 printf("on\n"); 678 else 679 printf("off\n"); 680 } 681 682 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 683 printf("TX SCTP checksum: "); 684 if (ports[port_id].dev_conf.txmode.offloads & 685 DEV_TX_OFFLOAD_SCTP_CKSUM) 686 printf("on\n"); 687 else 688 printf("off\n"); 689 } 690 691 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 692 printf("TX Outer IPv4 checksum: "); 693 if (ports[port_id].dev_conf.txmode.offloads & 694 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 695 printf("on\n"); 696 else 697 printf("off\n"); 698 } 699 700 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 701 printf("TX TCP segmentation: "); 702 if (ports[port_id].dev_conf.txmode.offloads & 703 DEV_TX_OFFLOAD_TCP_TSO) 704 printf("on\n"); 705 else 706 printf("off\n"); 707 } 708 709 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 710 printf("TX UDP segmentation: "); 711 if (ports[port_id].dev_conf.txmode.offloads & 712 DEV_TX_OFFLOAD_UDP_TSO) 713 printf("on\n"); 714 else 715 printf("off\n"); 716 } 717 718 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 719 printf("TSO for VXLAN tunnel packet: "); 720 if (ports[port_id].dev_conf.txmode.offloads & 721 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 722 printf("on\n"); 723 else 724 printf("off\n"); 725 } 726 727 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 728 printf("TSO for GRE tunnel packet: "); 729 if (ports[port_id].dev_conf.txmode.offloads & 730 DEV_TX_OFFLOAD_GRE_TNL_TSO) 731 printf("on\n"); 732 else 733 printf("off\n"); 734 } 735 736 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 737 printf("TSO for IPIP tunnel packet: "); 738 if (ports[port_id].dev_conf.txmode.offloads & 739 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 740 printf("on\n"); 741 else 742 printf("off\n"); 743 } 744 745 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 746 printf("TSO for GENEVE tunnel packet: "); 747 if (ports[port_id].dev_conf.txmode.offloads & 748 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 749 printf("on\n"); 750 else 751 printf("off\n"); 752 } 753 754 } 755 756 int 757 port_id_is_invalid(portid_t port_id, enum print_warning warning) 758 { 759 if (port_id == (portid_t)RTE_PORT_ALL) 760 return 0; 761 762 if (rte_eth_dev_is_valid_port(port_id)) 763 return 0; 764 765 if (warning == ENABLED_WARN) 766 printf("Invalid port %d\n", port_id); 767 768 return 1; 769 } 770 771 static int 772 vlan_id_is_invalid(uint16_t vlan_id) 773 { 774 if (vlan_id < 4096) 775 return 0; 776 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 777 return 1; 778 } 779 780 static int 781 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 782 { 783 uint64_t pci_len; 784 785 if (reg_off & 0x3) { 786 printf("Port register offset 0x%X not aligned on a 4-byte " 787 "boundary\n", 788 (unsigned)reg_off); 789 return 1; 790 } 791 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 792 if (reg_off >= pci_len) { 793 printf("Port %d: register offset %u (0x%X) out of port PCI " 794 "resource (length=%"PRIu64")\n", 795 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 796 return 1; 797 } 798 return 0; 799 } 800 801 static int 802 reg_bit_pos_is_invalid(uint8_t bit_pos) 803 { 804 if (bit_pos <= 31) 805 return 0; 806 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 807 return 1; 808 } 809 810 #define display_port_and_reg_off(port_id, reg_off) \ 811 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 812 813 static inline void 814 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 815 { 816 display_port_and_reg_off(port_id, (unsigned)reg_off); 817 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 818 } 819 820 void 821 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 822 { 823 uint32_t reg_v; 824 825 826 if (port_id_is_invalid(port_id, ENABLED_WARN)) 827 return; 828 if (port_reg_off_is_invalid(port_id, reg_off)) 829 return; 830 if (reg_bit_pos_is_invalid(bit_x)) 831 return; 832 reg_v = port_id_pci_reg_read(port_id, reg_off); 833 display_port_and_reg_off(port_id, (unsigned)reg_off); 834 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 835 } 836 837 void 838 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 839 uint8_t bit1_pos, uint8_t bit2_pos) 840 { 841 uint32_t reg_v; 842 uint8_t l_bit; 843 uint8_t h_bit; 844 845 if (port_id_is_invalid(port_id, ENABLED_WARN)) 846 return; 847 if (port_reg_off_is_invalid(port_id, reg_off)) 848 return; 849 if (reg_bit_pos_is_invalid(bit1_pos)) 850 return; 851 if (reg_bit_pos_is_invalid(bit2_pos)) 852 return; 853 if (bit1_pos > bit2_pos) 854 l_bit = bit2_pos, h_bit = bit1_pos; 855 else 856 l_bit = bit1_pos, h_bit = bit2_pos; 857 858 reg_v = port_id_pci_reg_read(port_id, reg_off); 859 reg_v >>= l_bit; 860 if (h_bit < 31) 861 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 862 display_port_and_reg_off(port_id, (unsigned)reg_off); 863 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 864 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 865 } 866 867 void 868 port_reg_display(portid_t port_id, uint32_t reg_off) 869 { 870 uint32_t reg_v; 871 872 if (port_id_is_invalid(port_id, ENABLED_WARN)) 873 return; 874 if (port_reg_off_is_invalid(port_id, reg_off)) 875 return; 876 reg_v = port_id_pci_reg_read(port_id, reg_off); 877 display_port_reg_value(port_id, reg_off, reg_v); 878 } 879 880 void 881 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 882 uint8_t bit_v) 883 { 884 uint32_t reg_v; 885 886 if (port_id_is_invalid(port_id, ENABLED_WARN)) 887 return; 888 if (port_reg_off_is_invalid(port_id, reg_off)) 889 return; 890 if (reg_bit_pos_is_invalid(bit_pos)) 891 return; 892 if (bit_v > 1) { 893 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 894 return; 895 } 896 reg_v = port_id_pci_reg_read(port_id, reg_off); 897 if (bit_v == 0) 898 reg_v &= ~(1 << bit_pos); 899 else 900 reg_v |= (1 << bit_pos); 901 port_id_pci_reg_write(port_id, reg_off, reg_v); 902 display_port_reg_value(port_id, reg_off, reg_v); 903 } 904 905 void 906 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 907 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 908 { 909 uint32_t max_v; 910 uint32_t reg_v; 911 uint8_t l_bit; 912 uint8_t h_bit; 913 914 if (port_id_is_invalid(port_id, ENABLED_WARN)) 915 return; 916 if (port_reg_off_is_invalid(port_id, reg_off)) 917 return; 918 if (reg_bit_pos_is_invalid(bit1_pos)) 919 return; 920 if (reg_bit_pos_is_invalid(bit2_pos)) 921 return; 922 if (bit1_pos > bit2_pos) 923 l_bit = bit2_pos, h_bit = bit1_pos; 924 else 925 l_bit = bit1_pos, h_bit = bit2_pos; 926 927 if ((h_bit - l_bit) < 31) 928 max_v = (1 << (h_bit - l_bit + 1)) - 1; 929 else 930 max_v = 0xFFFFFFFF; 931 932 if (value > max_v) { 933 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 934 (unsigned)value, (unsigned)value, 935 (unsigned)max_v, (unsigned)max_v); 936 return; 937 } 938 reg_v = port_id_pci_reg_read(port_id, reg_off); 939 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 940 reg_v |= (value << l_bit); /* Set changed bits */ 941 port_id_pci_reg_write(port_id, reg_off, reg_v); 942 display_port_reg_value(port_id, reg_off, reg_v); 943 } 944 945 void 946 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 947 { 948 if (port_id_is_invalid(port_id, ENABLED_WARN)) 949 return; 950 if (port_reg_off_is_invalid(port_id, reg_off)) 951 return; 952 port_id_pci_reg_write(port_id, reg_off, reg_v); 953 display_port_reg_value(port_id, reg_off, reg_v); 954 } 955 956 void 957 port_mtu_set(portid_t port_id, uint16_t mtu) 958 { 959 int diag; 960 961 if (port_id_is_invalid(port_id, ENABLED_WARN)) 962 return; 963 diag = rte_eth_dev_set_mtu(port_id, mtu); 964 if (diag == 0) 965 return; 966 printf("Set MTU failed. diag=%d\n", diag); 967 } 968 969 /* Generic flow management functions. */ 970 971 /** Generate flow_item[] entry. */ 972 #define MK_FLOW_ITEM(t, s) \ 973 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 974 .name = # t, \ 975 .size = s, \ 976 } 977 978 /** Information about known flow pattern items. */ 979 static const struct { 980 const char *name; 981 size_t size; 982 } flow_item[] = { 983 MK_FLOW_ITEM(END, 0), 984 MK_FLOW_ITEM(VOID, 0), 985 MK_FLOW_ITEM(INVERT, 0), 986 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 987 MK_FLOW_ITEM(PF, 0), 988 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 989 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)), 990 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */ 991 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 992 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 993 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 994 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 995 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 996 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 997 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 998 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 999 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 1000 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), 1001 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), 1002 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 1003 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 1004 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)), 1005 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), 1006 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), 1007 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), 1008 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), 1009 }; 1010 1011 /** Compute storage space needed by item specification. */ 1012 static void 1013 flow_item_spec_size(const struct rte_flow_item *item, 1014 size_t *size, size_t *pad) 1015 { 1016 if (!item->spec) { 1017 *size = 0; 1018 goto empty; 1019 } 1020 switch (item->type) { 1021 union { 1022 const struct rte_flow_item_raw *raw; 1023 } spec; 1024 1025 case RTE_FLOW_ITEM_TYPE_RAW: 1026 spec.raw = item->spec; 1027 *size = offsetof(struct rte_flow_item_raw, pattern) + 1028 spec.raw->length * sizeof(*spec.raw->pattern); 1029 break; 1030 default: 1031 *size = flow_item[item->type].size; 1032 break; 1033 } 1034 empty: 1035 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1036 } 1037 1038 /** Generate flow_action[] entry. */ 1039 #define MK_FLOW_ACTION(t, s) \ 1040 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 1041 .name = # t, \ 1042 .size = s, \ 1043 } 1044 1045 /** Information about known flow actions. */ 1046 static const struct { 1047 const char *name; 1048 size_t size; 1049 } flow_action[] = { 1050 MK_FLOW_ACTION(END, 0), 1051 MK_FLOW_ACTION(VOID, 0), 1052 MK_FLOW_ACTION(PASSTHRU, 0), 1053 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 1054 MK_FLOW_ACTION(FLAG, 0), 1055 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 1056 MK_FLOW_ACTION(DROP, 0), 1057 MK_FLOW_ACTION(COUNT, 0), 1058 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)), 1059 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ 1060 MK_FLOW_ACTION(PF, 0), 1061 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 1062 }; 1063 1064 /** Compute storage space needed by action configuration. */ 1065 static void 1066 flow_action_conf_size(const struct rte_flow_action *action, 1067 size_t *size, size_t *pad) 1068 { 1069 if (!action->conf) { 1070 *size = 0; 1071 goto empty; 1072 } 1073 switch (action->type) { 1074 union { 1075 const struct rte_flow_action_rss *rss; 1076 } conf; 1077 1078 case RTE_FLOW_ACTION_TYPE_RSS: 1079 conf.rss = action->conf; 1080 *size = offsetof(struct rte_flow_action_rss, queue) + 1081 conf.rss->num * sizeof(*conf.rss->queue); 1082 break; 1083 default: 1084 *size = flow_action[action->type].size; 1085 break; 1086 } 1087 empty: 1088 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1089 } 1090 1091 /** Generate a port_flow entry from attributes/pattern/actions. */ 1092 static struct port_flow * 1093 port_flow_new(const struct rte_flow_attr *attr, 1094 const struct rte_flow_item *pattern, 1095 const struct rte_flow_action *actions) 1096 { 1097 const struct rte_flow_item *item; 1098 const struct rte_flow_action *action; 1099 struct port_flow *pf = NULL; 1100 size_t tmp; 1101 size_t pad; 1102 size_t off1 = 0; 1103 size_t off2 = 0; 1104 int err = ENOTSUP; 1105 1106 store: 1107 item = pattern; 1108 if (pf) 1109 pf->pattern = (void *)&pf->data[off1]; 1110 do { 1111 struct rte_flow_item *dst = NULL; 1112 1113 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 1114 !flow_item[item->type].name) 1115 goto notsup; 1116 if (pf) 1117 dst = memcpy(pf->data + off1, item, sizeof(*item)); 1118 off1 += sizeof(*item); 1119 flow_item_spec_size(item, &tmp, &pad); 1120 if (item->spec) { 1121 if (pf) 1122 dst->spec = memcpy(pf->data + off2, 1123 item->spec, tmp); 1124 off2 += tmp + pad; 1125 } 1126 if (item->last) { 1127 if (pf) 1128 dst->last = memcpy(pf->data + off2, 1129 item->last, tmp); 1130 off2 += tmp + pad; 1131 } 1132 if (item->mask) { 1133 if (pf) 1134 dst->mask = memcpy(pf->data + off2, 1135 item->mask, tmp); 1136 off2 += tmp + pad; 1137 } 1138 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1139 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 1140 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1141 action = actions; 1142 if (pf) 1143 pf->actions = (void *)&pf->data[off1]; 1144 do { 1145 struct rte_flow_action *dst = NULL; 1146 1147 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1148 !flow_action[action->type].name) 1149 goto notsup; 1150 if (pf) 1151 dst = memcpy(pf->data + off1, action, sizeof(*action)); 1152 off1 += sizeof(*action); 1153 flow_action_conf_size(action, &tmp, &pad); 1154 if (action->conf) { 1155 if (pf) 1156 dst->conf = memcpy(pf->data + off2, 1157 action->conf, tmp); 1158 off2 += tmp + pad; 1159 } 1160 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1161 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 1162 if (pf != NULL) 1163 return pf; 1164 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1165 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 1166 pf = calloc(1, tmp + off1 + off2); 1167 if (pf == NULL) 1168 err = errno; 1169 else { 1170 *pf = (const struct port_flow){ 1171 .size = tmp + off1 + off2, 1172 .attr = *attr, 1173 }; 1174 tmp -= offsetof(struct port_flow, data); 1175 off2 = tmp + off1; 1176 off1 = tmp; 1177 goto store; 1178 } 1179 notsup: 1180 rte_errno = err; 1181 return NULL; 1182 } 1183 1184 /** Print a message out of a flow error. */ 1185 static int 1186 port_flow_complain(struct rte_flow_error *error) 1187 { 1188 static const char *const errstrlist[] = { 1189 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1190 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1191 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1192 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1193 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1194 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1195 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1196 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1197 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1198 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1199 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1200 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1201 }; 1202 const char *errstr; 1203 char buf[32]; 1204 int err = rte_errno; 1205 1206 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1207 !errstrlist[error->type]) 1208 errstr = "unknown type"; 1209 else 1210 errstr = errstrlist[error->type]; 1211 printf("Caught error type %d (%s): %s%s\n", 1212 error->type, errstr, 1213 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1214 error->cause), buf) : "", 1215 error->message ? error->message : "(no stated reason)"); 1216 return -err; 1217 } 1218 1219 /** Validate flow rule. */ 1220 int 1221 port_flow_validate(portid_t port_id, 1222 const struct rte_flow_attr *attr, 1223 const struct rte_flow_item *pattern, 1224 const struct rte_flow_action *actions) 1225 { 1226 struct rte_flow_error error; 1227 1228 /* Poisoning to make sure PMDs update it in case of error. */ 1229 memset(&error, 0x11, sizeof(error)); 1230 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1231 return port_flow_complain(&error); 1232 printf("Flow rule validated\n"); 1233 return 0; 1234 } 1235 1236 /** Create flow rule. */ 1237 int 1238 port_flow_create(portid_t port_id, 1239 const struct rte_flow_attr *attr, 1240 const struct rte_flow_item *pattern, 1241 const struct rte_flow_action *actions) 1242 { 1243 struct rte_flow *flow; 1244 struct rte_port *port; 1245 struct port_flow *pf; 1246 uint32_t id; 1247 struct rte_flow_error error; 1248 1249 /* Poisoning to make sure PMDs update it in case of error. */ 1250 memset(&error, 0x22, sizeof(error)); 1251 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1252 if (!flow) 1253 return port_flow_complain(&error); 1254 port = &ports[port_id]; 1255 if (port->flow_list) { 1256 if (port->flow_list->id == UINT32_MAX) { 1257 printf("Highest rule ID is already assigned, delete" 1258 " it first"); 1259 rte_flow_destroy(port_id, flow, NULL); 1260 return -ENOMEM; 1261 } 1262 id = port->flow_list->id + 1; 1263 } else 1264 id = 0; 1265 pf = port_flow_new(attr, pattern, actions); 1266 if (!pf) { 1267 int err = rte_errno; 1268 1269 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1270 rte_flow_destroy(port_id, flow, NULL); 1271 return -err; 1272 } 1273 pf->next = port->flow_list; 1274 pf->id = id; 1275 pf->flow = flow; 1276 port->flow_list = pf; 1277 printf("Flow rule #%u created\n", pf->id); 1278 return 0; 1279 } 1280 1281 /** Destroy a number of flow rules. */ 1282 int 1283 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1284 { 1285 struct rte_port *port; 1286 struct port_flow **tmp; 1287 uint32_t c = 0; 1288 int ret = 0; 1289 1290 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1291 port_id == (portid_t)RTE_PORT_ALL) 1292 return -EINVAL; 1293 port = &ports[port_id]; 1294 tmp = &port->flow_list; 1295 while (*tmp) { 1296 uint32_t i; 1297 1298 for (i = 0; i != n; ++i) { 1299 struct rte_flow_error error; 1300 struct port_flow *pf = *tmp; 1301 1302 if (rule[i] != pf->id) 1303 continue; 1304 /* 1305 * Poisoning to make sure PMDs update it in case 1306 * of error. 1307 */ 1308 memset(&error, 0x33, sizeof(error)); 1309 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1310 ret = port_flow_complain(&error); 1311 continue; 1312 } 1313 printf("Flow rule #%u destroyed\n", pf->id); 1314 *tmp = pf->next; 1315 free(pf); 1316 break; 1317 } 1318 if (i == n) 1319 tmp = &(*tmp)->next; 1320 ++c; 1321 } 1322 return ret; 1323 } 1324 1325 /** Remove all flow rules. */ 1326 int 1327 port_flow_flush(portid_t port_id) 1328 { 1329 struct rte_flow_error error; 1330 struct rte_port *port; 1331 int ret = 0; 1332 1333 /* Poisoning to make sure PMDs update it in case of error. */ 1334 memset(&error, 0x44, sizeof(error)); 1335 if (rte_flow_flush(port_id, &error)) { 1336 ret = port_flow_complain(&error); 1337 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1338 port_id == (portid_t)RTE_PORT_ALL) 1339 return ret; 1340 } 1341 port = &ports[port_id]; 1342 while (port->flow_list) { 1343 struct port_flow *pf = port->flow_list->next; 1344 1345 free(port->flow_list); 1346 port->flow_list = pf; 1347 } 1348 return ret; 1349 } 1350 1351 /** Query a flow rule. */ 1352 int 1353 port_flow_query(portid_t port_id, uint32_t rule, 1354 enum rte_flow_action_type action) 1355 { 1356 struct rte_flow_error error; 1357 struct rte_port *port; 1358 struct port_flow *pf; 1359 const char *name; 1360 union { 1361 struct rte_flow_query_count count; 1362 } query; 1363 1364 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1365 port_id == (portid_t)RTE_PORT_ALL) 1366 return -EINVAL; 1367 port = &ports[port_id]; 1368 for (pf = port->flow_list; pf; pf = pf->next) 1369 if (pf->id == rule) 1370 break; 1371 if (!pf) { 1372 printf("Flow rule #%u not found\n", rule); 1373 return -ENOENT; 1374 } 1375 if ((unsigned int)action >= RTE_DIM(flow_action) || 1376 !flow_action[action].name) 1377 name = "unknown"; 1378 else 1379 name = flow_action[action].name; 1380 switch (action) { 1381 case RTE_FLOW_ACTION_TYPE_COUNT: 1382 break; 1383 default: 1384 printf("Cannot query action type %d (%s)\n", action, name); 1385 return -ENOTSUP; 1386 } 1387 /* Poisoning to make sure PMDs update it in case of error. */ 1388 memset(&error, 0x55, sizeof(error)); 1389 memset(&query, 0, sizeof(query)); 1390 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1391 return port_flow_complain(&error); 1392 switch (action) { 1393 case RTE_FLOW_ACTION_TYPE_COUNT: 1394 printf("%s:\n" 1395 " hits_set: %u\n" 1396 " bytes_set: %u\n" 1397 " hits: %" PRIu64 "\n" 1398 " bytes: %" PRIu64 "\n", 1399 name, 1400 query.count.hits_set, 1401 query.count.bytes_set, 1402 query.count.hits, 1403 query.count.bytes); 1404 break; 1405 default: 1406 printf("Cannot display result for action type %d (%s)\n", 1407 action, name); 1408 break; 1409 } 1410 return 0; 1411 } 1412 1413 /** List flow rules. */ 1414 void 1415 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1416 { 1417 struct rte_port *port; 1418 struct port_flow *pf; 1419 struct port_flow *list = NULL; 1420 uint32_t i; 1421 1422 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1423 port_id == (portid_t)RTE_PORT_ALL) 1424 return; 1425 port = &ports[port_id]; 1426 if (!port->flow_list) 1427 return; 1428 /* Sort flows by group, priority and ID. */ 1429 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1430 struct port_flow **tmp; 1431 1432 if (n) { 1433 /* Filter out unwanted groups. */ 1434 for (i = 0; i != n; ++i) 1435 if (pf->attr.group == group[i]) 1436 break; 1437 if (i == n) 1438 continue; 1439 } 1440 tmp = &list; 1441 while (*tmp && 1442 (pf->attr.group > (*tmp)->attr.group || 1443 (pf->attr.group == (*tmp)->attr.group && 1444 pf->attr.priority > (*tmp)->attr.priority) || 1445 (pf->attr.group == (*tmp)->attr.group && 1446 pf->attr.priority == (*tmp)->attr.priority && 1447 pf->id > (*tmp)->id))) 1448 tmp = &(*tmp)->tmp; 1449 pf->tmp = *tmp; 1450 *tmp = pf; 1451 } 1452 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1453 for (pf = list; pf != NULL; pf = pf->tmp) { 1454 const struct rte_flow_item *item = pf->pattern; 1455 const struct rte_flow_action *action = pf->actions; 1456 1457 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t", 1458 pf->id, 1459 pf->attr.group, 1460 pf->attr.priority, 1461 pf->attr.ingress ? 'i' : '-', 1462 pf->attr.egress ? 'e' : '-'); 1463 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1464 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1465 printf("%s ", flow_item[item->type].name); 1466 ++item; 1467 } 1468 printf("=>"); 1469 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1470 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1471 printf(" %s", flow_action[action->type].name); 1472 ++action; 1473 } 1474 printf("\n"); 1475 } 1476 } 1477 1478 /** Restrict ingress traffic to the defined flow rules. */ 1479 int 1480 port_flow_isolate(portid_t port_id, int set) 1481 { 1482 struct rte_flow_error error; 1483 1484 /* Poisoning to make sure PMDs update it in case of error. */ 1485 memset(&error, 0x66, sizeof(error)); 1486 if (rte_flow_isolate(port_id, set, &error)) 1487 return port_flow_complain(&error); 1488 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1489 port_id, 1490 set ? "now restricted" : "not restricted anymore"); 1491 return 0; 1492 } 1493 1494 /* 1495 * RX/TX ring descriptors display functions. 1496 */ 1497 int 1498 rx_queue_id_is_invalid(queueid_t rxq_id) 1499 { 1500 if (rxq_id < nb_rxq) 1501 return 0; 1502 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1503 return 1; 1504 } 1505 1506 int 1507 tx_queue_id_is_invalid(queueid_t txq_id) 1508 { 1509 if (txq_id < nb_txq) 1510 return 0; 1511 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1512 return 1; 1513 } 1514 1515 static int 1516 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1517 { 1518 if (rxdesc_id < nb_rxd) 1519 return 0; 1520 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1521 rxdesc_id, nb_rxd); 1522 return 1; 1523 } 1524 1525 static int 1526 tx_desc_id_is_invalid(uint16_t txdesc_id) 1527 { 1528 if (txdesc_id < nb_txd) 1529 return 0; 1530 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1531 txdesc_id, nb_txd); 1532 return 1; 1533 } 1534 1535 static const struct rte_memzone * 1536 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1537 { 1538 char mz_name[RTE_MEMZONE_NAMESIZE]; 1539 const struct rte_memzone *mz; 1540 1541 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1542 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1543 mz = rte_memzone_lookup(mz_name); 1544 if (mz == NULL) 1545 printf("%s ring memory zoneof (port %d, queue %d) not" 1546 "found (zone name = %s\n", 1547 ring_name, port_id, q_id, mz_name); 1548 return mz; 1549 } 1550 1551 union igb_ring_dword { 1552 uint64_t dword; 1553 struct { 1554 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1555 uint32_t lo; 1556 uint32_t hi; 1557 #else 1558 uint32_t hi; 1559 uint32_t lo; 1560 #endif 1561 } words; 1562 }; 1563 1564 struct igb_ring_desc_32_bytes { 1565 union igb_ring_dword lo_dword; 1566 union igb_ring_dword hi_dword; 1567 union igb_ring_dword resv1; 1568 union igb_ring_dword resv2; 1569 }; 1570 1571 struct igb_ring_desc_16_bytes { 1572 union igb_ring_dword lo_dword; 1573 union igb_ring_dword hi_dword; 1574 }; 1575 1576 static void 1577 ring_rxd_display_dword(union igb_ring_dword dword) 1578 { 1579 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1580 (unsigned)dword.words.hi); 1581 } 1582 1583 static void 1584 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1585 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1586 portid_t port_id, 1587 #else 1588 __rte_unused portid_t port_id, 1589 #endif 1590 uint16_t desc_id) 1591 { 1592 struct igb_ring_desc_16_bytes *ring = 1593 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1594 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1595 struct rte_eth_dev_info dev_info; 1596 1597 memset(&dev_info, 0, sizeof(dev_info)); 1598 rte_eth_dev_info_get(port_id, &dev_info); 1599 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1600 /* 32 bytes RX descriptor, i40e only */ 1601 struct igb_ring_desc_32_bytes *ring = 1602 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1603 ring[desc_id].lo_dword.dword = 1604 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1605 ring_rxd_display_dword(ring[desc_id].lo_dword); 1606 ring[desc_id].hi_dword.dword = 1607 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1608 ring_rxd_display_dword(ring[desc_id].hi_dword); 1609 ring[desc_id].resv1.dword = 1610 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1611 ring_rxd_display_dword(ring[desc_id].resv1); 1612 ring[desc_id].resv2.dword = 1613 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1614 ring_rxd_display_dword(ring[desc_id].resv2); 1615 1616 return; 1617 } 1618 #endif 1619 /* 16 bytes RX descriptor */ 1620 ring[desc_id].lo_dword.dword = 1621 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1622 ring_rxd_display_dword(ring[desc_id].lo_dword); 1623 ring[desc_id].hi_dword.dword = 1624 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1625 ring_rxd_display_dword(ring[desc_id].hi_dword); 1626 } 1627 1628 static void 1629 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1630 { 1631 struct igb_ring_desc_16_bytes *ring; 1632 struct igb_ring_desc_16_bytes txd; 1633 1634 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1635 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1636 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1637 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1638 (unsigned)txd.lo_dword.words.lo, 1639 (unsigned)txd.lo_dword.words.hi, 1640 (unsigned)txd.hi_dword.words.lo, 1641 (unsigned)txd.hi_dword.words.hi); 1642 } 1643 1644 void 1645 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1646 { 1647 const struct rte_memzone *rx_mz; 1648 1649 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1650 return; 1651 if (rx_queue_id_is_invalid(rxq_id)) 1652 return; 1653 if (rx_desc_id_is_invalid(rxd_id)) 1654 return; 1655 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1656 if (rx_mz == NULL) 1657 return; 1658 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1659 } 1660 1661 void 1662 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1663 { 1664 const struct rte_memzone *tx_mz; 1665 1666 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1667 return; 1668 if (tx_queue_id_is_invalid(txq_id)) 1669 return; 1670 if (tx_desc_id_is_invalid(txd_id)) 1671 return; 1672 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1673 if (tx_mz == NULL) 1674 return; 1675 ring_tx_descriptor_display(tx_mz, txd_id); 1676 } 1677 1678 void 1679 fwd_lcores_config_display(void) 1680 { 1681 lcoreid_t lc_id; 1682 1683 printf("List of forwarding lcores:"); 1684 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1685 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1686 printf("\n"); 1687 } 1688 void 1689 rxtx_config_display(void) 1690 { 1691 portid_t pid; 1692 1693 printf(" %s packet forwarding%s packets/burst=%d\n", 1694 cur_fwd_eng->fwd_mode_name, 1695 retry_enabled == 0 ? "" : " with retry", 1696 nb_pkt_per_burst); 1697 1698 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1699 printf(" packet len=%u - nb packet segments=%d\n", 1700 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1701 1702 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1703 nb_fwd_lcores, nb_fwd_ports); 1704 1705 RTE_ETH_FOREACH_DEV(pid) { 1706 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf; 1707 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf; 1708 1709 printf(" port %d:\n", (unsigned int)pid); 1710 printf(" CRC stripping %s\n", 1711 (ports[pid].dev_conf.rxmode.offloads & 1712 DEV_RX_OFFLOAD_CRC_STRIP) ? 1713 "enabled" : "disabled"); 1714 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 1715 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 1716 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1717 " wthresh=%d\n", 1718 rx_conf->rx_thresh.pthresh, 1719 rx_conf->rx_thresh.hthresh, 1720 rx_conf->rx_thresh.wthresh); 1721 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 1722 nb_txq, nb_txd, tx_conf->tx_free_thresh); 1723 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1724 " wthresh=%d\n", 1725 tx_conf->tx_thresh.pthresh, 1726 tx_conf->tx_thresh.hthresh, 1727 tx_conf->tx_thresh.wthresh); 1728 printf(" TX RS bit threshold=%d - TXQ offloads=0x%"PRIx64"\n", 1729 tx_conf->tx_rs_thresh, tx_conf->offloads); 1730 } 1731 } 1732 1733 void 1734 port_rss_reta_info(portid_t port_id, 1735 struct rte_eth_rss_reta_entry64 *reta_conf, 1736 uint16_t nb_entries) 1737 { 1738 uint16_t i, idx, shift; 1739 int ret; 1740 1741 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1742 return; 1743 1744 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1745 if (ret != 0) { 1746 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1747 return; 1748 } 1749 1750 for (i = 0; i < nb_entries; i++) { 1751 idx = i / RTE_RETA_GROUP_SIZE; 1752 shift = i % RTE_RETA_GROUP_SIZE; 1753 if (!(reta_conf[idx].mask & (1ULL << shift))) 1754 continue; 1755 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1756 i, reta_conf[idx].reta[shift]); 1757 } 1758 } 1759 1760 /* 1761 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1762 * key of the port. 1763 */ 1764 void 1765 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1766 { 1767 struct rte_eth_rss_conf rss_conf; 1768 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1769 uint64_t rss_hf; 1770 uint8_t i; 1771 int diag; 1772 struct rte_eth_dev_info dev_info; 1773 uint8_t hash_key_size; 1774 1775 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1776 return; 1777 1778 memset(&dev_info, 0, sizeof(dev_info)); 1779 rte_eth_dev_info_get(port_id, &dev_info); 1780 if (dev_info.hash_key_size > 0 && 1781 dev_info.hash_key_size <= sizeof(rss_key)) 1782 hash_key_size = dev_info.hash_key_size; 1783 else { 1784 printf("dev_info did not provide a valid hash key size\n"); 1785 return; 1786 } 1787 1788 rss_conf.rss_hf = 0; 1789 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1790 if (!strcmp(rss_info, rss_type_table[i].str)) 1791 rss_conf.rss_hf = rss_type_table[i].rss_type; 1792 } 1793 1794 /* Get RSS hash key if asked to display it */ 1795 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1796 rss_conf.rss_key_len = hash_key_size; 1797 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1798 if (diag != 0) { 1799 switch (diag) { 1800 case -ENODEV: 1801 printf("port index %d invalid\n", port_id); 1802 break; 1803 case -ENOTSUP: 1804 printf("operation not supported by device\n"); 1805 break; 1806 default: 1807 printf("operation failed - diag=%d\n", diag); 1808 break; 1809 } 1810 return; 1811 } 1812 rss_hf = rss_conf.rss_hf; 1813 if (rss_hf == 0) { 1814 printf("RSS disabled\n"); 1815 return; 1816 } 1817 printf("RSS functions:\n "); 1818 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1819 if (rss_hf & rss_type_table[i].rss_type) 1820 printf("%s ", rss_type_table[i].str); 1821 } 1822 printf("\n"); 1823 if (!show_rss_key) 1824 return; 1825 printf("RSS key:\n"); 1826 for (i = 0; i < hash_key_size; i++) 1827 printf("%02X", rss_key[i]); 1828 printf("\n"); 1829 } 1830 1831 void 1832 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1833 uint hash_key_len) 1834 { 1835 struct rte_eth_rss_conf rss_conf; 1836 int diag; 1837 unsigned int i; 1838 1839 rss_conf.rss_key = NULL; 1840 rss_conf.rss_key_len = hash_key_len; 1841 rss_conf.rss_hf = 0; 1842 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1843 if (!strcmp(rss_type_table[i].str, rss_type)) 1844 rss_conf.rss_hf = rss_type_table[i].rss_type; 1845 } 1846 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1847 if (diag == 0) { 1848 rss_conf.rss_key = hash_key; 1849 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1850 } 1851 if (diag == 0) 1852 return; 1853 1854 switch (diag) { 1855 case -ENODEV: 1856 printf("port index %d invalid\n", port_id); 1857 break; 1858 case -ENOTSUP: 1859 printf("operation not supported by device\n"); 1860 break; 1861 default: 1862 printf("operation failed - diag=%d\n", diag); 1863 break; 1864 } 1865 } 1866 1867 /* 1868 * Setup forwarding configuration for each logical core. 1869 */ 1870 static void 1871 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1872 { 1873 streamid_t nb_fs_per_lcore; 1874 streamid_t nb_fs; 1875 streamid_t sm_id; 1876 lcoreid_t nb_extra; 1877 lcoreid_t nb_fc; 1878 lcoreid_t nb_lc; 1879 lcoreid_t lc_id; 1880 1881 nb_fs = cfg->nb_fwd_streams; 1882 nb_fc = cfg->nb_fwd_lcores; 1883 if (nb_fs <= nb_fc) { 1884 nb_fs_per_lcore = 1; 1885 nb_extra = 0; 1886 } else { 1887 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1888 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1889 } 1890 1891 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1892 sm_id = 0; 1893 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1894 fwd_lcores[lc_id]->stream_idx = sm_id; 1895 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1896 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1897 } 1898 1899 /* 1900 * Assign extra remaining streams, if any. 1901 */ 1902 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1903 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1904 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1905 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1906 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1907 } 1908 } 1909 1910 static void 1911 simple_fwd_config_setup(void) 1912 { 1913 portid_t i; 1914 portid_t j; 1915 portid_t inc = 2; 1916 1917 if (port_topology == PORT_TOPOLOGY_CHAINED || 1918 port_topology == PORT_TOPOLOGY_LOOP) { 1919 inc = 1; 1920 } else if (nb_fwd_ports % 2) { 1921 printf("\nWarning! Cannot handle an odd number of ports " 1922 "with the current port topology. Configuration " 1923 "must be changed to have an even number of ports, " 1924 "or relaunch application with " 1925 "--port-topology=chained\n\n"); 1926 } 1927 1928 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1929 cur_fwd_config.nb_fwd_streams = 1930 (streamid_t) cur_fwd_config.nb_fwd_ports; 1931 1932 /* reinitialize forwarding streams */ 1933 init_fwd_streams(); 1934 1935 /* 1936 * In the simple forwarding test, the number of forwarding cores 1937 * must be lower or equal to the number of forwarding ports. 1938 */ 1939 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1940 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1941 cur_fwd_config.nb_fwd_lcores = 1942 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1943 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1944 1945 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1946 if (port_topology != PORT_TOPOLOGY_LOOP) 1947 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1948 else 1949 j = i; 1950 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1951 fwd_streams[i]->rx_queue = 0; 1952 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1953 fwd_streams[i]->tx_queue = 0; 1954 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 1955 fwd_streams[i]->retry_enabled = retry_enabled; 1956 1957 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1958 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1959 fwd_streams[j]->rx_queue = 0; 1960 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1961 fwd_streams[j]->tx_queue = 0; 1962 fwd_streams[j]->peer_addr = fwd_streams[j]->tx_port; 1963 fwd_streams[j]->retry_enabled = retry_enabled; 1964 } 1965 } 1966 } 1967 1968 /** 1969 * For the RSS forwarding test all streams distributed over lcores. Each stream 1970 * being composed of a RX queue to poll on a RX port for input messages, 1971 * associated with a TX queue of a TX port where to send forwarded packets. 1972 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1973 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1974 * following rules: 1975 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1976 * - TxQl = RxQj 1977 */ 1978 static void 1979 rss_fwd_config_setup(void) 1980 { 1981 portid_t rxp; 1982 portid_t txp; 1983 queueid_t rxq; 1984 queueid_t nb_q; 1985 streamid_t sm_id; 1986 1987 nb_q = nb_rxq; 1988 if (nb_q > nb_txq) 1989 nb_q = nb_txq; 1990 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1991 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1992 cur_fwd_config.nb_fwd_streams = 1993 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1994 1995 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1996 cur_fwd_config.nb_fwd_lcores = 1997 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1998 1999 /* reinitialize forwarding streams */ 2000 init_fwd_streams(); 2001 2002 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2003 rxp = 0; rxq = 0; 2004 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2005 struct fwd_stream *fs; 2006 2007 fs = fwd_streams[sm_id]; 2008 2009 if ((rxp & 0x1) == 0) 2010 txp = (portid_t) (rxp + 1); 2011 else 2012 txp = (portid_t) (rxp - 1); 2013 /* 2014 * if we are in loopback, simply send stuff out through the 2015 * ingress port 2016 */ 2017 if (port_topology == PORT_TOPOLOGY_LOOP) 2018 txp = rxp; 2019 2020 fs->rx_port = fwd_ports_ids[rxp]; 2021 fs->rx_queue = rxq; 2022 fs->tx_port = fwd_ports_ids[txp]; 2023 fs->tx_queue = rxq; 2024 fs->peer_addr = fs->tx_port; 2025 fs->retry_enabled = retry_enabled; 2026 rxq = (queueid_t) (rxq + 1); 2027 if (rxq < nb_q) 2028 continue; 2029 /* 2030 * rxq == nb_q 2031 * Restart from RX queue 0 on next RX port 2032 */ 2033 rxq = 0; 2034 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2035 rxp = (portid_t) 2036 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2037 else 2038 rxp = (portid_t) (rxp + 1); 2039 } 2040 } 2041 2042 /** 2043 * For the DCB forwarding test, each core is assigned on each traffic class. 2044 * 2045 * Each core is assigned a multi-stream, each stream being composed of 2046 * a RX queue to poll on a RX port for input messages, associated with 2047 * a TX queue of a TX port where to send forwarded packets. All RX and 2048 * TX queues are mapping to the same traffic class. 2049 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2050 * the same core 2051 */ 2052 static void 2053 dcb_fwd_config_setup(void) 2054 { 2055 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2056 portid_t txp, rxp = 0; 2057 queueid_t txq, rxq = 0; 2058 lcoreid_t lc_id; 2059 uint16_t nb_rx_queue, nb_tx_queue; 2060 uint16_t i, j, k, sm_id = 0; 2061 uint8_t tc = 0; 2062 2063 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2064 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2065 cur_fwd_config.nb_fwd_streams = 2066 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2067 2068 /* reinitialize forwarding streams */ 2069 init_fwd_streams(); 2070 sm_id = 0; 2071 txp = 1; 2072 /* get the dcb info on the first RX and TX ports */ 2073 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2074 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2075 2076 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2077 fwd_lcores[lc_id]->stream_nb = 0; 2078 fwd_lcores[lc_id]->stream_idx = sm_id; 2079 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2080 /* if the nb_queue is zero, means this tc is 2081 * not enabled on the POOL 2082 */ 2083 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2084 break; 2085 k = fwd_lcores[lc_id]->stream_nb + 2086 fwd_lcores[lc_id]->stream_idx; 2087 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2088 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2089 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2090 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2091 for (j = 0; j < nb_rx_queue; j++) { 2092 struct fwd_stream *fs; 2093 2094 fs = fwd_streams[k + j]; 2095 fs->rx_port = fwd_ports_ids[rxp]; 2096 fs->rx_queue = rxq + j; 2097 fs->tx_port = fwd_ports_ids[txp]; 2098 fs->tx_queue = txq + j % nb_tx_queue; 2099 fs->peer_addr = fs->tx_port; 2100 fs->retry_enabled = retry_enabled; 2101 } 2102 fwd_lcores[lc_id]->stream_nb += 2103 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2104 } 2105 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2106 2107 tc++; 2108 if (tc < rxp_dcb_info.nb_tcs) 2109 continue; 2110 /* Restart from TC 0 on next RX port */ 2111 tc = 0; 2112 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2113 rxp = (portid_t) 2114 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2115 else 2116 rxp++; 2117 if (rxp >= nb_fwd_ports) 2118 return; 2119 /* get the dcb information on next RX and TX ports */ 2120 if ((rxp & 0x1) == 0) 2121 txp = (portid_t) (rxp + 1); 2122 else 2123 txp = (portid_t) (rxp - 1); 2124 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2125 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2126 } 2127 } 2128 2129 static void 2130 icmp_echo_config_setup(void) 2131 { 2132 portid_t rxp; 2133 queueid_t rxq; 2134 lcoreid_t lc_id; 2135 uint16_t sm_id; 2136 2137 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2138 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2139 (nb_txq * nb_fwd_ports); 2140 else 2141 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2142 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2143 cur_fwd_config.nb_fwd_streams = 2144 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2145 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2146 cur_fwd_config.nb_fwd_lcores = 2147 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2148 if (verbose_level > 0) { 2149 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2150 __FUNCTION__, 2151 cur_fwd_config.nb_fwd_lcores, 2152 cur_fwd_config.nb_fwd_ports, 2153 cur_fwd_config.nb_fwd_streams); 2154 } 2155 2156 /* reinitialize forwarding streams */ 2157 init_fwd_streams(); 2158 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2159 rxp = 0; rxq = 0; 2160 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2161 if (verbose_level > 0) 2162 printf(" core=%d: \n", lc_id); 2163 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2164 struct fwd_stream *fs; 2165 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2166 fs->rx_port = fwd_ports_ids[rxp]; 2167 fs->rx_queue = rxq; 2168 fs->tx_port = fs->rx_port; 2169 fs->tx_queue = rxq; 2170 fs->peer_addr = fs->tx_port; 2171 fs->retry_enabled = retry_enabled; 2172 if (verbose_level > 0) 2173 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2174 sm_id, fs->rx_port, fs->rx_queue, 2175 fs->tx_queue); 2176 rxq = (queueid_t) (rxq + 1); 2177 if (rxq == nb_rxq) { 2178 rxq = 0; 2179 rxp = (portid_t) (rxp + 1); 2180 } 2181 } 2182 } 2183 } 2184 2185 void 2186 fwd_config_setup(void) 2187 { 2188 cur_fwd_config.fwd_eng = cur_fwd_eng; 2189 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2190 icmp_echo_config_setup(); 2191 return; 2192 } 2193 if ((nb_rxq > 1) && (nb_txq > 1)){ 2194 if (dcb_config) 2195 dcb_fwd_config_setup(); 2196 else 2197 rss_fwd_config_setup(); 2198 } 2199 else 2200 simple_fwd_config_setup(); 2201 } 2202 2203 void 2204 pkt_fwd_config_display(struct fwd_config *cfg) 2205 { 2206 struct fwd_stream *fs; 2207 lcoreid_t lc_id; 2208 streamid_t sm_id; 2209 2210 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2211 "NUMA support %s, MP over anonymous pages %s\n", 2212 cfg->fwd_eng->fwd_mode_name, 2213 retry_enabled == 0 ? "" : " with retry", 2214 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2215 numa_support == 1 ? "enabled" : "disabled", 2216 mp_anon != 0 ? "enabled" : "disabled"); 2217 2218 if (retry_enabled) 2219 printf("TX retry num: %u, delay between TX retries: %uus\n", 2220 burst_tx_retry_num, burst_tx_delay_time); 2221 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2222 printf("Logical Core %u (socket %u) forwards packets on " 2223 "%d streams:", 2224 fwd_lcores_cpuids[lc_id], 2225 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2226 fwd_lcores[lc_id]->stream_nb); 2227 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2228 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2229 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2230 "P=%d/Q=%d (socket %u) ", 2231 fs->rx_port, fs->rx_queue, 2232 ports[fs->rx_port].socket_id, 2233 fs->tx_port, fs->tx_queue, 2234 ports[fs->tx_port].socket_id); 2235 print_ethaddr("peer=", 2236 &peer_eth_addrs[fs->peer_addr]); 2237 } 2238 printf("\n"); 2239 } 2240 printf("\n"); 2241 } 2242 2243 void 2244 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2245 { 2246 uint8_t c, new_peer_addr[6]; 2247 if (!rte_eth_dev_is_valid_port(port_id)) { 2248 printf("Error: Invalid port number %i\n", port_id); 2249 return; 2250 } 2251 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2252 sizeof(new_peer_addr)) < 0) { 2253 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2254 return; 2255 } 2256 for (c = 0; c < 6; c++) 2257 peer_eth_addrs[port_id].addr_bytes[c] = 2258 new_peer_addr[c]; 2259 } 2260 2261 int 2262 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2263 { 2264 unsigned int i; 2265 unsigned int lcore_cpuid; 2266 int record_now; 2267 2268 record_now = 0; 2269 again: 2270 for (i = 0; i < nb_lc; i++) { 2271 lcore_cpuid = lcorelist[i]; 2272 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2273 printf("lcore %u not enabled\n", lcore_cpuid); 2274 return -1; 2275 } 2276 if (lcore_cpuid == rte_get_master_lcore()) { 2277 printf("lcore %u cannot be masked on for running " 2278 "packet forwarding, which is the master lcore " 2279 "and reserved for command line parsing only\n", 2280 lcore_cpuid); 2281 return -1; 2282 } 2283 if (record_now) 2284 fwd_lcores_cpuids[i] = lcore_cpuid; 2285 } 2286 if (record_now == 0) { 2287 record_now = 1; 2288 goto again; 2289 } 2290 nb_cfg_lcores = (lcoreid_t) nb_lc; 2291 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2292 printf("previous number of forwarding cores %u - changed to " 2293 "number of configured cores %u\n", 2294 (unsigned int) nb_fwd_lcores, nb_lc); 2295 nb_fwd_lcores = (lcoreid_t) nb_lc; 2296 } 2297 2298 return 0; 2299 } 2300 2301 int 2302 set_fwd_lcores_mask(uint64_t lcoremask) 2303 { 2304 unsigned int lcorelist[64]; 2305 unsigned int nb_lc; 2306 unsigned int i; 2307 2308 if (lcoremask == 0) { 2309 printf("Invalid NULL mask of cores\n"); 2310 return -1; 2311 } 2312 nb_lc = 0; 2313 for (i = 0; i < 64; i++) { 2314 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2315 continue; 2316 lcorelist[nb_lc++] = i; 2317 } 2318 return set_fwd_lcores_list(lcorelist, nb_lc); 2319 } 2320 2321 void 2322 set_fwd_lcores_number(uint16_t nb_lc) 2323 { 2324 if (nb_lc > nb_cfg_lcores) { 2325 printf("nb fwd cores %u > %u (max. number of configured " 2326 "lcores) - ignored\n", 2327 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2328 return; 2329 } 2330 nb_fwd_lcores = (lcoreid_t) nb_lc; 2331 printf("Number of forwarding cores set to %u\n", 2332 (unsigned int) nb_fwd_lcores); 2333 } 2334 2335 void 2336 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2337 { 2338 unsigned int i; 2339 portid_t port_id; 2340 int record_now; 2341 2342 record_now = 0; 2343 again: 2344 for (i = 0; i < nb_pt; i++) { 2345 port_id = (portid_t) portlist[i]; 2346 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2347 return; 2348 if (record_now) 2349 fwd_ports_ids[i] = port_id; 2350 } 2351 if (record_now == 0) { 2352 record_now = 1; 2353 goto again; 2354 } 2355 nb_cfg_ports = (portid_t) nb_pt; 2356 if (nb_fwd_ports != (portid_t) nb_pt) { 2357 printf("previous number of forwarding ports %u - changed to " 2358 "number of configured ports %u\n", 2359 (unsigned int) nb_fwd_ports, nb_pt); 2360 nb_fwd_ports = (portid_t) nb_pt; 2361 } 2362 } 2363 2364 void 2365 set_fwd_ports_mask(uint64_t portmask) 2366 { 2367 unsigned int portlist[64]; 2368 unsigned int nb_pt; 2369 unsigned int i; 2370 2371 if (portmask == 0) { 2372 printf("Invalid NULL mask of ports\n"); 2373 return; 2374 } 2375 nb_pt = 0; 2376 RTE_ETH_FOREACH_DEV(i) { 2377 if (! ((uint64_t)(1ULL << i) & portmask)) 2378 continue; 2379 portlist[nb_pt++] = i; 2380 } 2381 set_fwd_ports_list(portlist, nb_pt); 2382 } 2383 2384 void 2385 set_fwd_ports_number(uint16_t nb_pt) 2386 { 2387 if (nb_pt > nb_cfg_ports) { 2388 printf("nb fwd ports %u > %u (number of configured " 2389 "ports) - ignored\n", 2390 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2391 return; 2392 } 2393 nb_fwd_ports = (portid_t) nb_pt; 2394 printf("Number of forwarding ports set to %u\n", 2395 (unsigned int) nb_fwd_ports); 2396 } 2397 2398 int 2399 port_is_forwarding(portid_t port_id) 2400 { 2401 unsigned int i; 2402 2403 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2404 return -1; 2405 2406 for (i = 0; i < nb_fwd_ports; i++) { 2407 if (fwd_ports_ids[i] == port_id) 2408 return 1; 2409 } 2410 2411 return 0; 2412 } 2413 2414 void 2415 set_nb_pkt_per_burst(uint16_t nb) 2416 { 2417 if (nb > MAX_PKT_BURST) { 2418 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2419 " ignored\n", 2420 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2421 return; 2422 } 2423 nb_pkt_per_burst = nb; 2424 printf("Number of packets per burst set to %u\n", 2425 (unsigned int) nb_pkt_per_burst); 2426 } 2427 2428 static const char * 2429 tx_split_get_name(enum tx_pkt_split split) 2430 { 2431 uint32_t i; 2432 2433 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2434 if (tx_split_name[i].split == split) 2435 return tx_split_name[i].name; 2436 } 2437 return NULL; 2438 } 2439 2440 void 2441 set_tx_pkt_split(const char *name) 2442 { 2443 uint32_t i; 2444 2445 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2446 if (strcmp(tx_split_name[i].name, name) == 0) { 2447 tx_pkt_split = tx_split_name[i].split; 2448 return; 2449 } 2450 } 2451 printf("unknown value: \"%s\"\n", name); 2452 } 2453 2454 void 2455 show_tx_pkt_segments(void) 2456 { 2457 uint32_t i, n; 2458 const char *split; 2459 2460 n = tx_pkt_nb_segs; 2461 split = tx_split_get_name(tx_pkt_split); 2462 2463 printf("Number of segments: %u\n", n); 2464 printf("Segment sizes: "); 2465 for (i = 0; i != n - 1; i++) 2466 printf("%hu,", tx_pkt_seg_lengths[i]); 2467 printf("%hu\n", tx_pkt_seg_lengths[i]); 2468 printf("Split packet: %s\n", split); 2469 } 2470 2471 void 2472 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2473 { 2474 uint16_t tx_pkt_len; 2475 unsigned i; 2476 2477 if (nb_segs >= (unsigned) nb_txd) { 2478 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2479 nb_segs, (unsigned int) nb_txd); 2480 return; 2481 } 2482 2483 /* 2484 * Check that each segment length is greater or equal than 2485 * the mbuf data sise. 2486 * Check also that the total packet length is greater or equal than the 2487 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2488 */ 2489 tx_pkt_len = 0; 2490 for (i = 0; i < nb_segs; i++) { 2491 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2492 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2493 i, seg_lengths[i], (unsigned) mbuf_data_size); 2494 return; 2495 } 2496 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2497 } 2498 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2499 printf("total packet length=%u < %d - give up\n", 2500 (unsigned) tx_pkt_len, 2501 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2502 return; 2503 } 2504 2505 for (i = 0; i < nb_segs; i++) 2506 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2507 2508 tx_pkt_length = tx_pkt_len; 2509 tx_pkt_nb_segs = (uint8_t) nb_segs; 2510 } 2511 2512 void 2513 setup_gro(const char *onoff, portid_t port_id) 2514 { 2515 if (!rte_eth_dev_is_valid_port(port_id)) { 2516 printf("invalid port id %u\n", port_id); 2517 return; 2518 } 2519 if (test_done == 0) { 2520 printf("Before enable/disable GRO," 2521 " please stop forwarding first\n"); 2522 return; 2523 } 2524 if (strcmp(onoff, "on") == 0) { 2525 if (gro_ports[port_id].enable != 0) { 2526 printf("Port %u has enabled GRO. Please" 2527 " disable GRO first\n", port_id); 2528 return; 2529 } 2530 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2531 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2532 gro_ports[port_id].param.max_flow_num = 2533 GRO_DEFAULT_FLOW_NUM; 2534 gro_ports[port_id].param.max_item_per_flow = 2535 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2536 } 2537 gro_ports[port_id].enable = 1; 2538 } else { 2539 if (gro_ports[port_id].enable == 0) { 2540 printf("Port %u has disabled GRO\n", port_id); 2541 return; 2542 } 2543 gro_ports[port_id].enable = 0; 2544 } 2545 } 2546 2547 void 2548 setup_gro_flush_cycles(uint8_t cycles) 2549 { 2550 if (test_done == 0) { 2551 printf("Before change flush interval for GRO," 2552 " please stop forwarding first.\n"); 2553 return; 2554 } 2555 2556 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2557 GRO_DEFAULT_FLUSH_CYCLES) { 2558 printf("The flushing cycle be in the range" 2559 " of 1 to %u. Revert to the default" 2560 " value %u.\n", 2561 GRO_MAX_FLUSH_CYCLES, 2562 GRO_DEFAULT_FLUSH_CYCLES); 2563 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2564 } 2565 2566 gro_flush_cycles = cycles; 2567 } 2568 2569 void 2570 show_gro(portid_t port_id) 2571 { 2572 struct rte_gro_param *param; 2573 uint32_t max_pkts_num; 2574 2575 param = &gro_ports[port_id].param; 2576 2577 if (!rte_eth_dev_is_valid_port(port_id)) { 2578 printf("Invalid port id %u.\n", port_id); 2579 return; 2580 } 2581 if (gro_ports[port_id].enable) { 2582 printf("GRO type: TCP/IPv4\n"); 2583 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2584 max_pkts_num = param->max_flow_num * 2585 param->max_item_per_flow; 2586 } else 2587 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2588 printf("Max number of packets to perform GRO: %u\n", 2589 max_pkts_num); 2590 printf("Flushing cycles: %u\n", gro_flush_cycles); 2591 } else 2592 printf("Port %u doesn't enable GRO.\n", port_id); 2593 } 2594 2595 void 2596 setup_gso(const char *mode, portid_t port_id) 2597 { 2598 if (!rte_eth_dev_is_valid_port(port_id)) { 2599 printf("invalid port id %u\n", port_id); 2600 return; 2601 } 2602 if (strcmp(mode, "on") == 0) { 2603 if (test_done == 0) { 2604 printf("before enabling GSO," 2605 " please stop forwarding first\n"); 2606 return; 2607 } 2608 gso_ports[port_id].enable = 1; 2609 } else if (strcmp(mode, "off") == 0) { 2610 if (test_done == 0) { 2611 printf("before disabling GSO," 2612 " please stop forwarding first\n"); 2613 return; 2614 } 2615 gso_ports[port_id].enable = 0; 2616 } 2617 } 2618 2619 char* 2620 list_pkt_forwarding_modes(void) 2621 { 2622 static char fwd_modes[128] = ""; 2623 const char *separator = "|"; 2624 struct fwd_engine *fwd_eng; 2625 unsigned i = 0; 2626 2627 if (strlen (fwd_modes) == 0) { 2628 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2629 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2630 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2631 strncat(fwd_modes, separator, 2632 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2633 } 2634 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2635 } 2636 2637 return fwd_modes; 2638 } 2639 2640 char* 2641 list_pkt_forwarding_retry_modes(void) 2642 { 2643 static char fwd_modes[128] = ""; 2644 const char *separator = "|"; 2645 struct fwd_engine *fwd_eng; 2646 unsigned i = 0; 2647 2648 if (strlen(fwd_modes) == 0) { 2649 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2650 if (fwd_eng == &rx_only_engine) 2651 continue; 2652 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2653 sizeof(fwd_modes) - 2654 strlen(fwd_modes) - 1); 2655 strncat(fwd_modes, separator, 2656 sizeof(fwd_modes) - 2657 strlen(fwd_modes) - 1); 2658 } 2659 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2660 } 2661 2662 return fwd_modes; 2663 } 2664 2665 void 2666 set_pkt_forwarding_mode(const char *fwd_mode_name) 2667 { 2668 struct fwd_engine *fwd_eng; 2669 unsigned i; 2670 2671 i = 0; 2672 while ((fwd_eng = fwd_engines[i]) != NULL) { 2673 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2674 printf("Set %s packet forwarding mode%s\n", 2675 fwd_mode_name, 2676 retry_enabled == 0 ? "" : " with retry"); 2677 cur_fwd_eng = fwd_eng; 2678 return; 2679 } 2680 i++; 2681 } 2682 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2683 } 2684 2685 void 2686 set_verbose_level(uint16_t vb_level) 2687 { 2688 printf("Change verbose level from %u to %u\n", 2689 (unsigned int) verbose_level, (unsigned int) vb_level); 2690 verbose_level = vb_level; 2691 } 2692 2693 void 2694 vlan_extend_set(portid_t port_id, int on) 2695 { 2696 int diag; 2697 int vlan_offload; 2698 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2699 2700 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2701 return; 2702 2703 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2704 2705 if (on) { 2706 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2707 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2708 } else { 2709 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2710 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2711 } 2712 2713 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2714 if (diag < 0) 2715 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2716 "diag=%d\n", port_id, on, diag); 2717 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2718 } 2719 2720 void 2721 rx_vlan_strip_set(portid_t port_id, int on) 2722 { 2723 int diag; 2724 int vlan_offload; 2725 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2726 2727 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2728 return; 2729 2730 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2731 2732 if (on) { 2733 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2734 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2735 } else { 2736 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2737 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2738 } 2739 2740 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2741 if (diag < 0) 2742 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2743 "diag=%d\n", port_id, on, diag); 2744 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2745 } 2746 2747 void 2748 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2749 { 2750 int diag; 2751 2752 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2753 return; 2754 2755 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2756 if (diag < 0) 2757 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2758 "diag=%d\n", port_id, queue_id, on, diag); 2759 } 2760 2761 void 2762 rx_vlan_filter_set(portid_t port_id, int on) 2763 { 2764 int diag; 2765 int vlan_offload; 2766 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2767 2768 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2769 return; 2770 2771 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2772 2773 if (on) { 2774 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2775 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2776 } else { 2777 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2778 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2779 } 2780 2781 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2782 if (diag < 0) 2783 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2784 "diag=%d\n", port_id, on, diag); 2785 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2786 } 2787 2788 int 2789 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2790 { 2791 int diag; 2792 2793 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2794 return 1; 2795 if (vlan_id_is_invalid(vlan_id)) 2796 return 1; 2797 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2798 if (diag == 0) 2799 return 0; 2800 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2801 "diag=%d\n", 2802 port_id, vlan_id, on, diag); 2803 return -1; 2804 } 2805 2806 void 2807 rx_vlan_all_filter_set(portid_t port_id, int on) 2808 { 2809 uint16_t vlan_id; 2810 2811 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2812 return; 2813 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2814 if (rx_vft_set(port_id, vlan_id, on)) 2815 break; 2816 } 2817 } 2818 2819 void 2820 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2821 { 2822 int diag; 2823 2824 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2825 return; 2826 2827 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2828 if (diag == 0) 2829 return; 2830 2831 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2832 "diag=%d\n", 2833 port_id, vlan_type, tp_id, diag); 2834 } 2835 2836 void 2837 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2838 { 2839 int vlan_offload; 2840 struct rte_eth_dev_info dev_info; 2841 2842 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2843 return; 2844 if (vlan_id_is_invalid(vlan_id)) 2845 return; 2846 2847 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2848 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2849 printf("Error, as QinQ has been enabled.\n"); 2850 return; 2851 } 2852 rte_eth_dev_info_get(port_id, &dev_info); 2853 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 2854 printf("Error: vlan insert is not supported by port %d\n", 2855 port_id); 2856 return; 2857 } 2858 2859 tx_vlan_reset(port_id); 2860 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 2861 ports[port_id].tx_vlan_id = vlan_id; 2862 } 2863 2864 void 2865 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2866 { 2867 int vlan_offload; 2868 struct rte_eth_dev_info dev_info; 2869 2870 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2871 return; 2872 if (vlan_id_is_invalid(vlan_id)) 2873 return; 2874 if (vlan_id_is_invalid(vlan_id_outer)) 2875 return; 2876 2877 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2878 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2879 printf("Error, as QinQ hasn't been enabled.\n"); 2880 return; 2881 } 2882 rte_eth_dev_info_get(port_id, &dev_info); 2883 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 2884 printf("Error: qinq insert not supported by port %d\n", 2885 port_id); 2886 return; 2887 } 2888 2889 tx_vlan_reset(port_id); 2890 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; 2891 ports[port_id].tx_vlan_id = vlan_id; 2892 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2893 } 2894 2895 void 2896 tx_vlan_reset(portid_t port_id) 2897 { 2898 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2899 return; 2900 ports[port_id].dev_conf.txmode.offloads &= 2901 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 2902 DEV_TX_OFFLOAD_QINQ_INSERT); 2903 ports[port_id].tx_vlan_id = 0; 2904 ports[port_id].tx_vlan_id_outer = 0; 2905 } 2906 2907 void 2908 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2909 { 2910 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2911 return; 2912 2913 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2914 } 2915 2916 void 2917 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2918 { 2919 uint16_t i; 2920 uint8_t existing_mapping_found = 0; 2921 2922 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2923 return; 2924 2925 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2926 return; 2927 2928 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2929 printf("map_value not in required range 0..%d\n", 2930 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2931 return; 2932 } 2933 2934 if (!is_rx) { /*then tx*/ 2935 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2936 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2937 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2938 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2939 existing_mapping_found = 1; 2940 break; 2941 } 2942 } 2943 if (!existing_mapping_found) { /* A new additional mapping... */ 2944 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2945 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2946 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2947 nb_tx_queue_stats_mappings++; 2948 } 2949 } 2950 else { /*rx*/ 2951 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2952 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2953 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2954 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2955 existing_mapping_found = 1; 2956 break; 2957 } 2958 } 2959 if (!existing_mapping_found) { /* A new additional mapping... */ 2960 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2961 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2962 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2963 nb_rx_queue_stats_mappings++; 2964 } 2965 } 2966 } 2967 2968 void 2969 set_xstats_hide_zero(uint8_t on_off) 2970 { 2971 xstats_hide_zero = on_off; 2972 } 2973 2974 static inline void 2975 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2976 { 2977 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 2978 2979 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2980 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 2981 " tunnel_id: 0x%08x", 2982 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2983 rte_be_to_cpu_32(mask->tunnel_id_mask)); 2984 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2985 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 2986 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 2987 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 2988 2989 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 2990 rte_be_to_cpu_16(mask->src_port_mask), 2991 rte_be_to_cpu_16(mask->dst_port_mask)); 2992 2993 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2994 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 2995 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 2996 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 2997 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 2998 2999 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3000 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3001 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3002 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3003 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3004 } 3005 3006 printf("\n"); 3007 } 3008 3009 static inline void 3010 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3011 { 3012 struct rte_eth_flex_payload_cfg *cfg; 3013 uint32_t i, j; 3014 3015 for (i = 0; i < flex_conf->nb_payloads; i++) { 3016 cfg = &flex_conf->flex_set[i]; 3017 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3018 printf("\n RAW: "); 3019 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3020 printf("\n L2_PAYLOAD: "); 3021 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3022 printf("\n L3_PAYLOAD: "); 3023 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3024 printf("\n L4_PAYLOAD: "); 3025 else 3026 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3027 for (j = 0; j < num; j++) 3028 printf(" %-5u", cfg->src_offset[j]); 3029 } 3030 printf("\n"); 3031 } 3032 3033 static char * 3034 flowtype_to_str(uint16_t flow_type) 3035 { 3036 struct flow_type_info { 3037 char str[32]; 3038 uint16_t ftype; 3039 }; 3040 3041 uint8_t i; 3042 static struct flow_type_info flowtype_str_table[] = { 3043 {"raw", RTE_ETH_FLOW_RAW}, 3044 {"ipv4", RTE_ETH_FLOW_IPV4}, 3045 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3046 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3047 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3048 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3049 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3050 {"ipv6", RTE_ETH_FLOW_IPV6}, 3051 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3052 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3053 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3054 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3055 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3056 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3057 {"port", RTE_ETH_FLOW_PORT}, 3058 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3059 {"geneve", RTE_ETH_FLOW_GENEVE}, 3060 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3061 }; 3062 3063 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3064 if (flowtype_str_table[i].ftype == flow_type) 3065 return flowtype_str_table[i].str; 3066 } 3067 3068 return NULL; 3069 } 3070 3071 static inline void 3072 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3073 { 3074 struct rte_eth_fdir_flex_mask *mask; 3075 uint32_t i, j; 3076 char *p; 3077 3078 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3079 mask = &flex_conf->flex_mask[i]; 3080 p = flowtype_to_str(mask->flow_type); 3081 printf("\n %s:\t", p ? p : "unknown"); 3082 for (j = 0; j < num; j++) 3083 printf(" %02x", mask->mask[j]); 3084 } 3085 printf("\n"); 3086 } 3087 3088 static inline void 3089 print_fdir_flow_type(uint32_t flow_types_mask) 3090 { 3091 int i; 3092 char *p; 3093 3094 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3095 if (!(flow_types_mask & (1 << i))) 3096 continue; 3097 p = flowtype_to_str(i); 3098 if (p) 3099 printf(" %s", p); 3100 else 3101 printf(" unknown"); 3102 } 3103 printf("\n"); 3104 } 3105 3106 void 3107 fdir_get_infos(portid_t port_id) 3108 { 3109 struct rte_eth_fdir_stats fdir_stat; 3110 struct rte_eth_fdir_info fdir_info; 3111 int ret; 3112 3113 static const char *fdir_stats_border = "########################"; 3114 3115 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3116 return; 3117 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3118 if (ret < 0) { 3119 printf("\n FDIR is not supported on port %-2d\n", 3120 port_id); 3121 return; 3122 } 3123 3124 memset(&fdir_info, 0, sizeof(fdir_info)); 3125 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3126 RTE_ETH_FILTER_INFO, &fdir_info); 3127 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3128 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3129 RTE_ETH_FILTER_STATS, &fdir_stat); 3130 printf("\n %s FDIR infos for port %-2d %s\n", 3131 fdir_stats_border, port_id, fdir_stats_border); 3132 printf(" MODE: "); 3133 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3134 printf(" PERFECT\n"); 3135 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3136 printf(" PERFECT-MAC-VLAN\n"); 3137 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3138 printf(" PERFECT-TUNNEL\n"); 3139 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3140 printf(" SIGNATURE\n"); 3141 else 3142 printf(" DISABLE\n"); 3143 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3144 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3145 printf(" SUPPORTED FLOW TYPE: "); 3146 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3147 } 3148 printf(" FLEX PAYLOAD INFO:\n"); 3149 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3150 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3151 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3152 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3153 fdir_info.flex_payload_unit, 3154 fdir_info.max_flex_payload_segment_num, 3155 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3156 printf(" MASK: "); 3157 print_fdir_mask(&fdir_info.mask); 3158 if (fdir_info.flex_conf.nb_payloads > 0) { 3159 printf(" FLEX PAYLOAD SRC OFFSET:"); 3160 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3161 } 3162 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3163 printf(" FLEX MASK CFG:"); 3164 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3165 } 3166 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3167 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3168 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3169 fdir_info.guarant_spc, fdir_info.best_spc); 3170 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3171 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3172 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3173 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3174 fdir_stat.collision, fdir_stat.free, 3175 fdir_stat.maxhash, fdir_stat.maxlen, 3176 fdir_stat.add, fdir_stat.remove, 3177 fdir_stat.f_add, fdir_stat.f_remove); 3178 printf(" %s############################%s\n", 3179 fdir_stats_border, fdir_stats_border); 3180 } 3181 3182 void 3183 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3184 { 3185 struct rte_port *port; 3186 struct rte_eth_fdir_flex_conf *flex_conf; 3187 int i, idx = 0; 3188 3189 port = &ports[port_id]; 3190 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3191 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3192 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3193 idx = i; 3194 break; 3195 } 3196 } 3197 if (i >= RTE_ETH_FLOW_MAX) { 3198 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3199 idx = flex_conf->nb_flexmasks; 3200 flex_conf->nb_flexmasks++; 3201 } else { 3202 printf("The flex mask table is full. Can not set flex" 3203 " mask for flow_type(%u).", cfg->flow_type); 3204 return; 3205 } 3206 } 3207 rte_memcpy(&flex_conf->flex_mask[idx], 3208 cfg, 3209 sizeof(struct rte_eth_fdir_flex_mask)); 3210 } 3211 3212 void 3213 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3214 { 3215 struct rte_port *port; 3216 struct rte_eth_fdir_flex_conf *flex_conf; 3217 int i, idx = 0; 3218 3219 port = &ports[port_id]; 3220 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3221 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3222 if (cfg->type == flex_conf->flex_set[i].type) { 3223 idx = i; 3224 break; 3225 } 3226 } 3227 if (i >= RTE_ETH_PAYLOAD_MAX) { 3228 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3229 idx = flex_conf->nb_payloads; 3230 flex_conf->nb_payloads++; 3231 } else { 3232 printf("The flex payload table is full. Can not set" 3233 " flex payload for type(%u).", cfg->type); 3234 return; 3235 } 3236 } 3237 rte_memcpy(&flex_conf->flex_set[idx], 3238 cfg, 3239 sizeof(struct rte_eth_flex_payload_cfg)); 3240 3241 } 3242 3243 void 3244 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3245 { 3246 #ifdef RTE_LIBRTE_IXGBE_PMD 3247 int diag; 3248 3249 if (is_rx) 3250 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3251 else 3252 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3253 3254 if (diag == 0) 3255 return; 3256 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3257 is_rx ? "rx" : "tx", port_id, diag); 3258 return; 3259 #endif 3260 printf("VF %s setting not supported for port %d\n", 3261 is_rx ? "Rx" : "Tx", port_id); 3262 RTE_SET_USED(vf); 3263 RTE_SET_USED(on); 3264 } 3265 3266 int 3267 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3268 { 3269 int diag; 3270 struct rte_eth_link link; 3271 3272 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3273 return 1; 3274 rte_eth_link_get_nowait(port_id, &link); 3275 if (rate > link.link_speed) { 3276 printf("Invalid rate value:%u bigger than link speed: %u\n", 3277 rate, link.link_speed); 3278 return 1; 3279 } 3280 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3281 if (diag == 0) 3282 return diag; 3283 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3284 port_id, diag); 3285 return diag; 3286 } 3287 3288 int 3289 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3290 { 3291 int diag = -ENOTSUP; 3292 3293 RTE_SET_USED(vf); 3294 RTE_SET_USED(rate); 3295 RTE_SET_USED(q_msk); 3296 3297 #ifdef RTE_LIBRTE_IXGBE_PMD 3298 if (diag == -ENOTSUP) 3299 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3300 q_msk); 3301 #endif 3302 #ifdef RTE_LIBRTE_BNXT_PMD 3303 if (diag == -ENOTSUP) 3304 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3305 #endif 3306 if (diag == 0) 3307 return diag; 3308 3309 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3310 port_id, diag); 3311 return diag; 3312 } 3313 3314 /* 3315 * Functions to manage the set of filtered Multicast MAC addresses. 3316 * 3317 * A pool of filtered multicast MAC addresses is associated with each port. 3318 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3319 * The address of the pool and the number of valid multicast MAC addresses 3320 * recorded in the pool are stored in the fields "mc_addr_pool" and 3321 * "mc_addr_nb" of the "rte_port" data structure. 3322 * 3323 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3324 * to be supplied a contiguous array of multicast MAC addresses. 3325 * To comply with this constraint, the set of multicast addresses recorded 3326 * into the pool are systematically compacted at the beginning of the pool. 3327 * Hence, when a multicast address is removed from the pool, all following 3328 * addresses, if any, are copied back to keep the set contiguous. 3329 */ 3330 #define MCAST_POOL_INC 32 3331 3332 static int 3333 mcast_addr_pool_extend(struct rte_port *port) 3334 { 3335 struct ether_addr *mc_pool; 3336 size_t mc_pool_size; 3337 3338 /* 3339 * If a free entry is available at the end of the pool, just 3340 * increment the number of recorded multicast addresses. 3341 */ 3342 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3343 port->mc_addr_nb++; 3344 return 0; 3345 } 3346 3347 /* 3348 * [re]allocate a pool with MCAST_POOL_INC more entries. 3349 * The previous test guarantees that port->mc_addr_nb is a multiple 3350 * of MCAST_POOL_INC. 3351 */ 3352 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3353 MCAST_POOL_INC); 3354 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3355 mc_pool_size); 3356 if (mc_pool == NULL) { 3357 printf("allocation of pool of %u multicast addresses failed\n", 3358 port->mc_addr_nb + MCAST_POOL_INC); 3359 return -ENOMEM; 3360 } 3361 3362 port->mc_addr_pool = mc_pool; 3363 port->mc_addr_nb++; 3364 return 0; 3365 3366 } 3367 3368 static void 3369 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3370 { 3371 port->mc_addr_nb--; 3372 if (addr_idx == port->mc_addr_nb) { 3373 /* No need to recompact the set of multicast addressses. */ 3374 if (port->mc_addr_nb == 0) { 3375 /* free the pool of multicast addresses. */ 3376 free(port->mc_addr_pool); 3377 port->mc_addr_pool = NULL; 3378 } 3379 return; 3380 } 3381 memmove(&port->mc_addr_pool[addr_idx], 3382 &port->mc_addr_pool[addr_idx + 1], 3383 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3384 } 3385 3386 static void 3387 eth_port_multicast_addr_list_set(portid_t port_id) 3388 { 3389 struct rte_port *port; 3390 int diag; 3391 3392 port = &ports[port_id]; 3393 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3394 port->mc_addr_nb); 3395 if (diag == 0) 3396 return; 3397 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3398 port->mc_addr_nb, port_id, -diag); 3399 } 3400 3401 void 3402 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr) 3403 { 3404 struct rte_port *port; 3405 uint32_t i; 3406 3407 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3408 return; 3409 3410 port = &ports[port_id]; 3411 3412 /* 3413 * Check that the added multicast MAC address is not already recorded 3414 * in the pool of multicast addresses. 3415 */ 3416 for (i = 0; i < port->mc_addr_nb; i++) { 3417 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3418 printf("multicast address already filtered by port\n"); 3419 return; 3420 } 3421 } 3422 3423 if (mcast_addr_pool_extend(port) != 0) 3424 return; 3425 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3426 eth_port_multicast_addr_list_set(port_id); 3427 } 3428 3429 void 3430 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr) 3431 { 3432 struct rte_port *port; 3433 uint32_t i; 3434 3435 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3436 return; 3437 3438 port = &ports[port_id]; 3439 3440 /* 3441 * Search the pool of multicast MAC addresses for the removed address. 3442 */ 3443 for (i = 0; i < port->mc_addr_nb; i++) { 3444 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3445 break; 3446 } 3447 if (i == port->mc_addr_nb) { 3448 printf("multicast address not filtered by port %d\n", port_id); 3449 return; 3450 } 3451 3452 mcast_addr_pool_remove(port, i); 3453 eth_port_multicast_addr_list_set(port_id); 3454 } 3455 3456 void 3457 port_dcb_info_display(portid_t port_id) 3458 { 3459 struct rte_eth_dcb_info dcb_info; 3460 uint16_t i; 3461 int ret; 3462 static const char *border = "================"; 3463 3464 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3465 return; 3466 3467 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3468 if (ret) { 3469 printf("\n Failed to get dcb infos on port %-2d\n", 3470 port_id); 3471 return; 3472 } 3473 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3474 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3475 printf("\n TC : "); 3476 for (i = 0; i < dcb_info.nb_tcs; i++) 3477 printf("\t%4d", i); 3478 printf("\n Priority : "); 3479 for (i = 0; i < dcb_info.nb_tcs; i++) 3480 printf("\t%4d", dcb_info.prio_tc[i]); 3481 printf("\n BW percent :"); 3482 for (i = 0; i < dcb_info.nb_tcs; i++) 3483 printf("\t%4d%%", dcb_info.tc_bws[i]); 3484 printf("\n RXQ base : "); 3485 for (i = 0; i < dcb_info.nb_tcs; i++) 3486 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3487 printf("\n RXQ number :"); 3488 for (i = 0; i < dcb_info.nb_tcs; i++) 3489 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3490 printf("\n TXQ base : "); 3491 for (i = 0; i < dcb_info.nb_tcs; i++) 3492 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3493 printf("\n TXQ number :"); 3494 for (i = 0; i < dcb_info.nb_tcs; i++) 3495 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3496 printf("\n"); 3497 } 3498 3499 uint8_t * 3500 open_file(const char *file_path, uint32_t *size) 3501 { 3502 int fd = open(file_path, O_RDONLY); 3503 off_t pkg_size; 3504 uint8_t *buf = NULL; 3505 int ret = 0; 3506 struct stat st_buf; 3507 3508 if (size) 3509 *size = 0; 3510 3511 if (fd == -1) { 3512 printf("%s: Failed to open %s\n", __func__, file_path); 3513 return buf; 3514 } 3515 3516 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3517 close(fd); 3518 printf("%s: File operations failed\n", __func__); 3519 return buf; 3520 } 3521 3522 pkg_size = st_buf.st_size; 3523 if (pkg_size < 0) { 3524 close(fd); 3525 printf("%s: File operations failed\n", __func__); 3526 return buf; 3527 } 3528 3529 buf = (uint8_t *)malloc(pkg_size); 3530 if (!buf) { 3531 close(fd); 3532 printf("%s: Failed to malloc memory\n", __func__); 3533 return buf; 3534 } 3535 3536 ret = read(fd, buf, pkg_size); 3537 if (ret < 0) { 3538 close(fd); 3539 printf("%s: File read operation failed\n", __func__); 3540 close_file(buf); 3541 return NULL; 3542 } 3543 3544 if (size) 3545 *size = pkg_size; 3546 3547 close(fd); 3548 3549 return buf; 3550 } 3551 3552 int 3553 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3554 { 3555 FILE *fh = fopen(file_path, "wb"); 3556 3557 if (fh == NULL) { 3558 printf("%s: Failed to open %s\n", __func__, file_path); 3559 return -1; 3560 } 3561 3562 if (fwrite(buf, 1, size, fh) != size) { 3563 fclose(fh); 3564 printf("%s: File write operation failed\n", __func__); 3565 return -1; 3566 } 3567 3568 fclose(fh); 3569 3570 return 0; 3571 } 3572 3573 int 3574 close_file(uint8_t *buf) 3575 { 3576 if (buf) { 3577 free((void *)buf); 3578 return 0; 3579 } 3580 3581 return -1; 3582 } 3583 3584 void 3585 port_queue_region_info_display(portid_t port_id, void *buf) 3586 { 3587 #ifdef RTE_LIBRTE_I40E_PMD 3588 uint16_t i, j; 3589 struct rte_pmd_i40e_queue_regions *info = 3590 (struct rte_pmd_i40e_queue_regions *)buf; 3591 static const char *queue_region_info_stats_border = "-------"; 3592 3593 if (!info->queue_region_number) 3594 printf("there is no region has been set before"); 3595 3596 printf("\n %s All queue region info for port=%2d %s", 3597 queue_region_info_stats_border, port_id, 3598 queue_region_info_stats_border); 3599 printf("\n queue_region_number: %-14u \n", 3600 info->queue_region_number); 3601 3602 for (i = 0; i < info->queue_region_number; i++) { 3603 printf("\n region_id: %-14u queue_number: %-14u " 3604 "queue_start_index: %-14u \n", 3605 info->region[i].region_id, 3606 info->region[i].queue_num, 3607 info->region[i].queue_start_index); 3608 3609 printf(" user_priority_num is %-14u :", 3610 info->region[i].user_priority_num); 3611 for (j = 0; j < info->region[i].user_priority_num; j++) 3612 printf(" %-14u ", info->region[i].user_priority[j]); 3613 3614 printf("\n flowtype_num is %-14u :", 3615 info->region[i].flowtype_num); 3616 for (j = 0; j < info->region[i].flowtype_num; j++) 3617 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3618 } 3619 #else 3620 RTE_SET_USED(port_id); 3621 RTE_SET_USED(buf); 3622 #endif 3623 3624 printf("\n\n"); 3625 } 3626