1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * Copyright 2013-2014 6WIND S.A. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <stdarg.h> 36 #include <errno.h> 37 #include <stdio.h> 38 #include <string.h> 39 #include <stdint.h> 40 #include <inttypes.h> 41 42 #include <sys/queue.h> 43 #include <sys/types.h> 44 #include <sys/stat.h> 45 #include <fcntl.h> 46 #include <unistd.h> 47 48 #include <rte_common.h> 49 #include <rte_byteorder.h> 50 #include <rte_debug.h> 51 #include <rte_log.h> 52 #include <rte_memory.h> 53 #include <rte_memcpy.h> 54 #include <rte_memzone.h> 55 #include <rte_launch.h> 56 #include <rte_eal.h> 57 #include <rte_per_lcore.h> 58 #include <rte_lcore.h> 59 #include <rte_atomic.h> 60 #include <rte_branch_prediction.h> 61 #include <rte_mempool.h> 62 #include <rte_mbuf.h> 63 #include <rte_interrupts.h> 64 #include <rte_pci.h> 65 #include <rte_ether.h> 66 #include <rte_ethdev.h> 67 #include <rte_string_fns.h> 68 #include <rte_cycles.h> 69 #include <rte_flow.h> 70 #include <rte_errno.h> 71 #ifdef RTE_LIBRTE_IXGBE_PMD 72 #include <rte_pmd_ixgbe.h> 73 #endif 74 #ifdef RTE_LIBRTE_I40E_PMD 75 #include <rte_pmd_i40e.h> 76 #endif 77 #ifdef RTE_LIBRTE_BNXT_PMD 78 #include <rte_pmd_bnxt.h> 79 #endif 80 #include <rte_gro.h> 81 82 #include "testpmd.h" 83 84 static char *flowtype_to_str(uint16_t flow_type); 85 86 static const struct { 87 enum tx_pkt_split split; 88 const char *name; 89 } tx_split_name[] = { 90 { 91 .split = TX_PKT_SPLIT_OFF, 92 .name = "off", 93 }, 94 { 95 .split = TX_PKT_SPLIT_ON, 96 .name = "on", 97 }, 98 { 99 .split = TX_PKT_SPLIT_RND, 100 .name = "rand", 101 }, 102 }; 103 104 struct rss_type_info { 105 char str[32]; 106 uint64_t rss_type; 107 }; 108 109 static const struct rss_type_info rss_type_table[] = { 110 { "ipv4", ETH_RSS_IPV4 }, 111 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 112 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 113 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 114 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 115 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 116 { "ipv6", ETH_RSS_IPV6 }, 117 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 118 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 119 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 120 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 121 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 122 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 123 { "ipv6-ex", ETH_RSS_IPV6_EX }, 124 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 125 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 126 { "port", ETH_RSS_PORT }, 127 { "vxlan", ETH_RSS_VXLAN }, 128 { "geneve", ETH_RSS_GENEVE }, 129 { "nvgre", ETH_RSS_NVGRE }, 130 131 }; 132 133 static void 134 print_ethaddr(const char *name, struct ether_addr *eth_addr) 135 { 136 char buf[ETHER_ADDR_FMT_SIZE]; 137 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 138 printf("%s%s", name, buf); 139 } 140 141 void 142 nic_stats_display(portid_t port_id) 143 { 144 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 145 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 146 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 147 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 148 uint64_t mpps_rx, mpps_tx; 149 struct rte_eth_stats stats; 150 struct rte_port *port = &ports[port_id]; 151 uint8_t i; 152 portid_t pid; 153 154 static const char *nic_stats_border = "########################"; 155 156 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 157 printf("Valid port range is [0"); 158 RTE_ETH_FOREACH_DEV(pid) 159 printf(", %d", pid); 160 printf("]\n"); 161 return; 162 } 163 rte_eth_stats_get(port_id, &stats); 164 printf("\n %s NIC statistics for port %-2d %s\n", 165 nic_stats_border, port_id, nic_stats_border); 166 167 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 168 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 169 "%-"PRIu64"\n", 170 stats.ipackets, stats.imissed, stats.ibytes); 171 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 172 printf(" RX-nombuf: %-10"PRIu64"\n", 173 stats.rx_nombuf); 174 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 175 "%-"PRIu64"\n", 176 stats.opackets, stats.oerrors, stats.obytes); 177 } 178 else { 179 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 180 " RX-bytes: %10"PRIu64"\n", 181 stats.ipackets, stats.ierrors, stats.ibytes); 182 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 183 printf(" RX-nombuf: %10"PRIu64"\n", 184 stats.rx_nombuf); 185 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 186 " TX-bytes: %10"PRIu64"\n", 187 stats.opackets, stats.oerrors, stats.obytes); 188 } 189 190 if (port->rx_queue_stats_mapping_enabled) { 191 printf("\n"); 192 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 193 printf(" Stats reg %2d RX-packets: %10"PRIu64 194 " RX-errors: %10"PRIu64 195 " RX-bytes: %10"PRIu64"\n", 196 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 197 } 198 } 199 if (port->tx_queue_stats_mapping_enabled) { 200 printf("\n"); 201 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 202 printf(" Stats reg %2d TX-packets: %10"PRIu64 203 " TX-bytes: %10"PRIu64"\n", 204 i, stats.q_opackets[i], stats.q_obytes[i]); 205 } 206 } 207 208 diff_cycles = prev_cycles[port_id]; 209 prev_cycles[port_id] = rte_rdtsc(); 210 if (diff_cycles > 0) 211 diff_cycles = prev_cycles[port_id] - diff_cycles; 212 213 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 214 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 215 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 216 (stats.opackets - prev_pkts_tx[port_id]) : 0; 217 prev_pkts_rx[port_id] = stats.ipackets; 218 prev_pkts_tx[port_id] = stats.opackets; 219 mpps_rx = diff_cycles > 0 ? 220 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 221 mpps_tx = diff_cycles > 0 ? 222 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 223 printf("\n Throughput (since last show)\n"); 224 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 225 mpps_rx, mpps_tx); 226 227 printf(" %s############################%s\n", 228 nic_stats_border, nic_stats_border); 229 } 230 231 void 232 nic_stats_clear(portid_t port_id) 233 { 234 portid_t pid; 235 236 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 237 printf("Valid port range is [0"); 238 RTE_ETH_FOREACH_DEV(pid) 239 printf(", %d", pid); 240 printf("]\n"); 241 return; 242 } 243 rte_eth_stats_reset(port_id); 244 printf("\n NIC statistics for port %d cleared\n", port_id); 245 } 246 247 void 248 nic_xstats_display(portid_t port_id) 249 { 250 struct rte_eth_xstat *xstats; 251 int cnt_xstats, idx_xstat; 252 struct rte_eth_xstat_name *xstats_names; 253 254 printf("###### NIC extended statistics for port %-2d\n", port_id); 255 if (!rte_eth_dev_is_valid_port(port_id)) { 256 printf("Error: Invalid port number %i\n", port_id); 257 return; 258 } 259 260 /* Get count */ 261 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 262 if (cnt_xstats < 0) { 263 printf("Error: Cannot get count of xstats\n"); 264 return; 265 } 266 267 /* Get id-name lookup table */ 268 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 269 if (xstats_names == NULL) { 270 printf("Cannot allocate memory for xstats lookup\n"); 271 return; 272 } 273 if (cnt_xstats != rte_eth_xstats_get_names( 274 port_id, xstats_names, cnt_xstats)) { 275 printf("Error: Cannot get xstats lookup\n"); 276 free(xstats_names); 277 return; 278 } 279 280 /* Get stats themselves */ 281 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 282 if (xstats == NULL) { 283 printf("Cannot allocate memory for xstats\n"); 284 free(xstats_names); 285 return; 286 } 287 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 288 printf("Error: Unable to get xstats\n"); 289 free(xstats_names); 290 free(xstats); 291 return; 292 } 293 294 /* Display xstats */ 295 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) 296 printf("%s: %"PRIu64"\n", 297 xstats_names[idx_xstat].name, 298 xstats[idx_xstat].value); 299 free(xstats_names); 300 free(xstats); 301 } 302 303 void 304 nic_xstats_clear(portid_t port_id) 305 { 306 rte_eth_xstats_reset(port_id); 307 } 308 309 void 310 nic_stats_mapping_display(portid_t port_id) 311 { 312 struct rte_port *port = &ports[port_id]; 313 uint16_t i; 314 portid_t pid; 315 316 static const char *nic_stats_mapping_border = "########################"; 317 318 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 319 printf("Valid port range is [0"); 320 RTE_ETH_FOREACH_DEV(pid) 321 printf(", %d", pid); 322 printf("]\n"); 323 return; 324 } 325 326 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 327 printf("Port id %d - either does not support queue statistic mapping or" 328 " no queue statistic mapping set\n", port_id); 329 return; 330 } 331 332 printf("\n %s NIC statistics mapping for port %-2d %s\n", 333 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 334 335 if (port->rx_queue_stats_mapping_enabled) { 336 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 337 if (rx_queue_stats_mappings[i].port_id == port_id) { 338 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 339 rx_queue_stats_mappings[i].queue_id, 340 rx_queue_stats_mappings[i].stats_counter_id); 341 } 342 } 343 printf("\n"); 344 } 345 346 347 if (port->tx_queue_stats_mapping_enabled) { 348 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 349 if (tx_queue_stats_mappings[i].port_id == port_id) { 350 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 351 tx_queue_stats_mappings[i].queue_id, 352 tx_queue_stats_mappings[i].stats_counter_id); 353 } 354 } 355 } 356 357 printf(" %s####################################%s\n", 358 nic_stats_mapping_border, nic_stats_mapping_border); 359 } 360 361 void 362 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 363 { 364 struct rte_eth_rxq_info qinfo; 365 int32_t rc; 366 static const char *info_border = "*********************"; 367 368 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 369 if (rc != 0) { 370 printf("Failed to retrieve information for port: %u, " 371 "RX queue: %hu\nerror desc: %s(%d)\n", 372 port_id, queue_id, strerror(-rc), rc); 373 return; 374 } 375 376 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 377 info_border, port_id, queue_id, info_border); 378 379 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 380 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 381 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 382 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 383 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 384 printf("\nRX drop packets: %s", 385 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 386 printf("\nRX deferred start: %s", 387 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 388 printf("\nRX scattered packets: %s", 389 (qinfo.scattered_rx != 0) ? "on" : "off"); 390 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 391 printf("\n"); 392 } 393 394 void 395 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 396 { 397 struct rte_eth_txq_info qinfo; 398 int32_t rc; 399 static const char *info_border = "*********************"; 400 401 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 402 if (rc != 0) { 403 printf("Failed to retrieve information for port: %u, " 404 "TX queue: %hu\nerror desc: %s(%d)\n", 405 port_id, queue_id, strerror(-rc), rc); 406 return; 407 } 408 409 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 410 info_border, port_id, queue_id, info_border); 411 412 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 413 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 414 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 415 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 416 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 417 printf("\nTX flags: %#x", qinfo.conf.txq_flags); 418 printf("\nTX deferred start: %s", 419 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 420 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 421 printf("\n"); 422 } 423 424 void 425 port_infos_display(portid_t port_id) 426 { 427 struct rte_port *port; 428 struct ether_addr mac_addr; 429 struct rte_eth_link link; 430 struct rte_eth_dev_info dev_info; 431 int vlan_offload; 432 struct rte_mempool * mp; 433 static const char *info_border = "*********************"; 434 portid_t pid; 435 uint16_t mtu; 436 437 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 438 printf("Valid port range is [0"); 439 RTE_ETH_FOREACH_DEV(pid) 440 printf(", %d", pid); 441 printf("]\n"); 442 return; 443 } 444 port = &ports[port_id]; 445 rte_eth_link_get_nowait(port_id, &link); 446 memset(&dev_info, 0, sizeof(dev_info)); 447 rte_eth_dev_info_get(port_id, &dev_info); 448 printf("\n%s Infos for port %-2d %s\n", 449 info_border, port_id, info_border); 450 rte_eth_macaddr_get(port_id, &mac_addr); 451 print_ethaddr("MAC address: ", &mac_addr); 452 printf("\nDriver name: %s", dev_info.driver_name); 453 printf("\nConnect to socket: %u", port->socket_id); 454 455 if (port_numa[port_id] != NUMA_NO_CONFIG) { 456 mp = mbuf_pool_find(port_numa[port_id]); 457 if (mp) 458 printf("\nmemory allocation on the socket: %d", 459 port_numa[port_id]); 460 } else 461 printf("\nmemory allocation on the socket: %u",port->socket_id); 462 463 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 464 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 465 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 466 ("full-duplex") : ("half-duplex")); 467 468 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 469 printf("MTU: %u\n", mtu); 470 471 printf("Promiscuous mode: %s\n", 472 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 473 printf("Allmulticast mode: %s\n", 474 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 475 printf("Maximum number of MAC addresses: %u\n", 476 (unsigned int)(port->dev_info.max_mac_addrs)); 477 printf("Maximum number of MAC addresses of hash filtering: %u\n", 478 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 479 480 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 481 if (vlan_offload >= 0){ 482 printf("VLAN offload: \n"); 483 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 484 printf(" strip on \n"); 485 else 486 printf(" strip off \n"); 487 488 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 489 printf(" filter on \n"); 490 else 491 printf(" filter off \n"); 492 493 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 494 printf(" qinq(extend) on \n"); 495 else 496 printf(" qinq(extend) off \n"); 497 } 498 499 if (dev_info.hash_key_size > 0) 500 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 501 if (dev_info.reta_size > 0) 502 printf("Redirection table size: %u\n", dev_info.reta_size); 503 if (!dev_info.flow_type_rss_offloads) 504 printf("No flow type is supported.\n"); 505 else { 506 uint16_t i; 507 char *p; 508 509 printf("Supported flow types:\n"); 510 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 511 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 512 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 513 continue; 514 p = flowtype_to_str(i); 515 if (p) 516 printf(" %s\n", p); 517 else 518 printf(" user defined %d\n", i); 519 } 520 } 521 522 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 523 printf("Max possible number of RXDs per queue: %hu\n", 524 dev_info.rx_desc_lim.nb_max); 525 printf("Min possible number of RXDs per queue: %hu\n", 526 dev_info.rx_desc_lim.nb_min); 527 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 528 529 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 530 printf("Max possible number of TXDs per queue: %hu\n", 531 dev_info.tx_desc_lim.nb_max); 532 printf("Min possible number of TXDs per queue: %hu\n", 533 dev_info.tx_desc_lim.nb_min); 534 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 535 } 536 537 void 538 port_offload_cap_display(portid_t port_id) 539 { 540 struct rte_eth_dev *dev; 541 struct rte_eth_dev_info dev_info; 542 static const char *info_border = "************"; 543 544 if (port_id_is_invalid(port_id, ENABLED_WARN)) 545 return; 546 547 dev = &rte_eth_devices[port_id]; 548 rte_eth_dev_info_get(port_id, &dev_info); 549 550 printf("\n%s Port %d supported offload features: %s\n", 551 info_border, port_id, info_border); 552 553 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 554 printf("VLAN stripped: "); 555 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 556 printf("on\n"); 557 else 558 printf("off\n"); 559 } 560 561 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 562 printf("Double VLANs stripped: "); 563 if (dev->data->dev_conf.rxmode.hw_vlan_extend) 564 printf("on\n"); 565 else 566 printf("off\n"); 567 } 568 569 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 570 printf("RX IPv4 checksum: "); 571 if (dev->data->dev_conf.rxmode.hw_ip_checksum) 572 printf("on\n"); 573 else 574 printf("off\n"); 575 } 576 577 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 578 printf("RX UDP checksum: "); 579 if (dev->data->dev_conf.rxmode.hw_ip_checksum) 580 printf("on\n"); 581 else 582 printf("off\n"); 583 } 584 585 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 586 printf("RX TCP checksum: "); 587 if (dev->data->dev_conf.rxmode.hw_ip_checksum) 588 printf("on\n"); 589 else 590 printf("off\n"); 591 } 592 593 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 594 printf("RX Outer IPv4 checksum: on"); 595 596 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 597 printf("Large receive offload: "); 598 if (dev->data->dev_conf.rxmode.enable_lro) 599 printf("on\n"); 600 else 601 printf("off\n"); 602 } 603 604 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 605 printf("VLAN insert: "); 606 if (ports[port_id].tx_ol_flags & 607 TESTPMD_TX_OFFLOAD_INSERT_VLAN) 608 printf("on\n"); 609 else 610 printf("off\n"); 611 } 612 613 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 614 printf("HW timestamp: "); 615 if (dev->data->dev_conf.rxmode.hw_timestamp) 616 printf("on\n"); 617 else 618 printf("off\n"); 619 } 620 621 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 622 printf("Double VLANs insert: "); 623 if (ports[port_id].tx_ol_flags & 624 TESTPMD_TX_OFFLOAD_INSERT_QINQ) 625 printf("on\n"); 626 else 627 printf("off\n"); 628 } 629 630 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 631 printf("TX IPv4 checksum: "); 632 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) 633 printf("on\n"); 634 else 635 printf("off\n"); 636 } 637 638 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 639 printf("TX UDP checksum: "); 640 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) 641 printf("on\n"); 642 else 643 printf("off\n"); 644 } 645 646 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 647 printf("TX TCP checksum: "); 648 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) 649 printf("on\n"); 650 else 651 printf("off\n"); 652 } 653 654 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 655 printf("TX SCTP checksum: "); 656 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) 657 printf("on\n"); 658 else 659 printf("off\n"); 660 } 661 662 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 663 printf("TX Outer IPv4 checksum: "); 664 if (ports[port_id].tx_ol_flags & 665 TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) 666 printf("on\n"); 667 else 668 printf("off\n"); 669 } 670 671 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 672 printf("TX TCP segmentation: "); 673 if (ports[port_id].tso_segsz != 0) 674 printf("on\n"); 675 else 676 printf("off\n"); 677 } 678 679 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 680 printf("TX UDP segmentation: "); 681 if (ports[port_id].tso_segsz != 0) 682 printf("on\n"); 683 else 684 printf("off\n"); 685 } 686 687 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 688 printf("TSO for VXLAN tunnel packet: "); 689 if (ports[port_id].tunnel_tso_segsz) 690 printf("on\n"); 691 else 692 printf("off\n"); 693 } 694 695 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 696 printf("TSO for GRE tunnel packet: "); 697 if (ports[port_id].tunnel_tso_segsz) 698 printf("on\n"); 699 else 700 printf("off\n"); 701 } 702 703 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 704 printf("TSO for IPIP tunnel packet: "); 705 if (ports[port_id].tunnel_tso_segsz) 706 printf("on\n"); 707 else 708 printf("off\n"); 709 } 710 711 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 712 printf("TSO for GENEVE tunnel packet: "); 713 if (ports[port_id].tunnel_tso_segsz) 714 printf("on\n"); 715 else 716 printf("off\n"); 717 } 718 719 } 720 721 int 722 port_id_is_invalid(portid_t port_id, enum print_warning warning) 723 { 724 if (port_id == (portid_t)RTE_PORT_ALL) 725 return 0; 726 727 if (rte_eth_dev_is_valid_port(port_id)) 728 return 0; 729 730 if (warning == ENABLED_WARN) 731 printf("Invalid port %d\n", port_id); 732 733 return 1; 734 } 735 736 static int 737 vlan_id_is_invalid(uint16_t vlan_id) 738 { 739 if (vlan_id < 4096) 740 return 0; 741 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 742 return 1; 743 } 744 745 static int 746 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 747 { 748 uint64_t pci_len; 749 750 if (reg_off & 0x3) { 751 printf("Port register offset 0x%X not aligned on a 4-byte " 752 "boundary\n", 753 (unsigned)reg_off); 754 return 1; 755 } 756 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 757 if (reg_off >= pci_len) { 758 printf("Port %d: register offset %u (0x%X) out of port PCI " 759 "resource (length=%"PRIu64")\n", 760 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 761 return 1; 762 } 763 return 0; 764 } 765 766 static int 767 reg_bit_pos_is_invalid(uint8_t bit_pos) 768 { 769 if (bit_pos <= 31) 770 return 0; 771 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 772 return 1; 773 } 774 775 #define display_port_and_reg_off(port_id, reg_off) \ 776 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 777 778 static inline void 779 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 780 { 781 display_port_and_reg_off(port_id, (unsigned)reg_off); 782 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 783 } 784 785 void 786 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 787 { 788 uint32_t reg_v; 789 790 791 if (port_id_is_invalid(port_id, ENABLED_WARN)) 792 return; 793 if (port_reg_off_is_invalid(port_id, reg_off)) 794 return; 795 if (reg_bit_pos_is_invalid(bit_x)) 796 return; 797 reg_v = port_id_pci_reg_read(port_id, reg_off); 798 display_port_and_reg_off(port_id, (unsigned)reg_off); 799 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 800 } 801 802 void 803 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 804 uint8_t bit1_pos, uint8_t bit2_pos) 805 { 806 uint32_t reg_v; 807 uint8_t l_bit; 808 uint8_t h_bit; 809 810 if (port_id_is_invalid(port_id, ENABLED_WARN)) 811 return; 812 if (port_reg_off_is_invalid(port_id, reg_off)) 813 return; 814 if (reg_bit_pos_is_invalid(bit1_pos)) 815 return; 816 if (reg_bit_pos_is_invalid(bit2_pos)) 817 return; 818 if (bit1_pos > bit2_pos) 819 l_bit = bit2_pos, h_bit = bit1_pos; 820 else 821 l_bit = bit1_pos, h_bit = bit2_pos; 822 823 reg_v = port_id_pci_reg_read(port_id, reg_off); 824 reg_v >>= l_bit; 825 if (h_bit < 31) 826 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 827 display_port_and_reg_off(port_id, (unsigned)reg_off); 828 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 829 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 830 } 831 832 void 833 port_reg_display(portid_t port_id, uint32_t reg_off) 834 { 835 uint32_t reg_v; 836 837 if (port_id_is_invalid(port_id, ENABLED_WARN)) 838 return; 839 if (port_reg_off_is_invalid(port_id, reg_off)) 840 return; 841 reg_v = port_id_pci_reg_read(port_id, reg_off); 842 display_port_reg_value(port_id, reg_off, reg_v); 843 } 844 845 void 846 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 847 uint8_t bit_v) 848 { 849 uint32_t reg_v; 850 851 if (port_id_is_invalid(port_id, ENABLED_WARN)) 852 return; 853 if (port_reg_off_is_invalid(port_id, reg_off)) 854 return; 855 if (reg_bit_pos_is_invalid(bit_pos)) 856 return; 857 if (bit_v > 1) { 858 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 859 return; 860 } 861 reg_v = port_id_pci_reg_read(port_id, reg_off); 862 if (bit_v == 0) 863 reg_v &= ~(1 << bit_pos); 864 else 865 reg_v |= (1 << bit_pos); 866 port_id_pci_reg_write(port_id, reg_off, reg_v); 867 display_port_reg_value(port_id, reg_off, reg_v); 868 } 869 870 void 871 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 872 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 873 { 874 uint32_t max_v; 875 uint32_t reg_v; 876 uint8_t l_bit; 877 uint8_t h_bit; 878 879 if (port_id_is_invalid(port_id, ENABLED_WARN)) 880 return; 881 if (port_reg_off_is_invalid(port_id, reg_off)) 882 return; 883 if (reg_bit_pos_is_invalid(bit1_pos)) 884 return; 885 if (reg_bit_pos_is_invalid(bit2_pos)) 886 return; 887 if (bit1_pos > bit2_pos) 888 l_bit = bit2_pos, h_bit = bit1_pos; 889 else 890 l_bit = bit1_pos, h_bit = bit2_pos; 891 892 if ((h_bit - l_bit) < 31) 893 max_v = (1 << (h_bit - l_bit + 1)) - 1; 894 else 895 max_v = 0xFFFFFFFF; 896 897 if (value > max_v) { 898 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 899 (unsigned)value, (unsigned)value, 900 (unsigned)max_v, (unsigned)max_v); 901 return; 902 } 903 reg_v = port_id_pci_reg_read(port_id, reg_off); 904 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 905 reg_v |= (value << l_bit); /* Set changed bits */ 906 port_id_pci_reg_write(port_id, reg_off, reg_v); 907 display_port_reg_value(port_id, reg_off, reg_v); 908 } 909 910 void 911 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 912 { 913 if (port_id_is_invalid(port_id, ENABLED_WARN)) 914 return; 915 if (port_reg_off_is_invalid(port_id, reg_off)) 916 return; 917 port_id_pci_reg_write(port_id, reg_off, reg_v); 918 display_port_reg_value(port_id, reg_off, reg_v); 919 } 920 921 void 922 port_mtu_set(portid_t port_id, uint16_t mtu) 923 { 924 int diag; 925 926 if (port_id_is_invalid(port_id, ENABLED_WARN)) 927 return; 928 diag = rte_eth_dev_set_mtu(port_id, mtu); 929 if (diag == 0) 930 return; 931 printf("Set MTU failed. diag=%d\n", diag); 932 } 933 934 /* Generic flow management functions. */ 935 936 /** Generate flow_item[] entry. */ 937 #define MK_FLOW_ITEM(t, s) \ 938 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 939 .name = # t, \ 940 .size = s, \ 941 } 942 943 /** Information about known flow pattern items. */ 944 static const struct { 945 const char *name; 946 size_t size; 947 } flow_item[] = { 948 MK_FLOW_ITEM(END, 0), 949 MK_FLOW_ITEM(VOID, 0), 950 MK_FLOW_ITEM(INVERT, 0), 951 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 952 MK_FLOW_ITEM(PF, 0), 953 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 954 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)), 955 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */ 956 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 957 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 958 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 959 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 960 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 961 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 962 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 963 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 964 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 965 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), 966 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), 967 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 968 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 969 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)), 970 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), 971 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), 972 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), 973 }; 974 975 /** Compute storage space needed by item specification. */ 976 static void 977 flow_item_spec_size(const struct rte_flow_item *item, 978 size_t *size, size_t *pad) 979 { 980 if (!item->spec) { 981 *size = 0; 982 goto empty; 983 } 984 switch (item->type) { 985 union { 986 const struct rte_flow_item_raw *raw; 987 } spec; 988 989 case RTE_FLOW_ITEM_TYPE_RAW: 990 spec.raw = item->spec; 991 *size = offsetof(struct rte_flow_item_raw, pattern) + 992 spec.raw->length * sizeof(*spec.raw->pattern); 993 break; 994 default: 995 *size = flow_item[item->type].size; 996 break; 997 } 998 empty: 999 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1000 } 1001 1002 /** Generate flow_action[] entry. */ 1003 #define MK_FLOW_ACTION(t, s) \ 1004 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 1005 .name = # t, \ 1006 .size = s, \ 1007 } 1008 1009 /** Information about known flow actions. */ 1010 static const struct { 1011 const char *name; 1012 size_t size; 1013 } flow_action[] = { 1014 MK_FLOW_ACTION(END, 0), 1015 MK_FLOW_ACTION(VOID, 0), 1016 MK_FLOW_ACTION(PASSTHRU, 0), 1017 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 1018 MK_FLOW_ACTION(FLAG, 0), 1019 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 1020 MK_FLOW_ACTION(DROP, 0), 1021 MK_FLOW_ACTION(COUNT, 0), 1022 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)), 1023 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ 1024 MK_FLOW_ACTION(PF, 0), 1025 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 1026 }; 1027 1028 /** Compute storage space needed by action configuration. */ 1029 static void 1030 flow_action_conf_size(const struct rte_flow_action *action, 1031 size_t *size, size_t *pad) 1032 { 1033 if (!action->conf) { 1034 *size = 0; 1035 goto empty; 1036 } 1037 switch (action->type) { 1038 union { 1039 const struct rte_flow_action_rss *rss; 1040 } conf; 1041 1042 case RTE_FLOW_ACTION_TYPE_RSS: 1043 conf.rss = action->conf; 1044 *size = offsetof(struct rte_flow_action_rss, queue) + 1045 conf.rss->num * sizeof(*conf.rss->queue); 1046 break; 1047 default: 1048 *size = flow_action[action->type].size; 1049 break; 1050 } 1051 empty: 1052 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1053 } 1054 1055 /** Generate a port_flow entry from attributes/pattern/actions. */ 1056 static struct port_flow * 1057 port_flow_new(const struct rte_flow_attr *attr, 1058 const struct rte_flow_item *pattern, 1059 const struct rte_flow_action *actions) 1060 { 1061 const struct rte_flow_item *item; 1062 const struct rte_flow_action *action; 1063 struct port_flow *pf = NULL; 1064 size_t tmp; 1065 size_t pad; 1066 size_t off1 = 0; 1067 size_t off2 = 0; 1068 int err = ENOTSUP; 1069 1070 store: 1071 item = pattern; 1072 if (pf) 1073 pf->pattern = (void *)&pf->data[off1]; 1074 do { 1075 struct rte_flow_item *dst = NULL; 1076 1077 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 1078 !flow_item[item->type].name) 1079 goto notsup; 1080 if (pf) 1081 dst = memcpy(pf->data + off1, item, sizeof(*item)); 1082 off1 += sizeof(*item); 1083 flow_item_spec_size(item, &tmp, &pad); 1084 if (item->spec) { 1085 if (pf) 1086 dst->spec = memcpy(pf->data + off2, 1087 item->spec, tmp); 1088 off2 += tmp + pad; 1089 } 1090 if (item->last) { 1091 if (pf) 1092 dst->last = memcpy(pf->data + off2, 1093 item->last, tmp); 1094 off2 += tmp + pad; 1095 } 1096 if (item->mask) { 1097 if (pf) 1098 dst->mask = memcpy(pf->data + off2, 1099 item->mask, tmp); 1100 off2 += tmp + pad; 1101 } 1102 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1103 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 1104 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1105 action = actions; 1106 if (pf) 1107 pf->actions = (void *)&pf->data[off1]; 1108 do { 1109 struct rte_flow_action *dst = NULL; 1110 1111 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1112 !flow_action[action->type].name) 1113 goto notsup; 1114 if (pf) 1115 dst = memcpy(pf->data + off1, action, sizeof(*action)); 1116 off1 += sizeof(*action); 1117 flow_action_conf_size(action, &tmp, &pad); 1118 if (action->conf) { 1119 if (pf) 1120 dst->conf = memcpy(pf->data + off2, 1121 action->conf, tmp); 1122 off2 += tmp + pad; 1123 } 1124 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1125 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 1126 if (pf != NULL) 1127 return pf; 1128 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1129 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 1130 pf = calloc(1, tmp + off1 + off2); 1131 if (pf == NULL) 1132 err = errno; 1133 else { 1134 *pf = (const struct port_flow){ 1135 .size = tmp + off1 + off2, 1136 .attr = *attr, 1137 }; 1138 tmp -= offsetof(struct port_flow, data); 1139 off2 = tmp + off1; 1140 off1 = tmp; 1141 goto store; 1142 } 1143 notsup: 1144 rte_errno = err; 1145 return NULL; 1146 } 1147 1148 /** Print a message out of a flow error. */ 1149 static int 1150 port_flow_complain(struct rte_flow_error *error) 1151 { 1152 static const char *const errstrlist[] = { 1153 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1154 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1155 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1156 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1157 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1158 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1159 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1160 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1161 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1162 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1163 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1164 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1165 }; 1166 const char *errstr; 1167 char buf[32]; 1168 int err = rte_errno; 1169 1170 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1171 !errstrlist[error->type]) 1172 errstr = "unknown type"; 1173 else 1174 errstr = errstrlist[error->type]; 1175 printf("Caught error type %d (%s): %s%s\n", 1176 error->type, errstr, 1177 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1178 error->cause), buf) : "", 1179 error->message ? error->message : "(no stated reason)"); 1180 return -err; 1181 } 1182 1183 /** Validate flow rule. */ 1184 int 1185 port_flow_validate(portid_t port_id, 1186 const struct rte_flow_attr *attr, 1187 const struct rte_flow_item *pattern, 1188 const struct rte_flow_action *actions) 1189 { 1190 struct rte_flow_error error; 1191 1192 /* Poisoning to make sure PMDs update it in case of error. */ 1193 memset(&error, 0x11, sizeof(error)); 1194 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1195 return port_flow_complain(&error); 1196 printf("Flow rule validated\n"); 1197 return 0; 1198 } 1199 1200 /** Create flow rule. */ 1201 int 1202 port_flow_create(portid_t port_id, 1203 const struct rte_flow_attr *attr, 1204 const struct rte_flow_item *pattern, 1205 const struct rte_flow_action *actions) 1206 { 1207 struct rte_flow *flow; 1208 struct rte_port *port; 1209 struct port_flow *pf; 1210 uint32_t id; 1211 struct rte_flow_error error; 1212 1213 /* Poisoning to make sure PMDs update it in case of error. */ 1214 memset(&error, 0x22, sizeof(error)); 1215 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1216 if (!flow) 1217 return port_flow_complain(&error); 1218 port = &ports[port_id]; 1219 if (port->flow_list) { 1220 if (port->flow_list->id == UINT32_MAX) { 1221 printf("Highest rule ID is already assigned, delete" 1222 " it first"); 1223 rte_flow_destroy(port_id, flow, NULL); 1224 return -ENOMEM; 1225 } 1226 id = port->flow_list->id + 1; 1227 } else 1228 id = 0; 1229 pf = port_flow_new(attr, pattern, actions); 1230 if (!pf) { 1231 int err = rte_errno; 1232 1233 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1234 rte_flow_destroy(port_id, flow, NULL); 1235 return -err; 1236 } 1237 pf->next = port->flow_list; 1238 pf->id = id; 1239 pf->flow = flow; 1240 port->flow_list = pf; 1241 printf("Flow rule #%u created\n", pf->id); 1242 return 0; 1243 } 1244 1245 /** Destroy a number of flow rules. */ 1246 int 1247 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1248 { 1249 struct rte_port *port; 1250 struct port_flow **tmp; 1251 uint32_t c = 0; 1252 int ret = 0; 1253 1254 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1255 port_id == (portid_t)RTE_PORT_ALL) 1256 return -EINVAL; 1257 port = &ports[port_id]; 1258 tmp = &port->flow_list; 1259 while (*tmp) { 1260 uint32_t i; 1261 1262 for (i = 0; i != n; ++i) { 1263 struct rte_flow_error error; 1264 struct port_flow *pf = *tmp; 1265 1266 if (rule[i] != pf->id) 1267 continue; 1268 /* 1269 * Poisoning to make sure PMDs update it in case 1270 * of error. 1271 */ 1272 memset(&error, 0x33, sizeof(error)); 1273 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1274 ret = port_flow_complain(&error); 1275 continue; 1276 } 1277 printf("Flow rule #%u destroyed\n", pf->id); 1278 *tmp = pf->next; 1279 free(pf); 1280 break; 1281 } 1282 if (i == n) 1283 tmp = &(*tmp)->next; 1284 ++c; 1285 } 1286 return ret; 1287 } 1288 1289 /** Remove all flow rules. */ 1290 int 1291 port_flow_flush(portid_t port_id) 1292 { 1293 struct rte_flow_error error; 1294 struct rte_port *port; 1295 int ret = 0; 1296 1297 /* Poisoning to make sure PMDs update it in case of error. */ 1298 memset(&error, 0x44, sizeof(error)); 1299 if (rte_flow_flush(port_id, &error)) { 1300 ret = port_flow_complain(&error); 1301 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1302 port_id == (portid_t)RTE_PORT_ALL) 1303 return ret; 1304 } 1305 port = &ports[port_id]; 1306 while (port->flow_list) { 1307 struct port_flow *pf = port->flow_list->next; 1308 1309 free(port->flow_list); 1310 port->flow_list = pf; 1311 } 1312 return ret; 1313 } 1314 1315 /** Query a flow rule. */ 1316 int 1317 port_flow_query(portid_t port_id, uint32_t rule, 1318 enum rte_flow_action_type action) 1319 { 1320 struct rte_flow_error error; 1321 struct rte_port *port; 1322 struct port_flow *pf; 1323 const char *name; 1324 union { 1325 struct rte_flow_query_count count; 1326 } query; 1327 1328 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1329 port_id == (portid_t)RTE_PORT_ALL) 1330 return -EINVAL; 1331 port = &ports[port_id]; 1332 for (pf = port->flow_list; pf; pf = pf->next) 1333 if (pf->id == rule) 1334 break; 1335 if (!pf) { 1336 printf("Flow rule #%u not found\n", rule); 1337 return -ENOENT; 1338 } 1339 if ((unsigned int)action >= RTE_DIM(flow_action) || 1340 !flow_action[action].name) 1341 name = "unknown"; 1342 else 1343 name = flow_action[action].name; 1344 switch (action) { 1345 case RTE_FLOW_ACTION_TYPE_COUNT: 1346 break; 1347 default: 1348 printf("Cannot query action type %d (%s)\n", action, name); 1349 return -ENOTSUP; 1350 } 1351 /* Poisoning to make sure PMDs update it in case of error. */ 1352 memset(&error, 0x55, sizeof(error)); 1353 memset(&query, 0, sizeof(query)); 1354 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1355 return port_flow_complain(&error); 1356 switch (action) { 1357 case RTE_FLOW_ACTION_TYPE_COUNT: 1358 printf("%s:\n" 1359 " hits_set: %u\n" 1360 " bytes_set: %u\n" 1361 " hits: %" PRIu64 "\n" 1362 " bytes: %" PRIu64 "\n", 1363 name, 1364 query.count.hits_set, 1365 query.count.bytes_set, 1366 query.count.hits, 1367 query.count.bytes); 1368 break; 1369 default: 1370 printf("Cannot display result for action type %d (%s)\n", 1371 action, name); 1372 break; 1373 } 1374 return 0; 1375 } 1376 1377 /** List flow rules. */ 1378 void 1379 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1380 { 1381 struct rte_port *port; 1382 struct port_flow *pf; 1383 struct port_flow *list = NULL; 1384 uint32_t i; 1385 1386 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1387 port_id == (portid_t)RTE_PORT_ALL) 1388 return; 1389 port = &ports[port_id]; 1390 if (!port->flow_list) 1391 return; 1392 /* Sort flows by group, priority and ID. */ 1393 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1394 struct port_flow **tmp; 1395 1396 if (n) { 1397 /* Filter out unwanted groups. */ 1398 for (i = 0; i != n; ++i) 1399 if (pf->attr.group == group[i]) 1400 break; 1401 if (i == n) 1402 continue; 1403 } 1404 tmp = &list; 1405 while (*tmp && 1406 (pf->attr.group > (*tmp)->attr.group || 1407 (pf->attr.group == (*tmp)->attr.group && 1408 pf->attr.priority > (*tmp)->attr.priority) || 1409 (pf->attr.group == (*tmp)->attr.group && 1410 pf->attr.priority == (*tmp)->attr.priority && 1411 pf->id > (*tmp)->id))) 1412 tmp = &(*tmp)->tmp; 1413 pf->tmp = *tmp; 1414 *tmp = pf; 1415 } 1416 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1417 for (pf = list; pf != NULL; pf = pf->tmp) { 1418 const struct rte_flow_item *item = pf->pattern; 1419 const struct rte_flow_action *action = pf->actions; 1420 1421 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t", 1422 pf->id, 1423 pf->attr.group, 1424 pf->attr.priority, 1425 pf->attr.ingress ? 'i' : '-', 1426 pf->attr.egress ? 'e' : '-'); 1427 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1428 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1429 printf("%s ", flow_item[item->type].name); 1430 ++item; 1431 } 1432 printf("=>"); 1433 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1434 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1435 printf(" %s", flow_action[action->type].name); 1436 ++action; 1437 } 1438 printf("\n"); 1439 } 1440 } 1441 1442 /** Restrict ingress traffic to the defined flow rules. */ 1443 int 1444 port_flow_isolate(portid_t port_id, int set) 1445 { 1446 struct rte_flow_error error; 1447 1448 /* Poisoning to make sure PMDs update it in case of error. */ 1449 memset(&error, 0x66, sizeof(error)); 1450 if (rte_flow_isolate(port_id, set, &error)) 1451 return port_flow_complain(&error); 1452 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1453 port_id, 1454 set ? "now restricted" : "not restricted anymore"); 1455 return 0; 1456 } 1457 1458 /* 1459 * RX/TX ring descriptors display functions. 1460 */ 1461 int 1462 rx_queue_id_is_invalid(queueid_t rxq_id) 1463 { 1464 if (rxq_id < nb_rxq) 1465 return 0; 1466 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1467 return 1; 1468 } 1469 1470 int 1471 tx_queue_id_is_invalid(queueid_t txq_id) 1472 { 1473 if (txq_id < nb_txq) 1474 return 0; 1475 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1476 return 1; 1477 } 1478 1479 static int 1480 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1481 { 1482 if (rxdesc_id < nb_rxd) 1483 return 0; 1484 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1485 rxdesc_id, nb_rxd); 1486 return 1; 1487 } 1488 1489 static int 1490 tx_desc_id_is_invalid(uint16_t txdesc_id) 1491 { 1492 if (txdesc_id < nb_txd) 1493 return 0; 1494 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1495 txdesc_id, nb_txd); 1496 return 1; 1497 } 1498 1499 static const struct rte_memzone * 1500 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) 1501 { 1502 char mz_name[RTE_MEMZONE_NAMESIZE]; 1503 const struct rte_memzone *mz; 1504 1505 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1506 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1507 mz = rte_memzone_lookup(mz_name); 1508 if (mz == NULL) 1509 printf("%s ring memory zoneof (port %d, queue %d) not" 1510 "found (zone name = %s\n", 1511 ring_name, port_id, q_id, mz_name); 1512 return mz; 1513 } 1514 1515 union igb_ring_dword { 1516 uint64_t dword; 1517 struct { 1518 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1519 uint32_t lo; 1520 uint32_t hi; 1521 #else 1522 uint32_t hi; 1523 uint32_t lo; 1524 #endif 1525 } words; 1526 }; 1527 1528 struct igb_ring_desc_32_bytes { 1529 union igb_ring_dword lo_dword; 1530 union igb_ring_dword hi_dword; 1531 union igb_ring_dword resv1; 1532 union igb_ring_dword resv2; 1533 }; 1534 1535 struct igb_ring_desc_16_bytes { 1536 union igb_ring_dword lo_dword; 1537 union igb_ring_dword hi_dword; 1538 }; 1539 1540 static void 1541 ring_rxd_display_dword(union igb_ring_dword dword) 1542 { 1543 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1544 (unsigned)dword.words.hi); 1545 } 1546 1547 static void 1548 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1549 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1550 uint8_t port_id, 1551 #else 1552 __rte_unused uint8_t port_id, 1553 #endif 1554 uint16_t desc_id) 1555 { 1556 struct igb_ring_desc_16_bytes *ring = 1557 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1558 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1559 struct rte_eth_dev_info dev_info; 1560 1561 memset(&dev_info, 0, sizeof(dev_info)); 1562 rte_eth_dev_info_get(port_id, &dev_info); 1563 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1564 /* 32 bytes RX descriptor, i40e only */ 1565 struct igb_ring_desc_32_bytes *ring = 1566 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1567 ring[desc_id].lo_dword.dword = 1568 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1569 ring_rxd_display_dword(ring[desc_id].lo_dword); 1570 ring[desc_id].hi_dword.dword = 1571 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1572 ring_rxd_display_dword(ring[desc_id].hi_dword); 1573 ring[desc_id].resv1.dword = 1574 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1575 ring_rxd_display_dword(ring[desc_id].resv1); 1576 ring[desc_id].resv2.dword = 1577 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1578 ring_rxd_display_dword(ring[desc_id].resv2); 1579 1580 return; 1581 } 1582 #endif 1583 /* 16 bytes RX descriptor */ 1584 ring[desc_id].lo_dword.dword = 1585 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1586 ring_rxd_display_dword(ring[desc_id].lo_dword); 1587 ring[desc_id].hi_dword.dword = 1588 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1589 ring_rxd_display_dword(ring[desc_id].hi_dword); 1590 } 1591 1592 static void 1593 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1594 { 1595 struct igb_ring_desc_16_bytes *ring; 1596 struct igb_ring_desc_16_bytes txd; 1597 1598 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1599 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1600 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1601 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1602 (unsigned)txd.lo_dword.words.lo, 1603 (unsigned)txd.lo_dword.words.hi, 1604 (unsigned)txd.hi_dword.words.lo, 1605 (unsigned)txd.hi_dword.words.hi); 1606 } 1607 1608 void 1609 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1610 { 1611 const struct rte_memzone *rx_mz; 1612 1613 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1614 return; 1615 if (rx_queue_id_is_invalid(rxq_id)) 1616 return; 1617 if (rx_desc_id_is_invalid(rxd_id)) 1618 return; 1619 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1620 if (rx_mz == NULL) 1621 return; 1622 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1623 } 1624 1625 void 1626 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1627 { 1628 const struct rte_memzone *tx_mz; 1629 1630 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1631 return; 1632 if (tx_queue_id_is_invalid(txq_id)) 1633 return; 1634 if (tx_desc_id_is_invalid(txd_id)) 1635 return; 1636 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1637 if (tx_mz == NULL) 1638 return; 1639 ring_tx_descriptor_display(tx_mz, txd_id); 1640 } 1641 1642 void 1643 fwd_lcores_config_display(void) 1644 { 1645 lcoreid_t lc_id; 1646 1647 printf("List of forwarding lcores:"); 1648 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1649 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1650 printf("\n"); 1651 } 1652 void 1653 rxtx_config_display(void) 1654 { 1655 printf(" %s packet forwarding%s - CRC stripping %s - " 1656 "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, 1657 retry_enabled == 0 ? "" : " with retry", 1658 rx_mode.hw_strip_crc ? "enabled" : "disabled", 1659 nb_pkt_per_burst); 1660 1661 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1662 printf(" packet len=%u - nb packet segments=%d\n", 1663 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1664 1665 struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; 1666 struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; 1667 1668 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1669 nb_fwd_lcores, nb_fwd_ports); 1670 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 1671 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 1672 printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 1673 rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, 1674 rx_conf->rx_thresh.wthresh); 1675 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 1676 nb_txq, nb_txd, tx_conf->tx_free_thresh); 1677 printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 1678 tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, 1679 tx_conf->tx_thresh.wthresh); 1680 printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", 1681 tx_conf->tx_rs_thresh, tx_conf->txq_flags); 1682 } 1683 1684 void 1685 port_rss_reta_info(portid_t port_id, 1686 struct rte_eth_rss_reta_entry64 *reta_conf, 1687 uint16_t nb_entries) 1688 { 1689 uint16_t i, idx, shift; 1690 int ret; 1691 1692 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1693 return; 1694 1695 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1696 if (ret != 0) { 1697 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1698 return; 1699 } 1700 1701 for (i = 0; i < nb_entries; i++) { 1702 idx = i / RTE_RETA_GROUP_SIZE; 1703 shift = i % RTE_RETA_GROUP_SIZE; 1704 if (!(reta_conf[idx].mask & (1ULL << shift))) 1705 continue; 1706 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1707 i, reta_conf[idx].reta[shift]); 1708 } 1709 } 1710 1711 /* 1712 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1713 * key of the port. 1714 */ 1715 void 1716 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1717 { 1718 struct rte_eth_rss_conf rss_conf; 1719 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1720 uint64_t rss_hf; 1721 uint8_t i; 1722 int diag; 1723 struct rte_eth_dev_info dev_info; 1724 uint8_t hash_key_size; 1725 1726 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1727 return; 1728 1729 memset(&dev_info, 0, sizeof(dev_info)); 1730 rte_eth_dev_info_get(port_id, &dev_info); 1731 if (dev_info.hash_key_size > 0 && 1732 dev_info.hash_key_size <= sizeof(rss_key)) 1733 hash_key_size = dev_info.hash_key_size; 1734 else { 1735 printf("dev_info did not provide a valid hash key size\n"); 1736 return; 1737 } 1738 1739 rss_conf.rss_hf = 0; 1740 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1741 if (!strcmp(rss_info, rss_type_table[i].str)) 1742 rss_conf.rss_hf = rss_type_table[i].rss_type; 1743 } 1744 1745 /* Get RSS hash key if asked to display it */ 1746 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1747 rss_conf.rss_key_len = hash_key_size; 1748 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1749 if (diag != 0) { 1750 switch (diag) { 1751 case -ENODEV: 1752 printf("port index %d invalid\n", port_id); 1753 break; 1754 case -ENOTSUP: 1755 printf("operation not supported by device\n"); 1756 break; 1757 default: 1758 printf("operation failed - diag=%d\n", diag); 1759 break; 1760 } 1761 return; 1762 } 1763 rss_hf = rss_conf.rss_hf; 1764 if (rss_hf == 0) { 1765 printf("RSS disabled\n"); 1766 return; 1767 } 1768 printf("RSS functions:\n "); 1769 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1770 if (rss_hf & rss_type_table[i].rss_type) 1771 printf("%s ", rss_type_table[i].str); 1772 } 1773 printf("\n"); 1774 if (!show_rss_key) 1775 return; 1776 printf("RSS key:\n"); 1777 for (i = 0; i < hash_key_size; i++) 1778 printf("%02X", rss_key[i]); 1779 printf("\n"); 1780 } 1781 1782 void 1783 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1784 uint hash_key_len) 1785 { 1786 struct rte_eth_rss_conf rss_conf; 1787 int diag; 1788 unsigned int i; 1789 1790 rss_conf.rss_key = NULL; 1791 rss_conf.rss_key_len = hash_key_len; 1792 rss_conf.rss_hf = 0; 1793 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1794 if (!strcmp(rss_type_table[i].str, rss_type)) 1795 rss_conf.rss_hf = rss_type_table[i].rss_type; 1796 } 1797 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1798 if (diag == 0) { 1799 rss_conf.rss_key = hash_key; 1800 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1801 } 1802 if (diag == 0) 1803 return; 1804 1805 switch (diag) { 1806 case -ENODEV: 1807 printf("port index %d invalid\n", port_id); 1808 break; 1809 case -ENOTSUP: 1810 printf("operation not supported by device\n"); 1811 break; 1812 default: 1813 printf("operation failed - diag=%d\n", diag); 1814 break; 1815 } 1816 } 1817 1818 /* 1819 * Setup forwarding configuration for each logical core. 1820 */ 1821 static void 1822 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1823 { 1824 streamid_t nb_fs_per_lcore; 1825 streamid_t nb_fs; 1826 streamid_t sm_id; 1827 lcoreid_t nb_extra; 1828 lcoreid_t nb_fc; 1829 lcoreid_t nb_lc; 1830 lcoreid_t lc_id; 1831 1832 nb_fs = cfg->nb_fwd_streams; 1833 nb_fc = cfg->nb_fwd_lcores; 1834 if (nb_fs <= nb_fc) { 1835 nb_fs_per_lcore = 1; 1836 nb_extra = 0; 1837 } else { 1838 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1839 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1840 } 1841 1842 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1843 sm_id = 0; 1844 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1845 fwd_lcores[lc_id]->stream_idx = sm_id; 1846 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1847 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1848 } 1849 1850 /* 1851 * Assign extra remaining streams, if any. 1852 */ 1853 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1854 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1855 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1856 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1857 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1858 } 1859 } 1860 1861 static void 1862 simple_fwd_config_setup(void) 1863 { 1864 portid_t i; 1865 portid_t j; 1866 portid_t inc = 2; 1867 1868 if (port_topology == PORT_TOPOLOGY_CHAINED || 1869 port_topology == PORT_TOPOLOGY_LOOP) { 1870 inc = 1; 1871 } else if (nb_fwd_ports % 2) { 1872 printf("\nWarning! Cannot handle an odd number of ports " 1873 "with the current port topology. Configuration " 1874 "must be changed to have an even number of ports, " 1875 "or relaunch application with " 1876 "--port-topology=chained\n\n"); 1877 } 1878 1879 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1880 cur_fwd_config.nb_fwd_streams = 1881 (streamid_t) cur_fwd_config.nb_fwd_ports; 1882 1883 /* reinitialize forwarding streams */ 1884 init_fwd_streams(); 1885 1886 /* 1887 * In the simple forwarding test, the number of forwarding cores 1888 * must be lower or equal to the number of forwarding ports. 1889 */ 1890 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1891 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1892 cur_fwd_config.nb_fwd_lcores = 1893 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1894 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1895 1896 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1897 if (port_topology != PORT_TOPOLOGY_LOOP) 1898 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1899 else 1900 j = i; 1901 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1902 fwd_streams[i]->rx_queue = 0; 1903 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1904 fwd_streams[i]->tx_queue = 0; 1905 fwd_streams[i]->peer_addr = j; 1906 fwd_streams[i]->retry_enabled = retry_enabled; 1907 1908 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1909 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1910 fwd_streams[j]->rx_queue = 0; 1911 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1912 fwd_streams[j]->tx_queue = 0; 1913 fwd_streams[j]->peer_addr = i; 1914 fwd_streams[j]->retry_enabled = retry_enabled; 1915 } 1916 } 1917 } 1918 1919 /** 1920 * For the RSS forwarding test all streams distributed over lcores. Each stream 1921 * being composed of a RX queue to poll on a RX port for input messages, 1922 * associated with a TX queue of a TX port where to send forwarded packets. 1923 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1924 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1925 * following rules: 1926 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1927 * - TxQl = RxQj 1928 */ 1929 static void 1930 rss_fwd_config_setup(void) 1931 { 1932 portid_t rxp; 1933 portid_t txp; 1934 queueid_t rxq; 1935 queueid_t nb_q; 1936 streamid_t sm_id; 1937 1938 nb_q = nb_rxq; 1939 if (nb_q > nb_txq) 1940 nb_q = nb_txq; 1941 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1942 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1943 cur_fwd_config.nb_fwd_streams = 1944 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1945 1946 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1947 cur_fwd_config.nb_fwd_lcores = 1948 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1949 1950 /* reinitialize forwarding streams */ 1951 init_fwd_streams(); 1952 1953 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1954 rxp = 0; rxq = 0; 1955 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1956 struct fwd_stream *fs; 1957 1958 fs = fwd_streams[sm_id]; 1959 1960 if ((rxp & 0x1) == 0) 1961 txp = (portid_t) (rxp + 1); 1962 else 1963 txp = (portid_t) (rxp - 1); 1964 /* 1965 * if we are in loopback, simply send stuff out through the 1966 * ingress port 1967 */ 1968 if (port_topology == PORT_TOPOLOGY_LOOP) 1969 txp = rxp; 1970 1971 fs->rx_port = fwd_ports_ids[rxp]; 1972 fs->rx_queue = rxq; 1973 fs->tx_port = fwd_ports_ids[txp]; 1974 fs->tx_queue = rxq; 1975 fs->peer_addr = fs->tx_port; 1976 fs->retry_enabled = retry_enabled; 1977 rxq = (queueid_t) (rxq + 1); 1978 if (rxq < nb_q) 1979 continue; 1980 /* 1981 * rxq == nb_q 1982 * Restart from RX queue 0 on next RX port 1983 */ 1984 rxq = 0; 1985 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1986 rxp = (portid_t) 1987 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1988 else 1989 rxp = (portid_t) (rxp + 1); 1990 } 1991 } 1992 1993 /** 1994 * For the DCB forwarding test, each core is assigned on each traffic class. 1995 * 1996 * Each core is assigned a multi-stream, each stream being composed of 1997 * a RX queue to poll on a RX port for input messages, associated with 1998 * a TX queue of a TX port where to send forwarded packets. All RX and 1999 * TX queues are mapping to the same traffic class. 2000 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2001 * the same core 2002 */ 2003 static void 2004 dcb_fwd_config_setup(void) 2005 { 2006 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2007 portid_t txp, rxp = 0; 2008 queueid_t txq, rxq = 0; 2009 lcoreid_t lc_id; 2010 uint16_t nb_rx_queue, nb_tx_queue; 2011 uint16_t i, j, k, sm_id = 0; 2012 uint8_t tc = 0; 2013 2014 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2015 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2016 cur_fwd_config.nb_fwd_streams = 2017 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2018 2019 /* reinitialize forwarding streams */ 2020 init_fwd_streams(); 2021 sm_id = 0; 2022 txp = 1; 2023 /* get the dcb info on the first RX and TX ports */ 2024 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2025 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2026 2027 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2028 fwd_lcores[lc_id]->stream_nb = 0; 2029 fwd_lcores[lc_id]->stream_idx = sm_id; 2030 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2031 /* if the nb_queue is zero, means this tc is 2032 * not enabled on the POOL 2033 */ 2034 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2035 break; 2036 k = fwd_lcores[lc_id]->stream_nb + 2037 fwd_lcores[lc_id]->stream_idx; 2038 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2039 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2040 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2041 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2042 for (j = 0; j < nb_rx_queue; j++) { 2043 struct fwd_stream *fs; 2044 2045 fs = fwd_streams[k + j]; 2046 fs->rx_port = fwd_ports_ids[rxp]; 2047 fs->rx_queue = rxq + j; 2048 fs->tx_port = fwd_ports_ids[txp]; 2049 fs->tx_queue = txq + j % nb_tx_queue; 2050 fs->peer_addr = fs->tx_port; 2051 fs->retry_enabled = retry_enabled; 2052 } 2053 fwd_lcores[lc_id]->stream_nb += 2054 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2055 } 2056 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2057 2058 tc++; 2059 if (tc < rxp_dcb_info.nb_tcs) 2060 continue; 2061 /* Restart from TC 0 on next RX port */ 2062 tc = 0; 2063 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2064 rxp = (portid_t) 2065 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2066 else 2067 rxp++; 2068 if (rxp >= nb_fwd_ports) 2069 return; 2070 /* get the dcb information on next RX and TX ports */ 2071 if ((rxp & 0x1) == 0) 2072 txp = (portid_t) (rxp + 1); 2073 else 2074 txp = (portid_t) (rxp - 1); 2075 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2076 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2077 } 2078 } 2079 2080 static void 2081 icmp_echo_config_setup(void) 2082 { 2083 portid_t rxp; 2084 queueid_t rxq; 2085 lcoreid_t lc_id; 2086 uint16_t sm_id; 2087 2088 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2089 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2090 (nb_txq * nb_fwd_ports); 2091 else 2092 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2093 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2094 cur_fwd_config.nb_fwd_streams = 2095 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2096 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2097 cur_fwd_config.nb_fwd_lcores = 2098 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2099 if (verbose_level > 0) { 2100 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2101 __FUNCTION__, 2102 cur_fwd_config.nb_fwd_lcores, 2103 cur_fwd_config.nb_fwd_ports, 2104 cur_fwd_config.nb_fwd_streams); 2105 } 2106 2107 /* reinitialize forwarding streams */ 2108 init_fwd_streams(); 2109 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2110 rxp = 0; rxq = 0; 2111 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2112 if (verbose_level > 0) 2113 printf(" core=%d: \n", lc_id); 2114 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2115 struct fwd_stream *fs; 2116 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2117 fs->rx_port = fwd_ports_ids[rxp]; 2118 fs->rx_queue = rxq; 2119 fs->tx_port = fs->rx_port; 2120 fs->tx_queue = rxq; 2121 fs->peer_addr = fs->tx_port; 2122 fs->retry_enabled = retry_enabled; 2123 if (verbose_level > 0) 2124 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2125 sm_id, fs->rx_port, fs->rx_queue, 2126 fs->tx_queue); 2127 rxq = (queueid_t) (rxq + 1); 2128 if (rxq == nb_rxq) { 2129 rxq = 0; 2130 rxp = (portid_t) (rxp + 1); 2131 } 2132 } 2133 } 2134 } 2135 2136 void 2137 fwd_config_setup(void) 2138 { 2139 cur_fwd_config.fwd_eng = cur_fwd_eng; 2140 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2141 icmp_echo_config_setup(); 2142 return; 2143 } 2144 if ((nb_rxq > 1) && (nb_txq > 1)){ 2145 if (dcb_config) 2146 dcb_fwd_config_setup(); 2147 else 2148 rss_fwd_config_setup(); 2149 } 2150 else 2151 simple_fwd_config_setup(); 2152 } 2153 2154 void 2155 pkt_fwd_config_display(struct fwd_config *cfg) 2156 { 2157 struct fwd_stream *fs; 2158 lcoreid_t lc_id; 2159 streamid_t sm_id; 2160 2161 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2162 "NUMA support %s, MP over anonymous pages %s\n", 2163 cfg->fwd_eng->fwd_mode_name, 2164 retry_enabled == 0 ? "" : " with retry", 2165 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2166 numa_support == 1 ? "enabled" : "disabled", 2167 mp_anon != 0 ? "enabled" : "disabled"); 2168 2169 if (retry_enabled) 2170 printf("TX retry num: %u, delay between TX retries: %uus\n", 2171 burst_tx_retry_num, burst_tx_delay_time); 2172 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2173 printf("Logical Core %u (socket %u) forwards packets on " 2174 "%d streams:", 2175 fwd_lcores_cpuids[lc_id], 2176 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2177 fwd_lcores[lc_id]->stream_nb); 2178 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2179 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2180 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2181 "P=%d/Q=%d (socket %u) ", 2182 fs->rx_port, fs->rx_queue, 2183 ports[fs->rx_port].socket_id, 2184 fs->tx_port, fs->tx_queue, 2185 ports[fs->tx_port].socket_id); 2186 print_ethaddr("peer=", 2187 &peer_eth_addrs[fs->peer_addr]); 2188 } 2189 printf("\n"); 2190 } 2191 printf("\n"); 2192 } 2193 2194 int 2195 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2196 { 2197 unsigned int i; 2198 unsigned int lcore_cpuid; 2199 int record_now; 2200 2201 record_now = 0; 2202 again: 2203 for (i = 0; i < nb_lc; i++) { 2204 lcore_cpuid = lcorelist[i]; 2205 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2206 printf("lcore %u not enabled\n", lcore_cpuid); 2207 return -1; 2208 } 2209 if (lcore_cpuid == rte_get_master_lcore()) { 2210 printf("lcore %u cannot be masked on for running " 2211 "packet forwarding, which is the master lcore " 2212 "and reserved for command line parsing only\n", 2213 lcore_cpuid); 2214 return -1; 2215 } 2216 if (record_now) 2217 fwd_lcores_cpuids[i] = lcore_cpuid; 2218 } 2219 if (record_now == 0) { 2220 record_now = 1; 2221 goto again; 2222 } 2223 nb_cfg_lcores = (lcoreid_t) nb_lc; 2224 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2225 printf("previous number of forwarding cores %u - changed to " 2226 "number of configured cores %u\n", 2227 (unsigned int) nb_fwd_lcores, nb_lc); 2228 nb_fwd_lcores = (lcoreid_t) nb_lc; 2229 } 2230 2231 return 0; 2232 } 2233 2234 int 2235 set_fwd_lcores_mask(uint64_t lcoremask) 2236 { 2237 unsigned int lcorelist[64]; 2238 unsigned int nb_lc; 2239 unsigned int i; 2240 2241 if (lcoremask == 0) { 2242 printf("Invalid NULL mask of cores\n"); 2243 return -1; 2244 } 2245 nb_lc = 0; 2246 for (i = 0; i < 64; i++) { 2247 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2248 continue; 2249 lcorelist[nb_lc++] = i; 2250 } 2251 return set_fwd_lcores_list(lcorelist, nb_lc); 2252 } 2253 2254 void 2255 set_fwd_lcores_number(uint16_t nb_lc) 2256 { 2257 if (nb_lc > nb_cfg_lcores) { 2258 printf("nb fwd cores %u > %u (max. number of configured " 2259 "lcores) - ignored\n", 2260 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2261 return; 2262 } 2263 nb_fwd_lcores = (lcoreid_t) nb_lc; 2264 printf("Number of forwarding cores set to %u\n", 2265 (unsigned int) nb_fwd_lcores); 2266 } 2267 2268 void 2269 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2270 { 2271 unsigned int i; 2272 portid_t port_id; 2273 int record_now; 2274 2275 record_now = 0; 2276 again: 2277 for (i = 0; i < nb_pt; i++) { 2278 port_id = (portid_t) portlist[i]; 2279 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2280 return; 2281 if (record_now) 2282 fwd_ports_ids[i] = port_id; 2283 } 2284 if (record_now == 0) { 2285 record_now = 1; 2286 goto again; 2287 } 2288 nb_cfg_ports = (portid_t) nb_pt; 2289 if (nb_fwd_ports != (portid_t) nb_pt) { 2290 printf("previous number of forwarding ports %u - changed to " 2291 "number of configured ports %u\n", 2292 (unsigned int) nb_fwd_ports, nb_pt); 2293 nb_fwd_ports = (portid_t) nb_pt; 2294 } 2295 } 2296 2297 void 2298 set_fwd_ports_mask(uint64_t portmask) 2299 { 2300 unsigned int portlist[64]; 2301 unsigned int nb_pt; 2302 unsigned int i; 2303 2304 if (portmask == 0) { 2305 printf("Invalid NULL mask of ports\n"); 2306 return; 2307 } 2308 nb_pt = 0; 2309 RTE_ETH_FOREACH_DEV(i) { 2310 if (! ((uint64_t)(1ULL << i) & portmask)) 2311 continue; 2312 portlist[nb_pt++] = i; 2313 } 2314 set_fwd_ports_list(portlist, nb_pt); 2315 } 2316 2317 void 2318 set_fwd_ports_number(uint16_t nb_pt) 2319 { 2320 if (nb_pt > nb_cfg_ports) { 2321 printf("nb fwd ports %u > %u (number of configured " 2322 "ports) - ignored\n", 2323 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2324 return; 2325 } 2326 nb_fwd_ports = (portid_t) nb_pt; 2327 printf("Number of forwarding ports set to %u\n", 2328 (unsigned int) nb_fwd_ports); 2329 } 2330 2331 int 2332 port_is_forwarding(portid_t port_id) 2333 { 2334 unsigned int i; 2335 2336 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2337 return -1; 2338 2339 for (i = 0; i < nb_fwd_ports; i++) { 2340 if (fwd_ports_ids[i] == port_id) 2341 return 1; 2342 } 2343 2344 return 0; 2345 } 2346 2347 void 2348 set_nb_pkt_per_burst(uint16_t nb) 2349 { 2350 if (nb > MAX_PKT_BURST) { 2351 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2352 " ignored\n", 2353 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2354 return; 2355 } 2356 nb_pkt_per_burst = nb; 2357 printf("Number of packets per burst set to %u\n", 2358 (unsigned int) nb_pkt_per_burst); 2359 } 2360 2361 static const char * 2362 tx_split_get_name(enum tx_pkt_split split) 2363 { 2364 uint32_t i; 2365 2366 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2367 if (tx_split_name[i].split == split) 2368 return tx_split_name[i].name; 2369 } 2370 return NULL; 2371 } 2372 2373 void 2374 set_tx_pkt_split(const char *name) 2375 { 2376 uint32_t i; 2377 2378 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2379 if (strcmp(tx_split_name[i].name, name) == 0) { 2380 tx_pkt_split = tx_split_name[i].split; 2381 return; 2382 } 2383 } 2384 printf("unknown value: \"%s\"\n", name); 2385 } 2386 2387 void 2388 show_tx_pkt_segments(void) 2389 { 2390 uint32_t i, n; 2391 const char *split; 2392 2393 n = tx_pkt_nb_segs; 2394 split = tx_split_get_name(tx_pkt_split); 2395 2396 printf("Number of segments: %u\n", n); 2397 printf("Segment sizes: "); 2398 for (i = 0; i != n - 1; i++) 2399 printf("%hu,", tx_pkt_seg_lengths[i]); 2400 printf("%hu\n", tx_pkt_seg_lengths[i]); 2401 printf("Split packet: %s\n", split); 2402 } 2403 2404 void 2405 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2406 { 2407 uint16_t tx_pkt_len; 2408 unsigned i; 2409 2410 if (nb_segs >= (unsigned) nb_txd) { 2411 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2412 nb_segs, (unsigned int) nb_txd); 2413 return; 2414 } 2415 2416 /* 2417 * Check that each segment length is greater or equal than 2418 * the mbuf data sise. 2419 * Check also that the total packet length is greater or equal than the 2420 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2421 */ 2422 tx_pkt_len = 0; 2423 for (i = 0; i < nb_segs; i++) { 2424 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2425 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2426 i, seg_lengths[i], (unsigned) mbuf_data_size); 2427 return; 2428 } 2429 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2430 } 2431 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2432 printf("total packet length=%u < %d - give up\n", 2433 (unsigned) tx_pkt_len, 2434 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2435 return; 2436 } 2437 2438 for (i = 0; i < nb_segs; i++) 2439 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2440 2441 tx_pkt_length = tx_pkt_len; 2442 tx_pkt_nb_segs = (uint8_t) nb_segs; 2443 } 2444 2445 void 2446 setup_gro(const char *onoff, portid_t port_id) 2447 { 2448 if (!rte_eth_dev_is_valid_port(port_id)) { 2449 printf("invalid port id %u\n", port_id); 2450 return; 2451 } 2452 if (test_done == 0) { 2453 printf("Before enable/disable GRO," 2454 " please stop forwarding first\n"); 2455 return; 2456 } 2457 if (strcmp(onoff, "on") == 0) { 2458 if (gro_ports[port_id].enable != 0) { 2459 printf("Port %u has enabled GRO. Please" 2460 " disable GRO first\n", port_id); 2461 return; 2462 } 2463 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2464 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2465 gro_ports[port_id].param.max_flow_num = 2466 GRO_DEFAULT_FLOW_NUM; 2467 gro_ports[port_id].param.max_item_per_flow = 2468 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2469 } 2470 gro_ports[port_id].enable = 1; 2471 } else { 2472 if (gro_ports[port_id].enable == 0) { 2473 printf("Port %u has disabled GRO\n", port_id); 2474 return; 2475 } 2476 gro_ports[port_id].enable = 0; 2477 } 2478 } 2479 2480 void 2481 setup_gro_flush_cycles(uint8_t cycles) 2482 { 2483 if (test_done == 0) { 2484 printf("Before change flush interval for GRO," 2485 " please stop forwarding first.\n"); 2486 return; 2487 } 2488 2489 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2490 GRO_DEFAULT_FLUSH_CYCLES) { 2491 printf("The flushing cycle be in the range" 2492 " of 1 to %u. Revert to the default" 2493 " value %u.\n", 2494 GRO_MAX_FLUSH_CYCLES, 2495 GRO_DEFAULT_FLUSH_CYCLES); 2496 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2497 } 2498 2499 gro_flush_cycles = cycles; 2500 } 2501 2502 void 2503 show_gro(portid_t port_id) 2504 { 2505 struct rte_gro_param *param; 2506 uint32_t max_pkts_num; 2507 2508 param = &gro_ports[port_id].param; 2509 2510 if (!rte_eth_dev_is_valid_port(port_id)) { 2511 printf("Invalid port id %u.\n", port_id); 2512 return; 2513 } 2514 if (gro_ports[port_id].enable) { 2515 printf("GRO type: TCP/IPv4\n"); 2516 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2517 max_pkts_num = param->max_flow_num * 2518 param->max_item_per_flow; 2519 } else 2520 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2521 printf("Max number of packets to perform GRO: %u\n", 2522 max_pkts_num); 2523 printf("Flushing cycles: %u\n", gro_flush_cycles); 2524 } else 2525 printf("Port %u doesn't enable GRO.\n", port_id); 2526 } 2527 2528 void 2529 setup_gso(const char *mode, portid_t port_id) 2530 { 2531 if (!rte_eth_dev_is_valid_port(port_id)) { 2532 printf("invalid port id %u\n", port_id); 2533 return; 2534 } 2535 if (strcmp(mode, "on") == 0) { 2536 if (test_done == 0) { 2537 printf("before enabling GSO," 2538 " please stop forwarding first\n"); 2539 return; 2540 } 2541 gso_ports[port_id].enable = 1; 2542 } else if (strcmp(mode, "off") == 0) { 2543 if (test_done == 0) { 2544 printf("before disabling GSO," 2545 " please stop forwarding first\n"); 2546 return; 2547 } 2548 gso_ports[port_id].enable = 0; 2549 } 2550 } 2551 2552 char* 2553 list_pkt_forwarding_modes(void) 2554 { 2555 static char fwd_modes[128] = ""; 2556 const char *separator = "|"; 2557 struct fwd_engine *fwd_eng; 2558 unsigned i = 0; 2559 2560 if (strlen (fwd_modes) == 0) { 2561 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2562 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2563 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2564 strncat(fwd_modes, separator, 2565 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2566 } 2567 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2568 } 2569 2570 return fwd_modes; 2571 } 2572 2573 char* 2574 list_pkt_forwarding_retry_modes(void) 2575 { 2576 static char fwd_modes[128] = ""; 2577 const char *separator = "|"; 2578 struct fwd_engine *fwd_eng; 2579 unsigned i = 0; 2580 2581 if (strlen(fwd_modes) == 0) { 2582 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2583 if (fwd_eng == &rx_only_engine) 2584 continue; 2585 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2586 sizeof(fwd_modes) - 2587 strlen(fwd_modes) - 1); 2588 strncat(fwd_modes, separator, 2589 sizeof(fwd_modes) - 2590 strlen(fwd_modes) - 1); 2591 } 2592 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2593 } 2594 2595 return fwd_modes; 2596 } 2597 2598 void 2599 set_pkt_forwarding_mode(const char *fwd_mode_name) 2600 { 2601 struct fwd_engine *fwd_eng; 2602 unsigned i; 2603 2604 i = 0; 2605 while ((fwd_eng = fwd_engines[i]) != NULL) { 2606 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2607 printf("Set %s packet forwarding mode%s\n", 2608 fwd_mode_name, 2609 retry_enabled == 0 ? "" : " with retry"); 2610 cur_fwd_eng = fwd_eng; 2611 return; 2612 } 2613 i++; 2614 } 2615 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2616 } 2617 2618 void 2619 set_verbose_level(uint16_t vb_level) 2620 { 2621 printf("Change verbose level from %u to %u\n", 2622 (unsigned int) verbose_level, (unsigned int) vb_level); 2623 verbose_level = vb_level; 2624 } 2625 2626 void 2627 vlan_extend_set(portid_t port_id, int on) 2628 { 2629 int diag; 2630 int vlan_offload; 2631 2632 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2633 return; 2634 2635 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2636 2637 if (on) 2638 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2639 else 2640 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2641 2642 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2643 if (diag < 0) 2644 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2645 "diag=%d\n", port_id, on, diag); 2646 } 2647 2648 void 2649 rx_vlan_strip_set(portid_t port_id, int on) 2650 { 2651 int diag; 2652 int vlan_offload; 2653 2654 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2655 return; 2656 2657 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2658 2659 if (on) 2660 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2661 else 2662 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2663 2664 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2665 if (diag < 0) 2666 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2667 "diag=%d\n", port_id, on, diag); 2668 } 2669 2670 void 2671 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2672 { 2673 int diag; 2674 2675 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2676 return; 2677 2678 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2679 if (diag < 0) 2680 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2681 "diag=%d\n", port_id, queue_id, on, diag); 2682 } 2683 2684 void 2685 rx_vlan_filter_set(portid_t port_id, int on) 2686 { 2687 int diag; 2688 int vlan_offload; 2689 2690 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2691 return; 2692 2693 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2694 2695 if (on) 2696 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2697 else 2698 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2699 2700 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2701 if (diag < 0) 2702 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2703 "diag=%d\n", port_id, on, diag); 2704 } 2705 2706 int 2707 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2708 { 2709 int diag; 2710 2711 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2712 return 1; 2713 if (vlan_id_is_invalid(vlan_id)) 2714 return 1; 2715 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2716 if (diag == 0) 2717 return 0; 2718 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2719 "diag=%d\n", 2720 port_id, vlan_id, on, diag); 2721 return -1; 2722 } 2723 2724 void 2725 rx_vlan_all_filter_set(portid_t port_id, int on) 2726 { 2727 uint16_t vlan_id; 2728 2729 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2730 return; 2731 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2732 if (rx_vft_set(port_id, vlan_id, on)) 2733 break; 2734 } 2735 } 2736 2737 void 2738 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2739 { 2740 int diag; 2741 2742 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2743 return; 2744 2745 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2746 if (diag == 0) 2747 return; 2748 2749 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2750 "diag=%d\n", 2751 port_id, vlan_type, tp_id, diag); 2752 } 2753 2754 void 2755 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2756 { 2757 int vlan_offload; 2758 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2759 return; 2760 if (vlan_id_is_invalid(vlan_id)) 2761 return; 2762 2763 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2764 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2765 printf("Error, as QinQ has been enabled.\n"); 2766 return; 2767 } 2768 2769 tx_vlan_reset(port_id); 2770 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; 2771 ports[port_id].tx_vlan_id = vlan_id; 2772 } 2773 2774 void 2775 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2776 { 2777 int vlan_offload; 2778 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2779 return; 2780 if (vlan_id_is_invalid(vlan_id)) 2781 return; 2782 if (vlan_id_is_invalid(vlan_id_outer)) 2783 return; 2784 2785 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2786 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2787 printf("Error, as QinQ hasn't been enabled.\n"); 2788 return; 2789 } 2790 2791 tx_vlan_reset(port_id); 2792 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; 2793 ports[port_id].tx_vlan_id = vlan_id; 2794 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2795 } 2796 2797 void 2798 tx_vlan_reset(portid_t port_id) 2799 { 2800 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2801 return; 2802 ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | 2803 TESTPMD_TX_OFFLOAD_INSERT_QINQ); 2804 ports[port_id].tx_vlan_id = 0; 2805 ports[port_id].tx_vlan_id_outer = 0; 2806 } 2807 2808 void 2809 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2810 { 2811 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2812 return; 2813 2814 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2815 } 2816 2817 void 2818 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2819 { 2820 uint16_t i; 2821 uint8_t existing_mapping_found = 0; 2822 2823 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2824 return; 2825 2826 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2827 return; 2828 2829 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2830 printf("map_value not in required range 0..%d\n", 2831 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2832 return; 2833 } 2834 2835 if (!is_rx) { /*then tx*/ 2836 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2837 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2838 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2839 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2840 existing_mapping_found = 1; 2841 break; 2842 } 2843 } 2844 if (!existing_mapping_found) { /* A new additional mapping... */ 2845 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2846 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2847 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2848 nb_tx_queue_stats_mappings++; 2849 } 2850 } 2851 else { /*rx*/ 2852 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2853 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2854 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2855 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2856 existing_mapping_found = 1; 2857 break; 2858 } 2859 } 2860 if (!existing_mapping_found) { /* A new additional mapping... */ 2861 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2862 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2863 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2864 nb_rx_queue_stats_mappings++; 2865 } 2866 } 2867 } 2868 2869 static inline void 2870 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2871 { 2872 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 2873 2874 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2875 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 2876 " tunnel_id: 0x%08x", 2877 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2878 rte_be_to_cpu_32(mask->tunnel_id_mask)); 2879 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2880 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 2881 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 2882 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 2883 2884 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 2885 rte_be_to_cpu_16(mask->src_port_mask), 2886 rte_be_to_cpu_16(mask->dst_port_mask)); 2887 2888 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2889 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 2890 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 2891 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 2892 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 2893 2894 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2895 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 2896 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 2897 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 2898 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 2899 } 2900 2901 printf("\n"); 2902 } 2903 2904 static inline void 2905 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2906 { 2907 struct rte_eth_flex_payload_cfg *cfg; 2908 uint32_t i, j; 2909 2910 for (i = 0; i < flex_conf->nb_payloads; i++) { 2911 cfg = &flex_conf->flex_set[i]; 2912 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 2913 printf("\n RAW: "); 2914 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 2915 printf("\n L2_PAYLOAD: "); 2916 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 2917 printf("\n L3_PAYLOAD: "); 2918 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 2919 printf("\n L4_PAYLOAD: "); 2920 else 2921 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 2922 for (j = 0; j < num; j++) 2923 printf(" %-5u", cfg->src_offset[j]); 2924 } 2925 printf("\n"); 2926 } 2927 2928 static char * 2929 flowtype_to_str(uint16_t flow_type) 2930 { 2931 struct flow_type_info { 2932 char str[32]; 2933 uint16_t ftype; 2934 }; 2935 2936 uint8_t i; 2937 static struct flow_type_info flowtype_str_table[] = { 2938 {"raw", RTE_ETH_FLOW_RAW}, 2939 {"ipv4", RTE_ETH_FLOW_IPV4}, 2940 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 2941 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 2942 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 2943 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 2944 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 2945 {"ipv6", RTE_ETH_FLOW_IPV6}, 2946 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 2947 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 2948 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 2949 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 2950 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 2951 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 2952 {"port", RTE_ETH_FLOW_PORT}, 2953 {"vxlan", RTE_ETH_FLOW_VXLAN}, 2954 {"geneve", RTE_ETH_FLOW_GENEVE}, 2955 {"nvgre", RTE_ETH_FLOW_NVGRE}, 2956 }; 2957 2958 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 2959 if (flowtype_str_table[i].ftype == flow_type) 2960 return flowtype_str_table[i].str; 2961 } 2962 2963 return NULL; 2964 } 2965 2966 static inline void 2967 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2968 { 2969 struct rte_eth_fdir_flex_mask *mask; 2970 uint32_t i, j; 2971 char *p; 2972 2973 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 2974 mask = &flex_conf->flex_mask[i]; 2975 p = flowtype_to_str(mask->flow_type); 2976 printf("\n %s:\t", p ? p : "unknown"); 2977 for (j = 0; j < num; j++) 2978 printf(" %02x", mask->mask[j]); 2979 } 2980 printf("\n"); 2981 } 2982 2983 static inline void 2984 print_fdir_flow_type(uint32_t flow_types_mask) 2985 { 2986 int i; 2987 char *p; 2988 2989 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 2990 if (!(flow_types_mask & (1 << i))) 2991 continue; 2992 p = flowtype_to_str(i); 2993 if (p) 2994 printf(" %s", p); 2995 else 2996 printf(" unknown"); 2997 } 2998 printf("\n"); 2999 } 3000 3001 void 3002 fdir_get_infos(portid_t port_id) 3003 { 3004 struct rte_eth_fdir_stats fdir_stat; 3005 struct rte_eth_fdir_info fdir_info; 3006 int ret; 3007 3008 static const char *fdir_stats_border = "########################"; 3009 3010 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3011 return; 3012 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3013 if (ret < 0) { 3014 printf("\n FDIR is not supported on port %-2d\n", 3015 port_id); 3016 return; 3017 } 3018 3019 memset(&fdir_info, 0, sizeof(fdir_info)); 3020 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3021 RTE_ETH_FILTER_INFO, &fdir_info); 3022 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3023 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3024 RTE_ETH_FILTER_STATS, &fdir_stat); 3025 printf("\n %s FDIR infos for port %-2d %s\n", 3026 fdir_stats_border, port_id, fdir_stats_border); 3027 printf(" MODE: "); 3028 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3029 printf(" PERFECT\n"); 3030 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3031 printf(" PERFECT-MAC-VLAN\n"); 3032 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3033 printf(" PERFECT-TUNNEL\n"); 3034 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3035 printf(" SIGNATURE\n"); 3036 else 3037 printf(" DISABLE\n"); 3038 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3039 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3040 printf(" SUPPORTED FLOW TYPE: "); 3041 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3042 } 3043 printf(" FLEX PAYLOAD INFO:\n"); 3044 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3045 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3046 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3047 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3048 fdir_info.flex_payload_unit, 3049 fdir_info.max_flex_payload_segment_num, 3050 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3051 printf(" MASK: "); 3052 print_fdir_mask(&fdir_info.mask); 3053 if (fdir_info.flex_conf.nb_payloads > 0) { 3054 printf(" FLEX PAYLOAD SRC OFFSET:"); 3055 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3056 } 3057 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3058 printf(" FLEX MASK CFG:"); 3059 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3060 } 3061 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3062 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3063 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3064 fdir_info.guarant_spc, fdir_info.best_spc); 3065 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3066 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3067 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3068 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3069 fdir_stat.collision, fdir_stat.free, 3070 fdir_stat.maxhash, fdir_stat.maxlen, 3071 fdir_stat.add, fdir_stat.remove, 3072 fdir_stat.f_add, fdir_stat.f_remove); 3073 printf(" %s############################%s\n", 3074 fdir_stats_border, fdir_stats_border); 3075 } 3076 3077 void 3078 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3079 { 3080 struct rte_port *port; 3081 struct rte_eth_fdir_flex_conf *flex_conf; 3082 int i, idx = 0; 3083 3084 port = &ports[port_id]; 3085 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3086 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3087 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3088 idx = i; 3089 break; 3090 } 3091 } 3092 if (i >= RTE_ETH_FLOW_MAX) { 3093 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3094 idx = flex_conf->nb_flexmasks; 3095 flex_conf->nb_flexmasks++; 3096 } else { 3097 printf("The flex mask table is full. Can not set flex" 3098 " mask for flow_type(%u).", cfg->flow_type); 3099 return; 3100 } 3101 } 3102 rte_memcpy(&flex_conf->flex_mask[idx], 3103 cfg, 3104 sizeof(struct rte_eth_fdir_flex_mask)); 3105 } 3106 3107 void 3108 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3109 { 3110 struct rte_port *port; 3111 struct rte_eth_fdir_flex_conf *flex_conf; 3112 int i, idx = 0; 3113 3114 port = &ports[port_id]; 3115 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3116 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3117 if (cfg->type == flex_conf->flex_set[i].type) { 3118 idx = i; 3119 break; 3120 } 3121 } 3122 if (i >= RTE_ETH_PAYLOAD_MAX) { 3123 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3124 idx = flex_conf->nb_payloads; 3125 flex_conf->nb_payloads++; 3126 } else { 3127 printf("The flex payload table is full. Can not set" 3128 " flex payload for type(%u).", cfg->type); 3129 return; 3130 } 3131 } 3132 rte_memcpy(&flex_conf->flex_set[idx], 3133 cfg, 3134 sizeof(struct rte_eth_flex_payload_cfg)); 3135 3136 } 3137 3138 void 3139 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3140 { 3141 #ifdef RTE_LIBRTE_IXGBE_PMD 3142 int diag; 3143 3144 if (is_rx) 3145 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3146 else 3147 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3148 3149 if (diag == 0) 3150 return; 3151 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3152 is_rx ? "rx" : "tx", port_id, diag); 3153 return; 3154 #endif 3155 printf("VF %s setting not supported for port %d\n", 3156 is_rx ? "Rx" : "Tx", port_id); 3157 RTE_SET_USED(vf); 3158 RTE_SET_USED(on); 3159 } 3160 3161 int 3162 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3163 { 3164 int diag; 3165 struct rte_eth_link link; 3166 3167 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3168 return 1; 3169 rte_eth_link_get_nowait(port_id, &link); 3170 if (rate > link.link_speed) { 3171 printf("Invalid rate value:%u bigger than link speed: %u\n", 3172 rate, link.link_speed); 3173 return 1; 3174 } 3175 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3176 if (diag == 0) 3177 return diag; 3178 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3179 port_id, diag); 3180 return diag; 3181 } 3182 3183 int 3184 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3185 { 3186 int diag = -ENOTSUP; 3187 3188 #ifdef RTE_LIBRTE_IXGBE_PMD 3189 if (diag == -ENOTSUP) 3190 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3191 q_msk); 3192 #endif 3193 #ifdef RTE_LIBRTE_BNXT_PMD 3194 if (diag == -ENOTSUP) 3195 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3196 #endif 3197 if (diag == 0) 3198 return diag; 3199 3200 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3201 port_id, diag); 3202 return diag; 3203 } 3204 3205 /* 3206 * Functions to manage the set of filtered Multicast MAC addresses. 3207 * 3208 * A pool of filtered multicast MAC addresses is associated with each port. 3209 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3210 * The address of the pool and the number of valid multicast MAC addresses 3211 * recorded in the pool are stored in the fields "mc_addr_pool" and 3212 * "mc_addr_nb" of the "rte_port" data structure. 3213 * 3214 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3215 * to be supplied a contiguous array of multicast MAC addresses. 3216 * To comply with this constraint, the set of multicast addresses recorded 3217 * into the pool are systematically compacted at the beginning of the pool. 3218 * Hence, when a multicast address is removed from the pool, all following 3219 * addresses, if any, are copied back to keep the set contiguous. 3220 */ 3221 #define MCAST_POOL_INC 32 3222 3223 static int 3224 mcast_addr_pool_extend(struct rte_port *port) 3225 { 3226 struct ether_addr *mc_pool; 3227 size_t mc_pool_size; 3228 3229 /* 3230 * If a free entry is available at the end of the pool, just 3231 * increment the number of recorded multicast addresses. 3232 */ 3233 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3234 port->mc_addr_nb++; 3235 return 0; 3236 } 3237 3238 /* 3239 * [re]allocate a pool with MCAST_POOL_INC more entries. 3240 * The previous test guarantees that port->mc_addr_nb is a multiple 3241 * of MCAST_POOL_INC. 3242 */ 3243 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3244 MCAST_POOL_INC); 3245 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3246 mc_pool_size); 3247 if (mc_pool == NULL) { 3248 printf("allocation of pool of %u multicast addresses failed\n", 3249 port->mc_addr_nb + MCAST_POOL_INC); 3250 return -ENOMEM; 3251 } 3252 3253 port->mc_addr_pool = mc_pool; 3254 port->mc_addr_nb++; 3255 return 0; 3256 3257 } 3258 3259 static void 3260 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3261 { 3262 port->mc_addr_nb--; 3263 if (addr_idx == port->mc_addr_nb) { 3264 /* No need to recompact the set of multicast addressses. */ 3265 if (port->mc_addr_nb == 0) { 3266 /* free the pool of multicast addresses. */ 3267 free(port->mc_addr_pool); 3268 port->mc_addr_pool = NULL; 3269 } 3270 return; 3271 } 3272 memmove(&port->mc_addr_pool[addr_idx], 3273 &port->mc_addr_pool[addr_idx + 1], 3274 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3275 } 3276 3277 static void 3278 eth_port_multicast_addr_list_set(uint8_t port_id) 3279 { 3280 struct rte_port *port; 3281 int diag; 3282 3283 port = &ports[port_id]; 3284 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3285 port->mc_addr_nb); 3286 if (diag == 0) 3287 return; 3288 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3289 port->mc_addr_nb, port_id, -diag); 3290 } 3291 3292 void 3293 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr) 3294 { 3295 struct rte_port *port; 3296 uint32_t i; 3297 3298 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3299 return; 3300 3301 port = &ports[port_id]; 3302 3303 /* 3304 * Check that the added multicast MAC address is not already recorded 3305 * in the pool of multicast addresses. 3306 */ 3307 for (i = 0; i < port->mc_addr_nb; i++) { 3308 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3309 printf("multicast address already filtered by port\n"); 3310 return; 3311 } 3312 } 3313 3314 if (mcast_addr_pool_extend(port) != 0) 3315 return; 3316 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3317 eth_port_multicast_addr_list_set(port_id); 3318 } 3319 3320 void 3321 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr) 3322 { 3323 struct rte_port *port; 3324 uint32_t i; 3325 3326 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3327 return; 3328 3329 port = &ports[port_id]; 3330 3331 /* 3332 * Search the pool of multicast MAC addresses for the removed address. 3333 */ 3334 for (i = 0; i < port->mc_addr_nb; i++) { 3335 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3336 break; 3337 } 3338 if (i == port->mc_addr_nb) { 3339 printf("multicast address not filtered by port %d\n", port_id); 3340 return; 3341 } 3342 3343 mcast_addr_pool_remove(port, i); 3344 eth_port_multicast_addr_list_set(port_id); 3345 } 3346 3347 void 3348 port_dcb_info_display(uint8_t port_id) 3349 { 3350 struct rte_eth_dcb_info dcb_info; 3351 uint16_t i; 3352 int ret; 3353 static const char *border = "================"; 3354 3355 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3356 return; 3357 3358 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3359 if (ret) { 3360 printf("\n Failed to get dcb infos on port %-2d\n", 3361 port_id); 3362 return; 3363 } 3364 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3365 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3366 printf("\n TC : "); 3367 for (i = 0; i < dcb_info.nb_tcs; i++) 3368 printf("\t%4d", i); 3369 printf("\n Priority : "); 3370 for (i = 0; i < dcb_info.nb_tcs; i++) 3371 printf("\t%4d", dcb_info.prio_tc[i]); 3372 printf("\n BW percent :"); 3373 for (i = 0; i < dcb_info.nb_tcs; i++) 3374 printf("\t%4d%%", dcb_info.tc_bws[i]); 3375 printf("\n RXQ base : "); 3376 for (i = 0; i < dcb_info.nb_tcs; i++) 3377 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3378 printf("\n RXQ number :"); 3379 for (i = 0; i < dcb_info.nb_tcs; i++) 3380 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3381 printf("\n TXQ base : "); 3382 for (i = 0; i < dcb_info.nb_tcs; i++) 3383 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3384 printf("\n TXQ number :"); 3385 for (i = 0; i < dcb_info.nb_tcs; i++) 3386 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3387 printf("\n"); 3388 } 3389 3390 uint8_t * 3391 open_ddp_package_file(const char *file_path, uint32_t *size) 3392 { 3393 int fd = open(file_path, O_RDONLY); 3394 off_t pkg_size; 3395 uint8_t *buf = NULL; 3396 int ret = 0; 3397 struct stat st_buf; 3398 3399 if (size) 3400 *size = 0; 3401 3402 if (fd == -1) { 3403 printf("%s: Failed to open %s\n", __func__, file_path); 3404 return buf; 3405 } 3406 3407 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3408 close(fd); 3409 printf("%s: File operations failed\n", __func__); 3410 return buf; 3411 } 3412 3413 pkg_size = st_buf.st_size; 3414 if (pkg_size < 0) { 3415 close(fd); 3416 printf("%s: File operations failed\n", __func__); 3417 return buf; 3418 } 3419 3420 buf = (uint8_t *)malloc(pkg_size); 3421 if (!buf) { 3422 close(fd); 3423 printf("%s: Failed to malloc memory\n", __func__); 3424 return buf; 3425 } 3426 3427 ret = read(fd, buf, pkg_size); 3428 if (ret < 0) { 3429 close(fd); 3430 printf("%s: File read operation failed\n", __func__); 3431 close_ddp_package_file(buf); 3432 return NULL; 3433 } 3434 3435 if (size) 3436 *size = pkg_size; 3437 3438 close(fd); 3439 3440 return buf; 3441 } 3442 3443 int 3444 save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size) 3445 { 3446 FILE *fh = fopen(file_path, "wb"); 3447 3448 if (fh == NULL) { 3449 printf("%s: Failed to open %s\n", __func__, file_path); 3450 return -1; 3451 } 3452 3453 if (fwrite(buf, 1, size, fh) != size) { 3454 fclose(fh); 3455 printf("%s: File write operation failed\n", __func__); 3456 return -1; 3457 } 3458 3459 fclose(fh); 3460 3461 return 0; 3462 } 3463 3464 int 3465 close_ddp_package_file(uint8_t *buf) 3466 { 3467 if (buf) { 3468 free((void *)buf); 3469 return 0; 3470 } 3471 3472 return -1; 3473 } 3474 3475 void 3476 port_queue_region_info_display(portid_t port_id, void *buf) 3477 { 3478 #ifdef RTE_LIBRTE_I40E_PMD 3479 uint16_t i, j; 3480 struct rte_pmd_i40e_queue_regions *info = 3481 (struct rte_pmd_i40e_queue_regions *)buf; 3482 static const char *queue_region_info_stats_border = "-------"; 3483 3484 if (!info->queue_region_number) 3485 printf("there is no region has been set before"); 3486 3487 printf("\n %s All queue region info for port=%2d %s", 3488 queue_region_info_stats_border, port_id, 3489 queue_region_info_stats_border); 3490 printf("\n queue_region_number: %-14u \n", 3491 info->queue_region_number); 3492 3493 for (i = 0; i < info->queue_region_number; i++) { 3494 printf("\n region_id: %-14u queue_number: %-14u " 3495 "queue_start_index: %-14u \n", 3496 info->region[i].region_id, 3497 info->region[i].queue_num, 3498 info->region[i].queue_start_index); 3499 3500 printf(" user_priority_num is %-14u :", 3501 info->region[i].user_priority_num); 3502 for (j = 0; j < info->region[i].user_priority_num; j++) 3503 printf(" %-14u ", info->region[i].user_priority[j]); 3504 3505 printf("\n flowtype_num is %-14u :", 3506 info->region[i].flowtype_num); 3507 for (j = 0; j < info->region[i].flowtype_num; j++) 3508 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3509 } 3510 #else 3511 RTE_SET_USED(port_id); 3512 RTE_SET_USED(buf); 3513 #endif 3514 3515 printf("\n\n"); 3516 } 3517