1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * Copyright 2013-2014 6WIND S.A. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <stdarg.h> 36 #include <errno.h> 37 #include <stdio.h> 38 #include <string.h> 39 #include <stdarg.h> 40 #include <stdint.h> 41 #include <inttypes.h> 42 43 #include <sys/queue.h> 44 45 #include <rte_common.h> 46 #include <rte_byteorder.h> 47 #include <rte_debug.h> 48 #include <rte_log.h> 49 #include <rte_memory.h> 50 #include <rte_memcpy.h> 51 #include <rte_memzone.h> 52 #include <rte_launch.h> 53 #include <rte_eal.h> 54 #include <rte_per_lcore.h> 55 #include <rte_lcore.h> 56 #include <rte_atomic.h> 57 #include <rte_branch_prediction.h> 58 #include <rte_mempool.h> 59 #include <rte_mbuf.h> 60 #include <rte_interrupts.h> 61 #include <rte_pci.h> 62 #include <rte_ether.h> 63 #include <rte_ethdev.h> 64 #include <rte_string_fns.h> 65 #include <rte_cycles.h> 66 #include <rte_flow.h> 67 #include <rte_errno.h> 68 #ifdef RTE_LIBRTE_IXGBE_PMD 69 #include <rte_pmd_ixgbe.h> 70 #endif 71 72 #include "testpmd.h" 73 74 static char *flowtype_to_str(uint16_t flow_type); 75 76 static const struct { 77 enum tx_pkt_split split; 78 const char *name; 79 } tx_split_name[] = { 80 { 81 .split = TX_PKT_SPLIT_OFF, 82 .name = "off", 83 }, 84 { 85 .split = TX_PKT_SPLIT_ON, 86 .name = "on", 87 }, 88 { 89 .split = TX_PKT_SPLIT_RND, 90 .name = "rand", 91 }, 92 }; 93 94 struct rss_type_info { 95 char str[32]; 96 uint64_t rss_type; 97 }; 98 99 static const struct rss_type_info rss_type_table[] = { 100 { "ipv4", ETH_RSS_IPV4 }, 101 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 102 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 103 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 104 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 105 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 106 { "ipv6", ETH_RSS_IPV6 }, 107 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 108 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 109 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 110 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 111 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 112 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 113 { "ipv6-ex", ETH_RSS_IPV6_EX }, 114 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 115 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 116 { "port", ETH_RSS_PORT }, 117 { "vxlan", ETH_RSS_VXLAN }, 118 { "geneve", ETH_RSS_GENEVE }, 119 { "nvgre", ETH_RSS_NVGRE }, 120 121 }; 122 123 static void 124 print_ethaddr(const char *name, struct ether_addr *eth_addr) 125 { 126 char buf[ETHER_ADDR_FMT_SIZE]; 127 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 128 printf("%s%s", name, buf); 129 } 130 131 void 132 nic_stats_display(portid_t port_id) 133 { 134 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 135 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 136 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 137 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 138 uint64_t mpps_rx, mpps_tx; 139 struct rte_eth_stats stats; 140 struct rte_port *port = &ports[port_id]; 141 uint8_t i; 142 portid_t pid; 143 144 static const char *nic_stats_border = "########################"; 145 146 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 147 printf("Valid port range is [0"); 148 RTE_ETH_FOREACH_DEV(pid) 149 printf(", %d", pid); 150 printf("]\n"); 151 return; 152 } 153 rte_eth_stats_get(port_id, &stats); 154 printf("\n %s NIC statistics for port %-2d %s\n", 155 nic_stats_border, port_id, nic_stats_border); 156 157 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 158 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 159 "%-"PRIu64"\n", 160 stats.ipackets, stats.imissed, stats.ibytes); 161 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 162 printf(" RX-nombuf: %-10"PRIu64"\n", 163 stats.rx_nombuf); 164 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 165 "%-"PRIu64"\n", 166 stats.opackets, stats.oerrors, stats.obytes); 167 } 168 else { 169 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 170 " RX-bytes: %10"PRIu64"\n", 171 stats.ipackets, stats.ierrors, stats.ibytes); 172 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 173 printf(" RX-nombuf: %10"PRIu64"\n", 174 stats.rx_nombuf); 175 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 176 " TX-bytes: %10"PRIu64"\n", 177 stats.opackets, stats.oerrors, stats.obytes); 178 } 179 180 if (port->rx_queue_stats_mapping_enabled) { 181 printf("\n"); 182 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 183 printf(" Stats reg %2d RX-packets: %10"PRIu64 184 " RX-errors: %10"PRIu64 185 " RX-bytes: %10"PRIu64"\n", 186 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 187 } 188 } 189 if (port->tx_queue_stats_mapping_enabled) { 190 printf("\n"); 191 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 192 printf(" Stats reg %2d TX-packets: %10"PRIu64 193 " TX-bytes: %10"PRIu64"\n", 194 i, stats.q_opackets[i], stats.q_obytes[i]); 195 } 196 } 197 198 diff_cycles = prev_cycles[port_id]; 199 prev_cycles[port_id] = rte_rdtsc(); 200 if (diff_cycles > 0) 201 diff_cycles = prev_cycles[port_id] - diff_cycles; 202 203 diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id]; 204 diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id]; 205 prev_pkts_rx[port_id] = stats.ipackets; 206 prev_pkts_tx[port_id] = stats.opackets; 207 mpps_rx = diff_cycles > 0 ? 208 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 209 mpps_tx = diff_cycles > 0 ? 210 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 211 printf("\n Throughput (since last show)\n"); 212 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 213 mpps_rx, mpps_tx); 214 215 printf(" %s############################%s\n", 216 nic_stats_border, nic_stats_border); 217 } 218 219 void 220 nic_stats_clear(portid_t port_id) 221 { 222 portid_t pid; 223 224 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 225 printf("Valid port range is [0"); 226 RTE_ETH_FOREACH_DEV(pid) 227 printf(", %d", pid); 228 printf("]\n"); 229 return; 230 } 231 rte_eth_stats_reset(port_id); 232 printf("\n NIC statistics for port %d cleared\n", port_id); 233 } 234 235 void 236 nic_xstats_display(portid_t port_id) 237 { 238 struct rte_eth_xstat *xstats; 239 int cnt_xstats, idx_xstat; 240 struct rte_eth_xstat_name *xstats_names; 241 242 printf("###### NIC extended statistics for port %-2d\n", port_id); 243 if (!rte_eth_dev_is_valid_port(port_id)) { 244 printf("Error: Invalid port number %i\n", port_id); 245 return; 246 } 247 248 /* Get count */ 249 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 250 if (cnt_xstats < 0) { 251 printf("Error: Cannot get count of xstats\n"); 252 return; 253 } 254 255 /* Get id-name lookup table */ 256 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 257 if (xstats_names == NULL) { 258 printf("Cannot allocate memory for xstats lookup\n"); 259 return; 260 } 261 if (cnt_xstats != rte_eth_xstats_get_names( 262 port_id, xstats_names, cnt_xstats)) { 263 printf("Error: Cannot get xstats lookup\n"); 264 free(xstats_names); 265 return; 266 } 267 268 /* Get stats themselves */ 269 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 270 if (xstats == NULL) { 271 printf("Cannot allocate memory for xstats\n"); 272 free(xstats_names); 273 return; 274 } 275 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 276 printf("Error: Unable to get xstats\n"); 277 free(xstats_names); 278 free(xstats); 279 return; 280 } 281 282 /* Display xstats */ 283 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) 284 printf("%s: %"PRIu64"\n", 285 xstats_names[idx_xstat].name, 286 xstats[idx_xstat].value); 287 free(xstats_names); 288 free(xstats); 289 } 290 291 void 292 nic_xstats_clear(portid_t port_id) 293 { 294 rte_eth_xstats_reset(port_id); 295 } 296 297 void 298 nic_stats_mapping_display(portid_t port_id) 299 { 300 struct rte_port *port = &ports[port_id]; 301 uint16_t i; 302 portid_t pid; 303 304 static const char *nic_stats_mapping_border = "########################"; 305 306 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 307 printf("Valid port range is [0"); 308 RTE_ETH_FOREACH_DEV(pid) 309 printf(", %d", pid); 310 printf("]\n"); 311 return; 312 } 313 314 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 315 printf("Port id %d - either does not support queue statistic mapping or" 316 " no queue statistic mapping set\n", port_id); 317 return; 318 } 319 320 printf("\n %s NIC statistics mapping for port %-2d %s\n", 321 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 322 323 if (port->rx_queue_stats_mapping_enabled) { 324 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 325 if (rx_queue_stats_mappings[i].port_id == port_id) { 326 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 327 rx_queue_stats_mappings[i].queue_id, 328 rx_queue_stats_mappings[i].stats_counter_id); 329 } 330 } 331 printf("\n"); 332 } 333 334 335 if (port->tx_queue_stats_mapping_enabled) { 336 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 337 if (tx_queue_stats_mappings[i].port_id == port_id) { 338 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 339 tx_queue_stats_mappings[i].queue_id, 340 tx_queue_stats_mappings[i].stats_counter_id); 341 } 342 } 343 } 344 345 printf(" %s####################################%s\n", 346 nic_stats_mapping_border, nic_stats_mapping_border); 347 } 348 349 void 350 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 351 { 352 struct rte_eth_rxq_info qinfo; 353 int32_t rc; 354 static const char *info_border = "*********************"; 355 356 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 357 if (rc != 0) { 358 printf("Failed to retrieve information for port: %hhu, " 359 "RX queue: %hu\nerror desc: %s(%d)\n", 360 port_id, queue_id, strerror(-rc), rc); 361 return; 362 } 363 364 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 365 info_border, port_id, queue_id, info_border); 366 367 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 368 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 369 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 370 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 371 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 372 printf("\nRX drop packets: %s", 373 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 374 printf("\nRX deferred start: %s", 375 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 376 printf("\nRX scattered packets: %s", 377 (qinfo.scattered_rx != 0) ? "on" : "off"); 378 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 379 printf("\n"); 380 } 381 382 void 383 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 384 { 385 struct rte_eth_txq_info qinfo; 386 int32_t rc; 387 static const char *info_border = "*********************"; 388 389 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 390 if (rc != 0) { 391 printf("Failed to retrieve information for port: %hhu, " 392 "TX queue: %hu\nerror desc: %s(%d)\n", 393 port_id, queue_id, strerror(-rc), rc); 394 return; 395 } 396 397 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 398 info_border, port_id, queue_id, info_border); 399 400 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 401 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 402 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 403 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 404 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 405 printf("\nTX flags: %#x", qinfo.conf.txq_flags); 406 printf("\nTX deferred start: %s", 407 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 408 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 409 printf("\n"); 410 } 411 412 void 413 port_infos_display(portid_t port_id) 414 { 415 struct rte_port *port; 416 struct ether_addr mac_addr; 417 struct rte_eth_link link; 418 struct rte_eth_dev_info dev_info; 419 int vlan_offload; 420 struct rte_mempool * mp; 421 static const char *info_border = "*********************"; 422 portid_t pid; 423 uint16_t mtu; 424 425 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 426 printf("Valid port range is [0"); 427 RTE_ETH_FOREACH_DEV(pid) 428 printf(", %d", pid); 429 printf("]\n"); 430 return; 431 } 432 port = &ports[port_id]; 433 rte_eth_link_get_nowait(port_id, &link); 434 memset(&dev_info, 0, sizeof(dev_info)); 435 rte_eth_dev_info_get(port_id, &dev_info); 436 printf("\n%s Infos for port %-2d %s\n", 437 info_border, port_id, info_border); 438 rte_eth_macaddr_get(port_id, &mac_addr); 439 print_ethaddr("MAC address: ", &mac_addr); 440 printf("\nDriver name: %s", dev_info.driver_name); 441 printf("\nConnect to socket: %u", port->socket_id); 442 443 if (port_numa[port_id] != NUMA_NO_CONFIG) { 444 mp = mbuf_pool_find(port_numa[port_id]); 445 if (mp) 446 printf("\nmemory allocation on the socket: %d", 447 port_numa[port_id]); 448 } else 449 printf("\nmemory allocation on the socket: %u",port->socket_id); 450 451 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 452 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 453 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 454 ("full-duplex") : ("half-duplex")); 455 456 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 457 printf("MTU: %u\n", mtu); 458 459 printf("Promiscuous mode: %s\n", 460 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 461 printf("Allmulticast mode: %s\n", 462 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 463 printf("Maximum number of MAC addresses: %u\n", 464 (unsigned int)(port->dev_info.max_mac_addrs)); 465 printf("Maximum number of MAC addresses of hash filtering: %u\n", 466 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 467 468 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 469 if (vlan_offload >= 0){ 470 printf("VLAN offload: \n"); 471 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 472 printf(" strip on \n"); 473 else 474 printf(" strip off \n"); 475 476 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 477 printf(" filter on \n"); 478 else 479 printf(" filter off \n"); 480 481 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 482 printf(" qinq(extend) on \n"); 483 else 484 printf(" qinq(extend) off \n"); 485 } 486 487 if (dev_info.hash_key_size > 0) 488 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 489 if (dev_info.reta_size > 0) 490 printf("Redirection table size: %u\n", dev_info.reta_size); 491 if (!dev_info.flow_type_rss_offloads) 492 printf("No flow type is supported.\n"); 493 else { 494 uint16_t i; 495 char *p; 496 497 printf("Supported flow types:\n"); 498 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; 499 i++) { 500 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 501 continue; 502 p = flowtype_to_str(i); 503 printf(" %s\n", (p ? p : "unknown")); 504 } 505 } 506 507 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 508 printf("Max possible number of RXDs per queue: %hu\n", 509 dev_info.rx_desc_lim.nb_max); 510 printf("Min possible number of RXDs per queue: %hu\n", 511 dev_info.rx_desc_lim.nb_min); 512 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 513 514 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 515 printf("Max possible number of TXDs per queue: %hu\n", 516 dev_info.tx_desc_lim.nb_max); 517 printf("Min possible number of TXDs per queue: %hu\n", 518 dev_info.tx_desc_lim.nb_min); 519 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 520 } 521 522 void 523 port_offload_cap_display(portid_t port_id) 524 { 525 struct rte_eth_dev *dev; 526 struct rte_eth_dev_info dev_info; 527 static const char *info_border = "************"; 528 529 if (port_id_is_invalid(port_id, ENABLED_WARN)) 530 return; 531 532 dev = &rte_eth_devices[port_id]; 533 rte_eth_dev_info_get(port_id, &dev_info); 534 535 printf("\n%s Port %d supported offload features: %s\n", 536 info_border, port_id, info_border); 537 538 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 539 printf("VLAN stripped: "); 540 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 541 printf("on\n"); 542 else 543 printf("off\n"); 544 } 545 546 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 547 printf("Double VLANs stripped: "); 548 if (dev->data->dev_conf.rxmode.hw_vlan_extend) 549 printf("on\n"); 550 else 551 printf("off\n"); 552 } 553 554 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 555 printf("RX IPv4 checksum: "); 556 if (dev->data->dev_conf.rxmode.hw_ip_checksum) 557 printf("on\n"); 558 else 559 printf("off\n"); 560 } 561 562 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 563 printf("RX UDP checksum: "); 564 if (dev->data->dev_conf.rxmode.hw_ip_checksum) 565 printf("on\n"); 566 else 567 printf("off\n"); 568 } 569 570 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 571 printf("RX TCP checksum: "); 572 if (dev->data->dev_conf.rxmode.hw_ip_checksum) 573 printf("on\n"); 574 else 575 printf("off\n"); 576 } 577 578 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 579 printf("RX Outer IPv4 checksum: on"); 580 581 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 582 printf("Large receive offload: "); 583 if (dev->data->dev_conf.rxmode.enable_lro) 584 printf("on\n"); 585 else 586 printf("off\n"); 587 } 588 589 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 590 printf("VLAN insert: "); 591 if (ports[port_id].tx_ol_flags & 592 TESTPMD_TX_OFFLOAD_INSERT_VLAN) 593 printf("on\n"); 594 else 595 printf("off\n"); 596 } 597 598 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 599 printf("Double VLANs insert: "); 600 if (ports[port_id].tx_ol_flags & 601 TESTPMD_TX_OFFLOAD_INSERT_QINQ) 602 printf("on\n"); 603 else 604 printf("off\n"); 605 } 606 607 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 608 printf("TX IPv4 checksum: "); 609 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) 610 printf("on\n"); 611 else 612 printf("off\n"); 613 } 614 615 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 616 printf("TX UDP checksum: "); 617 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) 618 printf("on\n"); 619 else 620 printf("off\n"); 621 } 622 623 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 624 printf("TX TCP checksum: "); 625 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) 626 printf("on\n"); 627 else 628 printf("off\n"); 629 } 630 631 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 632 printf("TX SCTP checksum: "); 633 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) 634 printf("on\n"); 635 else 636 printf("off\n"); 637 } 638 639 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 640 printf("TX Outer IPv4 checksum: "); 641 if (ports[port_id].tx_ol_flags & 642 TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) 643 printf("on\n"); 644 else 645 printf("off\n"); 646 } 647 648 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 649 printf("TX TCP segmentation: "); 650 if (ports[port_id].tso_segsz != 0) 651 printf("on\n"); 652 else 653 printf("off\n"); 654 } 655 656 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 657 printf("TX UDP segmentation: "); 658 if (ports[port_id].tso_segsz != 0) 659 printf("on\n"); 660 else 661 printf("off\n"); 662 } 663 664 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 665 printf("TSO for VXLAN tunnel packet: "); 666 if (ports[port_id].tunnel_tso_segsz) 667 printf("on\n"); 668 else 669 printf("off\n"); 670 } 671 672 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 673 printf("TSO for GRE tunnel packet: "); 674 if (ports[port_id].tunnel_tso_segsz) 675 printf("on\n"); 676 else 677 printf("off\n"); 678 } 679 680 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 681 printf("TSO for IPIP tunnel packet: "); 682 if (ports[port_id].tunnel_tso_segsz) 683 printf("on\n"); 684 else 685 printf("off\n"); 686 } 687 688 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 689 printf("TSO for GENEVE tunnel packet: "); 690 if (ports[port_id].tunnel_tso_segsz) 691 printf("on\n"); 692 else 693 printf("off\n"); 694 } 695 696 } 697 698 int 699 port_id_is_invalid(portid_t port_id, enum print_warning warning) 700 { 701 if (port_id == (portid_t)RTE_PORT_ALL) 702 return 0; 703 704 if (rte_eth_dev_is_valid_port(port_id)) 705 return 0; 706 707 if (warning == ENABLED_WARN) 708 printf("Invalid port %d\n", port_id); 709 710 return 1; 711 } 712 713 static int 714 vlan_id_is_invalid(uint16_t vlan_id) 715 { 716 if (vlan_id < 4096) 717 return 0; 718 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 719 return 1; 720 } 721 722 static int 723 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 724 { 725 uint64_t pci_len; 726 727 if (reg_off & 0x3) { 728 printf("Port register offset 0x%X not aligned on a 4-byte " 729 "boundary\n", 730 (unsigned)reg_off); 731 return 1; 732 } 733 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 734 if (reg_off >= pci_len) { 735 printf("Port %d: register offset %u (0x%X) out of port PCI " 736 "resource (length=%"PRIu64")\n", 737 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 738 return 1; 739 } 740 return 0; 741 } 742 743 static int 744 reg_bit_pos_is_invalid(uint8_t bit_pos) 745 { 746 if (bit_pos <= 31) 747 return 0; 748 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 749 return 1; 750 } 751 752 #define display_port_and_reg_off(port_id, reg_off) \ 753 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 754 755 static inline void 756 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 757 { 758 display_port_and_reg_off(port_id, (unsigned)reg_off); 759 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 760 } 761 762 void 763 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 764 { 765 uint32_t reg_v; 766 767 768 if (port_id_is_invalid(port_id, ENABLED_WARN)) 769 return; 770 if (port_reg_off_is_invalid(port_id, reg_off)) 771 return; 772 if (reg_bit_pos_is_invalid(bit_x)) 773 return; 774 reg_v = port_id_pci_reg_read(port_id, reg_off); 775 display_port_and_reg_off(port_id, (unsigned)reg_off); 776 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 777 } 778 779 void 780 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 781 uint8_t bit1_pos, uint8_t bit2_pos) 782 { 783 uint32_t reg_v; 784 uint8_t l_bit; 785 uint8_t h_bit; 786 787 if (port_id_is_invalid(port_id, ENABLED_WARN)) 788 return; 789 if (port_reg_off_is_invalid(port_id, reg_off)) 790 return; 791 if (reg_bit_pos_is_invalid(bit1_pos)) 792 return; 793 if (reg_bit_pos_is_invalid(bit2_pos)) 794 return; 795 if (bit1_pos > bit2_pos) 796 l_bit = bit2_pos, h_bit = bit1_pos; 797 else 798 l_bit = bit1_pos, h_bit = bit2_pos; 799 800 reg_v = port_id_pci_reg_read(port_id, reg_off); 801 reg_v >>= l_bit; 802 if (h_bit < 31) 803 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 804 display_port_and_reg_off(port_id, (unsigned)reg_off); 805 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 806 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 807 } 808 809 void 810 port_reg_display(portid_t port_id, uint32_t reg_off) 811 { 812 uint32_t reg_v; 813 814 if (port_id_is_invalid(port_id, ENABLED_WARN)) 815 return; 816 if (port_reg_off_is_invalid(port_id, reg_off)) 817 return; 818 reg_v = port_id_pci_reg_read(port_id, reg_off); 819 display_port_reg_value(port_id, reg_off, reg_v); 820 } 821 822 void 823 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 824 uint8_t bit_v) 825 { 826 uint32_t reg_v; 827 828 if (port_id_is_invalid(port_id, ENABLED_WARN)) 829 return; 830 if (port_reg_off_is_invalid(port_id, reg_off)) 831 return; 832 if (reg_bit_pos_is_invalid(bit_pos)) 833 return; 834 if (bit_v > 1) { 835 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 836 return; 837 } 838 reg_v = port_id_pci_reg_read(port_id, reg_off); 839 if (bit_v == 0) 840 reg_v &= ~(1 << bit_pos); 841 else 842 reg_v |= (1 << bit_pos); 843 port_id_pci_reg_write(port_id, reg_off, reg_v); 844 display_port_reg_value(port_id, reg_off, reg_v); 845 } 846 847 void 848 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 849 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 850 { 851 uint32_t max_v; 852 uint32_t reg_v; 853 uint8_t l_bit; 854 uint8_t h_bit; 855 856 if (port_id_is_invalid(port_id, ENABLED_WARN)) 857 return; 858 if (port_reg_off_is_invalid(port_id, reg_off)) 859 return; 860 if (reg_bit_pos_is_invalid(bit1_pos)) 861 return; 862 if (reg_bit_pos_is_invalid(bit2_pos)) 863 return; 864 if (bit1_pos > bit2_pos) 865 l_bit = bit2_pos, h_bit = bit1_pos; 866 else 867 l_bit = bit1_pos, h_bit = bit2_pos; 868 869 if ((h_bit - l_bit) < 31) 870 max_v = (1 << (h_bit - l_bit + 1)) - 1; 871 else 872 max_v = 0xFFFFFFFF; 873 874 if (value > max_v) { 875 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 876 (unsigned)value, (unsigned)value, 877 (unsigned)max_v, (unsigned)max_v); 878 return; 879 } 880 reg_v = port_id_pci_reg_read(port_id, reg_off); 881 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 882 reg_v |= (value << l_bit); /* Set changed bits */ 883 port_id_pci_reg_write(port_id, reg_off, reg_v); 884 display_port_reg_value(port_id, reg_off, reg_v); 885 } 886 887 void 888 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 889 { 890 if (port_id_is_invalid(port_id, ENABLED_WARN)) 891 return; 892 if (port_reg_off_is_invalid(port_id, reg_off)) 893 return; 894 port_id_pci_reg_write(port_id, reg_off, reg_v); 895 display_port_reg_value(port_id, reg_off, reg_v); 896 } 897 898 void 899 port_mtu_set(portid_t port_id, uint16_t mtu) 900 { 901 int diag; 902 903 if (port_id_is_invalid(port_id, ENABLED_WARN)) 904 return; 905 diag = rte_eth_dev_set_mtu(port_id, mtu); 906 if (diag == 0) 907 return; 908 printf("Set MTU failed. diag=%d\n", diag); 909 } 910 911 /* Generic flow management functions. */ 912 913 /** Generate flow_item[] entry. */ 914 #define MK_FLOW_ITEM(t, s) \ 915 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 916 .name = # t, \ 917 .size = s, \ 918 } 919 920 /** Information about known flow pattern items. */ 921 static const struct { 922 const char *name; 923 size_t size; 924 } flow_item[] = { 925 MK_FLOW_ITEM(END, 0), 926 MK_FLOW_ITEM(VOID, 0), 927 MK_FLOW_ITEM(INVERT, 0), 928 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 929 MK_FLOW_ITEM(PF, 0), 930 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 931 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)), 932 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */ 933 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 934 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 935 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 936 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 937 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 938 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 939 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 940 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 941 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 942 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 943 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 944 }; 945 946 /** Compute storage space needed by item specification. */ 947 static void 948 flow_item_spec_size(const struct rte_flow_item *item, 949 size_t *size, size_t *pad) 950 { 951 if (!item->spec) 952 goto empty; 953 switch (item->type) { 954 union { 955 const struct rte_flow_item_raw *raw; 956 } spec; 957 958 case RTE_FLOW_ITEM_TYPE_RAW: 959 spec.raw = item->spec; 960 *size = offsetof(struct rte_flow_item_raw, pattern) + 961 spec.raw->length * sizeof(*spec.raw->pattern); 962 break; 963 default: 964 empty: 965 *size = 0; 966 break; 967 } 968 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 969 } 970 971 /** Generate flow_action[] entry. */ 972 #define MK_FLOW_ACTION(t, s) \ 973 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 974 .name = # t, \ 975 .size = s, \ 976 } 977 978 /** Information about known flow actions. */ 979 static const struct { 980 const char *name; 981 size_t size; 982 } flow_action[] = { 983 MK_FLOW_ACTION(END, 0), 984 MK_FLOW_ACTION(VOID, 0), 985 MK_FLOW_ACTION(PASSTHRU, 0), 986 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 987 MK_FLOW_ACTION(FLAG, 0), 988 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 989 MK_FLOW_ACTION(DROP, 0), 990 MK_FLOW_ACTION(COUNT, 0), 991 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)), 992 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ 993 MK_FLOW_ACTION(PF, 0), 994 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 995 }; 996 997 /** Compute storage space needed by action configuration. */ 998 static void 999 flow_action_conf_size(const struct rte_flow_action *action, 1000 size_t *size, size_t *pad) 1001 { 1002 if (!action->conf) 1003 goto empty; 1004 switch (action->type) { 1005 union { 1006 const struct rte_flow_action_rss *rss; 1007 } conf; 1008 1009 case RTE_FLOW_ACTION_TYPE_RSS: 1010 conf.rss = action->conf; 1011 *size = offsetof(struct rte_flow_action_rss, queue) + 1012 conf.rss->num * sizeof(*conf.rss->queue); 1013 break; 1014 default: 1015 empty: 1016 *size = 0; 1017 break; 1018 } 1019 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1020 } 1021 1022 /** Generate a port_flow entry from attributes/pattern/actions. */ 1023 static struct port_flow * 1024 port_flow_new(const struct rte_flow_attr *attr, 1025 const struct rte_flow_item *pattern, 1026 const struct rte_flow_action *actions) 1027 { 1028 const struct rte_flow_item *item; 1029 const struct rte_flow_action *action; 1030 struct port_flow *pf = NULL; 1031 size_t tmp; 1032 size_t pad; 1033 size_t off1 = 0; 1034 size_t off2 = 0; 1035 int err = ENOTSUP; 1036 1037 store: 1038 item = pattern; 1039 if (pf) 1040 pf->pattern = (void *)&pf->data[off1]; 1041 do { 1042 struct rte_flow_item *dst = NULL; 1043 1044 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 1045 !flow_item[item->type].name) 1046 goto notsup; 1047 if (pf) 1048 dst = memcpy(pf->data + off1, item, sizeof(*item)); 1049 off1 += sizeof(*item); 1050 flow_item_spec_size(item, &tmp, &pad); 1051 if (item->spec) { 1052 if (pf) 1053 dst->spec = memcpy(pf->data + off2, 1054 item->spec, tmp); 1055 off2 += tmp + pad; 1056 } 1057 if (item->last) { 1058 if (pf) 1059 dst->last = memcpy(pf->data + off2, 1060 item->last, tmp); 1061 off2 += tmp + pad; 1062 } 1063 if (item->mask) { 1064 if (pf) 1065 dst->mask = memcpy(pf->data + off2, 1066 item->mask, tmp); 1067 off2 += tmp + pad; 1068 } 1069 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1070 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 1071 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1072 action = actions; 1073 if (pf) 1074 pf->actions = (void *)&pf->data[off1]; 1075 do { 1076 struct rte_flow_action *dst = NULL; 1077 1078 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1079 !flow_action[action->type].name) 1080 goto notsup; 1081 if (pf) 1082 dst = memcpy(pf->data + off1, action, sizeof(*action)); 1083 off1 += sizeof(*action); 1084 flow_action_conf_size(action, &tmp, &pad); 1085 if (action->conf) { 1086 if (pf) 1087 dst->conf = memcpy(pf->data + off2, 1088 action->conf, tmp); 1089 off2 += tmp + pad; 1090 } 1091 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1092 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 1093 if (pf != NULL) 1094 return pf; 1095 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1096 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 1097 pf = calloc(1, tmp + off1 + off2); 1098 if (pf == NULL) 1099 err = errno; 1100 else { 1101 *pf = (const struct port_flow){ 1102 .size = tmp + off1 + off2, 1103 .attr = *attr, 1104 }; 1105 tmp -= offsetof(struct port_flow, data); 1106 off2 = tmp + off1; 1107 off1 = tmp; 1108 goto store; 1109 } 1110 notsup: 1111 rte_errno = err; 1112 return NULL; 1113 } 1114 1115 /** Print a message out of a flow error. */ 1116 static int 1117 port_flow_complain(struct rte_flow_error *error) 1118 { 1119 static const char *const errstrlist[] = { 1120 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1121 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1122 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1123 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1124 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1125 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1126 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1127 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1128 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1129 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1130 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1131 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1132 }; 1133 const char *errstr; 1134 char buf[32]; 1135 int err = rte_errno; 1136 1137 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1138 !errstrlist[error->type]) 1139 errstr = "unknown type"; 1140 else 1141 errstr = errstrlist[error->type]; 1142 printf("Caught error type %d (%s): %s%s\n", 1143 error->type, errstr, 1144 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1145 error->cause), buf) : "", 1146 error->message ? error->message : "(no stated reason)"); 1147 return -err; 1148 } 1149 1150 /** Validate flow rule. */ 1151 int 1152 port_flow_validate(portid_t port_id, 1153 const struct rte_flow_attr *attr, 1154 const struct rte_flow_item *pattern, 1155 const struct rte_flow_action *actions) 1156 { 1157 struct rte_flow_error error; 1158 1159 /* Poisoning to make sure PMDs update it in case of error. */ 1160 memset(&error, 0x11, sizeof(error)); 1161 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1162 return port_flow_complain(&error); 1163 printf("Flow rule validated\n"); 1164 return 0; 1165 } 1166 1167 /** Create flow rule. */ 1168 int 1169 port_flow_create(portid_t port_id, 1170 const struct rte_flow_attr *attr, 1171 const struct rte_flow_item *pattern, 1172 const struct rte_flow_action *actions) 1173 { 1174 struct rte_flow *flow; 1175 struct rte_port *port; 1176 struct port_flow *pf; 1177 uint32_t id; 1178 struct rte_flow_error error; 1179 1180 /* Poisoning to make sure PMDs update it in case of error. */ 1181 memset(&error, 0x22, sizeof(error)); 1182 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1183 if (!flow) 1184 return port_flow_complain(&error); 1185 port = &ports[port_id]; 1186 if (port->flow_list) { 1187 if (port->flow_list->id == UINT32_MAX) { 1188 printf("Highest rule ID is already assigned, delete" 1189 " it first"); 1190 rte_flow_destroy(port_id, flow, NULL); 1191 return -ENOMEM; 1192 } 1193 id = port->flow_list->id + 1; 1194 } else 1195 id = 0; 1196 pf = port_flow_new(attr, pattern, actions); 1197 if (!pf) { 1198 int err = rte_errno; 1199 1200 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1201 rte_flow_destroy(port_id, flow, NULL); 1202 return -err; 1203 } 1204 pf->next = port->flow_list; 1205 pf->id = id; 1206 pf->flow = flow; 1207 port->flow_list = pf; 1208 printf("Flow rule #%u created\n", pf->id); 1209 return 0; 1210 } 1211 1212 /** Destroy a number of flow rules. */ 1213 int 1214 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1215 { 1216 struct rte_port *port; 1217 struct port_flow **tmp; 1218 uint32_t c = 0; 1219 int ret = 0; 1220 1221 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1222 port_id == (portid_t)RTE_PORT_ALL) 1223 return -EINVAL; 1224 port = &ports[port_id]; 1225 tmp = &port->flow_list; 1226 while (*tmp) { 1227 uint32_t i; 1228 1229 for (i = 0; i != n; ++i) { 1230 struct rte_flow_error error; 1231 struct port_flow *pf = *tmp; 1232 1233 if (rule[i] != pf->id) 1234 continue; 1235 /* 1236 * Poisoning to make sure PMDs update it in case 1237 * of error. 1238 */ 1239 memset(&error, 0x33, sizeof(error)); 1240 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1241 ret = port_flow_complain(&error); 1242 continue; 1243 } 1244 printf("Flow rule #%u destroyed\n", pf->id); 1245 *tmp = pf->next; 1246 free(pf); 1247 break; 1248 } 1249 if (i == n) 1250 tmp = &(*tmp)->next; 1251 ++c; 1252 } 1253 return ret; 1254 } 1255 1256 /** Remove all flow rules. */ 1257 int 1258 port_flow_flush(portid_t port_id) 1259 { 1260 struct rte_flow_error error; 1261 struct rte_port *port; 1262 int ret = 0; 1263 1264 /* Poisoning to make sure PMDs update it in case of error. */ 1265 memset(&error, 0x44, sizeof(error)); 1266 if (rte_flow_flush(port_id, &error)) { 1267 ret = port_flow_complain(&error); 1268 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1269 port_id == (portid_t)RTE_PORT_ALL) 1270 return ret; 1271 } 1272 port = &ports[port_id]; 1273 while (port->flow_list) { 1274 struct port_flow *pf = port->flow_list->next; 1275 1276 free(port->flow_list); 1277 port->flow_list = pf; 1278 } 1279 return ret; 1280 } 1281 1282 /** Query a flow rule. */ 1283 int 1284 port_flow_query(portid_t port_id, uint32_t rule, 1285 enum rte_flow_action_type action) 1286 { 1287 struct rte_flow_error error; 1288 struct rte_port *port; 1289 struct port_flow *pf; 1290 const char *name; 1291 union { 1292 struct rte_flow_query_count count; 1293 } query; 1294 1295 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1296 port_id == (portid_t)RTE_PORT_ALL) 1297 return -EINVAL; 1298 port = &ports[port_id]; 1299 for (pf = port->flow_list; pf; pf = pf->next) 1300 if (pf->id == rule) 1301 break; 1302 if (!pf) { 1303 printf("Flow rule #%u not found\n", rule); 1304 return -ENOENT; 1305 } 1306 if ((unsigned int)action >= RTE_DIM(flow_action) || 1307 !flow_action[action].name) 1308 name = "unknown"; 1309 else 1310 name = flow_action[action].name; 1311 switch (action) { 1312 case RTE_FLOW_ACTION_TYPE_COUNT: 1313 break; 1314 default: 1315 printf("Cannot query action type %d (%s)\n", action, name); 1316 return -ENOTSUP; 1317 } 1318 /* Poisoning to make sure PMDs update it in case of error. */ 1319 memset(&error, 0x55, sizeof(error)); 1320 memset(&query, 0, sizeof(query)); 1321 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1322 return port_flow_complain(&error); 1323 switch (action) { 1324 case RTE_FLOW_ACTION_TYPE_COUNT: 1325 printf("%s:\n" 1326 " hits_set: %u\n" 1327 " bytes_set: %u\n" 1328 " hits: %" PRIu64 "\n" 1329 " bytes: %" PRIu64 "\n", 1330 name, 1331 query.count.hits_set, 1332 query.count.bytes_set, 1333 query.count.hits, 1334 query.count.bytes); 1335 break; 1336 default: 1337 printf("Cannot display result for action type %d (%s)\n", 1338 action, name); 1339 break; 1340 } 1341 return 0; 1342 } 1343 1344 /** List flow rules. */ 1345 void 1346 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1347 { 1348 struct rte_port *port; 1349 struct port_flow *pf; 1350 struct port_flow *list = NULL; 1351 uint32_t i; 1352 1353 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1354 port_id == (portid_t)RTE_PORT_ALL) 1355 return; 1356 port = &ports[port_id]; 1357 if (!port->flow_list) 1358 return; 1359 /* Sort flows by group, priority and ID. */ 1360 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1361 struct port_flow **tmp; 1362 1363 if (n) { 1364 /* Filter out unwanted groups. */ 1365 for (i = 0; i != n; ++i) 1366 if (pf->attr.group == group[i]) 1367 break; 1368 if (i == n) 1369 continue; 1370 } 1371 tmp = &list; 1372 while (*tmp && 1373 (pf->attr.group > (*tmp)->attr.group || 1374 (pf->attr.group == (*tmp)->attr.group && 1375 pf->attr.priority > (*tmp)->attr.priority) || 1376 (pf->attr.group == (*tmp)->attr.group && 1377 pf->attr.priority == (*tmp)->attr.priority && 1378 pf->id > (*tmp)->id))) 1379 tmp = &(*tmp)->tmp; 1380 pf->tmp = *tmp; 1381 *tmp = pf; 1382 } 1383 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1384 for (pf = list; pf != NULL; pf = pf->tmp) { 1385 const struct rte_flow_item *item = pf->pattern; 1386 const struct rte_flow_action *action = pf->actions; 1387 1388 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t", 1389 pf->id, 1390 pf->attr.group, 1391 pf->attr.priority, 1392 pf->attr.ingress ? 'i' : '-', 1393 pf->attr.egress ? 'e' : '-'); 1394 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1395 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1396 printf("%s ", flow_item[item->type].name); 1397 ++item; 1398 } 1399 printf("=>"); 1400 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1401 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1402 printf(" %s", flow_action[action->type].name); 1403 ++action; 1404 } 1405 printf("\n"); 1406 } 1407 } 1408 1409 /* 1410 * RX/TX ring descriptors display functions. 1411 */ 1412 int 1413 rx_queue_id_is_invalid(queueid_t rxq_id) 1414 { 1415 if (rxq_id < nb_rxq) 1416 return 0; 1417 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1418 return 1; 1419 } 1420 1421 int 1422 tx_queue_id_is_invalid(queueid_t txq_id) 1423 { 1424 if (txq_id < nb_txq) 1425 return 0; 1426 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1427 return 1; 1428 } 1429 1430 static int 1431 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1432 { 1433 if (rxdesc_id < nb_rxd) 1434 return 0; 1435 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1436 rxdesc_id, nb_rxd); 1437 return 1; 1438 } 1439 1440 static int 1441 tx_desc_id_is_invalid(uint16_t txdesc_id) 1442 { 1443 if (txdesc_id < nb_txd) 1444 return 0; 1445 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1446 txdesc_id, nb_txd); 1447 return 1; 1448 } 1449 1450 static const struct rte_memzone * 1451 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) 1452 { 1453 char mz_name[RTE_MEMZONE_NAMESIZE]; 1454 const struct rte_memzone *mz; 1455 1456 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1457 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1458 mz = rte_memzone_lookup(mz_name); 1459 if (mz == NULL) 1460 printf("%s ring memory zoneof (port %d, queue %d) not" 1461 "found (zone name = %s\n", 1462 ring_name, port_id, q_id, mz_name); 1463 return mz; 1464 } 1465 1466 union igb_ring_dword { 1467 uint64_t dword; 1468 struct { 1469 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1470 uint32_t lo; 1471 uint32_t hi; 1472 #else 1473 uint32_t hi; 1474 uint32_t lo; 1475 #endif 1476 } words; 1477 }; 1478 1479 struct igb_ring_desc_32_bytes { 1480 union igb_ring_dword lo_dword; 1481 union igb_ring_dword hi_dword; 1482 union igb_ring_dword resv1; 1483 union igb_ring_dword resv2; 1484 }; 1485 1486 struct igb_ring_desc_16_bytes { 1487 union igb_ring_dword lo_dword; 1488 union igb_ring_dword hi_dword; 1489 }; 1490 1491 static void 1492 ring_rxd_display_dword(union igb_ring_dword dword) 1493 { 1494 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1495 (unsigned)dword.words.hi); 1496 } 1497 1498 static void 1499 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1500 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1501 uint8_t port_id, 1502 #else 1503 __rte_unused uint8_t port_id, 1504 #endif 1505 uint16_t desc_id) 1506 { 1507 struct igb_ring_desc_16_bytes *ring = 1508 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1509 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1510 struct rte_eth_dev_info dev_info; 1511 1512 memset(&dev_info, 0, sizeof(dev_info)); 1513 rte_eth_dev_info_get(port_id, &dev_info); 1514 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1515 /* 32 bytes RX descriptor, i40e only */ 1516 struct igb_ring_desc_32_bytes *ring = 1517 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1518 ring[desc_id].lo_dword.dword = 1519 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1520 ring_rxd_display_dword(ring[desc_id].lo_dword); 1521 ring[desc_id].hi_dword.dword = 1522 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1523 ring_rxd_display_dword(ring[desc_id].hi_dword); 1524 ring[desc_id].resv1.dword = 1525 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1526 ring_rxd_display_dword(ring[desc_id].resv1); 1527 ring[desc_id].resv2.dword = 1528 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1529 ring_rxd_display_dword(ring[desc_id].resv2); 1530 1531 return; 1532 } 1533 #endif 1534 /* 16 bytes RX descriptor */ 1535 ring[desc_id].lo_dword.dword = 1536 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1537 ring_rxd_display_dword(ring[desc_id].lo_dword); 1538 ring[desc_id].hi_dword.dword = 1539 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1540 ring_rxd_display_dword(ring[desc_id].hi_dword); 1541 } 1542 1543 static void 1544 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1545 { 1546 struct igb_ring_desc_16_bytes *ring; 1547 struct igb_ring_desc_16_bytes txd; 1548 1549 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1550 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1551 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1552 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1553 (unsigned)txd.lo_dword.words.lo, 1554 (unsigned)txd.lo_dword.words.hi, 1555 (unsigned)txd.hi_dword.words.lo, 1556 (unsigned)txd.hi_dword.words.hi); 1557 } 1558 1559 void 1560 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1561 { 1562 const struct rte_memzone *rx_mz; 1563 1564 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1565 return; 1566 if (rx_queue_id_is_invalid(rxq_id)) 1567 return; 1568 if (rx_desc_id_is_invalid(rxd_id)) 1569 return; 1570 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1571 if (rx_mz == NULL) 1572 return; 1573 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1574 } 1575 1576 void 1577 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1578 { 1579 const struct rte_memzone *tx_mz; 1580 1581 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1582 return; 1583 if (tx_queue_id_is_invalid(txq_id)) 1584 return; 1585 if (tx_desc_id_is_invalid(txd_id)) 1586 return; 1587 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1588 if (tx_mz == NULL) 1589 return; 1590 ring_tx_descriptor_display(tx_mz, txd_id); 1591 } 1592 1593 void 1594 fwd_lcores_config_display(void) 1595 { 1596 lcoreid_t lc_id; 1597 1598 printf("List of forwarding lcores:"); 1599 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1600 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1601 printf("\n"); 1602 } 1603 void 1604 rxtx_config_display(void) 1605 { 1606 printf(" %s packet forwarding%s - CRC stripping %s - " 1607 "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, 1608 retry_enabled == 0 ? "" : " with retry", 1609 rx_mode.hw_strip_crc ? "enabled" : "disabled", 1610 nb_pkt_per_burst); 1611 1612 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1613 printf(" packet len=%u - nb packet segments=%d\n", 1614 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1615 1616 struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; 1617 struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; 1618 1619 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1620 nb_fwd_lcores, nb_fwd_ports); 1621 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 1622 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 1623 printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 1624 rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, 1625 rx_conf->rx_thresh.wthresh); 1626 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 1627 nb_txq, nb_txd, tx_conf->tx_free_thresh); 1628 printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 1629 tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, 1630 tx_conf->tx_thresh.wthresh); 1631 printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", 1632 tx_conf->tx_rs_thresh, tx_conf->txq_flags); 1633 } 1634 1635 void 1636 port_rss_reta_info(portid_t port_id, 1637 struct rte_eth_rss_reta_entry64 *reta_conf, 1638 uint16_t nb_entries) 1639 { 1640 uint16_t i, idx, shift; 1641 int ret; 1642 1643 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1644 return; 1645 1646 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1647 if (ret != 0) { 1648 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1649 return; 1650 } 1651 1652 for (i = 0; i < nb_entries; i++) { 1653 idx = i / RTE_RETA_GROUP_SIZE; 1654 shift = i % RTE_RETA_GROUP_SIZE; 1655 if (!(reta_conf[idx].mask & (1ULL << shift))) 1656 continue; 1657 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1658 i, reta_conf[idx].reta[shift]); 1659 } 1660 } 1661 1662 /* 1663 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1664 * key of the port. 1665 */ 1666 void 1667 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1668 { 1669 struct rte_eth_rss_conf rss_conf; 1670 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1671 uint64_t rss_hf; 1672 uint8_t i; 1673 int diag; 1674 struct rte_eth_dev_info dev_info; 1675 uint8_t hash_key_size; 1676 1677 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1678 return; 1679 1680 memset(&dev_info, 0, sizeof(dev_info)); 1681 rte_eth_dev_info_get(port_id, &dev_info); 1682 if (dev_info.hash_key_size > 0 && 1683 dev_info.hash_key_size <= sizeof(rss_key)) 1684 hash_key_size = dev_info.hash_key_size; 1685 else { 1686 printf("dev_info did not provide a valid hash key size\n"); 1687 return; 1688 } 1689 1690 rss_conf.rss_hf = 0; 1691 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1692 if (!strcmp(rss_info, rss_type_table[i].str)) 1693 rss_conf.rss_hf = rss_type_table[i].rss_type; 1694 } 1695 1696 /* Get RSS hash key if asked to display it */ 1697 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1698 rss_conf.rss_key_len = hash_key_size; 1699 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1700 if (diag != 0) { 1701 switch (diag) { 1702 case -ENODEV: 1703 printf("port index %d invalid\n", port_id); 1704 break; 1705 case -ENOTSUP: 1706 printf("operation not supported by device\n"); 1707 break; 1708 default: 1709 printf("operation failed - diag=%d\n", diag); 1710 break; 1711 } 1712 return; 1713 } 1714 rss_hf = rss_conf.rss_hf; 1715 if (rss_hf == 0) { 1716 printf("RSS disabled\n"); 1717 return; 1718 } 1719 printf("RSS functions:\n "); 1720 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1721 if (rss_hf & rss_type_table[i].rss_type) 1722 printf("%s ", rss_type_table[i].str); 1723 } 1724 printf("\n"); 1725 if (!show_rss_key) 1726 return; 1727 printf("RSS key:\n"); 1728 for (i = 0; i < hash_key_size; i++) 1729 printf("%02X", rss_key[i]); 1730 printf("\n"); 1731 } 1732 1733 void 1734 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1735 uint hash_key_len) 1736 { 1737 struct rte_eth_rss_conf rss_conf; 1738 int diag; 1739 unsigned int i; 1740 1741 rss_conf.rss_key = NULL; 1742 rss_conf.rss_key_len = hash_key_len; 1743 rss_conf.rss_hf = 0; 1744 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1745 if (!strcmp(rss_type_table[i].str, rss_type)) 1746 rss_conf.rss_hf = rss_type_table[i].rss_type; 1747 } 1748 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1749 if (diag == 0) { 1750 rss_conf.rss_key = hash_key; 1751 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1752 } 1753 if (diag == 0) 1754 return; 1755 1756 switch (diag) { 1757 case -ENODEV: 1758 printf("port index %d invalid\n", port_id); 1759 break; 1760 case -ENOTSUP: 1761 printf("operation not supported by device\n"); 1762 break; 1763 default: 1764 printf("operation failed - diag=%d\n", diag); 1765 break; 1766 } 1767 } 1768 1769 /* 1770 * Setup forwarding configuration for each logical core. 1771 */ 1772 static void 1773 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1774 { 1775 streamid_t nb_fs_per_lcore; 1776 streamid_t nb_fs; 1777 streamid_t sm_id; 1778 lcoreid_t nb_extra; 1779 lcoreid_t nb_fc; 1780 lcoreid_t nb_lc; 1781 lcoreid_t lc_id; 1782 1783 nb_fs = cfg->nb_fwd_streams; 1784 nb_fc = cfg->nb_fwd_lcores; 1785 if (nb_fs <= nb_fc) { 1786 nb_fs_per_lcore = 1; 1787 nb_extra = 0; 1788 } else { 1789 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1790 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1791 } 1792 1793 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1794 sm_id = 0; 1795 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1796 fwd_lcores[lc_id]->stream_idx = sm_id; 1797 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1798 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1799 } 1800 1801 /* 1802 * Assign extra remaining streams, if any. 1803 */ 1804 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1805 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1806 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1807 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1808 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1809 } 1810 } 1811 1812 static void 1813 simple_fwd_config_setup(void) 1814 { 1815 portid_t i; 1816 portid_t j; 1817 portid_t inc = 2; 1818 1819 if (port_topology == PORT_TOPOLOGY_CHAINED || 1820 port_topology == PORT_TOPOLOGY_LOOP) { 1821 inc = 1; 1822 } else if (nb_fwd_ports % 2) { 1823 printf("\nWarning! Cannot handle an odd number of ports " 1824 "with the current port topology. Configuration " 1825 "must be changed to have an even number of ports, " 1826 "or relaunch application with " 1827 "--port-topology=chained\n\n"); 1828 } 1829 1830 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1831 cur_fwd_config.nb_fwd_streams = 1832 (streamid_t) cur_fwd_config.nb_fwd_ports; 1833 1834 /* reinitialize forwarding streams */ 1835 init_fwd_streams(); 1836 1837 /* 1838 * In the simple forwarding test, the number of forwarding cores 1839 * must be lower or equal to the number of forwarding ports. 1840 */ 1841 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1842 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1843 cur_fwd_config.nb_fwd_lcores = 1844 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1845 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1846 1847 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1848 if (port_topology != PORT_TOPOLOGY_LOOP) 1849 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1850 else 1851 j = i; 1852 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1853 fwd_streams[i]->rx_queue = 0; 1854 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1855 fwd_streams[i]->tx_queue = 0; 1856 fwd_streams[i]->peer_addr = j; 1857 fwd_streams[i]->retry_enabled = retry_enabled; 1858 1859 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1860 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1861 fwd_streams[j]->rx_queue = 0; 1862 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1863 fwd_streams[j]->tx_queue = 0; 1864 fwd_streams[j]->peer_addr = i; 1865 fwd_streams[j]->retry_enabled = retry_enabled; 1866 } 1867 } 1868 } 1869 1870 /** 1871 * For the RSS forwarding test all streams distributed over lcores. Each stream 1872 * being composed of a RX queue to poll on a RX port for input messages, 1873 * associated with a TX queue of a TX port where to send forwarded packets. 1874 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1875 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1876 * following rules: 1877 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1878 * - TxQl = RxQj 1879 */ 1880 static void 1881 rss_fwd_config_setup(void) 1882 { 1883 portid_t rxp; 1884 portid_t txp; 1885 queueid_t rxq; 1886 queueid_t nb_q; 1887 streamid_t sm_id; 1888 1889 nb_q = nb_rxq; 1890 if (nb_q > nb_txq) 1891 nb_q = nb_txq; 1892 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1893 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1894 cur_fwd_config.nb_fwd_streams = 1895 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1896 1897 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1898 cur_fwd_config.nb_fwd_lcores = 1899 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1900 1901 /* reinitialize forwarding streams */ 1902 init_fwd_streams(); 1903 1904 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1905 rxp = 0; rxq = 0; 1906 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1907 struct fwd_stream *fs; 1908 1909 fs = fwd_streams[sm_id]; 1910 1911 if ((rxp & 0x1) == 0) 1912 txp = (portid_t) (rxp + 1); 1913 else 1914 txp = (portid_t) (rxp - 1); 1915 /* 1916 * if we are in loopback, simply send stuff out through the 1917 * ingress port 1918 */ 1919 if (port_topology == PORT_TOPOLOGY_LOOP) 1920 txp = rxp; 1921 1922 fs->rx_port = fwd_ports_ids[rxp]; 1923 fs->rx_queue = rxq; 1924 fs->tx_port = fwd_ports_ids[txp]; 1925 fs->tx_queue = rxq; 1926 fs->peer_addr = fs->tx_port; 1927 fs->retry_enabled = retry_enabled; 1928 rxq = (queueid_t) (rxq + 1); 1929 if (rxq < nb_q) 1930 continue; 1931 /* 1932 * rxq == nb_q 1933 * Restart from RX queue 0 on next RX port 1934 */ 1935 rxq = 0; 1936 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1937 rxp = (portid_t) 1938 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1939 else 1940 rxp = (portid_t) (rxp + 1); 1941 } 1942 } 1943 1944 /** 1945 * For the DCB forwarding test, each core is assigned on each traffic class. 1946 * 1947 * Each core is assigned a multi-stream, each stream being composed of 1948 * a RX queue to poll on a RX port for input messages, associated with 1949 * a TX queue of a TX port where to send forwarded packets. All RX and 1950 * TX queues are mapping to the same traffic class. 1951 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 1952 * the same core 1953 */ 1954 static void 1955 dcb_fwd_config_setup(void) 1956 { 1957 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 1958 portid_t txp, rxp = 0; 1959 queueid_t txq, rxq = 0; 1960 lcoreid_t lc_id; 1961 uint16_t nb_rx_queue, nb_tx_queue; 1962 uint16_t i, j, k, sm_id = 0; 1963 uint8_t tc = 0; 1964 1965 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1966 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1967 cur_fwd_config.nb_fwd_streams = 1968 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1969 1970 /* reinitialize forwarding streams */ 1971 init_fwd_streams(); 1972 sm_id = 0; 1973 txp = 1; 1974 /* get the dcb info on the first RX and TX ports */ 1975 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1976 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1977 1978 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1979 fwd_lcores[lc_id]->stream_nb = 0; 1980 fwd_lcores[lc_id]->stream_idx = sm_id; 1981 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 1982 /* if the nb_queue is zero, means this tc is 1983 * not enabled on the POOL 1984 */ 1985 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 1986 break; 1987 k = fwd_lcores[lc_id]->stream_nb + 1988 fwd_lcores[lc_id]->stream_idx; 1989 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 1990 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 1991 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1992 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 1993 for (j = 0; j < nb_rx_queue; j++) { 1994 struct fwd_stream *fs; 1995 1996 fs = fwd_streams[k + j]; 1997 fs->rx_port = fwd_ports_ids[rxp]; 1998 fs->rx_queue = rxq + j; 1999 fs->tx_port = fwd_ports_ids[txp]; 2000 fs->tx_queue = txq + j % nb_tx_queue; 2001 fs->peer_addr = fs->tx_port; 2002 fs->retry_enabled = retry_enabled; 2003 } 2004 fwd_lcores[lc_id]->stream_nb += 2005 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2006 } 2007 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2008 2009 tc++; 2010 if (tc < rxp_dcb_info.nb_tcs) 2011 continue; 2012 /* Restart from TC 0 on next RX port */ 2013 tc = 0; 2014 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2015 rxp = (portid_t) 2016 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2017 else 2018 rxp++; 2019 if (rxp >= nb_fwd_ports) 2020 return; 2021 /* get the dcb information on next RX and TX ports */ 2022 if ((rxp & 0x1) == 0) 2023 txp = (portid_t) (rxp + 1); 2024 else 2025 txp = (portid_t) (rxp - 1); 2026 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2027 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2028 } 2029 } 2030 2031 static void 2032 icmp_echo_config_setup(void) 2033 { 2034 portid_t rxp; 2035 queueid_t rxq; 2036 lcoreid_t lc_id; 2037 uint16_t sm_id; 2038 2039 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2040 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2041 (nb_txq * nb_fwd_ports); 2042 else 2043 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2044 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2045 cur_fwd_config.nb_fwd_streams = 2046 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2047 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2048 cur_fwd_config.nb_fwd_lcores = 2049 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2050 if (verbose_level > 0) { 2051 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2052 __FUNCTION__, 2053 cur_fwd_config.nb_fwd_lcores, 2054 cur_fwd_config.nb_fwd_ports, 2055 cur_fwd_config.nb_fwd_streams); 2056 } 2057 2058 /* reinitialize forwarding streams */ 2059 init_fwd_streams(); 2060 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2061 rxp = 0; rxq = 0; 2062 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2063 if (verbose_level > 0) 2064 printf(" core=%d: \n", lc_id); 2065 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2066 struct fwd_stream *fs; 2067 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2068 fs->rx_port = fwd_ports_ids[rxp]; 2069 fs->rx_queue = rxq; 2070 fs->tx_port = fs->rx_port; 2071 fs->tx_queue = rxq; 2072 fs->peer_addr = fs->tx_port; 2073 fs->retry_enabled = retry_enabled; 2074 if (verbose_level > 0) 2075 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2076 sm_id, fs->rx_port, fs->rx_queue, 2077 fs->tx_queue); 2078 rxq = (queueid_t) (rxq + 1); 2079 if (rxq == nb_rxq) { 2080 rxq = 0; 2081 rxp = (portid_t) (rxp + 1); 2082 } 2083 } 2084 } 2085 } 2086 2087 void 2088 fwd_config_setup(void) 2089 { 2090 cur_fwd_config.fwd_eng = cur_fwd_eng; 2091 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2092 icmp_echo_config_setup(); 2093 return; 2094 } 2095 if ((nb_rxq > 1) && (nb_txq > 1)){ 2096 if (dcb_config) 2097 dcb_fwd_config_setup(); 2098 else 2099 rss_fwd_config_setup(); 2100 } 2101 else 2102 simple_fwd_config_setup(); 2103 } 2104 2105 void 2106 pkt_fwd_config_display(struct fwd_config *cfg) 2107 { 2108 struct fwd_stream *fs; 2109 lcoreid_t lc_id; 2110 streamid_t sm_id; 2111 2112 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2113 "NUMA support %s, MP over anonymous pages %s\n", 2114 cfg->fwd_eng->fwd_mode_name, 2115 retry_enabled == 0 ? "" : " with retry", 2116 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2117 numa_support == 1 ? "enabled" : "disabled", 2118 mp_anon != 0 ? "enabled" : "disabled"); 2119 2120 if (retry_enabled) 2121 printf("TX retry num: %u, delay between TX retries: %uus\n", 2122 burst_tx_retry_num, burst_tx_delay_time); 2123 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2124 printf("Logical Core %u (socket %u) forwards packets on " 2125 "%d streams:", 2126 fwd_lcores_cpuids[lc_id], 2127 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2128 fwd_lcores[lc_id]->stream_nb); 2129 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2130 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2131 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2132 "P=%d/Q=%d (socket %u) ", 2133 fs->rx_port, fs->rx_queue, 2134 ports[fs->rx_port].socket_id, 2135 fs->tx_port, fs->tx_queue, 2136 ports[fs->tx_port].socket_id); 2137 print_ethaddr("peer=", 2138 &peer_eth_addrs[fs->peer_addr]); 2139 } 2140 printf("\n"); 2141 } 2142 printf("\n"); 2143 } 2144 2145 int 2146 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2147 { 2148 unsigned int i; 2149 unsigned int lcore_cpuid; 2150 int record_now; 2151 2152 record_now = 0; 2153 again: 2154 for (i = 0; i < nb_lc; i++) { 2155 lcore_cpuid = lcorelist[i]; 2156 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2157 printf("lcore %u not enabled\n", lcore_cpuid); 2158 return -1; 2159 } 2160 if (lcore_cpuid == rte_get_master_lcore()) { 2161 printf("lcore %u cannot be masked on for running " 2162 "packet forwarding, which is the master lcore " 2163 "and reserved for command line parsing only\n", 2164 lcore_cpuid); 2165 return -1; 2166 } 2167 if (record_now) 2168 fwd_lcores_cpuids[i] = lcore_cpuid; 2169 } 2170 if (record_now == 0) { 2171 record_now = 1; 2172 goto again; 2173 } 2174 nb_cfg_lcores = (lcoreid_t) nb_lc; 2175 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2176 printf("previous number of forwarding cores %u - changed to " 2177 "number of configured cores %u\n", 2178 (unsigned int) nb_fwd_lcores, nb_lc); 2179 nb_fwd_lcores = (lcoreid_t) nb_lc; 2180 } 2181 2182 return 0; 2183 } 2184 2185 int 2186 set_fwd_lcores_mask(uint64_t lcoremask) 2187 { 2188 unsigned int lcorelist[64]; 2189 unsigned int nb_lc; 2190 unsigned int i; 2191 2192 if (lcoremask == 0) { 2193 printf("Invalid NULL mask of cores\n"); 2194 return -1; 2195 } 2196 nb_lc = 0; 2197 for (i = 0; i < 64; i++) { 2198 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2199 continue; 2200 lcorelist[nb_lc++] = i; 2201 } 2202 return set_fwd_lcores_list(lcorelist, nb_lc); 2203 } 2204 2205 void 2206 set_fwd_lcores_number(uint16_t nb_lc) 2207 { 2208 if (nb_lc > nb_cfg_lcores) { 2209 printf("nb fwd cores %u > %u (max. number of configured " 2210 "lcores) - ignored\n", 2211 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2212 return; 2213 } 2214 nb_fwd_lcores = (lcoreid_t) nb_lc; 2215 printf("Number of forwarding cores set to %u\n", 2216 (unsigned int) nb_fwd_lcores); 2217 } 2218 2219 void 2220 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2221 { 2222 unsigned int i; 2223 portid_t port_id; 2224 int record_now; 2225 2226 record_now = 0; 2227 again: 2228 for (i = 0; i < nb_pt; i++) { 2229 port_id = (portid_t) portlist[i]; 2230 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2231 return; 2232 if (record_now) 2233 fwd_ports_ids[i] = port_id; 2234 } 2235 if (record_now == 0) { 2236 record_now = 1; 2237 goto again; 2238 } 2239 nb_cfg_ports = (portid_t) nb_pt; 2240 if (nb_fwd_ports != (portid_t) nb_pt) { 2241 printf("previous number of forwarding ports %u - changed to " 2242 "number of configured ports %u\n", 2243 (unsigned int) nb_fwd_ports, nb_pt); 2244 nb_fwd_ports = (portid_t) nb_pt; 2245 } 2246 } 2247 2248 void 2249 set_fwd_ports_mask(uint64_t portmask) 2250 { 2251 unsigned int portlist[64]; 2252 unsigned int nb_pt; 2253 unsigned int i; 2254 2255 if (portmask == 0) { 2256 printf("Invalid NULL mask of ports\n"); 2257 return; 2258 } 2259 nb_pt = 0; 2260 RTE_ETH_FOREACH_DEV(i) { 2261 if (! ((uint64_t)(1ULL << i) & portmask)) 2262 continue; 2263 portlist[nb_pt++] = i; 2264 } 2265 set_fwd_ports_list(portlist, nb_pt); 2266 } 2267 2268 void 2269 set_fwd_ports_number(uint16_t nb_pt) 2270 { 2271 if (nb_pt > nb_cfg_ports) { 2272 printf("nb fwd ports %u > %u (number of configured " 2273 "ports) - ignored\n", 2274 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2275 return; 2276 } 2277 nb_fwd_ports = (portid_t) nb_pt; 2278 printf("Number of forwarding ports set to %u\n", 2279 (unsigned int) nb_fwd_ports); 2280 } 2281 2282 int 2283 port_is_forwarding(portid_t port_id) 2284 { 2285 unsigned int i; 2286 2287 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2288 return -1; 2289 2290 for (i = 0; i < nb_fwd_ports; i++) { 2291 if (fwd_ports_ids[i] == port_id) 2292 return 1; 2293 } 2294 2295 return 0; 2296 } 2297 2298 void 2299 set_nb_pkt_per_burst(uint16_t nb) 2300 { 2301 if (nb > MAX_PKT_BURST) { 2302 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2303 " ignored\n", 2304 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2305 return; 2306 } 2307 nb_pkt_per_burst = nb; 2308 printf("Number of packets per burst set to %u\n", 2309 (unsigned int) nb_pkt_per_burst); 2310 } 2311 2312 static const char * 2313 tx_split_get_name(enum tx_pkt_split split) 2314 { 2315 uint32_t i; 2316 2317 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2318 if (tx_split_name[i].split == split) 2319 return tx_split_name[i].name; 2320 } 2321 return NULL; 2322 } 2323 2324 void 2325 set_tx_pkt_split(const char *name) 2326 { 2327 uint32_t i; 2328 2329 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2330 if (strcmp(tx_split_name[i].name, name) == 0) { 2331 tx_pkt_split = tx_split_name[i].split; 2332 return; 2333 } 2334 } 2335 printf("unknown value: \"%s\"\n", name); 2336 } 2337 2338 void 2339 show_tx_pkt_segments(void) 2340 { 2341 uint32_t i, n; 2342 const char *split; 2343 2344 n = tx_pkt_nb_segs; 2345 split = tx_split_get_name(tx_pkt_split); 2346 2347 printf("Number of segments: %u\n", n); 2348 printf("Segment sizes: "); 2349 for (i = 0; i != n - 1; i++) 2350 printf("%hu,", tx_pkt_seg_lengths[i]); 2351 printf("%hu\n", tx_pkt_seg_lengths[i]); 2352 printf("Split packet: %s\n", split); 2353 } 2354 2355 void 2356 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2357 { 2358 uint16_t tx_pkt_len; 2359 unsigned i; 2360 2361 if (nb_segs >= (unsigned) nb_txd) { 2362 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2363 nb_segs, (unsigned int) nb_txd); 2364 return; 2365 } 2366 2367 /* 2368 * Check that each segment length is greater or equal than 2369 * the mbuf data sise. 2370 * Check also that the total packet length is greater or equal than the 2371 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2372 */ 2373 tx_pkt_len = 0; 2374 for (i = 0; i < nb_segs; i++) { 2375 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2376 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2377 i, seg_lengths[i], (unsigned) mbuf_data_size); 2378 return; 2379 } 2380 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2381 } 2382 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2383 printf("total packet length=%u < %d - give up\n", 2384 (unsigned) tx_pkt_len, 2385 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2386 return; 2387 } 2388 2389 for (i = 0; i < nb_segs; i++) 2390 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2391 2392 tx_pkt_length = tx_pkt_len; 2393 tx_pkt_nb_segs = (uint8_t) nb_segs; 2394 } 2395 2396 char* 2397 list_pkt_forwarding_modes(void) 2398 { 2399 static char fwd_modes[128] = ""; 2400 const char *separator = "|"; 2401 struct fwd_engine *fwd_eng; 2402 unsigned i = 0; 2403 2404 if (strlen (fwd_modes) == 0) { 2405 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2406 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2407 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2408 strncat(fwd_modes, separator, 2409 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2410 } 2411 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2412 } 2413 2414 return fwd_modes; 2415 } 2416 2417 char* 2418 list_pkt_forwarding_retry_modes(void) 2419 { 2420 static char fwd_modes[128] = ""; 2421 const char *separator = "|"; 2422 struct fwd_engine *fwd_eng; 2423 unsigned i = 0; 2424 2425 if (strlen(fwd_modes) == 0) { 2426 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2427 if (fwd_eng == &rx_only_engine) 2428 continue; 2429 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2430 sizeof(fwd_modes) - 2431 strlen(fwd_modes) - 1); 2432 strncat(fwd_modes, separator, 2433 sizeof(fwd_modes) - 2434 strlen(fwd_modes) - 1); 2435 } 2436 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2437 } 2438 2439 return fwd_modes; 2440 } 2441 2442 void 2443 set_pkt_forwarding_mode(const char *fwd_mode_name) 2444 { 2445 struct fwd_engine *fwd_eng; 2446 unsigned i; 2447 2448 i = 0; 2449 while ((fwd_eng = fwd_engines[i]) != NULL) { 2450 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2451 printf("Set %s packet forwarding mode%s\n", 2452 fwd_mode_name, 2453 retry_enabled == 0 ? "" : " with retry"); 2454 cur_fwd_eng = fwd_eng; 2455 return; 2456 } 2457 i++; 2458 } 2459 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2460 } 2461 2462 void 2463 set_verbose_level(uint16_t vb_level) 2464 { 2465 printf("Change verbose level from %u to %u\n", 2466 (unsigned int) verbose_level, (unsigned int) vb_level); 2467 verbose_level = vb_level; 2468 } 2469 2470 void 2471 vlan_extend_set(portid_t port_id, int on) 2472 { 2473 int diag; 2474 int vlan_offload; 2475 2476 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2477 return; 2478 2479 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2480 2481 if (on) 2482 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2483 else 2484 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2485 2486 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2487 if (diag < 0) 2488 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2489 "diag=%d\n", port_id, on, diag); 2490 } 2491 2492 void 2493 rx_vlan_strip_set(portid_t port_id, int on) 2494 { 2495 int diag; 2496 int vlan_offload; 2497 2498 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2499 return; 2500 2501 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2502 2503 if (on) 2504 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2505 else 2506 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2507 2508 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2509 if (diag < 0) 2510 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2511 "diag=%d\n", port_id, on, diag); 2512 } 2513 2514 void 2515 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2516 { 2517 int diag; 2518 2519 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2520 return; 2521 2522 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2523 if (diag < 0) 2524 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2525 "diag=%d\n", port_id, queue_id, on, diag); 2526 } 2527 2528 void 2529 rx_vlan_filter_set(portid_t port_id, int on) 2530 { 2531 int diag; 2532 int vlan_offload; 2533 2534 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2535 return; 2536 2537 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2538 2539 if (on) 2540 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2541 else 2542 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2543 2544 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2545 if (diag < 0) 2546 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2547 "diag=%d\n", port_id, on, diag); 2548 } 2549 2550 int 2551 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2552 { 2553 int diag; 2554 2555 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2556 return 1; 2557 if (vlan_id_is_invalid(vlan_id)) 2558 return 1; 2559 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2560 if (diag == 0) 2561 return 0; 2562 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2563 "diag=%d\n", 2564 port_id, vlan_id, on, diag); 2565 return -1; 2566 } 2567 2568 void 2569 rx_vlan_all_filter_set(portid_t port_id, int on) 2570 { 2571 uint16_t vlan_id; 2572 2573 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2574 return; 2575 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2576 if (rx_vft_set(port_id, vlan_id, on)) 2577 break; 2578 } 2579 } 2580 2581 void 2582 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2583 { 2584 int diag; 2585 2586 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2587 return; 2588 2589 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2590 if (diag == 0) 2591 return; 2592 2593 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2594 "diag=%d\n", 2595 port_id, vlan_type, tp_id, diag); 2596 } 2597 2598 void 2599 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2600 { 2601 int vlan_offload; 2602 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2603 return; 2604 if (vlan_id_is_invalid(vlan_id)) 2605 return; 2606 2607 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2608 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2609 printf("Error, as QinQ has been enabled.\n"); 2610 return; 2611 } 2612 2613 tx_vlan_reset(port_id); 2614 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; 2615 ports[port_id].tx_vlan_id = vlan_id; 2616 } 2617 2618 void 2619 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2620 { 2621 int vlan_offload; 2622 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2623 return; 2624 if (vlan_id_is_invalid(vlan_id)) 2625 return; 2626 if (vlan_id_is_invalid(vlan_id_outer)) 2627 return; 2628 2629 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2630 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2631 printf("Error, as QinQ hasn't been enabled.\n"); 2632 return; 2633 } 2634 2635 tx_vlan_reset(port_id); 2636 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; 2637 ports[port_id].tx_vlan_id = vlan_id; 2638 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2639 } 2640 2641 void 2642 tx_vlan_reset(portid_t port_id) 2643 { 2644 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2645 return; 2646 ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | 2647 TESTPMD_TX_OFFLOAD_INSERT_QINQ); 2648 ports[port_id].tx_vlan_id = 0; 2649 ports[port_id].tx_vlan_id_outer = 0; 2650 } 2651 2652 void 2653 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2654 { 2655 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2656 return; 2657 2658 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2659 } 2660 2661 void 2662 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2663 { 2664 uint16_t i; 2665 uint8_t existing_mapping_found = 0; 2666 2667 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2668 return; 2669 2670 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2671 return; 2672 2673 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2674 printf("map_value not in required range 0..%d\n", 2675 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2676 return; 2677 } 2678 2679 if (!is_rx) { /*then tx*/ 2680 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2681 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2682 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2683 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2684 existing_mapping_found = 1; 2685 break; 2686 } 2687 } 2688 if (!existing_mapping_found) { /* A new additional mapping... */ 2689 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2690 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2691 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2692 nb_tx_queue_stats_mappings++; 2693 } 2694 } 2695 else { /*rx*/ 2696 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2697 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2698 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2699 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2700 existing_mapping_found = 1; 2701 break; 2702 } 2703 } 2704 if (!existing_mapping_found) { /* A new additional mapping... */ 2705 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2706 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2707 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2708 nb_rx_queue_stats_mappings++; 2709 } 2710 } 2711 } 2712 2713 static inline void 2714 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2715 { 2716 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 2717 2718 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2719 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 2720 " tunnel_id: 0x%08x", 2721 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2722 rte_be_to_cpu_32(mask->tunnel_id_mask)); 2723 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2724 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 2725 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 2726 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 2727 2728 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 2729 rte_be_to_cpu_16(mask->src_port_mask), 2730 rte_be_to_cpu_16(mask->dst_port_mask)); 2731 2732 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2733 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 2734 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 2735 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 2736 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 2737 2738 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2739 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 2740 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 2741 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 2742 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 2743 } 2744 2745 printf("\n"); 2746 } 2747 2748 static inline void 2749 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2750 { 2751 struct rte_eth_flex_payload_cfg *cfg; 2752 uint32_t i, j; 2753 2754 for (i = 0; i < flex_conf->nb_payloads; i++) { 2755 cfg = &flex_conf->flex_set[i]; 2756 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 2757 printf("\n RAW: "); 2758 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 2759 printf("\n L2_PAYLOAD: "); 2760 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 2761 printf("\n L3_PAYLOAD: "); 2762 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 2763 printf("\n L4_PAYLOAD: "); 2764 else 2765 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 2766 for (j = 0; j < num; j++) 2767 printf(" %-5u", cfg->src_offset[j]); 2768 } 2769 printf("\n"); 2770 } 2771 2772 static char * 2773 flowtype_to_str(uint16_t flow_type) 2774 { 2775 struct flow_type_info { 2776 char str[32]; 2777 uint16_t ftype; 2778 }; 2779 2780 uint8_t i; 2781 static struct flow_type_info flowtype_str_table[] = { 2782 {"raw", RTE_ETH_FLOW_RAW}, 2783 {"ipv4", RTE_ETH_FLOW_IPV4}, 2784 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 2785 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 2786 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 2787 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 2788 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 2789 {"ipv6", RTE_ETH_FLOW_IPV6}, 2790 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 2791 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 2792 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 2793 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 2794 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 2795 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 2796 {"port", RTE_ETH_FLOW_PORT}, 2797 {"vxlan", RTE_ETH_FLOW_VXLAN}, 2798 {"geneve", RTE_ETH_FLOW_GENEVE}, 2799 {"nvgre", RTE_ETH_FLOW_NVGRE}, 2800 }; 2801 2802 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 2803 if (flowtype_str_table[i].ftype == flow_type) 2804 return flowtype_str_table[i].str; 2805 } 2806 2807 return NULL; 2808 } 2809 2810 static inline void 2811 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2812 { 2813 struct rte_eth_fdir_flex_mask *mask; 2814 uint32_t i, j; 2815 char *p; 2816 2817 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 2818 mask = &flex_conf->flex_mask[i]; 2819 p = flowtype_to_str(mask->flow_type); 2820 printf("\n %s:\t", p ? p : "unknown"); 2821 for (j = 0; j < num; j++) 2822 printf(" %02x", mask->mask[j]); 2823 } 2824 printf("\n"); 2825 } 2826 2827 static inline void 2828 print_fdir_flow_type(uint32_t flow_types_mask) 2829 { 2830 int i; 2831 char *p; 2832 2833 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 2834 if (!(flow_types_mask & (1 << i))) 2835 continue; 2836 p = flowtype_to_str(i); 2837 if (p) 2838 printf(" %s", p); 2839 else 2840 printf(" unknown"); 2841 } 2842 printf("\n"); 2843 } 2844 2845 void 2846 fdir_get_infos(portid_t port_id) 2847 { 2848 struct rte_eth_fdir_stats fdir_stat; 2849 struct rte_eth_fdir_info fdir_info; 2850 int ret; 2851 2852 static const char *fdir_stats_border = "########################"; 2853 2854 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2855 return; 2856 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 2857 if (ret < 0) { 2858 printf("\n FDIR is not supported on port %-2d\n", 2859 port_id); 2860 return; 2861 } 2862 2863 memset(&fdir_info, 0, sizeof(fdir_info)); 2864 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2865 RTE_ETH_FILTER_INFO, &fdir_info); 2866 memset(&fdir_stat, 0, sizeof(fdir_stat)); 2867 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2868 RTE_ETH_FILTER_STATS, &fdir_stat); 2869 printf("\n %s FDIR infos for port %-2d %s\n", 2870 fdir_stats_border, port_id, fdir_stats_border); 2871 printf(" MODE: "); 2872 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 2873 printf(" PERFECT\n"); 2874 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 2875 printf(" PERFECT-MAC-VLAN\n"); 2876 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2877 printf(" PERFECT-TUNNEL\n"); 2878 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 2879 printf(" SIGNATURE\n"); 2880 else 2881 printf(" DISABLE\n"); 2882 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 2883 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 2884 printf(" SUPPORTED FLOW TYPE: "); 2885 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 2886 } 2887 printf(" FLEX PAYLOAD INFO:\n"); 2888 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 2889 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 2890 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 2891 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 2892 fdir_info.flex_payload_unit, 2893 fdir_info.max_flex_payload_segment_num, 2894 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 2895 printf(" MASK: "); 2896 print_fdir_mask(&fdir_info.mask); 2897 if (fdir_info.flex_conf.nb_payloads > 0) { 2898 printf(" FLEX PAYLOAD SRC OFFSET:"); 2899 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2900 } 2901 if (fdir_info.flex_conf.nb_flexmasks > 0) { 2902 printf(" FLEX MASK CFG:"); 2903 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2904 } 2905 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 2906 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 2907 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 2908 fdir_info.guarant_spc, fdir_info.best_spc); 2909 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 2910 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 2911 " add: %-10"PRIu64" remove: %"PRIu64"\n" 2912 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 2913 fdir_stat.collision, fdir_stat.free, 2914 fdir_stat.maxhash, fdir_stat.maxlen, 2915 fdir_stat.add, fdir_stat.remove, 2916 fdir_stat.f_add, fdir_stat.f_remove); 2917 printf(" %s############################%s\n", 2918 fdir_stats_border, fdir_stats_border); 2919 } 2920 2921 void 2922 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 2923 { 2924 struct rte_port *port; 2925 struct rte_eth_fdir_flex_conf *flex_conf; 2926 int i, idx = 0; 2927 2928 port = &ports[port_id]; 2929 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2930 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 2931 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 2932 idx = i; 2933 break; 2934 } 2935 } 2936 if (i >= RTE_ETH_FLOW_MAX) { 2937 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 2938 idx = flex_conf->nb_flexmasks; 2939 flex_conf->nb_flexmasks++; 2940 } else { 2941 printf("The flex mask table is full. Can not set flex" 2942 " mask for flow_type(%u).", cfg->flow_type); 2943 return; 2944 } 2945 } 2946 (void)rte_memcpy(&flex_conf->flex_mask[idx], 2947 cfg, 2948 sizeof(struct rte_eth_fdir_flex_mask)); 2949 } 2950 2951 void 2952 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 2953 { 2954 struct rte_port *port; 2955 struct rte_eth_fdir_flex_conf *flex_conf; 2956 int i, idx = 0; 2957 2958 port = &ports[port_id]; 2959 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2960 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 2961 if (cfg->type == flex_conf->flex_set[i].type) { 2962 idx = i; 2963 break; 2964 } 2965 } 2966 if (i >= RTE_ETH_PAYLOAD_MAX) { 2967 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 2968 idx = flex_conf->nb_payloads; 2969 flex_conf->nb_payloads++; 2970 } else { 2971 printf("The flex payload table is full. Can not set" 2972 " flex payload for type(%u).", cfg->type); 2973 return; 2974 } 2975 } 2976 (void)rte_memcpy(&flex_conf->flex_set[idx], 2977 cfg, 2978 sizeof(struct rte_eth_flex_payload_cfg)); 2979 2980 } 2981 2982 #ifdef RTE_LIBRTE_IXGBE_PMD 2983 void 2984 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 2985 { 2986 int diag; 2987 2988 if (is_rx) 2989 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 2990 else 2991 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 2992 2993 if (diag == 0) 2994 return; 2995 if(is_rx) 2996 printf("rte_pmd_ixgbe_set_vf_rx for port_id=%d failed " 2997 "diag=%d\n", port_id, diag); 2998 else 2999 printf("rte_pmd_ixgbe_set_vf_tx for port_id=%d failed " 3000 "diag=%d\n", port_id, diag); 3001 3002 } 3003 #endif 3004 3005 int 3006 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3007 { 3008 int diag; 3009 struct rte_eth_link link; 3010 3011 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3012 return 1; 3013 rte_eth_link_get_nowait(port_id, &link); 3014 if (rate > link.link_speed) { 3015 printf("Invalid rate value:%u bigger than link speed: %u\n", 3016 rate, link.link_speed); 3017 return 1; 3018 } 3019 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3020 if (diag == 0) 3021 return diag; 3022 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3023 port_id, diag); 3024 return diag; 3025 } 3026 3027 #ifdef RTE_LIBRTE_IXGBE_PMD 3028 int 3029 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3030 { 3031 int diag; 3032 3033 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, q_msk); 3034 if (diag == 0) 3035 return diag; 3036 printf("rte_pmd_ixgbe_set_vf_rate_limit for port_id=%d failed diag=%d\n", 3037 port_id, diag); 3038 return diag; 3039 } 3040 #endif 3041 3042 /* 3043 * Functions to manage the set of filtered Multicast MAC addresses. 3044 * 3045 * A pool of filtered multicast MAC addresses is associated with each port. 3046 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3047 * The address of the pool and the number of valid multicast MAC addresses 3048 * recorded in the pool are stored in the fields "mc_addr_pool" and 3049 * "mc_addr_nb" of the "rte_port" data structure. 3050 * 3051 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3052 * to be supplied a contiguous array of multicast MAC addresses. 3053 * To comply with this constraint, the set of multicast addresses recorded 3054 * into the pool are systematically compacted at the beginning of the pool. 3055 * Hence, when a multicast address is removed from the pool, all following 3056 * addresses, if any, are copied back to keep the set contiguous. 3057 */ 3058 #define MCAST_POOL_INC 32 3059 3060 static int 3061 mcast_addr_pool_extend(struct rte_port *port) 3062 { 3063 struct ether_addr *mc_pool; 3064 size_t mc_pool_size; 3065 3066 /* 3067 * If a free entry is available at the end of the pool, just 3068 * increment the number of recorded multicast addresses. 3069 */ 3070 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3071 port->mc_addr_nb++; 3072 return 0; 3073 } 3074 3075 /* 3076 * [re]allocate a pool with MCAST_POOL_INC more entries. 3077 * The previous test guarantees that port->mc_addr_nb is a multiple 3078 * of MCAST_POOL_INC. 3079 */ 3080 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3081 MCAST_POOL_INC); 3082 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3083 mc_pool_size); 3084 if (mc_pool == NULL) { 3085 printf("allocation of pool of %u multicast addresses failed\n", 3086 port->mc_addr_nb + MCAST_POOL_INC); 3087 return -ENOMEM; 3088 } 3089 3090 port->mc_addr_pool = mc_pool; 3091 port->mc_addr_nb++; 3092 return 0; 3093 3094 } 3095 3096 static void 3097 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3098 { 3099 port->mc_addr_nb--; 3100 if (addr_idx == port->mc_addr_nb) { 3101 /* No need to recompact the set of multicast addressses. */ 3102 if (port->mc_addr_nb == 0) { 3103 /* free the pool of multicast addresses. */ 3104 free(port->mc_addr_pool); 3105 port->mc_addr_pool = NULL; 3106 } 3107 return; 3108 } 3109 memmove(&port->mc_addr_pool[addr_idx], 3110 &port->mc_addr_pool[addr_idx + 1], 3111 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3112 } 3113 3114 static void 3115 eth_port_multicast_addr_list_set(uint8_t port_id) 3116 { 3117 struct rte_port *port; 3118 int diag; 3119 3120 port = &ports[port_id]; 3121 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3122 port->mc_addr_nb); 3123 if (diag == 0) 3124 return; 3125 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3126 port->mc_addr_nb, port_id, -diag); 3127 } 3128 3129 void 3130 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr) 3131 { 3132 struct rte_port *port; 3133 uint32_t i; 3134 3135 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3136 return; 3137 3138 port = &ports[port_id]; 3139 3140 /* 3141 * Check that the added multicast MAC address is not already recorded 3142 * in the pool of multicast addresses. 3143 */ 3144 for (i = 0; i < port->mc_addr_nb; i++) { 3145 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3146 printf("multicast address already filtered by port\n"); 3147 return; 3148 } 3149 } 3150 3151 if (mcast_addr_pool_extend(port) != 0) 3152 return; 3153 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3154 eth_port_multicast_addr_list_set(port_id); 3155 } 3156 3157 void 3158 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr) 3159 { 3160 struct rte_port *port; 3161 uint32_t i; 3162 3163 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3164 return; 3165 3166 port = &ports[port_id]; 3167 3168 /* 3169 * Search the pool of multicast MAC addresses for the removed address. 3170 */ 3171 for (i = 0; i < port->mc_addr_nb; i++) { 3172 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3173 break; 3174 } 3175 if (i == port->mc_addr_nb) { 3176 printf("multicast address not filtered by port %d\n", port_id); 3177 return; 3178 } 3179 3180 mcast_addr_pool_remove(port, i); 3181 eth_port_multicast_addr_list_set(port_id); 3182 } 3183 3184 void 3185 port_dcb_info_display(uint8_t port_id) 3186 { 3187 struct rte_eth_dcb_info dcb_info; 3188 uint16_t i; 3189 int ret; 3190 static const char *border = "================"; 3191 3192 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3193 return; 3194 3195 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3196 if (ret) { 3197 printf("\n Failed to get dcb infos on port %-2d\n", 3198 port_id); 3199 return; 3200 } 3201 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3202 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3203 printf("\n TC : "); 3204 for (i = 0; i < dcb_info.nb_tcs; i++) 3205 printf("\t%4d", i); 3206 printf("\n Priority : "); 3207 for (i = 0; i < dcb_info.nb_tcs; i++) 3208 printf("\t%4d", dcb_info.prio_tc[i]); 3209 printf("\n BW percent :"); 3210 for (i = 0; i < dcb_info.nb_tcs; i++) 3211 printf("\t%4d%%", dcb_info.tc_bws[i]); 3212 printf("\n RXQ base : "); 3213 for (i = 0; i < dcb_info.nb_tcs; i++) 3214 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3215 printf("\n RXQ number :"); 3216 for (i = 0; i < dcb_info.nb_tcs; i++) 3217 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3218 printf("\n TXQ base : "); 3219 for (i = 0; i < dcb_info.nb_tcs; i++) 3220 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3221 printf("\n TXQ number :"); 3222 for (i = 0; i < dcb_info.nb_tcs; i++) 3223 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3224 printf("\n"); 3225 } 3226 3227 uint8_t * 3228 open_ddp_package_file(const char *file_path, uint32_t *size) 3229 { 3230 FILE *fh = fopen(file_path, "rb"); 3231 uint32_t pkg_size; 3232 uint8_t *buf = NULL; 3233 int ret = 0; 3234 3235 if (size) 3236 *size = 0; 3237 3238 if (fh == NULL) { 3239 printf("%s: Failed to open %s\n", __func__, file_path); 3240 return buf; 3241 } 3242 3243 ret = fseek(fh, 0, SEEK_END); 3244 if (ret < 0) { 3245 fclose(fh); 3246 printf("%s: File operations failed\n", __func__); 3247 return buf; 3248 } 3249 3250 pkg_size = ftell(fh); 3251 3252 buf = (uint8_t *)malloc(pkg_size); 3253 if (!buf) { 3254 fclose(fh); 3255 printf("%s: Failed to malloc memory\n", __func__); 3256 return buf; 3257 } 3258 3259 ret = fseek(fh, 0, SEEK_SET); 3260 if (ret < 0) { 3261 fclose(fh); 3262 printf("%s: File seek operation failed\n", __func__); 3263 close_ddp_package_file(buf); 3264 return NULL; 3265 } 3266 3267 ret = fread(buf, 1, pkg_size, fh); 3268 if (ret < 0) { 3269 fclose(fh); 3270 printf("%s: File read operation failed\n", __func__); 3271 close_ddp_package_file(buf); 3272 return NULL; 3273 } 3274 3275 if (size) 3276 *size = pkg_size; 3277 3278 fclose(fh); 3279 3280 return buf; 3281 } 3282 3283 int 3284 close_ddp_package_file(uint8_t *buf) 3285 { 3286 if (buf) { 3287 free((void *)buf); 3288 return 0; 3289 } 3290 3291 return -1; 3292 } 3293