1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 /* BSD LICENSE 34 * 35 * Copyright 2013-2014 6WIND S.A. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name of 6WIND S.A. nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 62 */ 63 64 #include <stdarg.h> 65 #include <errno.h> 66 #include <stdio.h> 67 #include <string.h> 68 #include <stdarg.h> 69 #include <stdint.h> 70 #include <inttypes.h> 71 72 #include <sys/queue.h> 73 74 #include <rte_common.h> 75 #include <rte_byteorder.h> 76 #include <rte_debug.h> 77 #include <rte_log.h> 78 #include <rte_memory.h> 79 #include <rte_memcpy.h> 80 #include <rte_memzone.h> 81 #include <rte_launch.h> 82 #include <rte_eal.h> 83 #include <rte_per_lcore.h> 84 #include <rte_lcore.h> 85 #include <rte_atomic.h> 86 #include <rte_branch_prediction.h> 87 #include <rte_mempool.h> 88 #include <rte_mbuf.h> 89 #include <rte_interrupts.h> 90 #include <rte_pci.h> 91 #include <rte_ether.h> 92 #include <rte_ethdev.h> 93 #include <rte_string_fns.h> 94 #include <rte_cycles.h> 95 #include <rte_flow.h> 96 #include <rte_errno.h> 97 #ifdef RTE_LIBRTE_IXGBE_PMD 98 #include <rte_pmd_ixgbe.h> 99 #endif 100 101 #include "testpmd.h" 102 103 static char *flowtype_to_str(uint16_t flow_type); 104 105 static const struct { 106 enum tx_pkt_split split; 107 const char *name; 108 } tx_split_name[] = { 109 { 110 .split = TX_PKT_SPLIT_OFF, 111 .name = "off", 112 }, 113 { 114 .split = TX_PKT_SPLIT_ON, 115 .name = "on", 116 }, 117 { 118 .split = TX_PKT_SPLIT_RND, 119 .name = "rand", 120 }, 121 }; 122 123 struct rss_type_info { 124 char str[32]; 125 uint64_t rss_type; 126 }; 127 128 static const struct rss_type_info rss_type_table[] = { 129 { "ipv4", ETH_RSS_IPV4 }, 130 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 131 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 132 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 133 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 134 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 135 { "ipv6", ETH_RSS_IPV6 }, 136 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 137 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 138 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 139 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 140 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 141 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 142 { "ipv6-ex", ETH_RSS_IPV6_EX }, 143 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 144 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 145 { "port", ETH_RSS_PORT }, 146 { "vxlan", ETH_RSS_VXLAN }, 147 { "geneve", ETH_RSS_GENEVE }, 148 { "nvgre", ETH_RSS_NVGRE }, 149 150 }; 151 152 static void 153 print_ethaddr(const char *name, struct ether_addr *eth_addr) 154 { 155 char buf[ETHER_ADDR_FMT_SIZE]; 156 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 157 printf("%s%s", name, buf); 158 } 159 160 void 161 nic_stats_display(portid_t port_id) 162 { 163 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 164 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 165 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 166 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 167 uint64_t mpps_rx, mpps_tx; 168 struct rte_eth_stats stats; 169 struct rte_port *port = &ports[port_id]; 170 uint8_t i; 171 portid_t pid; 172 173 static const char *nic_stats_border = "########################"; 174 175 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 176 printf("Valid port range is [0"); 177 FOREACH_PORT(pid, ports) 178 printf(", %d", pid); 179 printf("]\n"); 180 return; 181 } 182 rte_eth_stats_get(port_id, &stats); 183 printf("\n %s NIC statistics for port %-2d %s\n", 184 nic_stats_border, port_id, nic_stats_border); 185 186 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 187 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 188 "%-"PRIu64"\n", 189 stats.ipackets, stats.imissed, stats.ibytes); 190 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 191 printf(" RX-nombuf: %-10"PRIu64"\n", 192 stats.rx_nombuf); 193 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 194 "%-"PRIu64"\n", 195 stats.opackets, stats.oerrors, stats.obytes); 196 } 197 else { 198 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 199 " RX-bytes: %10"PRIu64"\n", 200 stats.ipackets, stats.ierrors, stats.ibytes); 201 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 202 printf(" RX-nombuf: %10"PRIu64"\n", 203 stats.rx_nombuf); 204 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 205 " TX-bytes: %10"PRIu64"\n", 206 stats.opackets, stats.oerrors, stats.obytes); 207 } 208 209 if (port->rx_queue_stats_mapping_enabled) { 210 printf("\n"); 211 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 212 printf(" Stats reg %2d RX-packets: %10"PRIu64 213 " RX-errors: %10"PRIu64 214 " RX-bytes: %10"PRIu64"\n", 215 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 216 } 217 } 218 if (port->tx_queue_stats_mapping_enabled) { 219 printf("\n"); 220 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 221 printf(" Stats reg %2d TX-packets: %10"PRIu64 222 " TX-bytes: %10"PRIu64"\n", 223 i, stats.q_opackets[i], stats.q_obytes[i]); 224 } 225 } 226 227 diff_cycles = prev_cycles[port_id]; 228 prev_cycles[port_id] = rte_rdtsc(); 229 if (diff_cycles > 0) 230 diff_cycles = prev_cycles[port_id] - diff_cycles; 231 232 diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id]; 233 diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id]; 234 prev_pkts_rx[port_id] = stats.ipackets; 235 prev_pkts_tx[port_id] = stats.opackets; 236 mpps_rx = diff_cycles > 0 ? 237 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 238 mpps_tx = diff_cycles > 0 ? 239 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 240 printf("\n Throughput (since last show)\n"); 241 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 242 mpps_rx, mpps_tx); 243 244 printf(" %s############################%s\n", 245 nic_stats_border, nic_stats_border); 246 } 247 248 void 249 nic_stats_clear(portid_t port_id) 250 { 251 portid_t pid; 252 253 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 254 printf("Valid port range is [0"); 255 FOREACH_PORT(pid, ports) 256 printf(", %d", pid); 257 printf("]\n"); 258 return; 259 } 260 rte_eth_stats_reset(port_id); 261 printf("\n NIC statistics for port %d cleared\n", port_id); 262 } 263 264 void 265 nic_xstats_display(portid_t port_id) 266 { 267 struct rte_eth_xstat *xstats; 268 int cnt_xstats, idx_xstat; 269 struct rte_eth_xstat_name *xstats_names; 270 271 printf("###### NIC extended statistics for port %-2d\n", port_id); 272 if (!rte_eth_dev_is_valid_port(port_id)) { 273 printf("Error: Invalid port number %i\n", port_id); 274 return; 275 } 276 277 /* Get count */ 278 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 279 if (cnt_xstats < 0) { 280 printf("Error: Cannot get count of xstats\n"); 281 return; 282 } 283 284 /* Get id-name lookup table */ 285 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 286 if (xstats_names == NULL) { 287 printf("Cannot allocate memory for xstats lookup\n"); 288 return; 289 } 290 if (cnt_xstats != rte_eth_xstats_get_names( 291 port_id, xstats_names, cnt_xstats)) { 292 printf("Error: Cannot get xstats lookup\n"); 293 free(xstats_names); 294 return; 295 } 296 297 /* Get stats themselves */ 298 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 299 if (xstats == NULL) { 300 printf("Cannot allocate memory for xstats\n"); 301 free(xstats_names); 302 return; 303 } 304 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 305 printf("Error: Unable to get xstats\n"); 306 free(xstats_names); 307 free(xstats); 308 return; 309 } 310 311 /* Display xstats */ 312 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) 313 printf("%s: %"PRIu64"\n", 314 xstats_names[idx_xstat].name, 315 xstats[idx_xstat].value); 316 free(xstats_names); 317 free(xstats); 318 } 319 320 void 321 nic_xstats_clear(portid_t port_id) 322 { 323 rte_eth_xstats_reset(port_id); 324 } 325 326 void 327 nic_stats_mapping_display(portid_t port_id) 328 { 329 struct rte_port *port = &ports[port_id]; 330 uint16_t i; 331 portid_t pid; 332 333 static const char *nic_stats_mapping_border = "########################"; 334 335 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 336 printf("Valid port range is [0"); 337 FOREACH_PORT(pid, ports) 338 printf(", %d", pid); 339 printf("]\n"); 340 return; 341 } 342 343 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 344 printf("Port id %d - either does not support queue statistic mapping or" 345 " no queue statistic mapping set\n", port_id); 346 return; 347 } 348 349 printf("\n %s NIC statistics mapping for port %-2d %s\n", 350 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 351 352 if (port->rx_queue_stats_mapping_enabled) { 353 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 354 if (rx_queue_stats_mappings[i].port_id == port_id) { 355 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 356 rx_queue_stats_mappings[i].queue_id, 357 rx_queue_stats_mappings[i].stats_counter_id); 358 } 359 } 360 printf("\n"); 361 } 362 363 364 if (port->tx_queue_stats_mapping_enabled) { 365 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 366 if (tx_queue_stats_mappings[i].port_id == port_id) { 367 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 368 tx_queue_stats_mappings[i].queue_id, 369 tx_queue_stats_mappings[i].stats_counter_id); 370 } 371 } 372 } 373 374 printf(" %s####################################%s\n", 375 nic_stats_mapping_border, nic_stats_mapping_border); 376 } 377 378 void 379 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 380 { 381 struct rte_eth_rxq_info qinfo; 382 int32_t rc; 383 static const char *info_border = "*********************"; 384 385 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 386 if (rc != 0) { 387 printf("Failed to retrieve information for port: %hhu, " 388 "RX queue: %hu\nerror desc: %s(%d)\n", 389 port_id, queue_id, strerror(-rc), rc); 390 return; 391 } 392 393 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 394 info_border, port_id, queue_id, info_border); 395 396 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 397 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 398 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 399 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 400 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 401 printf("\nRX drop packets: %s", 402 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 403 printf("\nRX deferred start: %s", 404 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 405 printf("\nRX scattered packets: %s", 406 (qinfo.scattered_rx != 0) ? "on" : "off"); 407 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 408 printf("\n"); 409 } 410 411 void 412 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 413 { 414 struct rte_eth_txq_info qinfo; 415 int32_t rc; 416 static const char *info_border = "*********************"; 417 418 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 419 if (rc != 0) { 420 printf("Failed to retrieve information for port: %hhu, " 421 "TX queue: %hu\nerror desc: %s(%d)\n", 422 port_id, queue_id, strerror(-rc), rc); 423 return; 424 } 425 426 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 427 info_border, port_id, queue_id, info_border); 428 429 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 430 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 431 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 432 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 433 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 434 printf("\nTX flags: %#x", qinfo.conf.txq_flags); 435 printf("\nTX deferred start: %s", 436 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 437 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 438 printf("\n"); 439 } 440 441 void 442 port_infos_display(portid_t port_id) 443 { 444 struct rte_port *port; 445 struct ether_addr mac_addr; 446 struct rte_eth_link link; 447 struct rte_eth_dev_info dev_info; 448 int vlan_offload; 449 struct rte_mempool * mp; 450 static const char *info_border = "*********************"; 451 portid_t pid; 452 453 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 454 printf("Valid port range is [0"); 455 FOREACH_PORT(pid, ports) 456 printf(", %d", pid); 457 printf("]\n"); 458 return; 459 } 460 port = &ports[port_id]; 461 rte_eth_link_get_nowait(port_id, &link); 462 memset(&dev_info, 0, sizeof(dev_info)); 463 rte_eth_dev_info_get(port_id, &dev_info); 464 printf("\n%s Infos for port %-2d %s\n", 465 info_border, port_id, info_border); 466 rte_eth_macaddr_get(port_id, &mac_addr); 467 print_ethaddr("MAC address: ", &mac_addr); 468 printf("\nDriver name: %s", dev_info.driver_name); 469 printf("\nConnect to socket: %u", port->socket_id); 470 471 if (port_numa[port_id] != NUMA_NO_CONFIG) { 472 mp = mbuf_pool_find(port_numa[port_id]); 473 if (mp) 474 printf("\nmemory allocation on the socket: %d", 475 port_numa[port_id]); 476 } else 477 printf("\nmemory allocation on the socket: %u",port->socket_id); 478 479 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 480 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 481 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 482 ("full-duplex") : ("half-duplex")); 483 printf("Promiscuous mode: %s\n", 484 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 485 printf("Allmulticast mode: %s\n", 486 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 487 printf("Maximum number of MAC addresses: %u\n", 488 (unsigned int)(port->dev_info.max_mac_addrs)); 489 printf("Maximum number of MAC addresses of hash filtering: %u\n", 490 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 491 492 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 493 if (vlan_offload >= 0){ 494 printf("VLAN offload: \n"); 495 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 496 printf(" strip on \n"); 497 else 498 printf(" strip off \n"); 499 500 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 501 printf(" filter on \n"); 502 else 503 printf(" filter off \n"); 504 505 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 506 printf(" qinq(extend) on \n"); 507 else 508 printf(" qinq(extend) off \n"); 509 } 510 511 if (dev_info.hash_key_size > 0) 512 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 513 if (dev_info.reta_size > 0) 514 printf("Redirection table size: %u\n", dev_info.reta_size); 515 if (!dev_info.flow_type_rss_offloads) 516 printf("No flow type is supported.\n"); 517 else { 518 uint16_t i; 519 char *p; 520 521 printf("Supported flow types:\n"); 522 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; 523 i++) { 524 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 525 continue; 526 p = flowtype_to_str(i); 527 printf(" %s\n", (p ? p : "unknown")); 528 } 529 } 530 531 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 532 printf("Max possible number of RXDs per queue: %hu\n", 533 dev_info.rx_desc_lim.nb_max); 534 printf("Min possible number of RXDs per queue: %hu\n", 535 dev_info.rx_desc_lim.nb_min); 536 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 537 538 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 539 printf("Max possible number of TXDs per queue: %hu\n", 540 dev_info.tx_desc_lim.nb_max); 541 printf("Min possible number of TXDs per queue: %hu\n", 542 dev_info.tx_desc_lim.nb_min); 543 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 544 } 545 546 int 547 port_id_is_invalid(portid_t port_id, enum print_warning warning) 548 { 549 if (port_id == (portid_t)RTE_PORT_ALL) 550 return 0; 551 552 if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled) 553 return 0; 554 555 if (warning == ENABLED_WARN) 556 printf("Invalid port %d\n", port_id); 557 558 return 1; 559 } 560 561 static int 562 vlan_id_is_invalid(uint16_t vlan_id) 563 { 564 if (vlan_id < 4096) 565 return 0; 566 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 567 return 1; 568 } 569 570 static int 571 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 572 { 573 uint64_t pci_len; 574 575 if (reg_off & 0x3) { 576 printf("Port register offset 0x%X not aligned on a 4-byte " 577 "boundary\n", 578 (unsigned)reg_off); 579 return 1; 580 } 581 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 582 if (reg_off >= pci_len) { 583 printf("Port %d: register offset %u (0x%X) out of port PCI " 584 "resource (length=%"PRIu64")\n", 585 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 586 return 1; 587 } 588 return 0; 589 } 590 591 static int 592 reg_bit_pos_is_invalid(uint8_t bit_pos) 593 { 594 if (bit_pos <= 31) 595 return 0; 596 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 597 return 1; 598 } 599 600 #define display_port_and_reg_off(port_id, reg_off) \ 601 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 602 603 static inline void 604 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 605 { 606 display_port_and_reg_off(port_id, (unsigned)reg_off); 607 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 608 } 609 610 void 611 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 612 { 613 uint32_t reg_v; 614 615 616 if (port_id_is_invalid(port_id, ENABLED_WARN)) 617 return; 618 if (port_reg_off_is_invalid(port_id, reg_off)) 619 return; 620 if (reg_bit_pos_is_invalid(bit_x)) 621 return; 622 reg_v = port_id_pci_reg_read(port_id, reg_off); 623 display_port_and_reg_off(port_id, (unsigned)reg_off); 624 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 625 } 626 627 void 628 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 629 uint8_t bit1_pos, uint8_t bit2_pos) 630 { 631 uint32_t reg_v; 632 uint8_t l_bit; 633 uint8_t h_bit; 634 635 if (port_id_is_invalid(port_id, ENABLED_WARN)) 636 return; 637 if (port_reg_off_is_invalid(port_id, reg_off)) 638 return; 639 if (reg_bit_pos_is_invalid(bit1_pos)) 640 return; 641 if (reg_bit_pos_is_invalid(bit2_pos)) 642 return; 643 if (bit1_pos > bit2_pos) 644 l_bit = bit2_pos, h_bit = bit1_pos; 645 else 646 l_bit = bit1_pos, h_bit = bit2_pos; 647 648 reg_v = port_id_pci_reg_read(port_id, reg_off); 649 reg_v >>= l_bit; 650 if (h_bit < 31) 651 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 652 display_port_and_reg_off(port_id, (unsigned)reg_off); 653 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 654 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 655 } 656 657 void 658 port_reg_display(portid_t port_id, uint32_t reg_off) 659 { 660 uint32_t reg_v; 661 662 if (port_id_is_invalid(port_id, ENABLED_WARN)) 663 return; 664 if (port_reg_off_is_invalid(port_id, reg_off)) 665 return; 666 reg_v = port_id_pci_reg_read(port_id, reg_off); 667 display_port_reg_value(port_id, reg_off, reg_v); 668 } 669 670 void 671 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 672 uint8_t bit_v) 673 { 674 uint32_t reg_v; 675 676 if (port_id_is_invalid(port_id, ENABLED_WARN)) 677 return; 678 if (port_reg_off_is_invalid(port_id, reg_off)) 679 return; 680 if (reg_bit_pos_is_invalid(bit_pos)) 681 return; 682 if (bit_v > 1) { 683 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 684 return; 685 } 686 reg_v = port_id_pci_reg_read(port_id, reg_off); 687 if (bit_v == 0) 688 reg_v &= ~(1 << bit_pos); 689 else 690 reg_v |= (1 << bit_pos); 691 port_id_pci_reg_write(port_id, reg_off, reg_v); 692 display_port_reg_value(port_id, reg_off, reg_v); 693 } 694 695 void 696 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 697 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 698 { 699 uint32_t max_v; 700 uint32_t reg_v; 701 uint8_t l_bit; 702 uint8_t h_bit; 703 704 if (port_id_is_invalid(port_id, ENABLED_WARN)) 705 return; 706 if (port_reg_off_is_invalid(port_id, reg_off)) 707 return; 708 if (reg_bit_pos_is_invalid(bit1_pos)) 709 return; 710 if (reg_bit_pos_is_invalid(bit2_pos)) 711 return; 712 if (bit1_pos > bit2_pos) 713 l_bit = bit2_pos, h_bit = bit1_pos; 714 else 715 l_bit = bit1_pos, h_bit = bit2_pos; 716 717 if ((h_bit - l_bit) < 31) 718 max_v = (1 << (h_bit - l_bit + 1)) - 1; 719 else 720 max_v = 0xFFFFFFFF; 721 722 if (value > max_v) { 723 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 724 (unsigned)value, (unsigned)value, 725 (unsigned)max_v, (unsigned)max_v); 726 return; 727 } 728 reg_v = port_id_pci_reg_read(port_id, reg_off); 729 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 730 reg_v |= (value << l_bit); /* Set changed bits */ 731 port_id_pci_reg_write(port_id, reg_off, reg_v); 732 display_port_reg_value(port_id, reg_off, reg_v); 733 } 734 735 void 736 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 737 { 738 if (port_id_is_invalid(port_id, ENABLED_WARN)) 739 return; 740 if (port_reg_off_is_invalid(port_id, reg_off)) 741 return; 742 port_id_pci_reg_write(port_id, reg_off, reg_v); 743 display_port_reg_value(port_id, reg_off, reg_v); 744 } 745 746 void 747 port_mtu_set(portid_t port_id, uint16_t mtu) 748 { 749 int diag; 750 751 if (port_id_is_invalid(port_id, ENABLED_WARN)) 752 return; 753 diag = rte_eth_dev_set_mtu(port_id, mtu); 754 if (diag == 0) 755 return; 756 printf("Set MTU failed. diag=%d\n", diag); 757 } 758 759 /* Generic flow management functions. */ 760 761 /** Generate flow_item[] entry. */ 762 #define MK_FLOW_ITEM(t, s) \ 763 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 764 .name = # t, \ 765 .size = s, \ 766 } 767 768 /** Information about known flow pattern items. */ 769 static const struct { 770 const char *name; 771 size_t size; 772 } flow_item[] = { 773 MK_FLOW_ITEM(END, 0), 774 MK_FLOW_ITEM(VOID, 0), 775 MK_FLOW_ITEM(INVERT, 0), 776 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 777 MK_FLOW_ITEM(PF, 0), 778 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 779 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)), 780 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */ 781 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 782 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 783 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 784 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 785 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 786 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 787 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 788 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 789 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 790 }; 791 792 /** Compute storage space needed by item specification. */ 793 static void 794 flow_item_spec_size(const struct rte_flow_item *item, 795 size_t *size, size_t *pad) 796 { 797 if (!item->spec) 798 goto empty; 799 switch (item->type) { 800 union { 801 const struct rte_flow_item_raw *raw; 802 } spec; 803 804 case RTE_FLOW_ITEM_TYPE_RAW: 805 spec.raw = item->spec; 806 *size = offsetof(struct rte_flow_item_raw, pattern) + 807 spec.raw->length * sizeof(*spec.raw->pattern); 808 break; 809 default: 810 empty: 811 *size = 0; 812 break; 813 } 814 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 815 } 816 817 /** Generate flow_action[] entry. */ 818 #define MK_FLOW_ACTION(t, s) \ 819 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 820 .name = # t, \ 821 .size = s, \ 822 } 823 824 /** Information about known flow actions. */ 825 static const struct { 826 const char *name; 827 size_t size; 828 } flow_action[] = { 829 MK_FLOW_ACTION(END, 0), 830 MK_FLOW_ACTION(VOID, 0), 831 MK_FLOW_ACTION(PASSTHRU, 0), 832 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 833 MK_FLOW_ACTION(FLAG, 0), 834 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 835 MK_FLOW_ACTION(DROP, 0), 836 MK_FLOW_ACTION(COUNT, 0), 837 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)), 838 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ 839 MK_FLOW_ACTION(PF, 0), 840 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 841 }; 842 843 /** Compute storage space needed by action configuration. */ 844 static void 845 flow_action_conf_size(const struct rte_flow_action *action, 846 size_t *size, size_t *pad) 847 { 848 if (!action->conf) 849 goto empty; 850 switch (action->type) { 851 union { 852 const struct rte_flow_action_rss *rss; 853 } conf; 854 855 case RTE_FLOW_ACTION_TYPE_RSS: 856 conf.rss = action->conf; 857 *size = offsetof(struct rte_flow_action_rss, queue) + 858 conf.rss->num * sizeof(*conf.rss->queue); 859 break; 860 default: 861 empty: 862 *size = 0; 863 break; 864 } 865 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 866 } 867 868 /** Generate a port_flow entry from attributes/pattern/actions. */ 869 static struct port_flow * 870 port_flow_new(const struct rte_flow_attr *attr, 871 const struct rte_flow_item *pattern, 872 const struct rte_flow_action *actions) 873 { 874 const struct rte_flow_item *item; 875 const struct rte_flow_action *action; 876 struct port_flow *pf = NULL; 877 size_t tmp; 878 size_t pad; 879 size_t off1 = 0; 880 size_t off2 = 0; 881 int err = ENOTSUP; 882 883 store: 884 item = pattern; 885 if (pf) 886 pf->pattern = (void *)&pf->data[off1]; 887 do { 888 struct rte_flow_item *dst = NULL; 889 890 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 891 !flow_item[item->type].name) 892 goto notsup; 893 if (pf) 894 dst = memcpy(pf->data + off1, item, sizeof(*item)); 895 off1 += sizeof(*item); 896 flow_item_spec_size(item, &tmp, &pad); 897 if (item->spec) { 898 if (pf) 899 dst->spec = memcpy(pf->data + off2, 900 item->spec, tmp); 901 off2 += tmp + pad; 902 } 903 if (item->last) { 904 if (pf) 905 dst->last = memcpy(pf->data + off2, 906 item->last, tmp); 907 off2 += tmp + pad; 908 } 909 if (item->mask) { 910 if (pf) 911 dst->mask = memcpy(pf->data + off2, 912 item->mask, tmp); 913 off2 += tmp + pad; 914 } 915 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 916 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 917 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 918 action = actions; 919 if (pf) 920 pf->actions = (void *)&pf->data[off1]; 921 do { 922 struct rte_flow_action *dst = NULL; 923 924 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 925 !flow_action[action->type].name) 926 goto notsup; 927 if (pf) 928 dst = memcpy(pf->data + off1, action, sizeof(*action)); 929 off1 += sizeof(*action); 930 flow_action_conf_size(action, &tmp, &pad); 931 if (action->conf) { 932 if (pf) 933 dst->conf = memcpy(pf->data + off2, 934 action->conf, tmp); 935 off2 += tmp + pad; 936 } 937 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 938 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 939 if (pf != NULL) 940 return pf; 941 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 942 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 943 pf = calloc(1, tmp + off1 + off2); 944 if (pf == NULL) 945 err = errno; 946 else { 947 *pf = (const struct port_flow){ 948 .size = tmp + off1 + off2, 949 .attr = *attr, 950 }; 951 tmp -= offsetof(struct port_flow, data); 952 off2 = tmp + off1; 953 off1 = tmp; 954 goto store; 955 } 956 notsup: 957 rte_errno = err; 958 return NULL; 959 } 960 961 /** Print a message out of a flow error. */ 962 static int 963 port_flow_complain(struct rte_flow_error *error) 964 { 965 static const char *const errstrlist[] = { 966 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 967 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 968 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 969 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 970 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 971 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 972 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 973 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 974 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 975 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 976 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 977 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 978 }; 979 const char *errstr; 980 char buf[32]; 981 int err = rte_errno; 982 983 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 984 !errstrlist[error->type]) 985 errstr = "unknown type"; 986 else 987 errstr = errstrlist[error->type]; 988 printf("Caught error type %d (%s): %s%s\n", 989 error->type, errstr, 990 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 991 error->cause), buf) : "", 992 error->message ? error->message : "(no stated reason)"); 993 return -err; 994 } 995 996 /** Validate flow rule. */ 997 int 998 port_flow_validate(portid_t port_id, 999 const struct rte_flow_attr *attr, 1000 const struct rte_flow_item *pattern, 1001 const struct rte_flow_action *actions) 1002 { 1003 struct rte_flow_error error; 1004 1005 /* Poisoning to make sure PMDs update it in case of error. */ 1006 memset(&error, 0x11, sizeof(error)); 1007 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1008 return port_flow_complain(&error); 1009 printf("Flow rule validated\n"); 1010 return 0; 1011 } 1012 1013 /** Create flow rule. */ 1014 int 1015 port_flow_create(portid_t port_id, 1016 const struct rte_flow_attr *attr, 1017 const struct rte_flow_item *pattern, 1018 const struct rte_flow_action *actions) 1019 { 1020 struct rte_flow *flow; 1021 struct rte_port *port; 1022 struct port_flow *pf; 1023 uint32_t id; 1024 struct rte_flow_error error; 1025 1026 /* Poisoning to make sure PMDs update it in case of error. */ 1027 memset(&error, 0x22, sizeof(error)); 1028 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1029 if (!flow) 1030 return port_flow_complain(&error); 1031 port = &ports[port_id]; 1032 if (port->flow_list) { 1033 if (port->flow_list->id == UINT32_MAX) { 1034 printf("Highest rule ID is already assigned, delete" 1035 " it first"); 1036 rte_flow_destroy(port_id, flow, NULL); 1037 return -ENOMEM; 1038 } 1039 id = port->flow_list->id + 1; 1040 } else 1041 id = 0; 1042 pf = port_flow_new(attr, pattern, actions); 1043 if (!pf) { 1044 int err = rte_errno; 1045 1046 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1047 rte_flow_destroy(port_id, flow, NULL); 1048 return -err; 1049 } 1050 pf->next = port->flow_list; 1051 pf->id = id; 1052 pf->flow = flow; 1053 port->flow_list = pf; 1054 printf("Flow rule #%u created\n", pf->id); 1055 return 0; 1056 } 1057 1058 /** Destroy a number of flow rules. */ 1059 int 1060 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1061 { 1062 struct rte_port *port; 1063 struct port_flow **tmp; 1064 uint32_t c = 0; 1065 int ret = 0; 1066 1067 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1068 port_id == (portid_t)RTE_PORT_ALL) 1069 return -EINVAL; 1070 port = &ports[port_id]; 1071 tmp = &port->flow_list; 1072 while (*tmp) { 1073 uint32_t i; 1074 1075 for (i = 0; i != n; ++i) { 1076 struct rte_flow_error error; 1077 struct port_flow *pf = *tmp; 1078 1079 if (rule[i] != pf->id) 1080 continue; 1081 /* 1082 * Poisoning to make sure PMDs update it in case 1083 * of error. 1084 */ 1085 memset(&error, 0x33, sizeof(error)); 1086 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1087 ret = port_flow_complain(&error); 1088 continue; 1089 } 1090 printf("Flow rule #%u destroyed\n", pf->id); 1091 *tmp = pf->next; 1092 free(pf); 1093 break; 1094 } 1095 if (i == n) 1096 tmp = &(*tmp)->next; 1097 ++c; 1098 } 1099 return ret; 1100 } 1101 1102 /** Remove all flow rules. */ 1103 int 1104 port_flow_flush(portid_t port_id) 1105 { 1106 struct rte_flow_error error; 1107 struct rte_port *port; 1108 int ret = 0; 1109 1110 /* Poisoning to make sure PMDs update it in case of error. */ 1111 memset(&error, 0x44, sizeof(error)); 1112 if (rte_flow_flush(port_id, &error)) { 1113 ret = port_flow_complain(&error); 1114 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1115 port_id == (portid_t)RTE_PORT_ALL) 1116 return ret; 1117 } 1118 port = &ports[port_id]; 1119 while (port->flow_list) { 1120 struct port_flow *pf = port->flow_list->next; 1121 1122 free(port->flow_list); 1123 port->flow_list = pf; 1124 } 1125 return ret; 1126 } 1127 1128 /** Query a flow rule. */ 1129 int 1130 port_flow_query(portid_t port_id, uint32_t rule, 1131 enum rte_flow_action_type action) 1132 { 1133 struct rte_flow_error error; 1134 struct rte_port *port; 1135 struct port_flow *pf; 1136 const char *name; 1137 union { 1138 struct rte_flow_query_count count; 1139 } query; 1140 1141 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1142 port_id == (portid_t)RTE_PORT_ALL) 1143 return -EINVAL; 1144 port = &ports[port_id]; 1145 for (pf = port->flow_list; pf; pf = pf->next) 1146 if (pf->id == rule) 1147 break; 1148 if (!pf) { 1149 printf("Flow rule #%u not found\n", rule); 1150 return -ENOENT; 1151 } 1152 if ((unsigned int)action >= RTE_DIM(flow_action) || 1153 !flow_action[action].name) 1154 name = "unknown"; 1155 else 1156 name = flow_action[action].name; 1157 switch (action) { 1158 case RTE_FLOW_ACTION_TYPE_COUNT: 1159 break; 1160 default: 1161 printf("Cannot query action type %d (%s)\n", action, name); 1162 return -ENOTSUP; 1163 } 1164 /* Poisoning to make sure PMDs update it in case of error. */ 1165 memset(&error, 0x55, sizeof(error)); 1166 memset(&query, 0, sizeof(query)); 1167 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1168 return port_flow_complain(&error); 1169 switch (action) { 1170 case RTE_FLOW_ACTION_TYPE_COUNT: 1171 printf("%s:\n" 1172 " hits_set: %u\n" 1173 " bytes_set: %u\n" 1174 " hits: %" PRIu64 "\n" 1175 " bytes: %" PRIu64 "\n", 1176 name, 1177 query.count.hits_set, 1178 query.count.bytes_set, 1179 query.count.hits, 1180 query.count.bytes); 1181 break; 1182 default: 1183 printf("Cannot display result for action type %d (%s)\n", 1184 action, name); 1185 break; 1186 } 1187 return 0; 1188 } 1189 1190 /** List flow rules. */ 1191 void 1192 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1193 { 1194 struct rte_port *port; 1195 struct port_flow *pf; 1196 struct port_flow *list = NULL; 1197 uint32_t i; 1198 1199 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1200 port_id == (portid_t)RTE_PORT_ALL) 1201 return; 1202 port = &ports[port_id]; 1203 if (!port->flow_list) 1204 return; 1205 /* Sort flows by group, priority and ID. */ 1206 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1207 struct port_flow **tmp; 1208 1209 if (n) { 1210 /* Filter out unwanted groups. */ 1211 for (i = 0; i != n; ++i) 1212 if (pf->attr.group == group[i]) 1213 break; 1214 if (i == n) 1215 continue; 1216 } 1217 tmp = &list; 1218 while (*tmp && 1219 (pf->attr.group > (*tmp)->attr.group || 1220 (pf->attr.group == (*tmp)->attr.group && 1221 pf->attr.priority > (*tmp)->attr.priority) || 1222 (pf->attr.group == (*tmp)->attr.group && 1223 pf->attr.priority == (*tmp)->attr.priority && 1224 pf->id > (*tmp)->id))) 1225 tmp = &(*tmp)->tmp; 1226 pf->tmp = *tmp; 1227 *tmp = pf; 1228 } 1229 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1230 for (pf = list; pf != NULL; pf = pf->tmp) { 1231 const struct rte_flow_item *item = pf->pattern; 1232 const struct rte_flow_action *action = pf->actions; 1233 1234 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t", 1235 pf->id, 1236 pf->attr.group, 1237 pf->attr.priority, 1238 pf->attr.ingress ? 'i' : '-', 1239 pf->attr.egress ? 'e' : '-'); 1240 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1241 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1242 printf("%s ", flow_item[item->type].name); 1243 ++item; 1244 } 1245 printf("=>"); 1246 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1247 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1248 printf(" %s", flow_action[action->type].name); 1249 ++action; 1250 } 1251 printf("\n"); 1252 } 1253 } 1254 1255 /* 1256 * RX/TX ring descriptors display functions. 1257 */ 1258 int 1259 rx_queue_id_is_invalid(queueid_t rxq_id) 1260 { 1261 if (rxq_id < nb_rxq) 1262 return 0; 1263 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1264 return 1; 1265 } 1266 1267 int 1268 tx_queue_id_is_invalid(queueid_t txq_id) 1269 { 1270 if (txq_id < nb_txq) 1271 return 0; 1272 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1273 return 1; 1274 } 1275 1276 static int 1277 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1278 { 1279 if (rxdesc_id < nb_rxd) 1280 return 0; 1281 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1282 rxdesc_id, nb_rxd); 1283 return 1; 1284 } 1285 1286 static int 1287 tx_desc_id_is_invalid(uint16_t txdesc_id) 1288 { 1289 if (txdesc_id < nb_txd) 1290 return 0; 1291 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1292 txdesc_id, nb_txd); 1293 return 1; 1294 } 1295 1296 static const struct rte_memzone * 1297 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) 1298 { 1299 char mz_name[RTE_MEMZONE_NAMESIZE]; 1300 const struct rte_memzone *mz; 1301 1302 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1303 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1304 mz = rte_memzone_lookup(mz_name); 1305 if (mz == NULL) 1306 printf("%s ring memory zoneof (port %d, queue %d) not" 1307 "found (zone name = %s\n", 1308 ring_name, port_id, q_id, mz_name); 1309 return mz; 1310 } 1311 1312 union igb_ring_dword { 1313 uint64_t dword; 1314 struct { 1315 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1316 uint32_t lo; 1317 uint32_t hi; 1318 #else 1319 uint32_t hi; 1320 uint32_t lo; 1321 #endif 1322 } words; 1323 }; 1324 1325 struct igb_ring_desc_32_bytes { 1326 union igb_ring_dword lo_dword; 1327 union igb_ring_dword hi_dword; 1328 union igb_ring_dword resv1; 1329 union igb_ring_dword resv2; 1330 }; 1331 1332 struct igb_ring_desc_16_bytes { 1333 union igb_ring_dword lo_dword; 1334 union igb_ring_dword hi_dword; 1335 }; 1336 1337 static void 1338 ring_rxd_display_dword(union igb_ring_dword dword) 1339 { 1340 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1341 (unsigned)dword.words.hi); 1342 } 1343 1344 static void 1345 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1346 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1347 uint8_t port_id, 1348 #else 1349 __rte_unused uint8_t port_id, 1350 #endif 1351 uint16_t desc_id) 1352 { 1353 struct igb_ring_desc_16_bytes *ring = 1354 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1355 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1356 struct rte_eth_dev_info dev_info; 1357 1358 memset(&dev_info, 0, sizeof(dev_info)); 1359 rte_eth_dev_info_get(port_id, &dev_info); 1360 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1361 /* 32 bytes RX descriptor, i40e only */ 1362 struct igb_ring_desc_32_bytes *ring = 1363 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1364 ring[desc_id].lo_dword.dword = 1365 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1366 ring_rxd_display_dword(ring[desc_id].lo_dword); 1367 ring[desc_id].hi_dword.dword = 1368 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1369 ring_rxd_display_dword(ring[desc_id].hi_dword); 1370 ring[desc_id].resv1.dword = 1371 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1372 ring_rxd_display_dword(ring[desc_id].resv1); 1373 ring[desc_id].resv2.dword = 1374 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1375 ring_rxd_display_dword(ring[desc_id].resv2); 1376 1377 return; 1378 } 1379 #endif 1380 /* 16 bytes RX descriptor */ 1381 ring[desc_id].lo_dword.dword = 1382 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1383 ring_rxd_display_dword(ring[desc_id].lo_dword); 1384 ring[desc_id].hi_dword.dword = 1385 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1386 ring_rxd_display_dword(ring[desc_id].hi_dword); 1387 } 1388 1389 static void 1390 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1391 { 1392 struct igb_ring_desc_16_bytes *ring; 1393 struct igb_ring_desc_16_bytes txd; 1394 1395 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1396 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1397 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1398 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1399 (unsigned)txd.lo_dword.words.lo, 1400 (unsigned)txd.lo_dword.words.hi, 1401 (unsigned)txd.hi_dword.words.lo, 1402 (unsigned)txd.hi_dword.words.hi); 1403 } 1404 1405 void 1406 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1407 { 1408 const struct rte_memzone *rx_mz; 1409 1410 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1411 return; 1412 if (rx_queue_id_is_invalid(rxq_id)) 1413 return; 1414 if (rx_desc_id_is_invalid(rxd_id)) 1415 return; 1416 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1417 if (rx_mz == NULL) 1418 return; 1419 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1420 } 1421 1422 void 1423 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1424 { 1425 const struct rte_memzone *tx_mz; 1426 1427 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1428 return; 1429 if (tx_queue_id_is_invalid(txq_id)) 1430 return; 1431 if (tx_desc_id_is_invalid(txd_id)) 1432 return; 1433 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1434 if (tx_mz == NULL) 1435 return; 1436 ring_tx_descriptor_display(tx_mz, txd_id); 1437 } 1438 1439 void 1440 fwd_lcores_config_display(void) 1441 { 1442 lcoreid_t lc_id; 1443 1444 printf("List of forwarding lcores:"); 1445 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1446 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1447 printf("\n"); 1448 } 1449 void 1450 rxtx_config_display(void) 1451 { 1452 printf(" %s packet forwarding%s - CRC stripping %s - " 1453 "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, 1454 retry_enabled == 0 ? "" : " with retry", 1455 rx_mode.hw_strip_crc ? "enabled" : "disabled", 1456 nb_pkt_per_burst); 1457 1458 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1459 printf(" packet len=%u - nb packet segments=%d\n", 1460 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1461 1462 struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; 1463 struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; 1464 1465 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1466 nb_fwd_lcores, nb_fwd_ports); 1467 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 1468 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 1469 printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 1470 rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, 1471 rx_conf->rx_thresh.wthresh); 1472 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 1473 nb_txq, nb_txd, tx_conf->tx_free_thresh); 1474 printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 1475 tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, 1476 tx_conf->tx_thresh.wthresh); 1477 printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", 1478 tx_conf->tx_rs_thresh, tx_conf->txq_flags); 1479 } 1480 1481 void 1482 port_rss_reta_info(portid_t port_id, 1483 struct rte_eth_rss_reta_entry64 *reta_conf, 1484 uint16_t nb_entries) 1485 { 1486 uint16_t i, idx, shift; 1487 int ret; 1488 1489 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1490 return; 1491 1492 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1493 if (ret != 0) { 1494 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1495 return; 1496 } 1497 1498 for (i = 0; i < nb_entries; i++) { 1499 idx = i / RTE_RETA_GROUP_SIZE; 1500 shift = i % RTE_RETA_GROUP_SIZE; 1501 if (!(reta_conf[idx].mask & (1ULL << shift))) 1502 continue; 1503 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1504 i, reta_conf[idx].reta[shift]); 1505 } 1506 } 1507 1508 /* 1509 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1510 * key of the port. 1511 */ 1512 void 1513 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1514 { 1515 struct rte_eth_rss_conf rss_conf; 1516 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1517 uint64_t rss_hf; 1518 uint8_t i; 1519 int diag; 1520 struct rte_eth_dev_info dev_info; 1521 uint8_t hash_key_size; 1522 1523 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1524 return; 1525 1526 memset(&dev_info, 0, sizeof(dev_info)); 1527 rte_eth_dev_info_get(port_id, &dev_info); 1528 if (dev_info.hash_key_size > 0 && 1529 dev_info.hash_key_size <= sizeof(rss_key)) 1530 hash_key_size = dev_info.hash_key_size; 1531 else { 1532 printf("dev_info did not provide a valid hash key size\n"); 1533 return; 1534 } 1535 1536 rss_conf.rss_hf = 0; 1537 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1538 if (!strcmp(rss_info, rss_type_table[i].str)) 1539 rss_conf.rss_hf = rss_type_table[i].rss_type; 1540 } 1541 1542 /* Get RSS hash key if asked to display it */ 1543 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1544 rss_conf.rss_key_len = hash_key_size; 1545 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1546 if (diag != 0) { 1547 switch (diag) { 1548 case -ENODEV: 1549 printf("port index %d invalid\n", port_id); 1550 break; 1551 case -ENOTSUP: 1552 printf("operation not supported by device\n"); 1553 break; 1554 default: 1555 printf("operation failed - diag=%d\n", diag); 1556 break; 1557 } 1558 return; 1559 } 1560 rss_hf = rss_conf.rss_hf; 1561 if (rss_hf == 0) { 1562 printf("RSS disabled\n"); 1563 return; 1564 } 1565 printf("RSS functions:\n "); 1566 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1567 if (rss_hf & rss_type_table[i].rss_type) 1568 printf("%s ", rss_type_table[i].str); 1569 } 1570 printf("\n"); 1571 if (!show_rss_key) 1572 return; 1573 printf("RSS key:\n"); 1574 for (i = 0; i < hash_key_size; i++) 1575 printf("%02X", rss_key[i]); 1576 printf("\n"); 1577 } 1578 1579 void 1580 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1581 uint hash_key_len) 1582 { 1583 struct rte_eth_rss_conf rss_conf; 1584 int diag; 1585 unsigned int i; 1586 1587 rss_conf.rss_key = NULL; 1588 rss_conf.rss_key_len = hash_key_len; 1589 rss_conf.rss_hf = 0; 1590 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1591 if (!strcmp(rss_type_table[i].str, rss_type)) 1592 rss_conf.rss_hf = rss_type_table[i].rss_type; 1593 } 1594 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1595 if (diag == 0) { 1596 rss_conf.rss_key = hash_key; 1597 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1598 } 1599 if (diag == 0) 1600 return; 1601 1602 switch (diag) { 1603 case -ENODEV: 1604 printf("port index %d invalid\n", port_id); 1605 break; 1606 case -ENOTSUP: 1607 printf("operation not supported by device\n"); 1608 break; 1609 default: 1610 printf("operation failed - diag=%d\n", diag); 1611 break; 1612 } 1613 } 1614 1615 /* 1616 * Setup forwarding configuration for each logical core. 1617 */ 1618 static void 1619 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1620 { 1621 streamid_t nb_fs_per_lcore; 1622 streamid_t nb_fs; 1623 streamid_t sm_id; 1624 lcoreid_t nb_extra; 1625 lcoreid_t nb_fc; 1626 lcoreid_t nb_lc; 1627 lcoreid_t lc_id; 1628 1629 nb_fs = cfg->nb_fwd_streams; 1630 nb_fc = cfg->nb_fwd_lcores; 1631 if (nb_fs <= nb_fc) { 1632 nb_fs_per_lcore = 1; 1633 nb_extra = 0; 1634 } else { 1635 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1636 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1637 } 1638 1639 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1640 sm_id = 0; 1641 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1642 fwd_lcores[lc_id]->stream_idx = sm_id; 1643 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1644 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1645 } 1646 1647 /* 1648 * Assign extra remaining streams, if any. 1649 */ 1650 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1651 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1652 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1653 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1654 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1655 } 1656 } 1657 1658 static void 1659 simple_fwd_config_setup(void) 1660 { 1661 portid_t i; 1662 portid_t j; 1663 portid_t inc = 2; 1664 1665 if (port_topology == PORT_TOPOLOGY_CHAINED || 1666 port_topology == PORT_TOPOLOGY_LOOP) { 1667 inc = 1; 1668 } else if (nb_fwd_ports % 2) { 1669 printf("\nWarning! Cannot handle an odd number of ports " 1670 "with the current port topology. Configuration " 1671 "must be changed to have an even number of ports, " 1672 "or relaunch application with " 1673 "--port-topology=chained\n\n"); 1674 } 1675 1676 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1677 cur_fwd_config.nb_fwd_streams = 1678 (streamid_t) cur_fwd_config.nb_fwd_ports; 1679 1680 /* reinitialize forwarding streams */ 1681 init_fwd_streams(); 1682 1683 /* 1684 * In the simple forwarding test, the number of forwarding cores 1685 * must be lower or equal to the number of forwarding ports. 1686 */ 1687 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1688 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1689 cur_fwd_config.nb_fwd_lcores = 1690 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1691 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1692 1693 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1694 if (port_topology != PORT_TOPOLOGY_LOOP) 1695 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1696 else 1697 j = i; 1698 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1699 fwd_streams[i]->rx_queue = 0; 1700 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1701 fwd_streams[i]->tx_queue = 0; 1702 fwd_streams[i]->peer_addr = j; 1703 fwd_streams[i]->retry_enabled = retry_enabled; 1704 1705 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1706 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1707 fwd_streams[j]->rx_queue = 0; 1708 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1709 fwd_streams[j]->tx_queue = 0; 1710 fwd_streams[j]->peer_addr = i; 1711 fwd_streams[j]->retry_enabled = retry_enabled; 1712 } 1713 } 1714 } 1715 1716 /** 1717 * For the RSS forwarding test all streams distributed over lcores. Each stream 1718 * being composed of a RX queue to poll on a RX port for input messages, 1719 * associated with a TX queue of a TX port where to send forwarded packets. 1720 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1721 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1722 * following rules: 1723 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1724 * - TxQl = RxQj 1725 */ 1726 static void 1727 rss_fwd_config_setup(void) 1728 { 1729 portid_t rxp; 1730 portid_t txp; 1731 queueid_t rxq; 1732 queueid_t nb_q; 1733 streamid_t sm_id; 1734 1735 nb_q = nb_rxq; 1736 if (nb_q > nb_txq) 1737 nb_q = nb_txq; 1738 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1739 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1740 cur_fwd_config.nb_fwd_streams = 1741 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1742 1743 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1744 cur_fwd_config.nb_fwd_lcores = 1745 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1746 1747 /* reinitialize forwarding streams */ 1748 init_fwd_streams(); 1749 1750 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1751 rxp = 0; rxq = 0; 1752 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1753 struct fwd_stream *fs; 1754 1755 fs = fwd_streams[sm_id]; 1756 1757 if ((rxp & 0x1) == 0) 1758 txp = (portid_t) (rxp + 1); 1759 else 1760 txp = (portid_t) (rxp - 1); 1761 /* 1762 * if we are in loopback, simply send stuff out through the 1763 * ingress port 1764 */ 1765 if (port_topology == PORT_TOPOLOGY_LOOP) 1766 txp = rxp; 1767 1768 fs->rx_port = fwd_ports_ids[rxp]; 1769 fs->rx_queue = rxq; 1770 fs->tx_port = fwd_ports_ids[txp]; 1771 fs->tx_queue = rxq; 1772 fs->peer_addr = fs->tx_port; 1773 fs->retry_enabled = retry_enabled; 1774 rxq = (queueid_t) (rxq + 1); 1775 if (rxq < nb_q) 1776 continue; 1777 /* 1778 * rxq == nb_q 1779 * Restart from RX queue 0 on next RX port 1780 */ 1781 rxq = 0; 1782 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1783 rxp = (portid_t) 1784 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1785 else 1786 rxp = (portid_t) (rxp + 1); 1787 } 1788 } 1789 1790 /** 1791 * For the DCB forwarding test, each core is assigned on each traffic class. 1792 * 1793 * Each core is assigned a multi-stream, each stream being composed of 1794 * a RX queue to poll on a RX port for input messages, associated with 1795 * a TX queue of a TX port where to send forwarded packets. All RX and 1796 * TX queues are mapping to the same traffic class. 1797 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 1798 * the same core 1799 */ 1800 static void 1801 dcb_fwd_config_setup(void) 1802 { 1803 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 1804 portid_t txp, rxp = 0; 1805 queueid_t txq, rxq = 0; 1806 lcoreid_t lc_id; 1807 uint16_t nb_rx_queue, nb_tx_queue; 1808 uint16_t i, j, k, sm_id = 0; 1809 uint8_t tc = 0; 1810 1811 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1812 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1813 cur_fwd_config.nb_fwd_streams = 1814 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1815 1816 /* reinitialize forwarding streams */ 1817 init_fwd_streams(); 1818 sm_id = 0; 1819 txp = 1; 1820 /* get the dcb info on the first RX and TX ports */ 1821 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1822 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1823 1824 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1825 fwd_lcores[lc_id]->stream_nb = 0; 1826 fwd_lcores[lc_id]->stream_idx = sm_id; 1827 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 1828 /* if the nb_queue is zero, means this tc is 1829 * not enabled on the POOL 1830 */ 1831 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 1832 break; 1833 k = fwd_lcores[lc_id]->stream_nb + 1834 fwd_lcores[lc_id]->stream_idx; 1835 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 1836 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 1837 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1838 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 1839 for (j = 0; j < nb_rx_queue; j++) { 1840 struct fwd_stream *fs; 1841 1842 fs = fwd_streams[k + j]; 1843 fs->rx_port = fwd_ports_ids[rxp]; 1844 fs->rx_queue = rxq + j; 1845 fs->tx_port = fwd_ports_ids[txp]; 1846 fs->tx_queue = txq + j % nb_tx_queue; 1847 fs->peer_addr = fs->tx_port; 1848 fs->retry_enabled = retry_enabled; 1849 } 1850 fwd_lcores[lc_id]->stream_nb += 1851 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1852 } 1853 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 1854 1855 tc++; 1856 if (tc < rxp_dcb_info.nb_tcs) 1857 continue; 1858 /* Restart from TC 0 on next RX port */ 1859 tc = 0; 1860 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1861 rxp = (portid_t) 1862 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1863 else 1864 rxp++; 1865 if (rxp >= nb_fwd_ports) 1866 return; 1867 /* get the dcb information on next RX and TX ports */ 1868 if ((rxp & 0x1) == 0) 1869 txp = (portid_t) (rxp + 1); 1870 else 1871 txp = (portid_t) (rxp - 1); 1872 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1873 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1874 } 1875 } 1876 1877 static void 1878 icmp_echo_config_setup(void) 1879 { 1880 portid_t rxp; 1881 queueid_t rxq; 1882 lcoreid_t lc_id; 1883 uint16_t sm_id; 1884 1885 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 1886 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 1887 (nb_txq * nb_fwd_ports); 1888 else 1889 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1890 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1891 cur_fwd_config.nb_fwd_streams = 1892 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1893 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1894 cur_fwd_config.nb_fwd_lcores = 1895 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1896 if (verbose_level > 0) { 1897 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 1898 __FUNCTION__, 1899 cur_fwd_config.nb_fwd_lcores, 1900 cur_fwd_config.nb_fwd_ports, 1901 cur_fwd_config.nb_fwd_streams); 1902 } 1903 1904 /* reinitialize forwarding streams */ 1905 init_fwd_streams(); 1906 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1907 rxp = 0; rxq = 0; 1908 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1909 if (verbose_level > 0) 1910 printf(" core=%d: \n", lc_id); 1911 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1912 struct fwd_stream *fs; 1913 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1914 fs->rx_port = fwd_ports_ids[rxp]; 1915 fs->rx_queue = rxq; 1916 fs->tx_port = fs->rx_port; 1917 fs->tx_queue = rxq; 1918 fs->peer_addr = fs->tx_port; 1919 fs->retry_enabled = retry_enabled; 1920 if (verbose_level > 0) 1921 printf(" stream=%d port=%d rxq=%d txq=%d\n", 1922 sm_id, fs->rx_port, fs->rx_queue, 1923 fs->tx_queue); 1924 rxq = (queueid_t) (rxq + 1); 1925 if (rxq == nb_rxq) { 1926 rxq = 0; 1927 rxp = (portid_t) (rxp + 1); 1928 } 1929 } 1930 } 1931 } 1932 1933 void 1934 fwd_config_setup(void) 1935 { 1936 cur_fwd_config.fwd_eng = cur_fwd_eng; 1937 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 1938 icmp_echo_config_setup(); 1939 return; 1940 } 1941 if ((nb_rxq > 1) && (nb_txq > 1)){ 1942 if (dcb_config) 1943 dcb_fwd_config_setup(); 1944 else 1945 rss_fwd_config_setup(); 1946 } 1947 else 1948 simple_fwd_config_setup(); 1949 } 1950 1951 void 1952 pkt_fwd_config_display(struct fwd_config *cfg) 1953 { 1954 struct fwd_stream *fs; 1955 lcoreid_t lc_id; 1956 streamid_t sm_id; 1957 1958 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 1959 "NUMA support %s, MP over anonymous pages %s\n", 1960 cfg->fwd_eng->fwd_mode_name, 1961 retry_enabled == 0 ? "" : " with retry", 1962 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 1963 numa_support == 1 ? "enabled" : "disabled", 1964 mp_anon != 0 ? "enabled" : "disabled"); 1965 1966 if (retry_enabled) 1967 printf("TX retry num: %u, delay between TX retries: %uus\n", 1968 burst_tx_retry_num, burst_tx_delay_time); 1969 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 1970 printf("Logical Core %u (socket %u) forwards packets on " 1971 "%d streams:", 1972 fwd_lcores_cpuids[lc_id], 1973 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 1974 fwd_lcores[lc_id]->stream_nb); 1975 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1976 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1977 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 1978 "P=%d/Q=%d (socket %u) ", 1979 fs->rx_port, fs->rx_queue, 1980 ports[fs->rx_port].socket_id, 1981 fs->tx_port, fs->tx_queue, 1982 ports[fs->tx_port].socket_id); 1983 print_ethaddr("peer=", 1984 &peer_eth_addrs[fs->peer_addr]); 1985 } 1986 printf("\n"); 1987 } 1988 printf("\n"); 1989 } 1990 1991 int 1992 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 1993 { 1994 unsigned int i; 1995 unsigned int lcore_cpuid; 1996 int record_now; 1997 1998 record_now = 0; 1999 again: 2000 for (i = 0; i < nb_lc; i++) { 2001 lcore_cpuid = lcorelist[i]; 2002 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2003 printf("lcore %u not enabled\n", lcore_cpuid); 2004 return -1; 2005 } 2006 if (lcore_cpuid == rte_get_master_lcore()) { 2007 printf("lcore %u cannot be masked on for running " 2008 "packet forwarding, which is the master lcore " 2009 "and reserved for command line parsing only\n", 2010 lcore_cpuid); 2011 return -1; 2012 } 2013 if (record_now) 2014 fwd_lcores_cpuids[i] = lcore_cpuid; 2015 } 2016 if (record_now == 0) { 2017 record_now = 1; 2018 goto again; 2019 } 2020 nb_cfg_lcores = (lcoreid_t) nb_lc; 2021 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2022 printf("previous number of forwarding cores %u - changed to " 2023 "number of configured cores %u\n", 2024 (unsigned int) nb_fwd_lcores, nb_lc); 2025 nb_fwd_lcores = (lcoreid_t) nb_lc; 2026 } 2027 2028 return 0; 2029 } 2030 2031 int 2032 set_fwd_lcores_mask(uint64_t lcoremask) 2033 { 2034 unsigned int lcorelist[64]; 2035 unsigned int nb_lc; 2036 unsigned int i; 2037 2038 if (lcoremask == 0) { 2039 printf("Invalid NULL mask of cores\n"); 2040 return -1; 2041 } 2042 nb_lc = 0; 2043 for (i = 0; i < 64; i++) { 2044 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2045 continue; 2046 lcorelist[nb_lc++] = i; 2047 } 2048 return set_fwd_lcores_list(lcorelist, nb_lc); 2049 } 2050 2051 void 2052 set_fwd_lcores_number(uint16_t nb_lc) 2053 { 2054 if (nb_lc > nb_cfg_lcores) { 2055 printf("nb fwd cores %u > %u (max. number of configured " 2056 "lcores) - ignored\n", 2057 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2058 return; 2059 } 2060 nb_fwd_lcores = (lcoreid_t) nb_lc; 2061 printf("Number of forwarding cores set to %u\n", 2062 (unsigned int) nb_fwd_lcores); 2063 } 2064 2065 void 2066 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2067 { 2068 unsigned int i; 2069 portid_t port_id; 2070 int record_now; 2071 2072 record_now = 0; 2073 again: 2074 for (i = 0; i < nb_pt; i++) { 2075 port_id = (portid_t) portlist[i]; 2076 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2077 return; 2078 if (record_now) 2079 fwd_ports_ids[i] = port_id; 2080 } 2081 if (record_now == 0) { 2082 record_now = 1; 2083 goto again; 2084 } 2085 nb_cfg_ports = (portid_t) nb_pt; 2086 if (nb_fwd_ports != (portid_t) nb_pt) { 2087 printf("previous number of forwarding ports %u - changed to " 2088 "number of configured ports %u\n", 2089 (unsigned int) nb_fwd_ports, nb_pt); 2090 nb_fwd_ports = (portid_t) nb_pt; 2091 } 2092 } 2093 2094 void 2095 set_fwd_ports_mask(uint64_t portmask) 2096 { 2097 unsigned int portlist[64]; 2098 unsigned int nb_pt; 2099 unsigned int i; 2100 2101 if (portmask == 0) { 2102 printf("Invalid NULL mask of ports\n"); 2103 return; 2104 } 2105 nb_pt = 0; 2106 for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) { 2107 if (! ((uint64_t)(1ULL << i) & portmask)) 2108 continue; 2109 portlist[nb_pt++] = i; 2110 } 2111 set_fwd_ports_list(portlist, nb_pt); 2112 } 2113 2114 void 2115 set_fwd_ports_number(uint16_t nb_pt) 2116 { 2117 if (nb_pt > nb_cfg_ports) { 2118 printf("nb fwd ports %u > %u (number of configured " 2119 "ports) - ignored\n", 2120 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2121 return; 2122 } 2123 nb_fwd_ports = (portid_t) nb_pt; 2124 printf("Number of forwarding ports set to %u\n", 2125 (unsigned int) nb_fwd_ports); 2126 } 2127 2128 int 2129 port_is_forwarding(portid_t port_id) 2130 { 2131 unsigned int i; 2132 2133 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2134 return -1; 2135 2136 for (i = 0; i < nb_fwd_ports; i++) { 2137 if (fwd_ports_ids[i] == port_id) 2138 return 1; 2139 } 2140 2141 return 0; 2142 } 2143 2144 void 2145 set_nb_pkt_per_burst(uint16_t nb) 2146 { 2147 if (nb > MAX_PKT_BURST) { 2148 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2149 " ignored\n", 2150 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2151 return; 2152 } 2153 nb_pkt_per_burst = nb; 2154 printf("Number of packets per burst set to %u\n", 2155 (unsigned int) nb_pkt_per_burst); 2156 } 2157 2158 static const char * 2159 tx_split_get_name(enum tx_pkt_split split) 2160 { 2161 uint32_t i; 2162 2163 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2164 if (tx_split_name[i].split == split) 2165 return tx_split_name[i].name; 2166 } 2167 return NULL; 2168 } 2169 2170 void 2171 set_tx_pkt_split(const char *name) 2172 { 2173 uint32_t i; 2174 2175 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2176 if (strcmp(tx_split_name[i].name, name) == 0) { 2177 tx_pkt_split = tx_split_name[i].split; 2178 return; 2179 } 2180 } 2181 printf("unknown value: \"%s\"\n", name); 2182 } 2183 2184 void 2185 show_tx_pkt_segments(void) 2186 { 2187 uint32_t i, n; 2188 const char *split; 2189 2190 n = tx_pkt_nb_segs; 2191 split = tx_split_get_name(tx_pkt_split); 2192 2193 printf("Number of segments: %u\n", n); 2194 printf("Segment sizes: "); 2195 for (i = 0; i != n - 1; i++) 2196 printf("%hu,", tx_pkt_seg_lengths[i]); 2197 printf("%hu\n", tx_pkt_seg_lengths[i]); 2198 printf("Split packet: %s\n", split); 2199 } 2200 2201 void 2202 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2203 { 2204 uint16_t tx_pkt_len; 2205 unsigned i; 2206 2207 if (nb_segs >= (unsigned) nb_txd) { 2208 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2209 nb_segs, (unsigned int) nb_txd); 2210 return; 2211 } 2212 2213 /* 2214 * Check that each segment length is greater or equal than 2215 * the mbuf data sise. 2216 * Check also that the total packet length is greater or equal than the 2217 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2218 */ 2219 tx_pkt_len = 0; 2220 for (i = 0; i < nb_segs; i++) { 2221 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2222 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2223 i, seg_lengths[i], (unsigned) mbuf_data_size); 2224 return; 2225 } 2226 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2227 } 2228 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2229 printf("total packet length=%u < %d - give up\n", 2230 (unsigned) tx_pkt_len, 2231 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2232 return; 2233 } 2234 2235 for (i = 0; i < nb_segs; i++) 2236 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2237 2238 tx_pkt_length = tx_pkt_len; 2239 tx_pkt_nb_segs = (uint8_t) nb_segs; 2240 } 2241 2242 char* 2243 list_pkt_forwarding_modes(void) 2244 { 2245 static char fwd_modes[128] = ""; 2246 const char *separator = "|"; 2247 struct fwd_engine *fwd_eng; 2248 unsigned i = 0; 2249 2250 if (strlen (fwd_modes) == 0) { 2251 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2252 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2253 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2254 strncat(fwd_modes, separator, 2255 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2256 } 2257 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2258 } 2259 2260 return fwd_modes; 2261 } 2262 2263 char* 2264 list_pkt_forwarding_retry_modes(void) 2265 { 2266 static char fwd_modes[128] = ""; 2267 const char *separator = "|"; 2268 struct fwd_engine *fwd_eng; 2269 unsigned i = 0; 2270 2271 if (strlen(fwd_modes) == 0) { 2272 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2273 if (fwd_eng == &rx_only_engine) 2274 continue; 2275 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2276 sizeof(fwd_modes) - 2277 strlen(fwd_modes) - 1); 2278 strncat(fwd_modes, separator, 2279 sizeof(fwd_modes) - 2280 strlen(fwd_modes) - 1); 2281 } 2282 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2283 } 2284 2285 return fwd_modes; 2286 } 2287 2288 void 2289 set_pkt_forwarding_mode(const char *fwd_mode_name) 2290 { 2291 struct fwd_engine *fwd_eng; 2292 unsigned i; 2293 2294 i = 0; 2295 while ((fwd_eng = fwd_engines[i]) != NULL) { 2296 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2297 printf("Set %s packet forwarding mode%s\n", 2298 fwd_mode_name, 2299 retry_enabled == 0 ? "" : " with retry"); 2300 cur_fwd_eng = fwd_eng; 2301 return; 2302 } 2303 i++; 2304 } 2305 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2306 } 2307 2308 void 2309 set_verbose_level(uint16_t vb_level) 2310 { 2311 printf("Change verbose level from %u to %u\n", 2312 (unsigned int) verbose_level, (unsigned int) vb_level); 2313 verbose_level = vb_level; 2314 } 2315 2316 void 2317 vlan_extend_set(portid_t port_id, int on) 2318 { 2319 int diag; 2320 int vlan_offload; 2321 2322 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2323 return; 2324 2325 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2326 2327 if (on) 2328 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2329 else 2330 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2331 2332 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2333 if (diag < 0) 2334 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2335 "diag=%d\n", port_id, on, diag); 2336 } 2337 2338 void 2339 rx_vlan_strip_set(portid_t port_id, int on) 2340 { 2341 int diag; 2342 int vlan_offload; 2343 2344 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2345 return; 2346 2347 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2348 2349 if (on) 2350 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2351 else 2352 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2353 2354 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2355 if (diag < 0) 2356 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2357 "diag=%d\n", port_id, on, diag); 2358 } 2359 2360 void 2361 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2362 { 2363 int diag; 2364 2365 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2366 return; 2367 2368 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2369 if (diag < 0) 2370 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2371 "diag=%d\n", port_id, queue_id, on, diag); 2372 } 2373 2374 void 2375 rx_vlan_filter_set(portid_t port_id, int on) 2376 { 2377 int diag; 2378 int vlan_offload; 2379 2380 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2381 return; 2382 2383 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2384 2385 if (on) 2386 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2387 else 2388 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2389 2390 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2391 if (diag < 0) 2392 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2393 "diag=%d\n", port_id, on, diag); 2394 } 2395 2396 int 2397 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2398 { 2399 int diag; 2400 2401 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2402 return 1; 2403 if (vlan_id_is_invalid(vlan_id)) 2404 return 1; 2405 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2406 if (diag == 0) 2407 return 0; 2408 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2409 "diag=%d\n", 2410 port_id, vlan_id, on, diag); 2411 return -1; 2412 } 2413 2414 void 2415 rx_vlan_all_filter_set(portid_t port_id, int on) 2416 { 2417 uint16_t vlan_id; 2418 2419 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2420 return; 2421 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2422 if (rx_vft_set(port_id, vlan_id, on)) 2423 break; 2424 } 2425 } 2426 2427 void 2428 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2429 { 2430 int diag; 2431 2432 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2433 return; 2434 2435 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2436 if (diag == 0) 2437 return; 2438 2439 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2440 "diag=%d\n", 2441 port_id, vlan_type, tp_id, diag); 2442 } 2443 2444 void 2445 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2446 { 2447 int vlan_offload; 2448 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2449 return; 2450 if (vlan_id_is_invalid(vlan_id)) 2451 return; 2452 2453 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2454 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2455 printf("Error, as QinQ has been enabled.\n"); 2456 return; 2457 } 2458 2459 tx_vlan_reset(port_id); 2460 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; 2461 ports[port_id].tx_vlan_id = vlan_id; 2462 } 2463 2464 void 2465 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2466 { 2467 int vlan_offload; 2468 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2469 return; 2470 if (vlan_id_is_invalid(vlan_id)) 2471 return; 2472 if (vlan_id_is_invalid(vlan_id_outer)) 2473 return; 2474 2475 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2476 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2477 printf("Error, as QinQ hasn't been enabled.\n"); 2478 return; 2479 } 2480 2481 tx_vlan_reset(port_id); 2482 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; 2483 ports[port_id].tx_vlan_id = vlan_id; 2484 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2485 } 2486 2487 void 2488 tx_vlan_reset(portid_t port_id) 2489 { 2490 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2491 return; 2492 ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | 2493 TESTPMD_TX_OFFLOAD_INSERT_QINQ); 2494 ports[port_id].tx_vlan_id = 0; 2495 ports[port_id].tx_vlan_id_outer = 0; 2496 } 2497 2498 void 2499 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2500 { 2501 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2502 return; 2503 2504 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2505 } 2506 2507 void 2508 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2509 { 2510 uint16_t i; 2511 uint8_t existing_mapping_found = 0; 2512 2513 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2514 return; 2515 2516 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2517 return; 2518 2519 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2520 printf("map_value not in required range 0..%d\n", 2521 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2522 return; 2523 } 2524 2525 if (!is_rx) { /*then tx*/ 2526 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2527 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2528 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2529 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2530 existing_mapping_found = 1; 2531 break; 2532 } 2533 } 2534 if (!existing_mapping_found) { /* A new additional mapping... */ 2535 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2536 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2537 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2538 nb_tx_queue_stats_mappings++; 2539 } 2540 } 2541 else { /*rx*/ 2542 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2543 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2544 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2545 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2546 existing_mapping_found = 1; 2547 break; 2548 } 2549 } 2550 if (!existing_mapping_found) { /* A new additional mapping... */ 2551 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2552 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2553 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2554 nb_rx_queue_stats_mappings++; 2555 } 2556 } 2557 } 2558 2559 static inline void 2560 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2561 { 2562 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 2563 2564 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2565 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 2566 " tunnel_id: 0x%08x", 2567 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2568 rte_be_to_cpu_32(mask->tunnel_id_mask)); 2569 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2570 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 2571 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 2572 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 2573 2574 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 2575 rte_be_to_cpu_16(mask->src_port_mask), 2576 rte_be_to_cpu_16(mask->dst_port_mask)); 2577 2578 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2579 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 2580 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 2581 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 2582 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 2583 2584 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2585 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 2586 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 2587 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 2588 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 2589 } 2590 2591 printf("\n"); 2592 } 2593 2594 static inline void 2595 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2596 { 2597 struct rte_eth_flex_payload_cfg *cfg; 2598 uint32_t i, j; 2599 2600 for (i = 0; i < flex_conf->nb_payloads; i++) { 2601 cfg = &flex_conf->flex_set[i]; 2602 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 2603 printf("\n RAW: "); 2604 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 2605 printf("\n L2_PAYLOAD: "); 2606 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 2607 printf("\n L3_PAYLOAD: "); 2608 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 2609 printf("\n L4_PAYLOAD: "); 2610 else 2611 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 2612 for (j = 0; j < num; j++) 2613 printf(" %-5u", cfg->src_offset[j]); 2614 } 2615 printf("\n"); 2616 } 2617 2618 static char * 2619 flowtype_to_str(uint16_t flow_type) 2620 { 2621 struct flow_type_info { 2622 char str[32]; 2623 uint16_t ftype; 2624 }; 2625 2626 uint8_t i; 2627 static struct flow_type_info flowtype_str_table[] = { 2628 {"raw", RTE_ETH_FLOW_RAW}, 2629 {"ipv4", RTE_ETH_FLOW_IPV4}, 2630 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 2631 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 2632 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 2633 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 2634 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 2635 {"ipv6", RTE_ETH_FLOW_IPV6}, 2636 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 2637 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 2638 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 2639 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 2640 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 2641 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 2642 {"port", RTE_ETH_FLOW_PORT}, 2643 {"vxlan", RTE_ETH_FLOW_VXLAN}, 2644 {"geneve", RTE_ETH_FLOW_GENEVE}, 2645 {"nvgre", RTE_ETH_FLOW_NVGRE}, 2646 }; 2647 2648 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 2649 if (flowtype_str_table[i].ftype == flow_type) 2650 return flowtype_str_table[i].str; 2651 } 2652 2653 return NULL; 2654 } 2655 2656 static inline void 2657 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2658 { 2659 struct rte_eth_fdir_flex_mask *mask; 2660 uint32_t i, j; 2661 char *p; 2662 2663 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 2664 mask = &flex_conf->flex_mask[i]; 2665 p = flowtype_to_str(mask->flow_type); 2666 printf("\n %s:\t", p ? p : "unknown"); 2667 for (j = 0; j < num; j++) 2668 printf(" %02x", mask->mask[j]); 2669 } 2670 printf("\n"); 2671 } 2672 2673 static inline void 2674 print_fdir_flow_type(uint32_t flow_types_mask) 2675 { 2676 int i; 2677 char *p; 2678 2679 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 2680 if (!(flow_types_mask & (1 << i))) 2681 continue; 2682 p = flowtype_to_str(i); 2683 if (p) 2684 printf(" %s", p); 2685 else 2686 printf(" unknown"); 2687 } 2688 printf("\n"); 2689 } 2690 2691 void 2692 fdir_get_infos(portid_t port_id) 2693 { 2694 struct rte_eth_fdir_stats fdir_stat; 2695 struct rte_eth_fdir_info fdir_info; 2696 int ret; 2697 2698 static const char *fdir_stats_border = "########################"; 2699 2700 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2701 return; 2702 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 2703 if (ret < 0) { 2704 printf("\n FDIR is not supported on port %-2d\n", 2705 port_id); 2706 return; 2707 } 2708 2709 memset(&fdir_info, 0, sizeof(fdir_info)); 2710 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2711 RTE_ETH_FILTER_INFO, &fdir_info); 2712 memset(&fdir_stat, 0, sizeof(fdir_stat)); 2713 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2714 RTE_ETH_FILTER_STATS, &fdir_stat); 2715 printf("\n %s FDIR infos for port %-2d %s\n", 2716 fdir_stats_border, port_id, fdir_stats_border); 2717 printf(" MODE: "); 2718 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 2719 printf(" PERFECT\n"); 2720 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 2721 printf(" PERFECT-MAC-VLAN\n"); 2722 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2723 printf(" PERFECT-TUNNEL\n"); 2724 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 2725 printf(" SIGNATURE\n"); 2726 else 2727 printf(" DISABLE\n"); 2728 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 2729 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 2730 printf(" SUPPORTED FLOW TYPE: "); 2731 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 2732 } 2733 printf(" FLEX PAYLOAD INFO:\n"); 2734 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 2735 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 2736 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 2737 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 2738 fdir_info.flex_payload_unit, 2739 fdir_info.max_flex_payload_segment_num, 2740 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 2741 printf(" MASK: "); 2742 print_fdir_mask(&fdir_info.mask); 2743 if (fdir_info.flex_conf.nb_payloads > 0) { 2744 printf(" FLEX PAYLOAD SRC OFFSET:"); 2745 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2746 } 2747 if (fdir_info.flex_conf.nb_flexmasks > 0) { 2748 printf(" FLEX MASK CFG:"); 2749 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2750 } 2751 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 2752 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 2753 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 2754 fdir_info.guarant_spc, fdir_info.best_spc); 2755 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 2756 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 2757 " add: %-10"PRIu64" remove: %"PRIu64"\n" 2758 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 2759 fdir_stat.collision, fdir_stat.free, 2760 fdir_stat.maxhash, fdir_stat.maxlen, 2761 fdir_stat.add, fdir_stat.remove, 2762 fdir_stat.f_add, fdir_stat.f_remove); 2763 printf(" %s############################%s\n", 2764 fdir_stats_border, fdir_stats_border); 2765 } 2766 2767 void 2768 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 2769 { 2770 struct rte_port *port; 2771 struct rte_eth_fdir_flex_conf *flex_conf; 2772 int i, idx = 0; 2773 2774 port = &ports[port_id]; 2775 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2776 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 2777 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 2778 idx = i; 2779 break; 2780 } 2781 } 2782 if (i >= RTE_ETH_FLOW_MAX) { 2783 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 2784 idx = flex_conf->nb_flexmasks; 2785 flex_conf->nb_flexmasks++; 2786 } else { 2787 printf("The flex mask table is full. Can not set flex" 2788 " mask for flow_type(%u).", cfg->flow_type); 2789 return; 2790 } 2791 } 2792 (void)rte_memcpy(&flex_conf->flex_mask[idx], 2793 cfg, 2794 sizeof(struct rte_eth_fdir_flex_mask)); 2795 } 2796 2797 void 2798 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 2799 { 2800 struct rte_port *port; 2801 struct rte_eth_fdir_flex_conf *flex_conf; 2802 int i, idx = 0; 2803 2804 port = &ports[port_id]; 2805 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2806 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 2807 if (cfg->type == flex_conf->flex_set[i].type) { 2808 idx = i; 2809 break; 2810 } 2811 } 2812 if (i >= RTE_ETH_PAYLOAD_MAX) { 2813 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 2814 idx = flex_conf->nb_payloads; 2815 flex_conf->nb_payloads++; 2816 } else { 2817 printf("The flex payload table is full. Can not set" 2818 " flex payload for type(%u).", cfg->type); 2819 return; 2820 } 2821 } 2822 (void)rte_memcpy(&flex_conf->flex_set[idx], 2823 cfg, 2824 sizeof(struct rte_eth_flex_payload_cfg)); 2825 2826 } 2827 2828 #ifdef RTE_LIBRTE_IXGBE_PMD 2829 void 2830 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 2831 { 2832 int diag; 2833 2834 if (is_rx) 2835 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 2836 else 2837 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 2838 2839 if (diag == 0) 2840 return; 2841 if(is_rx) 2842 printf("rte_pmd_ixgbe_set_vf_rx for port_id=%d failed " 2843 "diag=%d\n", port_id, diag); 2844 else 2845 printf("rte_pmd_ixgbe_set_vf_tx for port_id=%d failed " 2846 "diag=%d\n", port_id, diag); 2847 2848 } 2849 2850 void 2851 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on) 2852 { 2853 int diag; 2854 2855 diag = rte_pmd_ixgbe_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on); 2856 2857 if (diag == 0) 2858 return; 2859 printf("rte_pmd_ixgbe_set_vf_vlan_filter for port_id=%d failed " 2860 "diag=%d\n", port_id, diag); 2861 } 2862 #endif 2863 2864 int 2865 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 2866 { 2867 int diag; 2868 struct rte_eth_link link; 2869 2870 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2871 return 1; 2872 rte_eth_link_get_nowait(port_id, &link); 2873 if (rate > link.link_speed) { 2874 printf("Invalid rate value:%u bigger than link speed: %u\n", 2875 rate, link.link_speed); 2876 return 1; 2877 } 2878 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 2879 if (diag == 0) 2880 return diag; 2881 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 2882 port_id, diag); 2883 return diag; 2884 } 2885 2886 #ifdef RTE_LIBRTE_IXGBE_PMD 2887 int 2888 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 2889 { 2890 int diag; 2891 2892 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, q_msk); 2893 if (diag == 0) 2894 return diag; 2895 printf("rte_pmd_ixgbe_set_vf_rate_limit for port_id=%d failed diag=%d\n", 2896 port_id, diag); 2897 return diag; 2898 } 2899 #endif 2900 2901 /* 2902 * Functions to manage the set of filtered Multicast MAC addresses. 2903 * 2904 * A pool of filtered multicast MAC addresses is associated with each port. 2905 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 2906 * The address of the pool and the number of valid multicast MAC addresses 2907 * recorded in the pool are stored in the fields "mc_addr_pool" and 2908 * "mc_addr_nb" of the "rte_port" data structure. 2909 * 2910 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 2911 * to be supplied a contiguous array of multicast MAC addresses. 2912 * To comply with this constraint, the set of multicast addresses recorded 2913 * into the pool are systematically compacted at the beginning of the pool. 2914 * Hence, when a multicast address is removed from the pool, all following 2915 * addresses, if any, are copied back to keep the set contiguous. 2916 */ 2917 #define MCAST_POOL_INC 32 2918 2919 static int 2920 mcast_addr_pool_extend(struct rte_port *port) 2921 { 2922 struct ether_addr *mc_pool; 2923 size_t mc_pool_size; 2924 2925 /* 2926 * If a free entry is available at the end of the pool, just 2927 * increment the number of recorded multicast addresses. 2928 */ 2929 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 2930 port->mc_addr_nb++; 2931 return 0; 2932 } 2933 2934 /* 2935 * [re]allocate a pool with MCAST_POOL_INC more entries. 2936 * The previous test guarantees that port->mc_addr_nb is a multiple 2937 * of MCAST_POOL_INC. 2938 */ 2939 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 2940 MCAST_POOL_INC); 2941 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 2942 mc_pool_size); 2943 if (mc_pool == NULL) { 2944 printf("allocation of pool of %u multicast addresses failed\n", 2945 port->mc_addr_nb + MCAST_POOL_INC); 2946 return -ENOMEM; 2947 } 2948 2949 port->mc_addr_pool = mc_pool; 2950 port->mc_addr_nb++; 2951 return 0; 2952 2953 } 2954 2955 static void 2956 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 2957 { 2958 port->mc_addr_nb--; 2959 if (addr_idx == port->mc_addr_nb) { 2960 /* No need to recompact the set of multicast addressses. */ 2961 if (port->mc_addr_nb == 0) { 2962 /* free the pool of multicast addresses. */ 2963 free(port->mc_addr_pool); 2964 port->mc_addr_pool = NULL; 2965 } 2966 return; 2967 } 2968 memmove(&port->mc_addr_pool[addr_idx], 2969 &port->mc_addr_pool[addr_idx + 1], 2970 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 2971 } 2972 2973 static void 2974 eth_port_multicast_addr_list_set(uint8_t port_id) 2975 { 2976 struct rte_port *port; 2977 int diag; 2978 2979 port = &ports[port_id]; 2980 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 2981 port->mc_addr_nb); 2982 if (diag == 0) 2983 return; 2984 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 2985 port->mc_addr_nb, port_id, -diag); 2986 } 2987 2988 void 2989 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr) 2990 { 2991 struct rte_port *port; 2992 uint32_t i; 2993 2994 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2995 return; 2996 2997 port = &ports[port_id]; 2998 2999 /* 3000 * Check that the added multicast MAC address is not already recorded 3001 * in the pool of multicast addresses. 3002 */ 3003 for (i = 0; i < port->mc_addr_nb; i++) { 3004 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3005 printf("multicast address already filtered by port\n"); 3006 return; 3007 } 3008 } 3009 3010 if (mcast_addr_pool_extend(port) != 0) 3011 return; 3012 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3013 eth_port_multicast_addr_list_set(port_id); 3014 } 3015 3016 void 3017 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr) 3018 { 3019 struct rte_port *port; 3020 uint32_t i; 3021 3022 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3023 return; 3024 3025 port = &ports[port_id]; 3026 3027 /* 3028 * Search the pool of multicast MAC addresses for the removed address. 3029 */ 3030 for (i = 0; i < port->mc_addr_nb; i++) { 3031 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3032 break; 3033 } 3034 if (i == port->mc_addr_nb) { 3035 printf("multicast address not filtered by port %d\n", port_id); 3036 return; 3037 } 3038 3039 mcast_addr_pool_remove(port, i); 3040 eth_port_multicast_addr_list_set(port_id); 3041 } 3042 3043 void 3044 port_dcb_info_display(uint8_t port_id) 3045 { 3046 struct rte_eth_dcb_info dcb_info; 3047 uint16_t i; 3048 int ret; 3049 static const char *border = "================"; 3050 3051 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3052 return; 3053 3054 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3055 if (ret) { 3056 printf("\n Failed to get dcb infos on port %-2d\n", 3057 port_id); 3058 return; 3059 } 3060 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3061 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3062 printf("\n TC : "); 3063 for (i = 0; i < dcb_info.nb_tcs; i++) 3064 printf("\t%4d", i); 3065 printf("\n Priority : "); 3066 for (i = 0; i < dcb_info.nb_tcs; i++) 3067 printf("\t%4d", dcb_info.prio_tc[i]); 3068 printf("\n BW percent :"); 3069 for (i = 0; i < dcb_info.nb_tcs; i++) 3070 printf("\t%4d%%", dcb_info.tc_bws[i]); 3071 printf("\n RXQ base : "); 3072 for (i = 0; i < dcb_info.nb_tcs; i++) 3073 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3074 printf("\n RXQ number :"); 3075 for (i = 0; i < dcb_info.nb_tcs; i++) 3076 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3077 printf("\n TXQ base : "); 3078 for (i = 0; i < dcb_info.nb_tcs; i++) 3079 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3080 printf("\n TXQ number :"); 3081 for (i = 0; i < dcb_info.nb_tcs; i++) 3082 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3083 printf("\n"); 3084 } 3085