1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 /* BSD LICENSE 34 * 35 * Copyright 2013-2014 6WIND S.A. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name of 6WIND S.A. nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 62 */ 63 64 #include <stdarg.h> 65 #include <errno.h> 66 #include <stdio.h> 67 #include <string.h> 68 #include <stdarg.h> 69 #include <stdint.h> 70 #include <inttypes.h> 71 72 #include <sys/queue.h> 73 74 #include <rte_common.h> 75 #include <rte_byteorder.h> 76 #include <rte_debug.h> 77 #include <rte_log.h> 78 #include <rte_memory.h> 79 #include <rte_memcpy.h> 80 #include <rte_memzone.h> 81 #include <rte_launch.h> 82 #include <rte_eal.h> 83 #include <rte_per_lcore.h> 84 #include <rte_lcore.h> 85 #include <rte_atomic.h> 86 #include <rte_branch_prediction.h> 87 #include <rte_mempool.h> 88 #include <rte_mbuf.h> 89 #include <rte_interrupts.h> 90 #include <rte_pci.h> 91 #include <rte_ether.h> 92 #include <rte_ethdev.h> 93 #include <rte_string_fns.h> 94 #include <rte_cycles.h> 95 #include <rte_flow.h> 96 #include <rte_errno.h> 97 98 #include "testpmd.h" 99 100 static char *flowtype_to_str(uint16_t flow_type); 101 102 static const struct { 103 enum tx_pkt_split split; 104 const char *name; 105 } tx_split_name[] = { 106 { 107 .split = TX_PKT_SPLIT_OFF, 108 .name = "off", 109 }, 110 { 111 .split = TX_PKT_SPLIT_ON, 112 .name = "on", 113 }, 114 { 115 .split = TX_PKT_SPLIT_RND, 116 .name = "rand", 117 }, 118 }; 119 120 struct rss_type_info { 121 char str[32]; 122 uint64_t rss_type; 123 }; 124 125 static const struct rss_type_info rss_type_table[] = { 126 { "ipv4", ETH_RSS_IPV4 }, 127 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 128 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 129 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 130 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 131 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 132 { "ipv6", ETH_RSS_IPV6 }, 133 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 134 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 135 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 136 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 137 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 138 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 139 { "ipv6-ex", ETH_RSS_IPV6_EX }, 140 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 141 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 142 { "port", ETH_RSS_PORT }, 143 { "vxlan", ETH_RSS_VXLAN }, 144 { "geneve", ETH_RSS_GENEVE }, 145 { "nvgre", ETH_RSS_NVGRE }, 146 147 }; 148 149 static void 150 print_ethaddr(const char *name, struct ether_addr *eth_addr) 151 { 152 char buf[ETHER_ADDR_FMT_SIZE]; 153 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 154 printf("%s%s", name, buf); 155 } 156 157 void 158 nic_stats_display(portid_t port_id) 159 { 160 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 161 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 162 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 163 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 164 uint64_t mpps_rx, mpps_tx; 165 struct rte_eth_stats stats; 166 struct rte_port *port = &ports[port_id]; 167 uint8_t i; 168 portid_t pid; 169 170 static const char *nic_stats_border = "########################"; 171 172 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 173 printf("Valid port range is [0"); 174 FOREACH_PORT(pid, ports) 175 printf(", %d", pid); 176 printf("]\n"); 177 return; 178 } 179 rte_eth_stats_get(port_id, &stats); 180 printf("\n %s NIC statistics for port %-2d %s\n", 181 nic_stats_border, port_id, nic_stats_border); 182 183 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 184 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 185 "%-"PRIu64"\n", 186 stats.ipackets, stats.imissed, stats.ibytes); 187 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 188 printf(" RX-nombuf: %-10"PRIu64"\n", 189 stats.rx_nombuf); 190 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 191 "%-"PRIu64"\n", 192 stats.opackets, stats.oerrors, stats.obytes); 193 } 194 else { 195 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 196 " RX-bytes: %10"PRIu64"\n", 197 stats.ipackets, stats.ierrors, stats.ibytes); 198 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 199 printf(" RX-nombuf: %10"PRIu64"\n", 200 stats.rx_nombuf); 201 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 202 " TX-bytes: %10"PRIu64"\n", 203 stats.opackets, stats.oerrors, stats.obytes); 204 } 205 206 if (port->rx_queue_stats_mapping_enabled) { 207 printf("\n"); 208 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 209 printf(" Stats reg %2d RX-packets: %10"PRIu64 210 " RX-errors: %10"PRIu64 211 " RX-bytes: %10"PRIu64"\n", 212 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 213 } 214 } 215 if (port->tx_queue_stats_mapping_enabled) { 216 printf("\n"); 217 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 218 printf(" Stats reg %2d TX-packets: %10"PRIu64 219 " TX-bytes: %10"PRIu64"\n", 220 i, stats.q_opackets[i], stats.q_obytes[i]); 221 } 222 } 223 224 diff_cycles = prev_cycles[port_id]; 225 prev_cycles[port_id] = rte_rdtsc(); 226 if (diff_cycles > 0) 227 diff_cycles = prev_cycles[port_id] - diff_cycles; 228 229 diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id]; 230 diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id]; 231 prev_pkts_rx[port_id] = stats.ipackets; 232 prev_pkts_tx[port_id] = stats.opackets; 233 mpps_rx = diff_cycles > 0 ? 234 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 235 mpps_tx = diff_cycles > 0 ? 236 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 237 printf("\n Throughput (since last show)\n"); 238 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 239 mpps_rx, mpps_tx); 240 241 printf(" %s############################%s\n", 242 nic_stats_border, nic_stats_border); 243 } 244 245 void 246 nic_stats_clear(portid_t port_id) 247 { 248 portid_t pid; 249 250 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 251 printf("Valid port range is [0"); 252 FOREACH_PORT(pid, ports) 253 printf(", %d", pid); 254 printf("]\n"); 255 return; 256 } 257 rte_eth_stats_reset(port_id); 258 printf("\n NIC statistics for port %d cleared\n", port_id); 259 } 260 261 void 262 nic_xstats_display(portid_t port_id) 263 { 264 struct rte_eth_xstat *xstats; 265 int cnt_xstats, idx_xstat; 266 struct rte_eth_xstat_name *xstats_names; 267 268 printf("###### NIC extended statistics for port %-2d\n", port_id); 269 if (!rte_eth_dev_is_valid_port(port_id)) { 270 printf("Error: Invalid port number %i\n", port_id); 271 return; 272 } 273 274 /* Get count */ 275 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 276 if (cnt_xstats < 0) { 277 printf("Error: Cannot get count of xstats\n"); 278 return; 279 } 280 281 /* Get id-name lookup table */ 282 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 283 if (xstats_names == NULL) { 284 printf("Cannot allocate memory for xstats lookup\n"); 285 return; 286 } 287 if (cnt_xstats != rte_eth_xstats_get_names( 288 port_id, xstats_names, cnt_xstats)) { 289 printf("Error: Cannot get xstats lookup\n"); 290 free(xstats_names); 291 return; 292 } 293 294 /* Get stats themselves */ 295 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 296 if (xstats == NULL) { 297 printf("Cannot allocate memory for xstats\n"); 298 free(xstats_names); 299 return; 300 } 301 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 302 printf("Error: Unable to get xstats\n"); 303 free(xstats_names); 304 free(xstats); 305 return; 306 } 307 308 /* Display xstats */ 309 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) 310 printf("%s: %"PRIu64"\n", 311 xstats_names[idx_xstat].name, 312 xstats[idx_xstat].value); 313 free(xstats_names); 314 free(xstats); 315 } 316 317 void 318 nic_xstats_clear(portid_t port_id) 319 { 320 rte_eth_xstats_reset(port_id); 321 } 322 323 void 324 nic_stats_mapping_display(portid_t port_id) 325 { 326 struct rte_port *port = &ports[port_id]; 327 uint16_t i; 328 portid_t pid; 329 330 static const char *nic_stats_mapping_border = "########################"; 331 332 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 333 printf("Valid port range is [0"); 334 FOREACH_PORT(pid, ports) 335 printf(", %d", pid); 336 printf("]\n"); 337 return; 338 } 339 340 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 341 printf("Port id %d - either does not support queue statistic mapping or" 342 " no queue statistic mapping set\n", port_id); 343 return; 344 } 345 346 printf("\n %s NIC statistics mapping for port %-2d %s\n", 347 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 348 349 if (port->rx_queue_stats_mapping_enabled) { 350 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 351 if (rx_queue_stats_mappings[i].port_id == port_id) { 352 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 353 rx_queue_stats_mappings[i].queue_id, 354 rx_queue_stats_mappings[i].stats_counter_id); 355 } 356 } 357 printf("\n"); 358 } 359 360 361 if (port->tx_queue_stats_mapping_enabled) { 362 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 363 if (tx_queue_stats_mappings[i].port_id == port_id) { 364 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 365 tx_queue_stats_mappings[i].queue_id, 366 tx_queue_stats_mappings[i].stats_counter_id); 367 } 368 } 369 } 370 371 printf(" %s####################################%s\n", 372 nic_stats_mapping_border, nic_stats_mapping_border); 373 } 374 375 void 376 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 377 { 378 struct rte_eth_rxq_info qinfo; 379 int32_t rc; 380 static const char *info_border = "*********************"; 381 382 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 383 if (rc != 0) { 384 printf("Failed to retrieve information for port: %hhu, " 385 "RX queue: %hu\nerror desc: %s(%d)\n", 386 port_id, queue_id, strerror(-rc), rc); 387 return; 388 } 389 390 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 391 info_border, port_id, queue_id, info_border); 392 393 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 394 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 395 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 396 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 397 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 398 printf("\nRX drop packets: %s", 399 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 400 printf("\nRX deferred start: %s", 401 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 402 printf("\nRX scattered packets: %s", 403 (qinfo.scattered_rx != 0) ? "on" : "off"); 404 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 405 printf("\n"); 406 } 407 408 void 409 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 410 { 411 struct rte_eth_txq_info qinfo; 412 int32_t rc; 413 static const char *info_border = "*********************"; 414 415 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 416 if (rc != 0) { 417 printf("Failed to retrieve information for port: %hhu, " 418 "TX queue: %hu\nerror desc: %s(%d)\n", 419 port_id, queue_id, strerror(-rc), rc); 420 return; 421 } 422 423 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 424 info_border, port_id, queue_id, info_border); 425 426 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 427 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 428 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 429 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 430 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 431 printf("\nTX flags: %#x", qinfo.conf.txq_flags); 432 printf("\nTX deferred start: %s", 433 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 434 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 435 printf("\n"); 436 } 437 438 void 439 port_infos_display(portid_t port_id) 440 { 441 struct rte_port *port; 442 struct ether_addr mac_addr; 443 struct rte_eth_link link; 444 struct rte_eth_dev_info dev_info; 445 int vlan_offload; 446 struct rte_mempool * mp; 447 static const char *info_border = "*********************"; 448 portid_t pid; 449 450 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 451 printf("Valid port range is [0"); 452 FOREACH_PORT(pid, ports) 453 printf(", %d", pid); 454 printf("]\n"); 455 return; 456 } 457 port = &ports[port_id]; 458 rte_eth_link_get_nowait(port_id, &link); 459 memset(&dev_info, 0, sizeof(dev_info)); 460 rte_eth_dev_info_get(port_id, &dev_info); 461 printf("\n%s Infos for port %-2d %s\n", 462 info_border, port_id, info_border); 463 rte_eth_macaddr_get(port_id, &mac_addr); 464 print_ethaddr("MAC address: ", &mac_addr); 465 printf("\nDriver name: %s", dev_info.driver_name); 466 printf("\nConnect to socket: %u", port->socket_id); 467 468 if (port_numa[port_id] != NUMA_NO_CONFIG) { 469 mp = mbuf_pool_find(port_numa[port_id]); 470 if (mp) 471 printf("\nmemory allocation on the socket: %d", 472 port_numa[port_id]); 473 } else 474 printf("\nmemory allocation on the socket: %u",port->socket_id); 475 476 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 477 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 478 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 479 ("full-duplex") : ("half-duplex")); 480 printf("Promiscuous mode: %s\n", 481 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 482 printf("Allmulticast mode: %s\n", 483 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 484 printf("Maximum number of MAC addresses: %u\n", 485 (unsigned int)(port->dev_info.max_mac_addrs)); 486 printf("Maximum number of MAC addresses of hash filtering: %u\n", 487 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 488 489 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 490 if (vlan_offload >= 0){ 491 printf("VLAN offload: \n"); 492 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 493 printf(" strip on \n"); 494 else 495 printf(" strip off \n"); 496 497 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 498 printf(" filter on \n"); 499 else 500 printf(" filter off \n"); 501 502 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 503 printf(" qinq(extend) on \n"); 504 else 505 printf(" qinq(extend) off \n"); 506 } 507 508 if (dev_info.hash_key_size > 0) 509 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 510 if (dev_info.reta_size > 0) 511 printf("Redirection table size: %u\n", dev_info.reta_size); 512 if (!dev_info.flow_type_rss_offloads) 513 printf("No flow type is supported.\n"); 514 else { 515 uint16_t i; 516 char *p; 517 518 printf("Supported flow types:\n"); 519 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; 520 i++) { 521 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 522 continue; 523 p = flowtype_to_str(i); 524 printf(" %s\n", (p ? p : "unknown")); 525 } 526 } 527 528 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 529 printf("Max possible number of RXDs per queue: %hu\n", 530 dev_info.rx_desc_lim.nb_max); 531 printf("Min possible number of RXDs per queue: %hu\n", 532 dev_info.rx_desc_lim.nb_min); 533 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 534 535 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 536 printf("Max possible number of TXDs per queue: %hu\n", 537 dev_info.tx_desc_lim.nb_max); 538 printf("Min possible number of TXDs per queue: %hu\n", 539 dev_info.tx_desc_lim.nb_min); 540 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 541 } 542 543 int 544 port_id_is_invalid(portid_t port_id, enum print_warning warning) 545 { 546 if (port_id == (portid_t)RTE_PORT_ALL) 547 return 0; 548 549 if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled) 550 return 0; 551 552 if (warning == ENABLED_WARN) 553 printf("Invalid port %d\n", port_id); 554 555 return 1; 556 } 557 558 static int 559 vlan_id_is_invalid(uint16_t vlan_id) 560 { 561 if (vlan_id < 4096) 562 return 0; 563 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 564 return 1; 565 } 566 567 static int 568 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 569 { 570 uint64_t pci_len; 571 572 if (reg_off & 0x3) { 573 printf("Port register offset 0x%X not aligned on a 4-byte " 574 "boundary\n", 575 (unsigned)reg_off); 576 return 1; 577 } 578 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 579 if (reg_off >= pci_len) { 580 printf("Port %d: register offset %u (0x%X) out of port PCI " 581 "resource (length=%"PRIu64")\n", 582 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 583 return 1; 584 } 585 return 0; 586 } 587 588 static int 589 reg_bit_pos_is_invalid(uint8_t bit_pos) 590 { 591 if (bit_pos <= 31) 592 return 0; 593 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 594 return 1; 595 } 596 597 #define display_port_and_reg_off(port_id, reg_off) \ 598 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 599 600 static inline void 601 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 602 { 603 display_port_and_reg_off(port_id, (unsigned)reg_off); 604 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 605 } 606 607 void 608 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 609 { 610 uint32_t reg_v; 611 612 613 if (port_id_is_invalid(port_id, ENABLED_WARN)) 614 return; 615 if (port_reg_off_is_invalid(port_id, reg_off)) 616 return; 617 if (reg_bit_pos_is_invalid(bit_x)) 618 return; 619 reg_v = port_id_pci_reg_read(port_id, reg_off); 620 display_port_and_reg_off(port_id, (unsigned)reg_off); 621 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 622 } 623 624 void 625 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 626 uint8_t bit1_pos, uint8_t bit2_pos) 627 { 628 uint32_t reg_v; 629 uint8_t l_bit; 630 uint8_t h_bit; 631 632 if (port_id_is_invalid(port_id, ENABLED_WARN)) 633 return; 634 if (port_reg_off_is_invalid(port_id, reg_off)) 635 return; 636 if (reg_bit_pos_is_invalid(bit1_pos)) 637 return; 638 if (reg_bit_pos_is_invalid(bit2_pos)) 639 return; 640 if (bit1_pos > bit2_pos) 641 l_bit = bit2_pos, h_bit = bit1_pos; 642 else 643 l_bit = bit1_pos, h_bit = bit2_pos; 644 645 reg_v = port_id_pci_reg_read(port_id, reg_off); 646 reg_v >>= l_bit; 647 if (h_bit < 31) 648 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 649 display_port_and_reg_off(port_id, (unsigned)reg_off); 650 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 651 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 652 } 653 654 void 655 port_reg_display(portid_t port_id, uint32_t reg_off) 656 { 657 uint32_t reg_v; 658 659 if (port_id_is_invalid(port_id, ENABLED_WARN)) 660 return; 661 if (port_reg_off_is_invalid(port_id, reg_off)) 662 return; 663 reg_v = port_id_pci_reg_read(port_id, reg_off); 664 display_port_reg_value(port_id, reg_off, reg_v); 665 } 666 667 void 668 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 669 uint8_t bit_v) 670 { 671 uint32_t reg_v; 672 673 if (port_id_is_invalid(port_id, ENABLED_WARN)) 674 return; 675 if (port_reg_off_is_invalid(port_id, reg_off)) 676 return; 677 if (reg_bit_pos_is_invalid(bit_pos)) 678 return; 679 if (bit_v > 1) { 680 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 681 return; 682 } 683 reg_v = port_id_pci_reg_read(port_id, reg_off); 684 if (bit_v == 0) 685 reg_v &= ~(1 << bit_pos); 686 else 687 reg_v |= (1 << bit_pos); 688 port_id_pci_reg_write(port_id, reg_off, reg_v); 689 display_port_reg_value(port_id, reg_off, reg_v); 690 } 691 692 void 693 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 694 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 695 { 696 uint32_t max_v; 697 uint32_t reg_v; 698 uint8_t l_bit; 699 uint8_t h_bit; 700 701 if (port_id_is_invalid(port_id, ENABLED_WARN)) 702 return; 703 if (port_reg_off_is_invalid(port_id, reg_off)) 704 return; 705 if (reg_bit_pos_is_invalid(bit1_pos)) 706 return; 707 if (reg_bit_pos_is_invalid(bit2_pos)) 708 return; 709 if (bit1_pos > bit2_pos) 710 l_bit = bit2_pos, h_bit = bit1_pos; 711 else 712 l_bit = bit1_pos, h_bit = bit2_pos; 713 714 if ((h_bit - l_bit) < 31) 715 max_v = (1 << (h_bit - l_bit + 1)) - 1; 716 else 717 max_v = 0xFFFFFFFF; 718 719 if (value > max_v) { 720 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 721 (unsigned)value, (unsigned)value, 722 (unsigned)max_v, (unsigned)max_v); 723 return; 724 } 725 reg_v = port_id_pci_reg_read(port_id, reg_off); 726 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 727 reg_v |= (value << l_bit); /* Set changed bits */ 728 port_id_pci_reg_write(port_id, reg_off, reg_v); 729 display_port_reg_value(port_id, reg_off, reg_v); 730 } 731 732 void 733 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 734 { 735 if (port_id_is_invalid(port_id, ENABLED_WARN)) 736 return; 737 if (port_reg_off_is_invalid(port_id, reg_off)) 738 return; 739 port_id_pci_reg_write(port_id, reg_off, reg_v); 740 display_port_reg_value(port_id, reg_off, reg_v); 741 } 742 743 void 744 port_mtu_set(portid_t port_id, uint16_t mtu) 745 { 746 int diag; 747 748 if (port_id_is_invalid(port_id, ENABLED_WARN)) 749 return; 750 diag = rte_eth_dev_set_mtu(port_id, mtu); 751 if (diag == 0) 752 return; 753 printf("Set MTU failed. diag=%d\n", diag); 754 } 755 756 /* Generic flow management functions. */ 757 758 /** Generate flow_item[] entry. */ 759 #define MK_FLOW_ITEM(t, s) \ 760 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 761 .name = # t, \ 762 .size = s, \ 763 } 764 765 /** Information about known flow pattern items. */ 766 static const struct { 767 const char *name; 768 size_t size; 769 } flow_item[] = { 770 MK_FLOW_ITEM(END, 0), 771 MK_FLOW_ITEM(VOID, 0), 772 MK_FLOW_ITEM(INVERT, 0), 773 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 774 MK_FLOW_ITEM(PF, 0), 775 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 776 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)), 777 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */ 778 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 779 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 780 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 781 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 782 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 783 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 784 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 785 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 786 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 787 }; 788 789 /** Compute storage space needed by item specification. */ 790 static void 791 flow_item_spec_size(const struct rte_flow_item *item, 792 size_t *size, size_t *pad) 793 { 794 if (!item->spec) 795 goto empty; 796 switch (item->type) { 797 union { 798 const struct rte_flow_item_raw *raw; 799 } spec; 800 801 case RTE_FLOW_ITEM_TYPE_RAW: 802 spec.raw = item->spec; 803 *size = offsetof(struct rte_flow_item_raw, pattern) + 804 spec.raw->length * sizeof(*spec.raw->pattern); 805 break; 806 default: 807 empty: 808 *size = 0; 809 break; 810 } 811 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 812 } 813 814 /** Generate flow_action[] entry. */ 815 #define MK_FLOW_ACTION(t, s) \ 816 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 817 .name = # t, \ 818 .size = s, \ 819 } 820 821 /** Information about known flow actions. */ 822 static const struct { 823 const char *name; 824 size_t size; 825 } flow_action[] = { 826 MK_FLOW_ACTION(END, 0), 827 MK_FLOW_ACTION(VOID, 0), 828 MK_FLOW_ACTION(PASSTHRU, 0), 829 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 830 MK_FLOW_ACTION(FLAG, 0), 831 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 832 MK_FLOW_ACTION(DROP, 0), 833 MK_FLOW_ACTION(COUNT, 0), 834 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)), 835 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ 836 MK_FLOW_ACTION(PF, 0), 837 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 838 }; 839 840 /** Compute storage space needed by action configuration. */ 841 static void 842 flow_action_conf_size(const struct rte_flow_action *action, 843 size_t *size, size_t *pad) 844 { 845 if (!action->conf) 846 goto empty; 847 switch (action->type) { 848 union { 849 const struct rte_flow_action_rss *rss; 850 } conf; 851 852 case RTE_FLOW_ACTION_TYPE_RSS: 853 conf.rss = action->conf; 854 *size = offsetof(struct rte_flow_action_rss, queue) + 855 conf.rss->num * sizeof(*conf.rss->queue); 856 break; 857 default: 858 empty: 859 *size = 0; 860 break; 861 } 862 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 863 } 864 865 /** Generate a port_flow entry from attributes/pattern/actions. */ 866 static struct port_flow * 867 port_flow_new(const struct rte_flow_attr *attr, 868 const struct rte_flow_item *pattern, 869 const struct rte_flow_action *actions) 870 { 871 const struct rte_flow_item *item; 872 const struct rte_flow_action *action; 873 struct port_flow *pf = NULL; 874 size_t tmp; 875 size_t pad; 876 size_t off1 = 0; 877 size_t off2 = 0; 878 int err = ENOTSUP; 879 880 store: 881 item = pattern; 882 if (pf) 883 pf->pattern = (void *)&pf->data[off1]; 884 do { 885 struct rte_flow_item *dst = NULL; 886 887 if ((unsigned int)item->type > RTE_DIM(flow_item) || 888 !flow_item[item->type].name) 889 goto notsup; 890 if (pf) 891 dst = memcpy(pf->data + off1, item, sizeof(*item)); 892 off1 += sizeof(*item); 893 flow_item_spec_size(item, &tmp, &pad); 894 if (item->spec) { 895 if (pf) 896 dst->spec = memcpy(pf->data + off2, 897 item->spec, tmp); 898 off2 += tmp + pad; 899 } 900 if (item->last) { 901 if (pf) 902 dst->last = memcpy(pf->data + off2, 903 item->last, tmp); 904 off2 += tmp + pad; 905 } 906 if (item->mask) { 907 if (pf) 908 dst->mask = memcpy(pf->data + off2, 909 item->mask, tmp); 910 off2 += tmp + pad; 911 } 912 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 913 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 914 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 915 action = actions; 916 if (pf) 917 pf->actions = (void *)&pf->data[off1]; 918 do { 919 struct rte_flow_action *dst = NULL; 920 921 if ((unsigned int)action->type > RTE_DIM(flow_action) || 922 !flow_action[action->type].name) 923 goto notsup; 924 if (pf) 925 dst = memcpy(pf->data + off1, action, sizeof(*action)); 926 off1 += sizeof(*action); 927 flow_action_conf_size(action, &tmp, &pad); 928 if (action->conf) { 929 if (pf) 930 dst->conf = memcpy(pf->data + off2, 931 action->conf, tmp); 932 off2 += tmp + pad; 933 } 934 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 935 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 936 if (pf != NULL) 937 return pf; 938 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 939 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 940 pf = calloc(1, tmp + off1 + off2); 941 if (pf == NULL) 942 err = errno; 943 else { 944 *pf = (const struct port_flow){ 945 .size = tmp + off1 + off2, 946 .attr = *attr, 947 }; 948 tmp -= offsetof(struct port_flow, data); 949 off2 = tmp + off1; 950 off1 = tmp; 951 goto store; 952 } 953 notsup: 954 rte_errno = err; 955 return NULL; 956 } 957 958 /** Print a message out of a flow error. */ 959 static int 960 port_flow_complain(struct rte_flow_error *error) 961 { 962 static const char *const errstrlist[] = { 963 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 964 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 965 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 966 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 967 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 968 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 969 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 970 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 971 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 972 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 973 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 974 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 975 }; 976 const char *errstr; 977 char buf[32]; 978 int err = rte_errno; 979 980 if ((unsigned int)error->type > RTE_DIM(errstrlist) || 981 !errstrlist[error->type]) 982 errstr = "unknown type"; 983 else 984 errstr = errstrlist[error->type]; 985 printf("Caught error type %d (%s): %s%s\n", 986 error->type, errstr, 987 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 988 error->cause), buf) : "", 989 error->message ? error->message : "(no stated reason)"); 990 return -err; 991 } 992 993 /** Validate flow rule. */ 994 int 995 port_flow_validate(portid_t port_id, 996 const struct rte_flow_attr *attr, 997 const struct rte_flow_item *pattern, 998 const struct rte_flow_action *actions) 999 { 1000 struct rte_flow_error error; 1001 1002 /* Poisoning to make sure PMDs update it in case of error. */ 1003 memset(&error, 0x11, sizeof(error)); 1004 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1005 return port_flow_complain(&error); 1006 printf("Flow rule validated\n"); 1007 return 0; 1008 } 1009 1010 /** Create flow rule. */ 1011 int 1012 port_flow_create(portid_t port_id, 1013 const struct rte_flow_attr *attr, 1014 const struct rte_flow_item *pattern, 1015 const struct rte_flow_action *actions) 1016 { 1017 struct rte_flow *flow; 1018 struct rte_port *port; 1019 struct port_flow *pf; 1020 uint32_t id; 1021 struct rte_flow_error error; 1022 1023 /* Poisoning to make sure PMDs update it in case of error. */ 1024 memset(&error, 0x22, sizeof(error)); 1025 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1026 if (!flow) 1027 return port_flow_complain(&error); 1028 port = &ports[port_id]; 1029 if (port->flow_list) { 1030 if (port->flow_list->id == UINT32_MAX) { 1031 printf("Highest rule ID is already assigned, delete" 1032 " it first"); 1033 rte_flow_destroy(port_id, flow, NULL); 1034 return -ENOMEM; 1035 } 1036 id = port->flow_list->id + 1; 1037 } else 1038 id = 0; 1039 pf = port_flow_new(attr, pattern, actions); 1040 if (!pf) { 1041 int err = rte_errno; 1042 1043 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1044 rte_flow_destroy(port_id, flow, NULL); 1045 return -err; 1046 } 1047 pf->next = port->flow_list; 1048 pf->id = id; 1049 pf->flow = flow; 1050 port->flow_list = pf; 1051 printf("Flow rule #%u created\n", pf->id); 1052 return 0; 1053 } 1054 1055 /** Destroy a number of flow rules. */ 1056 int 1057 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1058 { 1059 struct rte_port *port; 1060 struct port_flow **tmp; 1061 uint32_t c = 0; 1062 int ret = 0; 1063 1064 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1065 port_id == (portid_t)RTE_PORT_ALL) 1066 return -EINVAL; 1067 port = &ports[port_id]; 1068 tmp = &port->flow_list; 1069 while (*tmp) { 1070 uint32_t i; 1071 1072 for (i = 0; i != n; ++i) { 1073 struct rte_flow_error error; 1074 struct port_flow *pf = *tmp; 1075 1076 if (rule[i] != pf->id) 1077 continue; 1078 /* 1079 * Poisoning to make sure PMDs update it in case 1080 * of error. 1081 */ 1082 memset(&error, 0x33, sizeof(error)); 1083 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1084 ret = port_flow_complain(&error); 1085 continue; 1086 } 1087 printf("Flow rule #%u destroyed\n", pf->id); 1088 *tmp = pf->next; 1089 free(pf); 1090 break; 1091 } 1092 if (i == n) 1093 tmp = &(*tmp)->next; 1094 ++c; 1095 } 1096 return ret; 1097 } 1098 1099 /** Remove all flow rules. */ 1100 int 1101 port_flow_flush(portid_t port_id) 1102 { 1103 struct rte_flow_error error; 1104 struct rte_port *port; 1105 int ret = 0; 1106 1107 /* Poisoning to make sure PMDs update it in case of error. */ 1108 memset(&error, 0x44, sizeof(error)); 1109 if (rte_flow_flush(port_id, &error)) { 1110 ret = port_flow_complain(&error); 1111 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1112 port_id == (portid_t)RTE_PORT_ALL) 1113 return ret; 1114 } 1115 port = &ports[port_id]; 1116 while (port->flow_list) { 1117 struct port_flow *pf = port->flow_list->next; 1118 1119 free(port->flow_list); 1120 port->flow_list = pf; 1121 } 1122 return ret; 1123 } 1124 1125 /** Query a flow rule. */ 1126 int 1127 port_flow_query(portid_t port_id, uint32_t rule, 1128 enum rte_flow_action_type action) 1129 { 1130 struct rte_flow_error error; 1131 struct rte_port *port; 1132 struct port_flow *pf; 1133 const char *name; 1134 union { 1135 struct rte_flow_query_count count; 1136 } query; 1137 1138 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1139 port_id == (portid_t)RTE_PORT_ALL) 1140 return -EINVAL; 1141 port = &ports[port_id]; 1142 for (pf = port->flow_list; pf; pf = pf->next) 1143 if (pf->id == rule) 1144 break; 1145 if (!pf) { 1146 printf("Flow rule #%u not found\n", rule); 1147 return -ENOENT; 1148 } 1149 if ((unsigned int)action > RTE_DIM(flow_action) || 1150 !flow_action[action].name) 1151 name = "unknown"; 1152 else 1153 name = flow_action[action].name; 1154 switch (action) { 1155 case RTE_FLOW_ACTION_TYPE_COUNT: 1156 break; 1157 default: 1158 printf("Cannot query action type %d (%s)\n", action, name); 1159 return -ENOTSUP; 1160 } 1161 /* Poisoning to make sure PMDs update it in case of error. */ 1162 memset(&error, 0x55, sizeof(error)); 1163 memset(&query, 0, sizeof(query)); 1164 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1165 return port_flow_complain(&error); 1166 switch (action) { 1167 case RTE_FLOW_ACTION_TYPE_COUNT: 1168 printf("%s:\n" 1169 " hits_set: %u\n" 1170 " bytes_set: %u\n" 1171 " hits: %" PRIu64 "\n" 1172 " bytes: %" PRIu64 "\n", 1173 name, 1174 query.count.hits_set, 1175 query.count.bytes_set, 1176 query.count.hits, 1177 query.count.bytes); 1178 break; 1179 default: 1180 printf("Cannot display result for action type %d (%s)\n", 1181 action, name); 1182 break; 1183 } 1184 return 0; 1185 } 1186 1187 /** List flow rules. */ 1188 void 1189 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1190 { 1191 struct rte_port *port; 1192 struct port_flow *pf; 1193 struct port_flow *list = NULL; 1194 uint32_t i; 1195 1196 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1197 port_id == (portid_t)RTE_PORT_ALL) 1198 return; 1199 port = &ports[port_id]; 1200 if (!port->flow_list) 1201 return; 1202 /* Sort flows by group, priority and ID. */ 1203 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1204 struct port_flow **tmp; 1205 1206 if (n) { 1207 /* Filter out unwanted groups. */ 1208 for (i = 0; i != n; ++i) 1209 if (pf->attr.group == group[i]) 1210 break; 1211 if (i == n) 1212 continue; 1213 } 1214 tmp = &list; 1215 while (*tmp && 1216 (pf->attr.group > (*tmp)->attr.group || 1217 (pf->attr.group == (*tmp)->attr.group && 1218 pf->attr.priority > (*tmp)->attr.priority) || 1219 (pf->attr.group == (*tmp)->attr.group && 1220 pf->attr.priority == (*tmp)->attr.priority && 1221 pf->id > (*tmp)->id))) 1222 tmp = &(*tmp)->tmp; 1223 pf->tmp = *tmp; 1224 *tmp = pf; 1225 } 1226 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1227 for (pf = list; pf != NULL; pf = pf->tmp) { 1228 const struct rte_flow_item *item = pf->pattern; 1229 const struct rte_flow_action *action = pf->actions; 1230 1231 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t", 1232 pf->id, 1233 pf->attr.group, 1234 pf->attr.priority, 1235 pf->attr.ingress ? 'i' : '-', 1236 pf->attr.egress ? 'e' : '-'); 1237 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1238 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1239 printf("%s ", flow_item[item->type].name); 1240 ++item; 1241 } 1242 printf("=>"); 1243 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1244 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1245 printf(" %s", flow_action[action->type].name); 1246 ++action; 1247 } 1248 printf("\n"); 1249 } 1250 } 1251 1252 /* 1253 * RX/TX ring descriptors display functions. 1254 */ 1255 int 1256 rx_queue_id_is_invalid(queueid_t rxq_id) 1257 { 1258 if (rxq_id < nb_rxq) 1259 return 0; 1260 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1261 return 1; 1262 } 1263 1264 int 1265 tx_queue_id_is_invalid(queueid_t txq_id) 1266 { 1267 if (txq_id < nb_txq) 1268 return 0; 1269 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1270 return 1; 1271 } 1272 1273 static int 1274 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1275 { 1276 if (rxdesc_id < nb_rxd) 1277 return 0; 1278 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1279 rxdesc_id, nb_rxd); 1280 return 1; 1281 } 1282 1283 static int 1284 tx_desc_id_is_invalid(uint16_t txdesc_id) 1285 { 1286 if (txdesc_id < nb_txd) 1287 return 0; 1288 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1289 txdesc_id, nb_txd); 1290 return 1; 1291 } 1292 1293 static const struct rte_memzone * 1294 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) 1295 { 1296 char mz_name[RTE_MEMZONE_NAMESIZE]; 1297 const struct rte_memzone *mz; 1298 1299 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1300 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1301 mz = rte_memzone_lookup(mz_name); 1302 if (mz == NULL) 1303 printf("%s ring memory zoneof (port %d, queue %d) not" 1304 "found (zone name = %s\n", 1305 ring_name, port_id, q_id, mz_name); 1306 return mz; 1307 } 1308 1309 union igb_ring_dword { 1310 uint64_t dword; 1311 struct { 1312 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1313 uint32_t lo; 1314 uint32_t hi; 1315 #else 1316 uint32_t hi; 1317 uint32_t lo; 1318 #endif 1319 } words; 1320 }; 1321 1322 struct igb_ring_desc_32_bytes { 1323 union igb_ring_dword lo_dword; 1324 union igb_ring_dword hi_dword; 1325 union igb_ring_dword resv1; 1326 union igb_ring_dword resv2; 1327 }; 1328 1329 struct igb_ring_desc_16_bytes { 1330 union igb_ring_dword lo_dword; 1331 union igb_ring_dword hi_dword; 1332 }; 1333 1334 static void 1335 ring_rxd_display_dword(union igb_ring_dword dword) 1336 { 1337 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1338 (unsigned)dword.words.hi); 1339 } 1340 1341 static void 1342 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1343 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1344 uint8_t port_id, 1345 #else 1346 __rte_unused uint8_t port_id, 1347 #endif 1348 uint16_t desc_id) 1349 { 1350 struct igb_ring_desc_16_bytes *ring = 1351 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1352 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1353 struct rte_eth_dev_info dev_info; 1354 1355 memset(&dev_info, 0, sizeof(dev_info)); 1356 rte_eth_dev_info_get(port_id, &dev_info); 1357 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1358 /* 32 bytes RX descriptor, i40e only */ 1359 struct igb_ring_desc_32_bytes *ring = 1360 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1361 ring[desc_id].lo_dword.dword = 1362 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1363 ring_rxd_display_dword(ring[desc_id].lo_dword); 1364 ring[desc_id].hi_dword.dword = 1365 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1366 ring_rxd_display_dword(ring[desc_id].hi_dword); 1367 ring[desc_id].resv1.dword = 1368 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1369 ring_rxd_display_dword(ring[desc_id].resv1); 1370 ring[desc_id].resv2.dword = 1371 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1372 ring_rxd_display_dword(ring[desc_id].resv2); 1373 1374 return; 1375 } 1376 #endif 1377 /* 16 bytes RX descriptor */ 1378 ring[desc_id].lo_dword.dword = 1379 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1380 ring_rxd_display_dword(ring[desc_id].lo_dword); 1381 ring[desc_id].hi_dword.dword = 1382 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1383 ring_rxd_display_dword(ring[desc_id].hi_dword); 1384 } 1385 1386 static void 1387 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1388 { 1389 struct igb_ring_desc_16_bytes *ring; 1390 struct igb_ring_desc_16_bytes txd; 1391 1392 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1393 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1394 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1395 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1396 (unsigned)txd.lo_dword.words.lo, 1397 (unsigned)txd.lo_dword.words.hi, 1398 (unsigned)txd.hi_dword.words.lo, 1399 (unsigned)txd.hi_dword.words.hi); 1400 } 1401 1402 void 1403 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1404 { 1405 const struct rte_memzone *rx_mz; 1406 1407 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1408 return; 1409 if (rx_queue_id_is_invalid(rxq_id)) 1410 return; 1411 if (rx_desc_id_is_invalid(rxd_id)) 1412 return; 1413 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1414 if (rx_mz == NULL) 1415 return; 1416 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1417 } 1418 1419 void 1420 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1421 { 1422 const struct rte_memzone *tx_mz; 1423 1424 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1425 return; 1426 if (tx_queue_id_is_invalid(txq_id)) 1427 return; 1428 if (tx_desc_id_is_invalid(txd_id)) 1429 return; 1430 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1431 if (tx_mz == NULL) 1432 return; 1433 ring_tx_descriptor_display(tx_mz, txd_id); 1434 } 1435 1436 void 1437 fwd_lcores_config_display(void) 1438 { 1439 lcoreid_t lc_id; 1440 1441 printf("List of forwarding lcores:"); 1442 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1443 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1444 printf("\n"); 1445 } 1446 void 1447 rxtx_config_display(void) 1448 { 1449 printf(" %s packet forwarding%s - CRC stripping %s - " 1450 "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, 1451 retry_enabled == 0 ? "" : " with retry", 1452 rx_mode.hw_strip_crc ? "enabled" : "disabled", 1453 nb_pkt_per_burst); 1454 1455 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1456 printf(" packet len=%u - nb packet segments=%d\n", 1457 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1458 1459 struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; 1460 struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; 1461 1462 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1463 nb_fwd_lcores, nb_fwd_ports); 1464 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 1465 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 1466 printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 1467 rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, 1468 rx_conf->rx_thresh.wthresh); 1469 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 1470 nb_txq, nb_txd, tx_conf->tx_free_thresh); 1471 printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 1472 tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, 1473 tx_conf->tx_thresh.wthresh); 1474 printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", 1475 tx_conf->tx_rs_thresh, tx_conf->txq_flags); 1476 } 1477 1478 void 1479 port_rss_reta_info(portid_t port_id, 1480 struct rte_eth_rss_reta_entry64 *reta_conf, 1481 uint16_t nb_entries) 1482 { 1483 uint16_t i, idx, shift; 1484 int ret; 1485 1486 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1487 return; 1488 1489 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1490 if (ret != 0) { 1491 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1492 return; 1493 } 1494 1495 for (i = 0; i < nb_entries; i++) { 1496 idx = i / RTE_RETA_GROUP_SIZE; 1497 shift = i % RTE_RETA_GROUP_SIZE; 1498 if (!(reta_conf[idx].mask & (1ULL << shift))) 1499 continue; 1500 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1501 i, reta_conf[idx].reta[shift]); 1502 } 1503 } 1504 1505 /* 1506 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1507 * key of the port. 1508 */ 1509 void 1510 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1511 { 1512 struct rte_eth_rss_conf rss_conf; 1513 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1514 uint64_t rss_hf; 1515 uint8_t i; 1516 int diag; 1517 struct rte_eth_dev_info dev_info; 1518 uint8_t hash_key_size; 1519 1520 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1521 return; 1522 1523 memset(&dev_info, 0, sizeof(dev_info)); 1524 rte_eth_dev_info_get(port_id, &dev_info); 1525 if (dev_info.hash_key_size > 0 && 1526 dev_info.hash_key_size <= sizeof(rss_key)) 1527 hash_key_size = dev_info.hash_key_size; 1528 else { 1529 printf("dev_info did not provide a valid hash key size\n"); 1530 return; 1531 } 1532 1533 rss_conf.rss_hf = 0; 1534 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1535 if (!strcmp(rss_info, rss_type_table[i].str)) 1536 rss_conf.rss_hf = rss_type_table[i].rss_type; 1537 } 1538 1539 /* Get RSS hash key if asked to display it */ 1540 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1541 rss_conf.rss_key_len = hash_key_size; 1542 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1543 if (diag != 0) { 1544 switch (diag) { 1545 case -ENODEV: 1546 printf("port index %d invalid\n", port_id); 1547 break; 1548 case -ENOTSUP: 1549 printf("operation not supported by device\n"); 1550 break; 1551 default: 1552 printf("operation failed - diag=%d\n", diag); 1553 break; 1554 } 1555 return; 1556 } 1557 rss_hf = rss_conf.rss_hf; 1558 if (rss_hf == 0) { 1559 printf("RSS disabled\n"); 1560 return; 1561 } 1562 printf("RSS functions:\n "); 1563 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1564 if (rss_hf & rss_type_table[i].rss_type) 1565 printf("%s ", rss_type_table[i].str); 1566 } 1567 printf("\n"); 1568 if (!show_rss_key) 1569 return; 1570 printf("RSS key:\n"); 1571 for (i = 0; i < hash_key_size; i++) 1572 printf("%02X", rss_key[i]); 1573 printf("\n"); 1574 } 1575 1576 void 1577 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1578 uint hash_key_len) 1579 { 1580 struct rte_eth_rss_conf rss_conf; 1581 int diag; 1582 unsigned int i; 1583 1584 rss_conf.rss_key = NULL; 1585 rss_conf.rss_key_len = hash_key_len; 1586 rss_conf.rss_hf = 0; 1587 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1588 if (!strcmp(rss_type_table[i].str, rss_type)) 1589 rss_conf.rss_hf = rss_type_table[i].rss_type; 1590 } 1591 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1592 if (diag == 0) { 1593 rss_conf.rss_key = hash_key; 1594 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1595 } 1596 if (diag == 0) 1597 return; 1598 1599 switch (diag) { 1600 case -ENODEV: 1601 printf("port index %d invalid\n", port_id); 1602 break; 1603 case -ENOTSUP: 1604 printf("operation not supported by device\n"); 1605 break; 1606 default: 1607 printf("operation failed - diag=%d\n", diag); 1608 break; 1609 } 1610 } 1611 1612 /* 1613 * Setup forwarding configuration for each logical core. 1614 */ 1615 static void 1616 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1617 { 1618 streamid_t nb_fs_per_lcore; 1619 streamid_t nb_fs; 1620 streamid_t sm_id; 1621 lcoreid_t nb_extra; 1622 lcoreid_t nb_fc; 1623 lcoreid_t nb_lc; 1624 lcoreid_t lc_id; 1625 1626 nb_fs = cfg->nb_fwd_streams; 1627 nb_fc = cfg->nb_fwd_lcores; 1628 if (nb_fs <= nb_fc) { 1629 nb_fs_per_lcore = 1; 1630 nb_extra = 0; 1631 } else { 1632 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1633 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1634 } 1635 1636 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1637 sm_id = 0; 1638 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1639 fwd_lcores[lc_id]->stream_idx = sm_id; 1640 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1641 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1642 } 1643 1644 /* 1645 * Assign extra remaining streams, if any. 1646 */ 1647 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1648 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1649 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1650 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1651 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1652 } 1653 } 1654 1655 static void 1656 simple_fwd_config_setup(void) 1657 { 1658 portid_t i; 1659 portid_t j; 1660 portid_t inc = 2; 1661 1662 if (port_topology == PORT_TOPOLOGY_CHAINED || 1663 port_topology == PORT_TOPOLOGY_LOOP) { 1664 inc = 1; 1665 } else if (nb_fwd_ports % 2) { 1666 printf("\nWarning! Cannot handle an odd number of ports " 1667 "with the current port topology. Configuration " 1668 "must be changed to have an even number of ports, " 1669 "or relaunch application with " 1670 "--port-topology=chained\n\n"); 1671 } 1672 1673 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1674 cur_fwd_config.nb_fwd_streams = 1675 (streamid_t) cur_fwd_config.nb_fwd_ports; 1676 1677 /* reinitialize forwarding streams */ 1678 init_fwd_streams(); 1679 1680 /* 1681 * In the simple forwarding test, the number of forwarding cores 1682 * must be lower or equal to the number of forwarding ports. 1683 */ 1684 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1685 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1686 cur_fwd_config.nb_fwd_lcores = 1687 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1688 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1689 1690 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1691 if (port_topology != PORT_TOPOLOGY_LOOP) 1692 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1693 else 1694 j = i; 1695 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1696 fwd_streams[i]->rx_queue = 0; 1697 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1698 fwd_streams[i]->tx_queue = 0; 1699 fwd_streams[i]->peer_addr = j; 1700 fwd_streams[i]->retry_enabled = retry_enabled; 1701 1702 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1703 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1704 fwd_streams[j]->rx_queue = 0; 1705 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1706 fwd_streams[j]->tx_queue = 0; 1707 fwd_streams[j]->peer_addr = i; 1708 fwd_streams[j]->retry_enabled = retry_enabled; 1709 } 1710 } 1711 } 1712 1713 /** 1714 * For the RSS forwarding test all streams distributed over lcores. Each stream 1715 * being composed of a RX queue to poll on a RX port for input messages, 1716 * associated with a TX queue of a TX port where to send forwarded packets. 1717 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1718 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1719 * following rules: 1720 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1721 * - TxQl = RxQj 1722 */ 1723 static void 1724 rss_fwd_config_setup(void) 1725 { 1726 portid_t rxp; 1727 portid_t txp; 1728 queueid_t rxq; 1729 queueid_t nb_q; 1730 streamid_t sm_id; 1731 1732 nb_q = nb_rxq; 1733 if (nb_q > nb_txq) 1734 nb_q = nb_txq; 1735 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1736 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1737 cur_fwd_config.nb_fwd_streams = 1738 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1739 1740 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1741 cur_fwd_config.nb_fwd_lcores = 1742 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1743 1744 /* reinitialize forwarding streams */ 1745 init_fwd_streams(); 1746 1747 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1748 rxp = 0; rxq = 0; 1749 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1750 struct fwd_stream *fs; 1751 1752 fs = fwd_streams[sm_id]; 1753 1754 if ((rxp & 0x1) == 0) 1755 txp = (portid_t) (rxp + 1); 1756 else 1757 txp = (portid_t) (rxp - 1); 1758 /* 1759 * if we are in loopback, simply send stuff out through the 1760 * ingress port 1761 */ 1762 if (port_topology == PORT_TOPOLOGY_LOOP) 1763 txp = rxp; 1764 1765 fs->rx_port = fwd_ports_ids[rxp]; 1766 fs->rx_queue = rxq; 1767 fs->tx_port = fwd_ports_ids[txp]; 1768 fs->tx_queue = rxq; 1769 fs->peer_addr = fs->tx_port; 1770 fs->retry_enabled = retry_enabled; 1771 rxq = (queueid_t) (rxq + 1); 1772 if (rxq < nb_q) 1773 continue; 1774 /* 1775 * rxq == nb_q 1776 * Restart from RX queue 0 on next RX port 1777 */ 1778 rxq = 0; 1779 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1780 rxp = (portid_t) 1781 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1782 else 1783 rxp = (portid_t) (rxp + 1); 1784 } 1785 } 1786 1787 /** 1788 * For the DCB forwarding test, each core is assigned on each traffic class. 1789 * 1790 * Each core is assigned a multi-stream, each stream being composed of 1791 * a RX queue to poll on a RX port for input messages, associated with 1792 * a TX queue of a TX port where to send forwarded packets. All RX and 1793 * TX queues are mapping to the same traffic class. 1794 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 1795 * the same core 1796 */ 1797 static void 1798 dcb_fwd_config_setup(void) 1799 { 1800 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 1801 portid_t txp, rxp = 0; 1802 queueid_t txq, rxq = 0; 1803 lcoreid_t lc_id; 1804 uint16_t nb_rx_queue, nb_tx_queue; 1805 uint16_t i, j, k, sm_id = 0; 1806 uint8_t tc = 0; 1807 1808 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1809 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1810 cur_fwd_config.nb_fwd_streams = 1811 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1812 1813 /* reinitialize forwarding streams */ 1814 init_fwd_streams(); 1815 sm_id = 0; 1816 txp = 1; 1817 /* get the dcb info on the first RX and TX ports */ 1818 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1819 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1820 1821 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1822 fwd_lcores[lc_id]->stream_nb = 0; 1823 fwd_lcores[lc_id]->stream_idx = sm_id; 1824 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 1825 /* if the nb_queue is zero, means this tc is 1826 * not enabled on the POOL 1827 */ 1828 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 1829 break; 1830 k = fwd_lcores[lc_id]->stream_nb + 1831 fwd_lcores[lc_id]->stream_idx; 1832 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 1833 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 1834 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1835 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 1836 for (j = 0; j < nb_rx_queue; j++) { 1837 struct fwd_stream *fs; 1838 1839 fs = fwd_streams[k + j]; 1840 fs->rx_port = fwd_ports_ids[rxp]; 1841 fs->rx_queue = rxq + j; 1842 fs->tx_port = fwd_ports_ids[txp]; 1843 fs->tx_queue = txq + j % nb_tx_queue; 1844 fs->peer_addr = fs->tx_port; 1845 fs->retry_enabled = retry_enabled; 1846 } 1847 fwd_lcores[lc_id]->stream_nb += 1848 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1849 } 1850 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 1851 1852 tc++; 1853 if (tc < rxp_dcb_info.nb_tcs) 1854 continue; 1855 /* Restart from TC 0 on next RX port */ 1856 tc = 0; 1857 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1858 rxp = (portid_t) 1859 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1860 else 1861 rxp++; 1862 if (rxp >= nb_fwd_ports) 1863 return; 1864 /* get the dcb information on next RX and TX ports */ 1865 if ((rxp & 0x1) == 0) 1866 txp = (portid_t) (rxp + 1); 1867 else 1868 txp = (portid_t) (rxp - 1); 1869 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1870 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1871 } 1872 } 1873 1874 static void 1875 icmp_echo_config_setup(void) 1876 { 1877 portid_t rxp; 1878 queueid_t rxq; 1879 lcoreid_t lc_id; 1880 uint16_t sm_id; 1881 1882 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 1883 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 1884 (nb_txq * nb_fwd_ports); 1885 else 1886 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1887 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1888 cur_fwd_config.nb_fwd_streams = 1889 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1890 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1891 cur_fwd_config.nb_fwd_lcores = 1892 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1893 if (verbose_level > 0) { 1894 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 1895 __FUNCTION__, 1896 cur_fwd_config.nb_fwd_lcores, 1897 cur_fwd_config.nb_fwd_ports, 1898 cur_fwd_config.nb_fwd_streams); 1899 } 1900 1901 /* reinitialize forwarding streams */ 1902 init_fwd_streams(); 1903 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1904 rxp = 0; rxq = 0; 1905 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1906 if (verbose_level > 0) 1907 printf(" core=%d: \n", lc_id); 1908 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1909 struct fwd_stream *fs; 1910 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1911 fs->rx_port = fwd_ports_ids[rxp]; 1912 fs->rx_queue = rxq; 1913 fs->tx_port = fs->rx_port; 1914 fs->tx_queue = rxq; 1915 fs->peer_addr = fs->tx_port; 1916 fs->retry_enabled = retry_enabled; 1917 if (verbose_level > 0) 1918 printf(" stream=%d port=%d rxq=%d txq=%d\n", 1919 sm_id, fs->rx_port, fs->rx_queue, 1920 fs->tx_queue); 1921 rxq = (queueid_t) (rxq + 1); 1922 if (rxq == nb_rxq) { 1923 rxq = 0; 1924 rxp = (portid_t) (rxp + 1); 1925 } 1926 } 1927 } 1928 } 1929 1930 void 1931 fwd_config_setup(void) 1932 { 1933 cur_fwd_config.fwd_eng = cur_fwd_eng; 1934 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 1935 icmp_echo_config_setup(); 1936 return; 1937 } 1938 if ((nb_rxq > 1) && (nb_txq > 1)){ 1939 if (dcb_config) 1940 dcb_fwd_config_setup(); 1941 else 1942 rss_fwd_config_setup(); 1943 } 1944 else 1945 simple_fwd_config_setup(); 1946 } 1947 1948 void 1949 pkt_fwd_config_display(struct fwd_config *cfg) 1950 { 1951 struct fwd_stream *fs; 1952 lcoreid_t lc_id; 1953 streamid_t sm_id; 1954 1955 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 1956 "NUMA support %s, MP over anonymous pages %s\n", 1957 cfg->fwd_eng->fwd_mode_name, 1958 retry_enabled == 0 ? "" : " with retry", 1959 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 1960 numa_support == 1 ? "enabled" : "disabled", 1961 mp_anon != 0 ? "enabled" : "disabled"); 1962 1963 if (retry_enabled) 1964 printf("TX retry num: %u, delay between TX retries: %uus\n", 1965 burst_tx_retry_num, burst_tx_delay_time); 1966 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 1967 printf("Logical Core %u (socket %u) forwards packets on " 1968 "%d streams:", 1969 fwd_lcores_cpuids[lc_id], 1970 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 1971 fwd_lcores[lc_id]->stream_nb); 1972 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1973 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1974 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 1975 "P=%d/Q=%d (socket %u) ", 1976 fs->rx_port, fs->rx_queue, 1977 ports[fs->rx_port].socket_id, 1978 fs->tx_port, fs->tx_queue, 1979 ports[fs->tx_port].socket_id); 1980 print_ethaddr("peer=", 1981 &peer_eth_addrs[fs->peer_addr]); 1982 } 1983 printf("\n"); 1984 } 1985 printf("\n"); 1986 } 1987 1988 int 1989 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 1990 { 1991 unsigned int i; 1992 unsigned int lcore_cpuid; 1993 int record_now; 1994 1995 record_now = 0; 1996 again: 1997 for (i = 0; i < nb_lc; i++) { 1998 lcore_cpuid = lcorelist[i]; 1999 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2000 printf("lcore %u not enabled\n", lcore_cpuid); 2001 return -1; 2002 } 2003 if (lcore_cpuid == rte_get_master_lcore()) { 2004 printf("lcore %u cannot be masked on for running " 2005 "packet forwarding, which is the master lcore " 2006 "and reserved for command line parsing only\n", 2007 lcore_cpuid); 2008 return -1; 2009 } 2010 if (record_now) 2011 fwd_lcores_cpuids[i] = lcore_cpuid; 2012 } 2013 if (record_now == 0) { 2014 record_now = 1; 2015 goto again; 2016 } 2017 nb_cfg_lcores = (lcoreid_t) nb_lc; 2018 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2019 printf("previous number of forwarding cores %u - changed to " 2020 "number of configured cores %u\n", 2021 (unsigned int) nb_fwd_lcores, nb_lc); 2022 nb_fwd_lcores = (lcoreid_t) nb_lc; 2023 } 2024 2025 return 0; 2026 } 2027 2028 int 2029 set_fwd_lcores_mask(uint64_t lcoremask) 2030 { 2031 unsigned int lcorelist[64]; 2032 unsigned int nb_lc; 2033 unsigned int i; 2034 2035 if (lcoremask == 0) { 2036 printf("Invalid NULL mask of cores\n"); 2037 return -1; 2038 } 2039 nb_lc = 0; 2040 for (i = 0; i < 64; i++) { 2041 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2042 continue; 2043 lcorelist[nb_lc++] = i; 2044 } 2045 return set_fwd_lcores_list(lcorelist, nb_lc); 2046 } 2047 2048 void 2049 set_fwd_lcores_number(uint16_t nb_lc) 2050 { 2051 if (nb_lc > nb_cfg_lcores) { 2052 printf("nb fwd cores %u > %u (max. number of configured " 2053 "lcores) - ignored\n", 2054 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2055 return; 2056 } 2057 nb_fwd_lcores = (lcoreid_t) nb_lc; 2058 printf("Number of forwarding cores set to %u\n", 2059 (unsigned int) nb_fwd_lcores); 2060 } 2061 2062 void 2063 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2064 { 2065 unsigned int i; 2066 portid_t port_id; 2067 int record_now; 2068 2069 record_now = 0; 2070 again: 2071 for (i = 0; i < nb_pt; i++) { 2072 port_id = (portid_t) portlist[i]; 2073 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2074 return; 2075 if (record_now) 2076 fwd_ports_ids[i] = port_id; 2077 } 2078 if (record_now == 0) { 2079 record_now = 1; 2080 goto again; 2081 } 2082 nb_cfg_ports = (portid_t) nb_pt; 2083 if (nb_fwd_ports != (portid_t) nb_pt) { 2084 printf("previous number of forwarding ports %u - changed to " 2085 "number of configured ports %u\n", 2086 (unsigned int) nb_fwd_ports, nb_pt); 2087 nb_fwd_ports = (portid_t) nb_pt; 2088 } 2089 } 2090 2091 void 2092 set_fwd_ports_mask(uint64_t portmask) 2093 { 2094 unsigned int portlist[64]; 2095 unsigned int nb_pt; 2096 unsigned int i; 2097 2098 if (portmask == 0) { 2099 printf("Invalid NULL mask of ports\n"); 2100 return; 2101 } 2102 nb_pt = 0; 2103 for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) { 2104 if (! ((uint64_t)(1ULL << i) & portmask)) 2105 continue; 2106 portlist[nb_pt++] = i; 2107 } 2108 set_fwd_ports_list(portlist, nb_pt); 2109 } 2110 2111 void 2112 set_fwd_ports_number(uint16_t nb_pt) 2113 { 2114 if (nb_pt > nb_cfg_ports) { 2115 printf("nb fwd ports %u > %u (number of configured " 2116 "ports) - ignored\n", 2117 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2118 return; 2119 } 2120 nb_fwd_ports = (portid_t) nb_pt; 2121 printf("Number of forwarding ports set to %u\n", 2122 (unsigned int) nb_fwd_ports); 2123 } 2124 2125 int 2126 port_is_forwarding(portid_t port_id) 2127 { 2128 unsigned int i; 2129 2130 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2131 return -1; 2132 2133 for (i = 0; i < nb_fwd_ports; i++) { 2134 if (fwd_ports_ids[i] == port_id) 2135 return 1; 2136 } 2137 2138 return 0; 2139 } 2140 2141 void 2142 set_nb_pkt_per_burst(uint16_t nb) 2143 { 2144 if (nb > MAX_PKT_BURST) { 2145 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2146 " ignored\n", 2147 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2148 return; 2149 } 2150 nb_pkt_per_burst = nb; 2151 printf("Number of packets per burst set to %u\n", 2152 (unsigned int) nb_pkt_per_burst); 2153 } 2154 2155 static const char * 2156 tx_split_get_name(enum tx_pkt_split split) 2157 { 2158 uint32_t i; 2159 2160 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2161 if (tx_split_name[i].split == split) 2162 return tx_split_name[i].name; 2163 } 2164 return NULL; 2165 } 2166 2167 void 2168 set_tx_pkt_split(const char *name) 2169 { 2170 uint32_t i; 2171 2172 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2173 if (strcmp(tx_split_name[i].name, name) == 0) { 2174 tx_pkt_split = tx_split_name[i].split; 2175 return; 2176 } 2177 } 2178 printf("unknown value: \"%s\"\n", name); 2179 } 2180 2181 void 2182 show_tx_pkt_segments(void) 2183 { 2184 uint32_t i, n; 2185 const char *split; 2186 2187 n = tx_pkt_nb_segs; 2188 split = tx_split_get_name(tx_pkt_split); 2189 2190 printf("Number of segments: %u\n", n); 2191 printf("Segment sizes: "); 2192 for (i = 0; i != n - 1; i++) 2193 printf("%hu,", tx_pkt_seg_lengths[i]); 2194 printf("%hu\n", tx_pkt_seg_lengths[i]); 2195 printf("Split packet: %s\n", split); 2196 } 2197 2198 void 2199 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2200 { 2201 uint16_t tx_pkt_len; 2202 unsigned i; 2203 2204 if (nb_segs >= (unsigned) nb_txd) { 2205 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2206 nb_segs, (unsigned int) nb_txd); 2207 return; 2208 } 2209 2210 /* 2211 * Check that each segment length is greater or equal than 2212 * the mbuf data sise. 2213 * Check also that the total packet length is greater or equal than the 2214 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2215 */ 2216 tx_pkt_len = 0; 2217 for (i = 0; i < nb_segs; i++) { 2218 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2219 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2220 i, seg_lengths[i], (unsigned) mbuf_data_size); 2221 return; 2222 } 2223 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2224 } 2225 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2226 printf("total packet length=%u < %d - give up\n", 2227 (unsigned) tx_pkt_len, 2228 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2229 return; 2230 } 2231 2232 for (i = 0; i < nb_segs; i++) 2233 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2234 2235 tx_pkt_length = tx_pkt_len; 2236 tx_pkt_nb_segs = (uint8_t) nb_segs; 2237 } 2238 2239 char* 2240 list_pkt_forwarding_modes(void) 2241 { 2242 static char fwd_modes[128] = ""; 2243 const char *separator = "|"; 2244 struct fwd_engine *fwd_eng; 2245 unsigned i = 0; 2246 2247 if (strlen (fwd_modes) == 0) { 2248 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2249 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2250 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2251 strncat(fwd_modes, separator, 2252 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2253 } 2254 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2255 } 2256 2257 return fwd_modes; 2258 } 2259 2260 char* 2261 list_pkt_forwarding_retry_modes(void) 2262 { 2263 static char fwd_modes[128] = ""; 2264 const char *separator = "|"; 2265 struct fwd_engine *fwd_eng; 2266 unsigned i = 0; 2267 2268 if (strlen(fwd_modes) == 0) { 2269 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2270 if (fwd_eng == &rx_only_engine) 2271 continue; 2272 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2273 sizeof(fwd_modes) - 2274 strlen(fwd_modes) - 1); 2275 strncat(fwd_modes, separator, 2276 sizeof(fwd_modes) - 2277 strlen(fwd_modes) - 1); 2278 } 2279 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2280 } 2281 2282 return fwd_modes; 2283 } 2284 2285 void 2286 set_pkt_forwarding_mode(const char *fwd_mode_name) 2287 { 2288 struct fwd_engine *fwd_eng; 2289 unsigned i; 2290 2291 i = 0; 2292 while ((fwd_eng = fwd_engines[i]) != NULL) { 2293 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2294 printf("Set %s packet forwarding mode%s\n", 2295 fwd_mode_name, 2296 retry_enabled == 0 ? "" : " with retry"); 2297 cur_fwd_eng = fwd_eng; 2298 return; 2299 } 2300 i++; 2301 } 2302 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2303 } 2304 2305 void 2306 set_verbose_level(uint16_t vb_level) 2307 { 2308 printf("Change verbose level from %u to %u\n", 2309 (unsigned int) verbose_level, (unsigned int) vb_level); 2310 verbose_level = vb_level; 2311 } 2312 2313 void 2314 vlan_extend_set(portid_t port_id, int on) 2315 { 2316 int diag; 2317 int vlan_offload; 2318 2319 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2320 return; 2321 2322 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2323 2324 if (on) 2325 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2326 else 2327 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2328 2329 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2330 if (diag < 0) 2331 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2332 "diag=%d\n", port_id, on, diag); 2333 } 2334 2335 void 2336 rx_vlan_strip_set(portid_t port_id, int on) 2337 { 2338 int diag; 2339 int vlan_offload; 2340 2341 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2342 return; 2343 2344 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2345 2346 if (on) 2347 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2348 else 2349 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2350 2351 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2352 if (diag < 0) 2353 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2354 "diag=%d\n", port_id, on, diag); 2355 } 2356 2357 void 2358 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2359 { 2360 int diag; 2361 2362 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2363 return; 2364 2365 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2366 if (diag < 0) 2367 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2368 "diag=%d\n", port_id, queue_id, on, diag); 2369 } 2370 2371 void 2372 rx_vlan_filter_set(portid_t port_id, int on) 2373 { 2374 int diag; 2375 int vlan_offload; 2376 2377 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2378 return; 2379 2380 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2381 2382 if (on) 2383 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2384 else 2385 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2386 2387 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2388 if (diag < 0) 2389 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2390 "diag=%d\n", port_id, on, diag); 2391 } 2392 2393 int 2394 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2395 { 2396 int diag; 2397 2398 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2399 return 1; 2400 if (vlan_id_is_invalid(vlan_id)) 2401 return 1; 2402 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2403 if (diag == 0) 2404 return 0; 2405 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2406 "diag=%d\n", 2407 port_id, vlan_id, on, diag); 2408 return -1; 2409 } 2410 2411 void 2412 rx_vlan_all_filter_set(portid_t port_id, int on) 2413 { 2414 uint16_t vlan_id; 2415 2416 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2417 return; 2418 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2419 if (rx_vft_set(port_id, vlan_id, on)) 2420 break; 2421 } 2422 } 2423 2424 void 2425 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2426 { 2427 int diag; 2428 2429 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2430 return; 2431 2432 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2433 if (diag == 0) 2434 return; 2435 2436 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2437 "diag=%d\n", 2438 port_id, vlan_type, tp_id, diag); 2439 } 2440 2441 void 2442 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2443 { 2444 int vlan_offload; 2445 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2446 return; 2447 if (vlan_id_is_invalid(vlan_id)) 2448 return; 2449 2450 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2451 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2452 printf("Error, as QinQ has been enabled.\n"); 2453 return; 2454 } 2455 2456 tx_vlan_reset(port_id); 2457 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; 2458 ports[port_id].tx_vlan_id = vlan_id; 2459 } 2460 2461 void 2462 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2463 { 2464 int vlan_offload; 2465 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2466 return; 2467 if (vlan_id_is_invalid(vlan_id)) 2468 return; 2469 if (vlan_id_is_invalid(vlan_id_outer)) 2470 return; 2471 2472 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2473 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2474 printf("Error, as QinQ hasn't been enabled.\n"); 2475 return; 2476 } 2477 2478 tx_vlan_reset(port_id); 2479 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; 2480 ports[port_id].tx_vlan_id = vlan_id; 2481 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2482 } 2483 2484 void 2485 tx_vlan_reset(portid_t port_id) 2486 { 2487 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2488 return; 2489 ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | 2490 TESTPMD_TX_OFFLOAD_INSERT_QINQ); 2491 ports[port_id].tx_vlan_id = 0; 2492 ports[port_id].tx_vlan_id_outer = 0; 2493 } 2494 2495 void 2496 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2497 { 2498 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2499 return; 2500 2501 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2502 } 2503 2504 void 2505 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2506 { 2507 uint16_t i; 2508 uint8_t existing_mapping_found = 0; 2509 2510 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2511 return; 2512 2513 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2514 return; 2515 2516 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2517 printf("map_value not in required range 0..%d\n", 2518 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2519 return; 2520 } 2521 2522 if (!is_rx) { /*then tx*/ 2523 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2524 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2525 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2526 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2527 existing_mapping_found = 1; 2528 break; 2529 } 2530 } 2531 if (!existing_mapping_found) { /* A new additional mapping... */ 2532 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2533 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2534 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2535 nb_tx_queue_stats_mappings++; 2536 } 2537 } 2538 else { /*rx*/ 2539 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2540 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2541 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2542 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2543 existing_mapping_found = 1; 2544 break; 2545 } 2546 } 2547 if (!existing_mapping_found) { /* A new additional mapping... */ 2548 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2549 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2550 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2551 nb_rx_queue_stats_mappings++; 2552 } 2553 } 2554 } 2555 2556 static inline void 2557 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2558 { 2559 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 2560 2561 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2562 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 2563 " tunnel_id: 0x%08x", 2564 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2565 rte_be_to_cpu_32(mask->tunnel_id_mask)); 2566 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2567 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 2568 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 2569 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 2570 2571 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 2572 rte_be_to_cpu_16(mask->src_port_mask), 2573 rte_be_to_cpu_16(mask->dst_port_mask)); 2574 2575 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2576 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 2577 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 2578 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 2579 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 2580 2581 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2582 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 2583 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 2584 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 2585 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 2586 } 2587 2588 printf("\n"); 2589 } 2590 2591 static inline void 2592 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2593 { 2594 struct rte_eth_flex_payload_cfg *cfg; 2595 uint32_t i, j; 2596 2597 for (i = 0; i < flex_conf->nb_payloads; i++) { 2598 cfg = &flex_conf->flex_set[i]; 2599 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 2600 printf("\n RAW: "); 2601 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 2602 printf("\n L2_PAYLOAD: "); 2603 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 2604 printf("\n L3_PAYLOAD: "); 2605 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 2606 printf("\n L4_PAYLOAD: "); 2607 else 2608 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 2609 for (j = 0; j < num; j++) 2610 printf(" %-5u", cfg->src_offset[j]); 2611 } 2612 printf("\n"); 2613 } 2614 2615 static char * 2616 flowtype_to_str(uint16_t flow_type) 2617 { 2618 struct flow_type_info { 2619 char str[32]; 2620 uint16_t ftype; 2621 }; 2622 2623 uint8_t i; 2624 static struct flow_type_info flowtype_str_table[] = { 2625 {"raw", RTE_ETH_FLOW_RAW}, 2626 {"ipv4", RTE_ETH_FLOW_IPV4}, 2627 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 2628 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 2629 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 2630 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 2631 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 2632 {"ipv6", RTE_ETH_FLOW_IPV6}, 2633 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 2634 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 2635 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 2636 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 2637 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 2638 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 2639 {"port", RTE_ETH_FLOW_PORT}, 2640 {"vxlan", RTE_ETH_FLOW_VXLAN}, 2641 {"geneve", RTE_ETH_FLOW_GENEVE}, 2642 {"nvgre", RTE_ETH_FLOW_NVGRE}, 2643 }; 2644 2645 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 2646 if (flowtype_str_table[i].ftype == flow_type) 2647 return flowtype_str_table[i].str; 2648 } 2649 2650 return NULL; 2651 } 2652 2653 static inline void 2654 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2655 { 2656 struct rte_eth_fdir_flex_mask *mask; 2657 uint32_t i, j; 2658 char *p; 2659 2660 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 2661 mask = &flex_conf->flex_mask[i]; 2662 p = flowtype_to_str(mask->flow_type); 2663 printf("\n %s:\t", p ? p : "unknown"); 2664 for (j = 0; j < num; j++) 2665 printf(" %02x", mask->mask[j]); 2666 } 2667 printf("\n"); 2668 } 2669 2670 static inline void 2671 print_fdir_flow_type(uint32_t flow_types_mask) 2672 { 2673 int i; 2674 char *p; 2675 2676 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 2677 if (!(flow_types_mask & (1 << i))) 2678 continue; 2679 p = flowtype_to_str(i); 2680 if (p) 2681 printf(" %s", p); 2682 else 2683 printf(" unknown"); 2684 } 2685 printf("\n"); 2686 } 2687 2688 void 2689 fdir_get_infos(portid_t port_id) 2690 { 2691 struct rte_eth_fdir_stats fdir_stat; 2692 struct rte_eth_fdir_info fdir_info; 2693 int ret; 2694 2695 static const char *fdir_stats_border = "########################"; 2696 2697 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2698 return; 2699 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 2700 if (ret < 0) { 2701 printf("\n FDIR is not supported on port %-2d\n", 2702 port_id); 2703 return; 2704 } 2705 2706 memset(&fdir_info, 0, sizeof(fdir_info)); 2707 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2708 RTE_ETH_FILTER_INFO, &fdir_info); 2709 memset(&fdir_stat, 0, sizeof(fdir_stat)); 2710 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2711 RTE_ETH_FILTER_STATS, &fdir_stat); 2712 printf("\n %s FDIR infos for port %-2d %s\n", 2713 fdir_stats_border, port_id, fdir_stats_border); 2714 printf(" MODE: "); 2715 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 2716 printf(" PERFECT\n"); 2717 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 2718 printf(" PERFECT-MAC-VLAN\n"); 2719 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2720 printf(" PERFECT-TUNNEL\n"); 2721 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 2722 printf(" SIGNATURE\n"); 2723 else 2724 printf(" DISABLE\n"); 2725 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 2726 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 2727 printf(" SUPPORTED FLOW TYPE: "); 2728 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 2729 } 2730 printf(" FLEX PAYLOAD INFO:\n"); 2731 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 2732 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 2733 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 2734 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 2735 fdir_info.flex_payload_unit, 2736 fdir_info.max_flex_payload_segment_num, 2737 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 2738 printf(" MASK: "); 2739 print_fdir_mask(&fdir_info.mask); 2740 if (fdir_info.flex_conf.nb_payloads > 0) { 2741 printf(" FLEX PAYLOAD SRC OFFSET:"); 2742 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2743 } 2744 if (fdir_info.flex_conf.nb_flexmasks > 0) { 2745 printf(" FLEX MASK CFG:"); 2746 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2747 } 2748 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 2749 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 2750 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 2751 fdir_info.guarant_spc, fdir_info.best_spc); 2752 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 2753 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 2754 " add: %-10"PRIu64" remove: %"PRIu64"\n" 2755 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 2756 fdir_stat.collision, fdir_stat.free, 2757 fdir_stat.maxhash, fdir_stat.maxlen, 2758 fdir_stat.add, fdir_stat.remove, 2759 fdir_stat.f_add, fdir_stat.f_remove); 2760 printf(" %s############################%s\n", 2761 fdir_stats_border, fdir_stats_border); 2762 } 2763 2764 void 2765 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 2766 { 2767 struct rte_port *port; 2768 struct rte_eth_fdir_flex_conf *flex_conf; 2769 int i, idx = 0; 2770 2771 port = &ports[port_id]; 2772 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2773 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 2774 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 2775 idx = i; 2776 break; 2777 } 2778 } 2779 if (i >= RTE_ETH_FLOW_MAX) { 2780 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 2781 idx = flex_conf->nb_flexmasks; 2782 flex_conf->nb_flexmasks++; 2783 } else { 2784 printf("The flex mask table is full. Can not set flex" 2785 " mask for flow_type(%u).", cfg->flow_type); 2786 return; 2787 } 2788 } 2789 (void)rte_memcpy(&flex_conf->flex_mask[idx], 2790 cfg, 2791 sizeof(struct rte_eth_fdir_flex_mask)); 2792 } 2793 2794 void 2795 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 2796 { 2797 struct rte_port *port; 2798 struct rte_eth_fdir_flex_conf *flex_conf; 2799 int i, idx = 0; 2800 2801 port = &ports[port_id]; 2802 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2803 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 2804 if (cfg->type == flex_conf->flex_set[i].type) { 2805 idx = i; 2806 break; 2807 } 2808 } 2809 if (i >= RTE_ETH_PAYLOAD_MAX) { 2810 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 2811 idx = flex_conf->nb_payloads; 2812 flex_conf->nb_payloads++; 2813 } else { 2814 printf("The flex payload table is full. Can not set" 2815 " flex payload for type(%u).", cfg->type); 2816 return; 2817 } 2818 } 2819 (void)rte_memcpy(&flex_conf->flex_set[idx], 2820 cfg, 2821 sizeof(struct rte_eth_flex_payload_cfg)); 2822 2823 } 2824 2825 void 2826 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 2827 { 2828 int diag; 2829 2830 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2831 return; 2832 if (is_rx) 2833 diag = rte_eth_dev_set_vf_rx(port_id,vf,on); 2834 else 2835 diag = rte_eth_dev_set_vf_tx(port_id,vf,on); 2836 if (diag == 0) 2837 return; 2838 if(is_rx) 2839 printf("rte_eth_dev_set_vf_rx for port_id=%d failed " 2840 "diag=%d\n", port_id, diag); 2841 else 2842 printf("rte_eth_dev_set_vf_tx for port_id=%d failed " 2843 "diag=%d\n", port_id, diag); 2844 2845 } 2846 2847 void 2848 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on) 2849 { 2850 int diag; 2851 2852 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2853 return; 2854 if (vlan_id_is_invalid(vlan_id)) 2855 return; 2856 diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on); 2857 if (diag == 0) 2858 return; 2859 printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed " 2860 "diag=%d\n", port_id, diag); 2861 } 2862 2863 int 2864 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 2865 { 2866 int diag; 2867 struct rte_eth_link link; 2868 2869 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2870 return 1; 2871 rte_eth_link_get_nowait(port_id, &link); 2872 if (rate > link.link_speed) { 2873 printf("Invalid rate value:%u bigger than link speed: %u\n", 2874 rate, link.link_speed); 2875 return 1; 2876 } 2877 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 2878 if (diag == 0) 2879 return diag; 2880 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 2881 port_id, diag); 2882 return diag; 2883 } 2884 2885 int 2886 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 2887 { 2888 int diag; 2889 struct rte_eth_link link; 2890 2891 if (q_msk == 0) 2892 return 0; 2893 2894 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2895 return 1; 2896 rte_eth_link_get_nowait(port_id, &link); 2897 if (rate > link.link_speed) { 2898 printf("Invalid rate value:%u bigger than link speed: %u\n", 2899 rate, link.link_speed); 2900 return 1; 2901 } 2902 diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk); 2903 if (diag == 0) 2904 return diag; 2905 printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n", 2906 port_id, diag); 2907 return diag; 2908 } 2909 2910 /* 2911 * Functions to manage the set of filtered Multicast MAC addresses. 2912 * 2913 * A pool of filtered multicast MAC addresses is associated with each port. 2914 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 2915 * The address of the pool and the number of valid multicast MAC addresses 2916 * recorded in the pool are stored in the fields "mc_addr_pool" and 2917 * "mc_addr_nb" of the "rte_port" data structure. 2918 * 2919 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 2920 * to be supplied a contiguous array of multicast MAC addresses. 2921 * To comply with this constraint, the set of multicast addresses recorded 2922 * into the pool are systematically compacted at the beginning of the pool. 2923 * Hence, when a multicast address is removed from the pool, all following 2924 * addresses, if any, are copied back to keep the set contiguous. 2925 */ 2926 #define MCAST_POOL_INC 32 2927 2928 static int 2929 mcast_addr_pool_extend(struct rte_port *port) 2930 { 2931 struct ether_addr *mc_pool; 2932 size_t mc_pool_size; 2933 2934 /* 2935 * If a free entry is available at the end of the pool, just 2936 * increment the number of recorded multicast addresses. 2937 */ 2938 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 2939 port->mc_addr_nb++; 2940 return 0; 2941 } 2942 2943 /* 2944 * [re]allocate a pool with MCAST_POOL_INC more entries. 2945 * The previous test guarantees that port->mc_addr_nb is a multiple 2946 * of MCAST_POOL_INC. 2947 */ 2948 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 2949 MCAST_POOL_INC); 2950 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 2951 mc_pool_size); 2952 if (mc_pool == NULL) { 2953 printf("allocation of pool of %u multicast addresses failed\n", 2954 port->mc_addr_nb + MCAST_POOL_INC); 2955 return -ENOMEM; 2956 } 2957 2958 port->mc_addr_pool = mc_pool; 2959 port->mc_addr_nb++; 2960 return 0; 2961 2962 } 2963 2964 static void 2965 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 2966 { 2967 port->mc_addr_nb--; 2968 if (addr_idx == port->mc_addr_nb) { 2969 /* No need to recompact the set of multicast addressses. */ 2970 if (port->mc_addr_nb == 0) { 2971 /* free the pool of multicast addresses. */ 2972 free(port->mc_addr_pool); 2973 port->mc_addr_pool = NULL; 2974 } 2975 return; 2976 } 2977 memmove(&port->mc_addr_pool[addr_idx], 2978 &port->mc_addr_pool[addr_idx + 1], 2979 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 2980 } 2981 2982 static void 2983 eth_port_multicast_addr_list_set(uint8_t port_id) 2984 { 2985 struct rte_port *port; 2986 int diag; 2987 2988 port = &ports[port_id]; 2989 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 2990 port->mc_addr_nb); 2991 if (diag == 0) 2992 return; 2993 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 2994 port->mc_addr_nb, port_id, -diag); 2995 } 2996 2997 void 2998 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr) 2999 { 3000 struct rte_port *port; 3001 uint32_t i; 3002 3003 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3004 return; 3005 3006 port = &ports[port_id]; 3007 3008 /* 3009 * Check that the added multicast MAC address is not already recorded 3010 * in the pool of multicast addresses. 3011 */ 3012 for (i = 0; i < port->mc_addr_nb; i++) { 3013 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3014 printf("multicast address already filtered by port\n"); 3015 return; 3016 } 3017 } 3018 3019 if (mcast_addr_pool_extend(port) != 0) 3020 return; 3021 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3022 eth_port_multicast_addr_list_set(port_id); 3023 } 3024 3025 void 3026 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr) 3027 { 3028 struct rte_port *port; 3029 uint32_t i; 3030 3031 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3032 return; 3033 3034 port = &ports[port_id]; 3035 3036 /* 3037 * Search the pool of multicast MAC addresses for the removed address. 3038 */ 3039 for (i = 0; i < port->mc_addr_nb; i++) { 3040 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3041 break; 3042 } 3043 if (i == port->mc_addr_nb) { 3044 printf("multicast address not filtered by port %d\n", port_id); 3045 return; 3046 } 3047 3048 mcast_addr_pool_remove(port, i); 3049 eth_port_multicast_addr_list_set(port_id); 3050 } 3051 3052 void 3053 port_dcb_info_display(uint8_t port_id) 3054 { 3055 struct rte_eth_dcb_info dcb_info; 3056 uint16_t i; 3057 int ret; 3058 static const char *border = "================"; 3059 3060 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3061 return; 3062 3063 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3064 if (ret) { 3065 printf("\n Failed to get dcb infos on port %-2d\n", 3066 port_id); 3067 return; 3068 } 3069 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3070 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3071 printf("\n TC : "); 3072 for (i = 0; i < dcb_info.nb_tcs; i++) 3073 printf("\t%4d", i); 3074 printf("\n Priority : "); 3075 for (i = 0; i < dcb_info.nb_tcs; i++) 3076 printf("\t%4d", dcb_info.prio_tc[i]); 3077 printf("\n BW percent :"); 3078 for (i = 0; i < dcb_info.nb_tcs; i++) 3079 printf("\t%4d%%", dcb_info.tc_bws[i]); 3080 printf("\n RXQ base : "); 3081 for (i = 0; i < dcb_info.nb_tcs; i++) 3082 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3083 printf("\n RXQ number :"); 3084 for (i = 0; i < dcb_info.nb_tcs; i++) 3085 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3086 printf("\n TXQ base : "); 3087 for (i = 0; i < dcb_info.nb_tcs; i++) 3088 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3089 printf("\n TXQ number :"); 3090 for (i = 0; i < dcb_info.nb_tcs; i++) 3091 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3092 printf("\n"); 3093 } 3094