1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 /* BSD LICENSE 34 * 35 * Copyright 2013-2014 6WIND S.A. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name of 6WIND S.A. nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 62 */ 63 64 #include <stdarg.h> 65 #include <errno.h> 66 #include <stdio.h> 67 #include <string.h> 68 #include <stdarg.h> 69 #include <stdint.h> 70 #include <inttypes.h> 71 72 #include <sys/queue.h> 73 74 #include <rte_common.h> 75 #include <rte_byteorder.h> 76 #include <rte_debug.h> 77 #include <rte_log.h> 78 #include <rte_memory.h> 79 #include <rte_memcpy.h> 80 #include <rte_memzone.h> 81 #include <rte_launch.h> 82 #include <rte_eal.h> 83 #include <rte_per_lcore.h> 84 #include <rte_lcore.h> 85 #include <rte_atomic.h> 86 #include <rte_branch_prediction.h> 87 #include <rte_ring.h> 88 #include <rte_mempool.h> 89 #include <rte_mbuf.h> 90 #include <rte_interrupts.h> 91 #include <rte_pci.h> 92 #include <rte_ether.h> 93 #include <rte_ethdev.h> 94 #include <rte_string_fns.h> 95 96 #include "testpmd.h" 97 98 static char *flowtype_to_str(uint16_t flow_type); 99 100 static const struct { 101 enum tx_pkt_split split; 102 const char *name; 103 } tx_split_name[] = { 104 { 105 .split = TX_PKT_SPLIT_OFF, 106 .name = "off", 107 }, 108 { 109 .split = TX_PKT_SPLIT_ON, 110 .name = "on", 111 }, 112 { 113 .split = TX_PKT_SPLIT_RND, 114 .name = "rand", 115 }, 116 }; 117 118 struct rss_type_info { 119 char str[32]; 120 uint64_t rss_type; 121 }; 122 123 static const struct rss_type_info rss_type_table[] = { 124 { "ipv4", ETH_RSS_IPV4 }, 125 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 126 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 127 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 128 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 129 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 130 { "ipv6", ETH_RSS_IPV6 }, 131 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 132 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 133 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 134 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 135 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 136 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 137 { "ipv6-ex", ETH_RSS_IPV6_EX }, 138 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 139 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 140 }; 141 142 static void 143 print_ethaddr(const char *name, struct ether_addr *eth_addr) 144 { 145 char buf[ETHER_ADDR_FMT_SIZE]; 146 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 147 printf("%s%s", name, buf); 148 } 149 150 void 151 nic_stats_display(portid_t port_id) 152 { 153 struct rte_eth_stats stats; 154 struct rte_port *port = &ports[port_id]; 155 uint8_t i; 156 portid_t pid; 157 158 static const char *nic_stats_border = "########################"; 159 160 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 161 printf("Valid port range is [0"); 162 FOREACH_PORT(pid, ports) 163 printf(", %d", pid); 164 printf("]\n"); 165 return; 166 } 167 rte_eth_stats_get(port_id, &stats); 168 printf("\n %s NIC statistics for port %-2d %s\n", 169 nic_stats_border, port_id, nic_stats_border); 170 171 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 172 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 173 "%-"PRIu64"\n", 174 stats.ipackets, stats.imissed, stats.ibytes); 175 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 176 printf(" RX-nombuf: %-10"PRIu64"\n", 177 stats.rx_nombuf); 178 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 179 "%-"PRIu64"\n", 180 stats.opackets, stats.oerrors, stats.obytes); 181 } 182 else { 183 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 184 " RX-bytes: %10"PRIu64"\n", 185 stats.ipackets, stats.ierrors, stats.ibytes); 186 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 187 printf(" RX-nombuf: %10"PRIu64"\n", 188 stats.rx_nombuf); 189 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 190 " TX-bytes: %10"PRIu64"\n", 191 stats.opackets, stats.oerrors, stats.obytes); 192 } 193 194 if (port->rx_queue_stats_mapping_enabled) { 195 printf("\n"); 196 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 197 printf(" Stats reg %2d RX-packets: %10"PRIu64 198 " RX-errors: %10"PRIu64 199 " RX-bytes: %10"PRIu64"\n", 200 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 201 } 202 } 203 if (port->tx_queue_stats_mapping_enabled) { 204 printf("\n"); 205 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 206 printf(" Stats reg %2d TX-packets: %10"PRIu64 207 " TX-bytes: %10"PRIu64"\n", 208 i, stats.q_opackets[i], stats.q_obytes[i]); 209 } 210 } 211 212 printf(" %s############################%s\n", 213 nic_stats_border, nic_stats_border); 214 } 215 216 void 217 nic_stats_clear(portid_t port_id) 218 { 219 portid_t pid; 220 221 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 222 printf("Valid port range is [0"); 223 FOREACH_PORT(pid, ports) 224 printf(", %d", pid); 225 printf("]\n"); 226 return; 227 } 228 rte_eth_stats_reset(port_id); 229 printf("\n NIC statistics for port %d cleared\n", port_id); 230 } 231 232 void 233 nic_xstats_display(portid_t port_id) 234 { 235 struct rte_eth_xstats *xstats; 236 int len, ret, i; 237 238 printf("###### NIC extended statistics for port %-2d\n", port_id); 239 240 len = rte_eth_xstats_get(port_id, NULL, 0); 241 if (len < 0) { 242 printf("Cannot get xstats count\n"); 243 return; 244 } 245 xstats = malloc(sizeof(xstats[0]) * len); 246 if (xstats == NULL) { 247 printf("Cannot allocate memory for xstats\n"); 248 return; 249 } 250 ret = rte_eth_xstats_get(port_id, xstats, len); 251 if (ret < 0 || ret > len) { 252 printf("Cannot get xstats\n"); 253 free(xstats); 254 return; 255 } 256 for (i = 0; i < len; i++) 257 printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value); 258 free(xstats); 259 } 260 261 void 262 nic_xstats_clear(portid_t port_id) 263 { 264 rte_eth_xstats_reset(port_id); 265 } 266 267 void 268 nic_stats_mapping_display(portid_t port_id) 269 { 270 struct rte_port *port = &ports[port_id]; 271 uint16_t i; 272 portid_t pid; 273 274 static const char *nic_stats_mapping_border = "########################"; 275 276 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 277 printf("Valid port range is [0"); 278 FOREACH_PORT(pid, ports) 279 printf(", %d", pid); 280 printf("]\n"); 281 return; 282 } 283 284 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 285 printf("Port id %d - either does not support queue statistic mapping or" 286 " no queue statistic mapping set\n", port_id); 287 return; 288 } 289 290 printf("\n %s NIC statistics mapping for port %-2d %s\n", 291 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 292 293 if (port->rx_queue_stats_mapping_enabled) { 294 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 295 if (rx_queue_stats_mappings[i].port_id == port_id) { 296 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 297 rx_queue_stats_mappings[i].queue_id, 298 rx_queue_stats_mappings[i].stats_counter_id); 299 } 300 } 301 printf("\n"); 302 } 303 304 305 if (port->tx_queue_stats_mapping_enabled) { 306 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 307 if (tx_queue_stats_mappings[i].port_id == port_id) { 308 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 309 tx_queue_stats_mappings[i].queue_id, 310 tx_queue_stats_mappings[i].stats_counter_id); 311 } 312 } 313 } 314 315 printf(" %s####################################%s\n", 316 nic_stats_mapping_border, nic_stats_mapping_border); 317 } 318 319 void 320 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 321 { 322 struct rte_eth_rxq_info qinfo; 323 int32_t rc; 324 static const char *info_border = "*********************"; 325 326 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 327 if (rc != 0) { 328 printf("Failed to retrieve information for port: %hhu, " 329 "RX queue: %hu\nerror desc: %s(%d)\n", 330 port_id, queue_id, strerror(-rc), rc); 331 return; 332 } 333 334 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 335 info_border, port_id, queue_id, info_border); 336 337 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 338 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 339 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 340 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 341 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 342 printf("\nRX drop packets: %s", 343 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 344 printf("\nRX deferred start: %s", 345 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 346 printf("\nRX scattered packets: %s", 347 (qinfo.scattered_rx != 0) ? "on" : "off"); 348 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 349 printf("\n"); 350 } 351 352 void 353 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 354 { 355 struct rte_eth_txq_info qinfo; 356 int32_t rc; 357 static const char *info_border = "*********************"; 358 359 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 360 if (rc != 0) { 361 printf("Failed to retrieve information for port: %hhu, " 362 "TX queue: %hu\nerror desc: %s(%d)\n", 363 port_id, queue_id, strerror(-rc), rc); 364 return; 365 } 366 367 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 368 info_border, port_id, queue_id, info_border); 369 370 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 371 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 372 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 373 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 374 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 375 printf("\nTX flags: %#x", qinfo.conf.txq_flags); 376 printf("\nTX deferred start: %s", 377 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 378 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 379 printf("\n"); 380 } 381 382 void 383 port_infos_display(portid_t port_id) 384 { 385 struct rte_port *port; 386 struct ether_addr mac_addr; 387 struct rte_eth_link link; 388 struct rte_eth_dev_info dev_info; 389 int vlan_offload; 390 struct rte_mempool * mp; 391 static const char *info_border = "*********************"; 392 portid_t pid; 393 394 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 395 printf("Valid port range is [0"); 396 FOREACH_PORT(pid, ports) 397 printf(", %d", pid); 398 printf("]\n"); 399 return; 400 } 401 port = &ports[port_id]; 402 rte_eth_link_get_nowait(port_id, &link); 403 printf("\n%s Infos for port %-2d %s\n", 404 info_border, port_id, info_border); 405 rte_eth_macaddr_get(port_id, &mac_addr); 406 print_ethaddr("MAC address: ", &mac_addr); 407 printf("\nConnect to socket: %u", port->socket_id); 408 409 if (port_numa[port_id] != NUMA_NO_CONFIG) { 410 mp = mbuf_pool_find(port_numa[port_id]); 411 if (mp) 412 printf("\nmemory allocation on the socket: %d", 413 port_numa[port_id]); 414 } else 415 printf("\nmemory allocation on the socket: %u",port->socket_id); 416 417 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 418 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 419 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 420 ("full-duplex") : ("half-duplex")); 421 printf("Promiscuous mode: %s\n", 422 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 423 printf("Allmulticast mode: %s\n", 424 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 425 printf("Maximum number of MAC addresses: %u\n", 426 (unsigned int)(port->dev_info.max_mac_addrs)); 427 printf("Maximum number of MAC addresses of hash filtering: %u\n", 428 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 429 430 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 431 if (vlan_offload >= 0){ 432 printf("VLAN offload: \n"); 433 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 434 printf(" strip on \n"); 435 else 436 printf(" strip off \n"); 437 438 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 439 printf(" filter on \n"); 440 else 441 printf(" filter off \n"); 442 443 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 444 printf(" qinq(extend) on \n"); 445 else 446 printf(" qinq(extend) off \n"); 447 } 448 449 memset(&dev_info, 0, sizeof(dev_info)); 450 rte_eth_dev_info_get(port_id, &dev_info); 451 if (dev_info.hash_key_size > 0) 452 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 453 if (dev_info.reta_size > 0) 454 printf("Redirection table size: %u\n", dev_info.reta_size); 455 if (!dev_info.flow_type_rss_offloads) 456 printf("No flow type is supported.\n"); 457 else { 458 uint16_t i; 459 char *p; 460 461 printf("Supported flow types:\n"); 462 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; 463 i++) { 464 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 465 continue; 466 p = flowtype_to_str(i); 467 printf(" %s\n", (p ? p : "unknown")); 468 } 469 } 470 471 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 472 printf("Max possible number of RXDs per queue: %hu\n", 473 dev_info.rx_desc_lim.nb_max); 474 printf("Min possible number of RXDs per queue: %hu\n", 475 dev_info.rx_desc_lim.nb_min); 476 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 477 478 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 479 printf("Max possible number of TXDs per queue: %hu\n", 480 dev_info.tx_desc_lim.nb_max); 481 printf("Min possible number of TXDs per queue: %hu\n", 482 dev_info.tx_desc_lim.nb_min); 483 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 484 } 485 486 int 487 port_id_is_invalid(portid_t port_id, enum print_warning warning) 488 { 489 if (port_id == (portid_t)RTE_PORT_ALL) 490 return 0; 491 492 if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled) 493 return 0; 494 495 if (warning == ENABLED_WARN) 496 printf("Invalid port %d\n", port_id); 497 498 return 1; 499 } 500 501 static int 502 vlan_id_is_invalid(uint16_t vlan_id) 503 { 504 if (vlan_id < 4096) 505 return 0; 506 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 507 return 1; 508 } 509 510 static int 511 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 512 { 513 uint64_t pci_len; 514 515 if (reg_off & 0x3) { 516 printf("Port register offset 0x%X not aligned on a 4-byte " 517 "boundary\n", 518 (unsigned)reg_off); 519 return 1; 520 } 521 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 522 if (reg_off >= pci_len) { 523 printf("Port %d: register offset %u (0x%X) out of port PCI " 524 "resource (length=%"PRIu64")\n", 525 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 526 return 1; 527 } 528 return 0; 529 } 530 531 static int 532 reg_bit_pos_is_invalid(uint8_t bit_pos) 533 { 534 if (bit_pos <= 31) 535 return 0; 536 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 537 return 1; 538 } 539 540 #define display_port_and_reg_off(port_id, reg_off) \ 541 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 542 543 static inline void 544 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 545 { 546 display_port_and_reg_off(port_id, (unsigned)reg_off); 547 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 548 } 549 550 void 551 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 552 { 553 uint32_t reg_v; 554 555 556 if (port_id_is_invalid(port_id, ENABLED_WARN)) 557 return; 558 if (port_reg_off_is_invalid(port_id, reg_off)) 559 return; 560 if (reg_bit_pos_is_invalid(bit_x)) 561 return; 562 reg_v = port_id_pci_reg_read(port_id, reg_off); 563 display_port_and_reg_off(port_id, (unsigned)reg_off); 564 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 565 } 566 567 void 568 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 569 uint8_t bit1_pos, uint8_t bit2_pos) 570 { 571 uint32_t reg_v; 572 uint8_t l_bit; 573 uint8_t h_bit; 574 575 if (port_id_is_invalid(port_id, ENABLED_WARN)) 576 return; 577 if (port_reg_off_is_invalid(port_id, reg_off)) 578 return; 579 if (reg_bit_pos_is_invalid(bit1_pos)) 580 return; 581 if (reg_bit_pos_is_invalid(bit2_pos)) 582 return; 583 if (bit1_pos > bit2_pos) 584 l_bit = bit2_pos, h_bit = bit1_pos; 585 else 586 l_bit = bit1_pos, h_bit = bit2_pos; 587 588 reg_v = port_id_pci_reg_read(port_id, reg_off); 589 reg_v >>= l_bit; 590 if (h_bit < 31) 591 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 592 display_port_and_reg_off(port_id, (unsigned)reg_off); 593 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 594 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 595 } 596 597 void 598 port_reg_display(portid_t port_id, uint32_t reg_off) 599 { 600 uint32_t reg_v; 601 602 if (port_id_is_invalid(port_id, ENABLED_WARN)) 603 return; 604 if (port_reg_off_is_invalid(port_id, reg_off)) 605 return; 606 reg_v = port_id_pci_reg_read(port_id, reg_off); 607 display_port_reg_value(port_id, reg_off, reg_v); 608 } 609 610 void 611 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 612 uint8_t bit_v) 613 { 614 uint32_t reg_v; 615 616 if (port_id_is_invalid(port_id, ENABLED_WARN)) 617 return; 618 if (port_reg_off_is_invalid(port_id, reg_off)) 619 return; 620 if (reg_bit_pos_is_invalid(bit_pos)) 621 return; 622 if (bit_v > 1) { 623 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 624 return; 625 } 626 reg_v = port_id_pci_reg_read(port_id, reg_off); 627 if (bit_v == 0) 628 reg_v &= ~(1 << bit_pos); 629 else 630 reg_v |= (1 << bit_pos); 631 port_id_pci_reg_write(port_id, reg_off, reg_v); 632 display_port_reg_value(port_id, reg_off, reg_v); 633 } 634 635 void 636 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 637 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 638 { 639 uint32_t max_v; 640 uint32_t reg_v; 641 uint8_t l_bit; 642 uint8_t h_bit; 643 644 if (port_id_is_invalid(port_id, ENABLED_WARN)) 645 return; 646 if (port_reg_off_is_invalid(port_id, reg_off)) 647 return; 648 if (reg_bit_pos_is_invalid(bit1_pos)) 649 return; 650 if (reg_bit_pos_is_invalid(bit2_pos)) 651 return; 652 if (bit1_pos > bit2_pos) 653 l_bit = bit2_pos, h_bit = bit1_pos; 654 else 655 l_bit = bit1_pos, h_bit = bit2_pos; 656 657 if ((h_bit - l_bit) < 31) 658 max_v = (1 << (h_bit - l_bit + 1)) - 1; 659 else 660 max_v = 0xFFFFFFFF; 661 662 if (value > max_v) { 663 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 664 (unsigned)value, (unsigned)value, 665 (unsigned)max_v, (unsigned)max_v); 666 return; 667 } 668 reg_v = port_id_pci_reg_read(port_id, reg_off); 669 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 670 reg_v |= (value << l_bit); /* Set changed bits */ 671 port_id_pci_reg_write(port_id, reg_off, reg_v); 672 display_port_reg_value(port_id, reg_off, reg_v); 673 } 674 675 void 676 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 677 { 678 if (port_id_is_invalid(port_id, ENABLED_WARN)) 679 return; 680 if (port_reg_off_is_invalid(port_id, reg_off)) 681 return; 682 port_id_pci_reg_write(port_id, reg_off, reg_v); 683 display_port_reg_value(port_id, reg_off, reg_v); 684 } 685 686 void 687 port_mtu_set(portid_t port_id, uint16_t mtu) 688 { 689 int diag; 690 691 if (port_id_is_invalid(port_id, ENABLED_WARN)) 692 return; 693 diag = rte_eth_dev_set_mtu(port_id, mtu); 694 if (diag == 0) 695 return; 696 printf("Set MTU failed. diag=%d\n", diag); 697 } 698 699 /* 700 * RX/TX ring descriptors display functions. 701 */ 702 int 703 rx_queue_id_is_invalid(queueid_t rxq_id) 704 { 705 if (rxq_id < nb_rxq) 706 return 0; 707 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 708 return 1; 709 } 710 711 int 712 tx_queue_id_is_invalid(queueid_t txq_id) 713 { 714 if (txq_id < nb_txq) 715 return 0; 716 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 717 return 1; 718 } 719 720 static int 721 rx_desc_id_is_invalid(uint16_t rxdesc_id) 722 { 723 if (rxdesc_id < nb_rxd) 724 return 0; 725 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 726 rxdesc_id, nb_rxd); 727 return 1; 728 } 729 730 static int 731 tx_desc_id_is_invalid(uint16_t txdesc_id) 732 { 733 if (txdesc_id < nb_txd) 734 return 0; 735 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 736 txdesc_id, nb_txd); 737 return 1; 738 } 739 740 static const struct rte_memzone * 741 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) 742 { 743 char mz_name[RTE_MEMZONE_NAMESIZE]; 744 const struct rte_memzone *mz; 745 746 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 747 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 748 mz = rte_memzone_lookup(mz_name); 749 if (mz == NULL) 750 printf("%s ring memory zoneof (port %d, queue %d) not" 751 "found (zone name = %s\n", 752 ring_name, port_id, q_id, mz_name); 753 return mz; 754 } 755 756 union igb_ring_dword { 757 uint64_t dword; 758 struct { 759 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 760 uint32_t lo; 761 uint32_t hi; 762 #else 763 uint32_t hi; 764 uint32_t lo; 765 #endif 766 } words; 767 }; 768 769 struct igb_ring_desc_32_bytes { 770 union igb_ring_dword lo_dword; 771 union igb_ring_dword hi_dword; 772 union igb_ring_dword resv1; 773 union igb_ring_dword resv2; 774 }; 775 776 struct igb_ring_desc_16_bytes { 777 union igb_ring_dword lo_dword; 778 union igb_ring_dword hi_dword; 779 }; 780 781 static void 782 ring_rxd_display_dword(union igb_ring_dword dword) 783 { 784 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 785 (unsigned)dword.words.hi); 786 } 787 788 static void 789 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 790 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 791 uint8_t port_id, 792 #else 793 __rte_unused uint8_t port_id, 794 #endif 795 uint16_t desc_id) 796 { 797 struct igb_ring_desc_16_bytes *ring = 798 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 799 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 800 struct rte_eth_dev_info dev_info; 801 802 memset(&dev_info, 0, sizeof(dev_info)); 803 rte_eth_dev_info_get(port_id, &dev_info); 804 if (strstr(dev_info.driver_name, "i40e") != NULL) { 805 /* 32 bytes RX descriptor, i40e only */ 806 struct igb_ring_desc_32_bytes *ring = 807 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 808 ring[desc_id].lo_dword.dword = 809 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 810 ring_rxd_display_dword(ring[desc_id].lo_dword); 811 ring[desc_id].hi_dword.dword = 812 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 813 ring_rxd_display_dword(ring[desc_id].hi_dword); 814 ring[desc_id].resv1.dword = 815 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 816 ring_rxd_display_dword(ring[desc_id].resv1); 817 ring[desc_id].resv2.dword = 818 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 819 ring_rxd_display_dword(ring[desc_id].resv2); 820 821 return; 822 } 823 #endif 824 /* 16 bytes RX descriptor */ 825 ring[desc_id].lo_dword.dword = 826 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 827 ring_rxd_display_dword(ring[desc_id].lo_dword); 828 ring[desc_id].hi_dword.dword = 829 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 830 ring_rxd_display_dword(ring[desc_id].hi_dword); 831 } 832 833 static void 834 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 835 { 836 struct igb_ring_desc_16_bytes *ring; 837 struct igb_ring_desc_16_bytes txd; 838 839 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 840 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 841 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 842 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 843 (unsigned)txd.lo_dword.words.lo, 844 (unsigned)txd.lo_dword.words.hi, 845 (unsigned)txd.hi_dword.words.lo, 846 (unsigned)txd.hi_dword.words.hi); 847 } 848 849 void 850 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 851 { 852 const struct rte_memzone *rx_mz; 853 854 if (port_id_is_invalid(port_id, ENABLED_WARN)) 855 return; 856 if (rx_queue_id_is_invalid(rxq_id)) 857 return; 858 if (rx_desc_id_is_invalid(rxd_id)) 859 return; 860 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 861 if (rx_mz == NULL) 862 return; 863 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 864 } 865 866 void 867 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 868 { 869 const struct rte_memzone *tx_mz; 870 871 if (port_id_is_invalid(port_id, ENABLED_WARN)) 872 return; 873 if (tx_queue_id_is_invalid(txq_id)) 874 return; 875 if (tx_desc_id_is_invalid(txd_id)) 876 return; 877 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 878 if (tx_mz == NULL) 879 return; 880 ring_tx_descriptor_display(tx_mz, txd_id); 881 } 882 883 void 884 fwd_lcores_config_display(void) 885 { 886 lcoreid_t lc_id; 887 888 printf("List of forwarding lcores:"); 889 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 890 printf(" %2u", fwd_lcores_cpuids[lc_id]); 891 printf("\n"); 892 } 893 void 894 rxtx_config_display(void) 895 { 896 printf(" %s packet forwarding - CRC stripping %s - " 897 "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, 898 rx_mode.hw_strip_crc ? "enabled" : "disabled", 899 nb_pkt_per_burst); 900 901 if (cur_fwd_eng == &tx_only_engine) 902 printf(" packet len=%u - nb packet segments=%d\n", 903 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 904 905 struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; 906 struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; 907 908 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 909 nb_fwd_lcores, nb_fwd_ports); 910 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 911 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 912 printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 913 rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, 914 rx_conf->rx_thresh.wthresh); 915 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 916 nb_txq, nb_txd, tx_conf->tx_free_thresh); 917 printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 918 tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, 919 tx_conf->tx_thresh.wthresh); 920 printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", 921 tx_conf->tx_rs_thresh, tx_conf->txq_flags); 922 } 923 924 void 925 port_rss_reta_info(portid_t port_id, 926 struct rte_eth_rss_reta_entry64 *reta_conf, 927 uint16_t nb_entries) 928 { 929 uint16_t i, idx, shift; 930 int ret; 931 932 if (port_id_is_invalid(port_id, ENABLED_WARN)) 933 return; 934 935 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 936 if (ret != 0) { 937 printf("Failed to get RSS RETA info, return code = %d\n", ret); 938 return; 939 } 940 941 for (i = 0; i < nb_entries; i++) { 942 idx = i / RTE_RETA_GROUP_SIZE; 943 shift = i % RTE_RETA_GROUP_SIZE; 944 if (!(reta_conf[idx].mask & (1ULL << shift))) 945 continue; 946 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 947 i, reta_conf[idx].reta[shift]); 948 } 949 } 950 951 /* 952 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 953 * key of the port. 954 */ 955 void 956 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 957 { 958 struct rte_eth_rss_conf rss_conf; 959 uint8_t rss_key[10 * 4] = ""; 960 uint64_t rss_hf; 961 uint8_t i; 962 int diag; 963 964 if (port_id_is_invalid(port_id, ENABLED_WARN)) 965 return; 966 967 rss_conf.rss_hf = 0; 968 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 969 if (!strcmp(rss_info, rss_type_table[i].str)) 970 rss_conf.rss_hf = rss_type_table[i].rss_type; 971 } 972 973 /* Get RSS hash key if asked to display it */ 974 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 975 rss_conf.rss_key_len = sizeof(rss_key); 976 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 977 if (diag != 0) { 978 switch (diag) { 979 case -ENODEV: 980 printf("port index %d invalid\n", port_id); 981 break; 982 case -ENOTSUP: 983 printf("operation not supported by device\n"); 984 break; 985 default: 986 printf("operation failed - diag=%d\n", diag); 987 break; 988 } 989 return; 990 } 991 rss_hf = rss_conf.rss_hf; 992 if (rss_hf == 0) { 993 printf("RSS disabled\n"); 994 return; 995 } 996 printf("RSS functions:\n "); 997 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 998 if (rss_hf & rss_type_table[i].rss_type) 999 printf("%s ", rss_type_table[i].str); 1000 } 1001 printf("\n"); 1002 if (!show_rss_key) 1003 return; 1004 printf("RSS key:\n"); 1005 for (i = 0; i < sizeof(rss_key); i++) 1006 printf("%02X", rss_key[i]); 1007 printf("\n"); 1008 } 1009 1010 void 1011 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1012 uint hash_key_len) 1013 { 1014 struct rte_eth_rss_conf rss_conf; 1015 int diag; 1016 unsigned int i; 1017 1018 rss_conf.rss_key = NULL; 1019 rss_conf.rss_key_len = hash_key_len; 1020 rss_conf.rss_hf = 0; 1021 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1022 if (!strcmp(rss_type_table[i].str, rss_type)) 1023 rss_conf.rss_hf = rss_type_table[i].rss_type; 1024 } 1025 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1026 if (diag == 0) { 1027 rss_conf.rss_key = hash_key; 1028 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1029 } 1030 if (diag == 0) 1031 return; 1032 1033 switch (diag) { 1034 case -ENODEV: 1035 printf("port index %d invalid\n", port_id); 1036 break; 1037 case -ENOTSUP: 1038 printf("operation not supported by device\n"); 1039 break; 1040 default: 1041 printf("operation failed - diag=%d\n", diag); 1042 break; 1043 } 1044 } 1045 1046 /* 1047 * Setup forwarding configuration for each logical core. 1048 */ 1049 static void 1050 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1051 { 1052 streamid_t nb_fs_per_lcore; 1053 streamid_t nb_fs; 1054 streamid_t sm_id; 1055 lcoreid_t nb_extra; 1056 lcoreid_t nb_fc; 1057 lcoreid_t nb_lc; 1058 lcoreid_t lc_id; 1059 1060 nb_fs = cfg->nb_fwd_streams; 1061 nb_fc = cfg->nb_fwd_lcores; 1062 if (nb_fs <= nb_fc) { 1063 nb_fs_per_lcore = 1; 1064 nb_extra = 0; 1065 } else { 1066 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1067 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1068 } 1069 1070 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1071 sm_id = 0; 1072 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1073 fwd_lcores[lc_id]->stream_idx = sm_id; 1074 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1075 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1076 } 1077 1078 /* 1079 * Assign extra remaining streams, if any. 1080 */ 1081 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1082 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1083 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1084 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1085 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1086 } 1087 } 1088 1089 static void 1090 simple_fwd_config_setup(void) 1091 { 1092 portid_t i; 1093 portid_t j; 1094 portid_t inc = 2; 1095 1096 if (port_topology == PORT_TOPOLOGY_CHAINED || 1097 port_topology == PORT_TOPOLOGY_LOOP) { 1098 inc = 1; 1099 } else if (nb_fwd_ports % 2) { 1100 printf("\nWarning! Cannot handle an odd number of ports " 1101 "with the current port topology. Configuration " 1102 "must be changed to have an even number of ports, " 1103 "or relaunch application with " 1104 "--port-topology=chained\n\n"); 1105 } 1106 1107 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1108 cur_fwd_config.nb_fwd_streams = 1109 (streamid_t) cur_fwd_config.nb_fwd_ports; 1110 1111 /* reinitialize forwarding streams */ 1112 init_fwd_streams(); 1113 1114 /* 1115 * In the simple forwarding test, the number of forwarding cores 1116 * must be lower or equal to the number of forwarding ports. 1117 */ 1118 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1119 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1120 cur_fwd_config.nb_fwd_lcores = 1121 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1122 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1123 1124 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1125 if (port_topology != PORT_TOPOLOGY_LOOP) 1126 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1127 else 1128 j = i; 1129 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1130 fwd_streams[i]->rx_queue = 0; 1131 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1132 fwd_streams[i]->tx_queue = 0; 1133 fwd_streams[i]->peer_addr = j; 1134 1135 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1136 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1137 fwd_streams[j]->rx_queue = 0; 1138 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1139 fwd_streams[j]->tx_queue = 0; 1140 fwd_streams[j]->peer_addr = i; 1141 } 1142 } 1143 } 1144 1145 /** 1146 * For the RSS forwarding test, each core is assigned on every port a transmit 1147 * queue whose index is the index of the core itself. This approach limits the 1148 * maximumm number of processing cores of the RSS test to the maximum number of 1149 * TX queues supported by the devices. 1150 * 1151 * Each core is assigned a single stream, each stream being composed of 1152 * a RX queue to poll on a RX port for input messages, associated with 1153 * a TX queue of a TX port where to send forwarded packets. 1154 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1155 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1156 * following rules: 1157 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1158 * - TxQl = RxQj 1159 */ 1160 static void 1161 rss_fwd_config_setup(void) 1162 { 1163 portid_t rxp; 1164 portid_t txp; 1165 queueid_t rxq; 1166 queueid_t nb_q; 1167 lcoreid_t lc_id; 1168 1169 nb_q = nb_rxq; 1170 if (nb_q > nb_txq) 1171 nb_q = nb_txq; 1172 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1173 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1174 cur_fwd_config.nb_fwd_streams = 1175 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1176 if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores) 1177 cur_fwd_config.nb_fwd_streams = 1178 (streamid_t)cur_fwd_config.nb_fwd_lcores; 1179 else 1180 cur_fwd_config.nb_fwd_lcores = 1181 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1182 1183 /* reinitialize forwarding streams */ 1184 init_fwd_streams(); 1185 1186 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1187 rxp = 0; rxq = 0; 1188 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1189 struct fwd_stream *fs; 1190 1191 fs = fwd_streams[lc_id]; 1192 1193 if ((rxp & 0x1) == 0) 1194 txp = (portid_t) (rxp + 1); 1195 else 1196 txp = (portid_t) (rxp - 1); 1197 /* 1198 * if we are in loopback, simply send stuff out through the 1199 * ingress port 1200 */ 1201 if (port_topology == PORT_TOPOLOGY_LOOP) 1202 txp = rxp; 1203 1204 fs->rx_port = fwd_ports_ids[rxp]; 1205 fs->rx_queue = rxq; 1206 fs->tx_port = fwd_ports_ids[txp]; 1207 fs->tx_queue = rxq; 1208 fs->peer_addr = fs->tx_port; 1209 rxq = (queueid_t) (rxq + 1); 1210 if (rxq < nb_q) 1211 continue; 1212 /* 1213 * rxq == nb_q 1214 * Restart from RX queue 0 on next RX port 1215 */ 1216 rxq = 0; 1217 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1218 rxp = (portid_t) 1219 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1220 else 1221 rxp = (portid_t) (rxp + 1); 1222 } 1223 } 1224 1225 /** 1226 * For the DCB forwarding test, each core is assigned on each traffic class. 1227 * 1228 * Each core is assigned a multi-stream, each stream being composed of 1229 * a RX queue to poll on a RX port for input messages, associated with 1230 * a TX queue of a TX port where to send forwarded packets. All RX and 1231 * TX queues are mapping to the same traffic class. 1232 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 1233 * the same core 1234 */ 1235 static void 1236 dcb_fwd_config_setup(void) 1237 { 1238 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 1239 portid_t txp, rxp = 0; 1240 queueid_t txq, rxq = 0; 1241 lcoreid_t lc_id; 1242 uint16_t nb_rx_queue, nb_tx_queue; 1243 uint16_t i, j, k, sm_id = 0; 1244 uint8_t tc = 0; 1245 1246 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1247 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1248 cur_fwd_config.nb_fwd_streams = 1249 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1250 1251 /* reinitialize forwarding streams */ 1252 init_fwd_streams(); 1253 sm_id = 0; 1254 txp = 1; 1255 /* get the dcb info on the first RX and TX ports */ 1256 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1257 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1258 1259 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1260 fwd_lcores[lc_id]->stream_nb = 0; 1261 fwd_lcores[lc_id]->stream_idx = sm_id; 1262 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 1263 /* if the nb_queue is zero, means this tc is 1264 * not enabled on the POOL 1265 */ 1266 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 1267 break; 1268 k = fwd_lcores[lc_id]->stream_nb + 1269 fwd_lcores[lc_id]->stream_idx; 1270 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 1271 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 1272 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1273 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 1274 for (j = 0; j < nb_rx_queue; j++) { 1275 struct fwd_stream *fs; 1276 1277 fs = fwd_streams[k + j]; 1278 fs->rx_port = fwd_ports_ids[rxp]; 1279 fs->rx_queue = rxq + j; 1280 fs->tx_port = fwd_ports_ids[txp]; 1281 fs->tx_queue = txq + j % nb_tx_queue; 1282 fs->peer_addr = fs->tx_port; 1283 } 1284 fwd_lcores[lc_id]->stream_nb += 1285 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1286 } 1287 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 1288 1289 tc++; 1290 if (tc < rxp_dcb_info.nb_tcs) 1291 continue; 1292 /* Restart from TC 0 on next RX port */ 1293 tc = 0; 1294 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1295 rxp = (portid_t) 1296 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1297 else 1298 rxp++; 1299 if (rxp >= nb_fwd_ports) 1300 return; 1301 /* get the dcb information on next RX and TX ports */ 1302 if ((rxp & 0x1) == 0) 1303 txp = (portid_t) (rxp + 1); 1304 else 1305 txp = (portid_t) (rxp - 1); 1306 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1307 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1308 } 1309 } 1310 1311 static void 1312 icmp_echo_config_setup(void) 1313 { 1314 portid_t rxp; 1315 queueid_t rxq; 1316 lcoreid_t lc_id; 1317 uint16_t sm_id; 1318 1319 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 1320 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 1321 (nb_txq * nb_fwd_ports); 1322 else 1323 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1324 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1325 cur_fwd_config.nb_fwd_streams = 1326 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1327 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1328 cur_fwd_config.nb_fwd_lcores = 1329 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1330 if (verbose_level > 0) { 1331 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 1332 __FUNCTION__, 1333 cur_fwd_config.nb_fwd_lcores, 1334 cur_fwd_config.nb_fwd_ports, 1335 cur_fwd_config.nb_fwd_streams); 1336 } 1337 1338 /* reinitialize forwarding streams */ 1339 init_fwd_streams(); 1340 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1341 rxp = 0; rxq = 0; 1342 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1343 if (verbose_level > 0) 1344 printf(" core=%d: \n", lc_id); 1345 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1346 struct fwd_stream *fs; 1347 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1348 fs->rx_port = fwd_ports_ids[rxp]; 1349 fs->rx_queue = rxq; 1350 fs->tx_port = fs->rx_port; 1351 fs->tx_queue = rxq; 1352 fs->peer_addr = fs->tx_port; 1353 if (verbose_level > 0) 1354 printf(" stream=%d port=%d rxq=%d txq=%d\n", 1355 sm_id, fs->rx_port, fs->rx_queue, 1356 fs->tx_queue); 1357 rxq = (queueid_t) (rxq + 1); 1358 if (rxq == nb_rxq) { 1359 rxq = 0; 1360 rxp = (portid_t) (rxp + 1); 1361 } 1362 } 1363 } 1364 } 1365 1366 void 1367 fwd_config_setup(void) 1368 { 1369 cur_fwd_config.fwd_eng = cur_fwd_eng; 1370 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 1371 icmp_echo_config_setup(); 1372 return; 1373 } 1374 if ((nb_rxq > 1) && (nb_txq > 1)){ 1375 if (dcb_config) 1376 dcb_fwd_config_setup(); 1377 else 1378 rss_fwd_config_setup(); 1379 } 1380 else 1381 simple_fwd_config_setup(); 1382 } 1383 1384 static void 1385 pkt_fwd_config_display(struct fwd_config *cfg) 1386 { 1387 struct fwd_stream *fs; 1388 lcoreid_t lc_id; 1389 streamid_t sm_id; 1390 1391 printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - " 1392 "NUMA support %s, MP over anonymous pages %s\n", 1393 cfg->fwd_eng->fwd_mode_name, 1394 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 1395 numa_support == 1 ? "enabled" : "disabled", 1396 mp_anon != 0 ? "enabled" : "disabled"); 1397 1398 if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0) 1399 printf("TX retry num: %u, delay between TX retries: %uus\n", 1400 burst_tx_retry_num, burst_tx_delay_time); 1401 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 1402 printf("Logical Core %u (socket %u) forwards packets on " 1403 "%d streams:", 1404 fwd_lcores_cpuids[lc_id], 1405 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 1406 fwd_lcores[lc_id]->stream_nb); 1407 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1408 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1409 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 1410 "P=%d/Q=%d (socket %u) ", 1411 fs->rx_port, fs->rx_queue, 1412 ports[fs->rx_port].socket_id, 1413 fs->tx_port, fs->tx_queue, 1414 ports[fs->tx_port].socket_id); 1415 print_ethaddr("peer=", 1416 &peer_eth_addrs[fs->peer_addr]); 1417 } 1418 printf("\n"); 1419 } 1420 printf("\n"); 1421 } 1422 1423 1424 void 1425 fwd_config_display(void) 1426 { 1427 fwd_config_setup(); 1428 pkt_fwd_config_display(&cur_fwd_config); 1429 } 1430 1431 int 1432 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 1433 { 1434 unsigned int i; 1435 unsigned int lcore_cpuid; 1436 int record_now; 1437 1438 record_now = 0; 1439 again: 1440 for (i = 0; i < nb_lc; i++) { 1441 lcore_cpuid = lcorelist[i]; 1442 if (! rte_lcore_is_enabled(lcore_cpuid)) { 1443 printf("lcore %u not enabled\n", lcore_cpuid); 1444 return -1; 1445 } 1446 if (lcore_cpuid == rte_get_master_lcore()) { 1447 printf("lcore %u cannot be masked on for running " 1448 "packet forwarding, which is the master lcore " 1449 "and reserved for command line parsing only\n", 1450 lcore_cpuid); 1451 return -1; 1452 } 1453 if (record_now) 1454 fwd_lcores_cpuids[i] = lcore_cpuid; 1455 } 1456 if (record_now == 0) { 1457 record_now = 1; 1458 goto again; 1459 } 1460 nb_cfg_lcores = (lcoreid_t) nb_lc; 1461 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 1462 printf("previous number of forwarding cores %u - changed to " 1463 "number of configured cores %u\n", 1464 (unsigned int) nb_fwd_lcores, nb_lc); 1465 nb_fwd_lcores = (lcoreid_t) nb_lc; 1466 } 1467 1468 return 0; 1469 } 1470 1471 int 1472 set_fwd_lcores_mask(uint64_t lcoremask) 1473 { 1474 unsigned int lcorelist[64]; 1475 unsigned int nb_lc; 1476 unsigned int i; 1477 1478 if (lcoremask == 0) { 1479 printf("Invalid NULL mask of cores\n"); 1480 return -1; 1481 } 1482 nb_lc = 0; 1483 for (i = 0; i < 64; i++) { 1484 if (! ((uint64_t)(1ULL << i) & lcoremask)) 1485 continue; 1486 lcorelist[nb_lc++] = i; 1487 } 1488 return set_fwd_lcores_list(lcorelist, nb_lc); 1489 } 1490 1491 void 1492 set_fwd_lcores_number(uint16_t nb_lc) 1493 { 1494 if (nb_lc > nb_cfg_lcores) { 1495 printf("nb fwd cores %u > %u (max. number of configured " 1496 "lcores) - ignored\n", 1497 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 1498 return; 1499 } 1500 nb_fwd_lcores = (lcoreid_t) nb_lc; 1501 printf("Number of forwarding cores set to %u\n", 1502 (unsigned int) nb_fwd_lcores); 1503 } 1504 1505 void 1506 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 1507 { 1508 unsigned int i; 1509 portid_t port_id; 1510 int record_now; 1511 1512 record_now = 0; 1513 again: 1514 for (i = 0; i < nb_pt; i++) { 1515 port_id = (portid_t) portlist[i]; 1516 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1517 return; 1518 if (record_now) 1519 fwd_ports_ids[i] = port_id; 1520 } 1521 if (record_now == 0) { 1522 record_now = 1; 1523 goto again; 1524 } 1525 nb_cfg_ports = (portid_t) nb_pt; 1526 if (nb_fwd_ports != (portid_t) nb_pt) { 1527 printf("previous number of forwarding ports %u - changed to " 1528 "number of configured ports %u\n", 1529 (unsigned int) nb_fwd_ports, nb_pt); 1530 nb_fwd_ports = (portid_t) nb_pt; 1531 } 1532 } 1533 1534 void 1535 set_fwd_ports_mask(uint64_t portmask) 1536 { 1537 unsigned int portlist[64]; 1538 unsigned int nb_pt; 1539 unsigned int i; 1540 1541 if (portmask == 0) { 1542 printf("Invalid NULL mask of ports\n"); 1543 return; 1544 } 1545 nb_pt = 0; 1546 for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) { 1547 if (! ((uint64_t)(1ULL << i) & portmask)) 1548 continue; 1549 portlist[nb_pt++] = i; 1550 } 1551 set_fwd_ports_list(portlist, nb_pt); 1552 } 1553 1554 void 1555 set_fwd_ports_number(uint16_t nb_pt) 1556 { 1557 if (nb_pt > nb_cfg_ports) { 1558 printf("nb fwd ports %u > %u (number of configured " 1559 "ports) - ignored\n", 1560 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 1561 return; 1562 } 1563 nb_fwd_ports = (portid_t) nb_pt; 1564 printf("Number of forwarding ports set to %u\n", 1565 (unsigned int) nb_fwd_ports); 1566 } 1567 1568 void 1569 set_nb_pkt_per_burst(uint16_t nb) 1570 { 1571 if (nb > MAX_PKT_BURST) { 1572 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 1573 " ignored\n", 1574 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 1575 return; 1576 } 1577 nb_pkt_per_burst = nb; 1578 printf("Number of packets per burst set to %u\n", 1579 (unsigned int) nb_pkt_per_burst); 1580 } 1581 1582 static const char * 1583 tx_split_get_name(enum tx_pkt_split split) 1584 { 1585 uint32_t i; 1586 1587 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 1588 if (tx_split_name[i].split == split) 1589 return tx_split_name[i].name; 1590 } 1591 return NULL; 1592 } 1593 1594 void 1595 set_tx_pkt_split(const char *name) 1596 { 1597 uint32_t i; 1598 1599 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 1600 if (strcmp(tx_split_name[i].name, name) == 0) { 1601 tx_pkt_split = tx_split_name[i].split; 1602 return; 1603 } 1604 } 1605 printf("unknown value: \"%s\"\n", name); 1606 } 1607 1608 void 1609 show_tx_pkt_segments(void) 1610 { 1611 uint32_t i, n; 1612 const char *split; 1613 1614 n = tx_pkt_nb_segs; 1615 split = tx_split_get_name(tx_pkt_split); 1616 1617 printf("Number of segments: %u\n", n); 1618 printf("Segment sizes: "); 1619 for (i = 0; i != n - 1; i++) 1620 printf("%hu,", tx_pkt_seg_lengths[i]); 1621 printf("%hu\n", tx_pkt_seg_lengths[i]); 1622 printf("Split packet: %s\n", split); 1623 } 1624 1625 void 1626 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 1627 { 1628 uint16_t tx_pkt_len; 1629 unsigned i; 1630 1631 if (nb_segs >= (unsigned) nb_txd) { 1632 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 1633 nb_segs, (unsigned int) nb_txd); 1634 return; 1635 } 1636 1637 /* 1638 * Check that each segment length is greater or equal than 1639 * the mbuf data sise. 1640 * Check also that the total packet length is greater or equal than the 1641 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 1642 */ 1643 tx_pkt_len = 0; 1644 for (i = 0; i < nb_segs; i++) { 1645 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 1646 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 1647 i, seg_lengths[i], (unsigned) mbuf_data_size); 1648 return; 1649 } 1650 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 1651 } 1652 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 1653 printf("total packet length=%u < %d - give up\n", 1654 (unsigned) tx_pkt_len, 1655 (int)(sizeof(struct ether_hdr) + 20 + 8)); 1656 return; 1657 } 1658 1659 for (i = 0; i < nb_segs; i++) 1660 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 1661 1662 tx_pkt_length = tx_pkt_len; 1663 tx_pkt_nb_segs = (uint8_t) nb_segs; 1664 } 1665 1666 char* 1667 list_pkt_forwarding_modes(void) 1668 { 1669 static char fwd_modes[128] = ""; 1670 const char *separator = "|"; 1671 struct fwd_engine *fwd_eng; 1672 unsigned i = 0; 1673 1674 if (strlen (fwd_modes) == 0) { 1675 while ((fwd_eng = fwd_engines[i++]) != NULL) { 1676 strcat(fwd_modes, fwd_eng->fwd_mode_name); 1677 strcat(fwd_modes, separator); 1678 } 1679 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 1680 } 1681 1682 return fwd_modes; 1683 } 1684 1685 void 1686 set_pkt_forwarding_mode(const char *fwd_mode_name) 1687 { 1688 struct fwd_engine *fwd_eng; 1689 unsigned i; 1690 1691 i = 0; 1692 while ((fwd_eng = fwd_engines[i]) != NULL) { 1693 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 1694 printf("Set %s packet forwarding mode\n", 1695 fwd_mode_name); 1696 cur_fwd_eng = fwd_eng; 1697 return; 1698 } 1699 i++; 1700 } 1701 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 1702 } 1703 1704 void 1705 set_verbose_level(uint16_t vb_level) 1706 { 1707 printf("Change verbose level from %u to %u\n", 1708 (unsigned int) verbose_level, (unsigned int) vb_level); 1709 verbose_level = vb_level; 1710 } 1711 1712 void 1713 vlan_extend_set(portid_t port_id, int on) 1714 { 1715 int diag; 1716 int vlan_offload; 1717 1718 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1719 return; 1720 1721 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1722 1723 if (on) 1724 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 1725 else 1726 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 1727 1728 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1729 if (diag < 0) 1730 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 1731 "diag=%d\n", port_id, on, diag); 1732 } 1733 1734 void 1735 rx_vlan_strip_set(portid_t port_id, int on) 1736 { 1737 int diag; 1738 int vlan_offload; 1739 1740 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1741 return; 1742 1743 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1744 1745 if (on) 1746 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 1747 else 1748 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 1749 1750 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1751 if (diag < 0) 1752 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 1753 "diag=%d\n", port_id, on, diag); 1754 } 1755 1756 void 1757 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 1758 { 1759 int diag; 1760 1761 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1762 return; 1763 1764 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 1765 if (diag < 0) 1766 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 1767 "diag=%d\n", port_id, queue_id, on, diag); 1768 } 1769 1770 void 1771 rx_vlan_filter_set(portid_t port_id, int on) 1772 { 1773 int diag; 1774 int vlan_offload; 1775 1776 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1777 return; 1778 1779 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1780 1781 if (on) 1782 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 1783 else 1784 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 1785 1786 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1787 if (diag < 0) 1788 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 1789 "diag=%d\n", port_id, on, diag); 1790 } 1791 1792 int 1793 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 1794 { 1795 int diag; 1796 1797 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1798 return 1; 1799 if (vlan_id_is_invalid(vlan_id)) 1800 return 1; 1801 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 1802 if (diag == 0) 1803 return 0; 1804 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 1805 "diag=%d\n", 1806 port_id, vlan_id, on, diag); 1807 return -1; 1808 } 1809 1810 void 1811 rx_vlan_all_filter_set(portid_t port_id, int on) 1812 { 1813 uint16_t vlan_id; 1814 1815 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1816 return; 1817 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 1818 if (rx_vft_set(port_id, vlan_id, on)) 1819 break; 1820 } 1821 } 1822 1823 void 1824 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 1825 { 1826 int diag; 1827 1828 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1829 return; 1830 1831 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 1832 if (diag == 0) 1833 return; 1834 1835 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 1836 "diag=%d\n", 1837 port_id, vlan_type, tp_id, diag); 1838 } 1839 1840 void 1841 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 1842 { 1843 int vlan_offload; 1844 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1845 return; 1846 if (vlan_id_is_invalid(vlan_id)) 1847 return; 1848 1849 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1850 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 1851 printf("Error, as QinQ has been enabled.\n"); 1852 return; 1853 } 1854 1855 tx_vlan_reset(port_id); 1856 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; 1857 ports[port_id].tx_vlan_id = vlan_id; 1858 } 1859 1860 void 1861 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 1862 { 1863 int vlan_offload; 1864 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1865 return; 1866 if (vlan_id_is_invalid(vlan_id)) 1867 return; 1868 if (vlan_id_is_invalid(vlan_id_outer)) 1869 return; 1870 1871 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1872 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 1873 printf("Error, as QinQ hasn't been enabled.\n"); 1874 return; 1875 } 1876 1877 tx_vlan_reset(port_id); 1878 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; 1879 ports[port_id].tx_vlan_id = vlan_id; 1880 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 1881 } 1882 1883 void 1884 tx_vlan_reset(portid_t port_id) 1885 { 1886 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1887 return; 1888 ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | 1889 TESTPMD_TX_OFFLOAD_INSERT_QINQ); 1890 ports[port_id].tx_vlan_id = 0; 1891 ports[port_id].tx_vlan_id_outer = 0; 1892 } 1893 1894 void 1895 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 1896 { 1897 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1898 return; 1899 1900 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 1901 } 1902 1903 void 1904 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 1905 { 1906 uint16_t i; 1907 uint8_t existing_mapping_found = 0; 1908 1909 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1910 return; 1911 1912 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 1913 return; 1914 1915 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 1916 printf("map_value not in required range 0..%d\n", 1917 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1918 return; 1919 } 1920 1921 if (!is_rx) { /*then tx*/ 1922 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1923 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1924 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 1925 tx_queue_stats_mappings[i].stats_counter_id = map_value; 1926 existing_mapping_found = 1; 1927 break; 1928 } 1929 } 1930 if (!existing_mapping_found) { /* A new additional mapping... */ 1931 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 1932 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 1933 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 1934 nb_tx_queue_stats_mappings++; 1935 } 1936 } 1937 else { /*rx*/ 1938 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1939 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1940 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 1941 rx_queue_stats_mappings[i].stats_counter_id = map_value; 1942 existing_mapping_found = 1; 1943 break; 1944 } 1945 } 1946 if (!existing_mapping_found) { /* A new additional mapping... */ 1947 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 1948 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 1949 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 1950 nb_rx_queue_stats_mappings++; 1951 } 1952 } 1953 } 1954 1955 static inline void 1956 print_fdir_mask(struct rte_eth_fdir_masks *mask) 1957 { 1958 printf("\n vlan_tci: 0x%04x, ", mask->vlan_tci_mask); 1959 1960 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 1961 printf("mac_addr: 0x%02x", mask->mac_addr_byte_mask); 1962 else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 1963 printf("mac_addr: 0x%02x, tunnel_type: 0x%01x, tunnel_id: 0x%08x", 1964 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 1965 mask->tunnel_id_mask); 1966 else { 1967 printf("src_ipv4: 0x%08x, dst_ipv4: 0x%08x," 1968 " src_port: 0x%04x, dst_port: 0x%04x", 1969 mask->ipv4_mask.src_ip, mask->ipv4_mask.dst_ip, 1970 mask->src_port_mask, mask->dst_port_mask); 1971 1972 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x," 1973 " dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 1974 mask->ipv6_mask.src_ip[0], mask->ipv6_mask.src_ip[1], 1975 mask->ipv6_mask.src_ip[2], mask->ipv6_mask.src_ip[3], 1976 mask->ipv6_mask.dst_ip[0], mask->ipv6_mask.dst_ip[1], 1977 mask->ipv6_mask.dst_ip[2], mask->ipv6_mask.dst_ip[3]); 1978 } 1979 1980 printf("\n"); 1981 } 1982 1983 static inline void 1984 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 1985 { 1986 struct rte_eth_flex_payload_cfg *cfg; 1987 uint32_t i, j; 1988 1989 for (i = 0; i < flex_conf->nb_payloads; i++) { 1990 cfg = &flex_conf->flex_set[i]; 1991 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 1992 printf("\n RAW: "); 1993 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 1994 printf("\n L2_PAYLOAD: "); 1995 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 1996 printf("\n L3_PAYLOAD: "); 1997 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 1998 printf("\n L4_PAYLOAD: "); 1999 else 2000 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 2001 for (j = 0; j < num; j++) 2002 printf(" %-5u", cfg->src_offset[j]); 2003 } 2004 printf("\n"); 2005 } 2006 2007 static char * 2008 flowtype_to_str(uint16_t flow_type) 2009 { 2010 struct flow_type_info { 2011 char str[32]; 2012 uint16_t ftype; 2013 }; 2014 2015 uint8_t i; 2016 static struct flow_type_info flowtype_str_table[] = { 2017 {"raw", RTE_ETH_FLOW_RAW}, 2018 {"ipv4", RTE_ETH_FLOW_IPV4}, 2019 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 2020 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 2021 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 2022 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 2023 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 2024 {"ipv6", RTE_ETH_FLOW_IPV6}, 2025 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 2026 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 2027 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 2028 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 2029 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 2030 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 2031 }; 2032 2033 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 2034 if (flowtype_str_table[i].ftype == flow_type) 2035 return flowtype_str_table[i].str; 2036 } 2037 2038 return NULL; 2039 } 2040 2041 static inline void 2042 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2043 { 2044 struct rte_eth_fdir_flex_mask *mask; 2045 uint32_t i, j; 2046 char *p; 2047 2048 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 2049 mask = &flex_conf->flex_mask[i]; 2050 p = flowtype_to_str(mask->flow_type); 2051 printf("\n %s:\t", p ? p : "unknown"); 2052 for (j = 0; j < num; j++) 2053 printf(" %02x", mask->mask[j]); 2054 } 2055 printf("\n"); 2056 } 2057 2058 static inline void 2059 print_fdir_flow_type(uint32_t flow_types_mask) 2060 { 2061 int i; 2062 char *p; 2063 2064 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 2065 if (!(flow_types_mask & (1 << i))) 2066 continue; 2067 p = flowtype_to_str(i); 2068 if (p) 2069 printf(" %s", p); 2070 else 2071 printf(" unknown"); 2072 } 2073 printf("\n"); 2074 } 2075 2076 void 2077 fdir_get_infos(portid_t port_id) 2078 { 2079 struct rte_eth_fdir_stats fdir_stat; 2080 struct rte_eth_fdir_info fdir_info; 2081 int ret; 2082 2083 static const char *fdir_stats_border = "########################"; 2084 2085 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2086 return; 2087 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 2088 if (ret < 0) { 2089 printf("\n FDIR is not supported on port %-2d\n", 2090 port_id); 2091 return; 2092 } 2093 2094 memset(&fdir_info, 0, sizeof(fdir_info)); 2095 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2096 RTE_ETH_FILTER_INFO, &fdir_info); 2097 memset(&fdir_stat, 0, sizeof(fdir_stat)); 2098 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2099 RTE_ETH_FILTER_STATS, &fdir_stat); 2100 printf("\n %s FDIR infos for port %-2d %s\n", 2101 fdir_stats_border, port_id, fdir_stats_border); 2102 printf(" MODE: "); 2103 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 2104 printf(" PERFECT\n"); 2105 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 2106 printf(" PERFECT-MAC-VLAN\n"); 2107 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2108 printf(" PERFECT-TUNNEL\n"); 2109 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 2110 printf(" SIGNATURE\n"); 2111 else 2112 printf(" DISABLE\n"); 2113 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 2114 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 2115 printf(" SUPPORTED FLOW TYPE: "); 2116 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 2117 } 2118 printf(" FLEX PAYLOAD INFO:\n"); 2119 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 2120 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 2121 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 2122 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 2123 fdir_info.flex_payload_unit, 2124 fdir_info.max_flex_payload_segment_num, 2125 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 2126 printf(" MASK: "); 2127 print_fdir_mask(&fdir_info.mask); 2128 if (fdir_info.flex_conf.nb_payloads > 0) { 2129 printf(" FLEX PAYLOAD SRC OFFSET:"); 2130 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2131 } 2132 if (fdir_info.flex_conf.nb_flexmasks > 0) { 2133 printf(" FLEX MASK CFG:"); 2134 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2135 } 2136 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 2137 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 2138 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 2139 fdir_info.guarant_spc, fdir_info.best_spc); 2140 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 2141 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 2142 " add: %-10"PRIu64" remove: %"PRIu64"\n" 2143 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 2144 fdir_stat.collision, fdir_stat.free, 2145 fdir_stat.maxhash, fdir_stat.maxlen, 2146 fdir_stat.add, fdir_stat.remove, 2147 fdir_stat.f_add, fdir_stat.f_remove); 2148 printf(" %s############################%s\n", 2149 fdir_stats_border, fdir_stats_border); 2150 } 2151 2152 void 2153 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 2154 { 2155 struct rte_port *port; 2156 struct rte_eth_fdir_flex_conf *flex_conf; 2157 int i, idx = 0; 2158 2159 port = &ports[port_id]; 2160 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2161 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 2162 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 2163 idx = i; 2164 break; 2165 } 2166 } 2167 if (i >= RTE_ETH_FLOW_MAX) { 2168 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 2169 idx = flex_conf->nb_flexmasks; 2170 flex_conf->nb_flexmasks++; 2171 } else { 2172 printf("The flex mask table is full. Can not set flex" 2173 " mask for flow_type(%u).", cfg->flow_type); 2174 return; 2175 } 2176 } 2177 (void)rte_memcpy(&flex_conf->flex_mask[idx], 2178 cfg, 2179 sizeof(struct rte_eth_fdir_flex_mask)); 2180 } 2181 2182 void 2183 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 2184 { 2185 struct rte_port *port; 2186 struct rte_eth_fdir_flex_conf *flex_conf; 2187 int i, idx = 0; 2188 2189 port = &ports[port_id]; 2190 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2191 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 2192 if (cfg->type == flex_conf->flex_set[i].type) { 2193 idx = i; 2194 break; 2195 } 2196 } 2197 if (i >= RTE_ETH_PAYLOAD_MAX) { 2198 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 2199 idx = flex_conf->nb_payloads; 2200 flex_conf->nb_payloads++; 2201 } else { 2202 printf("The flex payload table is full. Can not set" 2203 " flex payload for type(%u).", cfg->type); 2204 return; 2205 } 2206 } 2207 (void)rte_memcpy(&flex_conf->flex_set[idx], 2208 cfg, 2209 sizeof(struct rte_eth_flex_payload_cfg)); 2210 2211 } 2212 2213 void 2214 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 2215 { 2216 int diag; 2217 2218 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2219 return; 2220 if (is_rx) 2221 diag = rte_eth_dev_set_vf_rx(port_id,vf,on); 2222 else 2223 diag = rte_eth_dev_set_vf_tx(port_id,vf,on); 2224 if (diag == 0) 2225 return; 2226 if(is_rx) 2227 printf("rte_eth_dev_set_vf_rx for port_id=%d failed " 2228 "diag=%d\n", port_id, diag); 2229 else 2230 printf("rte_eth_dev_set_vf_tx for port_id=%d failed " 2231 "diag=%d\n", port_id, diag); 2232 2233 } 2234 2235 void 2236 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on) 2237 { 2238 int diag; 2239 2240 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2241 return; 2242 if (vlan_id_is_invalid(vlan_id)) 2243 return; 2244 diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on); 2245 if (diag == 0) 2246 return; 2247 printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed " 2248 "diag=%d\n", port_id, diag); 2249 } 2250 2251 int 2252 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 2253 { 2254 int diag; 2255 struct rte_eth_link link; 2256 2257 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2258 return 1; 2259 rte_eth_link_get_nowait(port_id, &link); 2260 if (rate > link.link_speed) { 2261 printf("Invalid rate value:%u bigger than link speed: %u\n", 2262 rate, link.link_speed); 2263 return 1; 2264 } 2265 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 2266 if (diag == 0) 2267 return diag; 2268 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 2269 port_id, diag); 2270 return diag; 2271 } 2272 2273 int 2274 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 2275 { 2276 int diag; 2277 struct rte_eth_link link; 2278 2279 if (q_msk == 0) 2280 return 0; 2281 2282 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2283 return 1; 2284 rte_eth_link_get_nowait(port_id, &link); 2285 if (rate > link.link_speed) { 2286 printf("Invalid rate value:%u bigger than link speed: %u\n", 2287 rate, link.link_speed); 2288 return 1; 2289 } 2290 diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk); 2291 if (diag == 0) 2292 return diag; 2293 printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n", 2294 port_id, diag); 2295 return diag; 2296 } 2297 2298 /* 2299 * Functions to manage the set of filtered Multicast MAC addresses. 2300 * 2301 * A pool of filtered multicast MAC addresses is associated with each port. 2302 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 2303 * The address of the pool and the number of valid multicast MAC addresses 2304 * recorded in the pool are stored in the fields "mc_addr_pool" and 2305 * "mc_addr_nb" of the "rte_port" data structure. 2306 * 2307 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 2308 * to be supplied a contiguous array of multicast MAC addresses. 2309 * To comply with this constraint, the set of multicast addresses recorded 2310 * into the pool are systematically compacted at the beginning of the pool. 2311 * Hence, when a multicast address is removed from the pool, all following 2312 * addresses, if any, are copied back to keep the set contiguous. 2313 */ 2314 #define MCAST_POOL_INC 32 2315 2316 static int 2317 mcast_addr_pool_extend(struct rte_port *port) 2318 { 2319 struct ether_addr *mc_pool; 2320 size_t mc_pool_size; 2321 2322 /* 2323 * If a free entry is available at the end of the pool, just 2324 * increment the number of recorded multicast addresses. 2325 */ 2326 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 2327 port->mc_addr_nb++; 2328 return 0; 2329 } 2330 2331 /* 2332 * [re]allocate a pool with MCAST_POOL_INC more entries. 2333 * The previous test guarantees that port->mc_addr_nb is a multiple 2334 * of MCAST_POOL_INC. 2335 */ 2336 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 2337 MCAST_POOL_INC); 2338 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 2339 mc_pool_size); 2340 if (mc_pool == NULL) { 2341 printf("allocation of pool of %u multicast addresses failed\n", 2342 port->mc_addr_nb + MCAST_POOL_INC); 2343 return -ENOMEM; 2344 } 2345 2346 port->mc_addr_pool = mc_pool; 2347 port->mc_addr_nb++; 2348 return 0; 2349 2350 } 2351 2352 static void 2353 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 2354 { 2355 port->mc_addr_nb--; 2356 if (addr_idx == port->mc_addr_nb) { 2357 /* No need to recompact the set of multicast addressses. */ 2358 if (port->mc_addr_nb == 0) { 2359 /* free the pool of multicast addresses. */ 2360 free(port->mc_addr_pool); 2361 port->mc_addr_pool = NULL; 2362 } 2363 return; 2364 } 2365 memmove(&port->mc_addr_pool[addr_idx], 2366 &port->mc_addr_pool[addr_idx + 1], 2367 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 2368 } 2369 2370 static void 2371 eth_port_multicast_addr_list_set(uint8_t port_id) 2372 { 2373 struct rte_port *port; 2374 int diag; 2375 2376 port = &ports[port_id]; 2377 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 2378 port->mc_addr_nb); 2379 if (diag == 0) 2380 return; 2381 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 2382 port->mc_addr_nb, port_id, -diag); 2383 } 2384 2385 void 2386 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr) 2387 { 2388 struct rte_port *port; 2389 uint32_t i; 2390 2391 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2392 return; 2393 2394 port = &ports[port_id]; 2395 2396 /* 2397 * Check that the added multicast MAC address is not already recorded 2398 * in the pool of multicast addresses. 2399 */ 2400 for (i = 0; i < port->mc_addr_nb; i++) { 2401 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 2402 printf("multicast address already filtered by port\n"); 2403 return; 2404 } 2405 } 2406 2407 if (mcast_addr_pool_extend(port) != 0) 2408 return; 2409 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 2410 eth_port_multicast_addr_list_set(port_id); 2411 } 2412 2413 void 2414 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr) 2415 { 2416 struct rte_port *port; 2417 uint32_t i; 2418 2419 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2420 return; 2421 2422 port = &ports[port_id]; 2423 2424 /* 2425 * Search the pool of multicast MAC addresses for the removed address. 2426 */ 2427 for (i = 0; i < port->mc_addr_nb; i++) { 2428 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 2429 break; 2430 } 2431 if (i == port->mc_addr_nb) { 2432 printf("multicast address not filtered by port %d\n", port_id); 2433 return; 2434 } 2435 2436 mcast_addr_pool_remove(port, i); 2437 eth_port_multicast_addr_list_set(port_id); 2438 } 2439 2440 void 2441 port_dcb_info_display(uint8_t port_id) 2442 { 2443 struct rte_eth_dcb_info dcb_info; 2444 uint16_t i; 2445 int ret; 2446 static const char *border = "================"; 2447 2448 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2449 return; 2450 2451 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 2452 if (ret) { 2453 printf("\n Failed to get dcb infos on port %-2d\n", 2454 port_id); 2455 return; 2456 } 2457 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 2458 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 2459 printf("\n TC : "); 2460 for (i = 0; i < dcb_info.nb_tcs; i++) 2461 printf("\t%4d", i); 2462 printf("\n Priority : "); 2463 for (i = 0; i < dcb_info.nb_tcs; i++) 2464 printf("\t%4d", dcb_info.prio_tc[i]); 2465 printf("\n BW percent :"); 2466 for (i = 0; i < dcb_info.nb_tcs; i++) 2467 printf("\t%4d%%", dcb_info.tc_bws[i]); 2468 printf("\n RXQ base : "); 2469 for (i = 0; i < dcb_info.nb_tcs; i++) 2470 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 2471 printf("\n RXQ number :"); 2472 for (i = 0; i < dcb_info.nb_tcs; i++) 2473 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 2474 printf("\n TXQ base : "); 2475 for (i = 0; i < dcb_info.nb_tcs; i++) 2476 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 2477 printf("\n TXQ number :"); 2478 for (i = 0; i < dcb_info.nb_tcs; i++) 2479 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 2480 printf("\n"); 2481 } 2482