1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 /* BSD LICENSE 34 * 35 * Copyright 2013-2014 6WIND S.A. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name of 6WIND S.A. nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 62 */ 63 64 #include <stdarg.h> 65 #include <errno.h> 66 #include <stdio.h> 67 #include <string.h> 68 #include <stdarg.h> 69 #include <stdint.h> 70 #include <inttypes.h> 71 72 #include <sys/queue.h> 73 74 #include <rte_common.h> 75 #include <rte_byteorder.h> 76 #include <rte_debug.h> 77 #include <rte_log.h> 78 #include <rte_memory.h> 79 #include <rte_memcpy.h> 80 #include <rte_memzone.h> 81 #include <rte_launch.h> 82 #include <rte_eal.h> 83 #include <rte_per_lcore.h> 84 #include <rte_lcore.h> 85 #include <rte_atomic.h> 86 #include <rte_branch_prediction.h> 87 #include <rte_mempool.h> 88 #include <rte_mbuf.h> 89 #include <rte_interrupts.h> 90 #include <rte_pci.h> 91 #include <rte_ether.h> 92 #include <rte_ethdev.h> 93 #include <rte_string_fns.h> 94 #include <rte_cycles.h> 95 96 #include "testpmd.h" 97 98 static char *flowtype_to_str(uint16_t flow_type); 99 100 static const struct { 101 enum tx_pkt_split split; 102 const char *name; 103 } tx_split_name[] = { 104 { 105 .split = TX_PKT_SPLIT_OFF, 106 .name = "off", 107 }, 108 { 109 .split = TX_PKT_SPLIT_ON, 110 .name = "on", 111 }, 112 { 113 .split = TX_PKT_SPLIT_RND, 114 .name = "rand", 115 }, 116 }; 117 118 struct rss_type_info { 119 char str[32]; 120 uint64_t rss_type; 121 }; 122 123 static const struct rss_type_info rss_type_table[] = { 124 { "ipv4", ETH_RSS_IPV4 }, 125 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 126 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 127 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 128 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 129 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 130 { "ipv6", ETH_RSS_IPV6 }, 131 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 132 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 133 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 134 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 135 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 136 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 137 { "ipv6-ex", ETH_RSS_IPV6_EX }, 138 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 139 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 140 { "port", ETH_RSS_PORT }, 141 { "vxlan", ETH_RSS_VXLAN }, 142 { "geneve", ETH_RSS_GENEVE }, 143 { "nvgre", ETH_RSS_NVGRE }, 144 145 }; 146 147 static void 148 print_ethaddr(const char *name, struct ether_addr *eth_addr) 149 { 150 char buf[ETHER_ADDR_FMT_SIZE]; 151 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 152 printf("%s%s", name, buf); 153 } 154 155 void 156 nic_stats_display(portid_t port_id) 157 { 158 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 159 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 160 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 161 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 162 uint64_t mpps_rx, mpps_tx; 163 struct rte_eth_stats stats; 164 struct rte_port *port = &ports[port_id]; 165 uint8_t i; 166 portid_t pid; 167 168 static const char *nic_stats_border = "########################"; 169 170 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 171 printf("Valid port range is [0"); 172 FOREACH_PORT(pid, ports) 173 printf(", %d", pid); 174 printf("]\n"); 175 return; 176 } 177 rte_eth_stats_get(port_id, &stats); 178 printf("\n %s NIC statistics for port %-2d %s\n", 179 nic_stats_border, port_id, nic_stats_border); 180 181 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 182 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 183 "%-"PRIu64"\n", 184 stats.ipackets, stats.imissed, stats.ibytes); 185 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 186 printf(" RX-nombuf: %-10"PRIu64"\n", 187 stats.rx_nombuf); 188 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 189 "%-"PRIu64"\n", 190 stats.opackets, stats.oerrors, stats.obytes); 191 } 192 else { 193 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 194 " RX-bytes: %10"PRIu64"\n", 195 stats.ipackets, stats.ierrors, stats.ibytes); 196 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 197 printf(" RX-nombuf: %10"PRIu64"\n", 198 stats.rx_nombuf); 199 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 200 " TX-bytes: %10"PRIu64"\n", 201 stats.opackets, stats.oerrors, stats.obytes); 202 } 203 204 if (port->rx_queue_stats_mapping_enabled) { 205 printf("\n"); 206 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 207 printf(" Stats reg %2d RX-packets: %10"PRIu64 208 " RX-errors: %10"PRIu64 209 " RX-bytes: %10"PRIu64"\n", 210 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 211 } 212 } 213 if (port->tx_queue_stats_mapping_enabled) { 214 printf("\n"); 215 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 216 printf(" Stats reg %2d TX-packets: %10"PRIu64 217 " TX-bytes: %10"PRIu64"\n", 218 i, stats.q_opackets[i], stats.q_obytes[i]); 219 } 220 } 221 222 diff_cycles = prev_cycles[port_id]; 223 prev_cycles[port_id] = rte_rdtsc(); 224 if (diff_cycles > 0) 225 diff_cycles = prev_cycles[port_id] - diff_cycles; 226 227 diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id]; 228 diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id]; 229 prev_pkts_rx[port_id] = stats.ipackets; 230 prev_pkts_tx[port_id] = stats.opackets; 231 mpps_rx = diff_cycles > 0 ? 232 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 233 mpps_tx = diff_cycles > 0 ? 234 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 235 printf("\n Throughput (since last show)\n"); 236 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 237 mpps_rx, mpps_tx); 238 239 printf(" %s############################%s\n", 240 nic_stats_border, nic_stats_border); 241 } 242 243 void 244 nic_stats_clear(portid_t port_id) 245 { 246 portid_t pid; 247 248 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 249 printf("Valid port range is [0"); 250 FOREACH_PORT(pid, ports) 251 printf(", %d", pid); 252 printf("]\n"); 253 return; 254 } 255 rte_eth_stats_reset(port_id); 256 printf("\n NIC statistics for port %d cleared\n", port_id); 257 } 258 259 void 260 nic_xstats_display(portid_t port_id) 261 { 262 struct rte_eth_xstat *xstats; 263 int cnt_xstats, idx_xstat; 264 struct rte_eth_xstat_name *xstats_names; 265 266 printf("###### NIC extended statistics for port %-2d\n", port_id); 267 if (!rte_eth_dev_is_valid_port(port_id)) { 268 printf("Error: Invalid port number %i\n", port_id); 269 return; 270 } 271 272 /* Get count */ 273 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 274 if (cnt_xstats < 0) { 275 printf("Error: Cannot get count of xstats\n"); 276 return; 277 } 278 279 /* Get id-name lookup table */ 280 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 281 if (xstats_names == NULL) { 282 printf("Cannot allocate memory for xstats lookup\n"); 283 return; 284 } 285 if (cnt_xstats != rte_eth_xstats_get_names( 286 port_id, xstats_names, cnt_xstats)) { 287 printf("Error: Cannot get xstats lookup\n"); 288 free(xstats_names); 289 return; 290 } 291 292 /* Get stats themselves */ 293 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 294 if (xstats == NULL) { 295 printf("Cannot allocate memory for xstats\n"); 296 free(xstats_names); 297 return; 298 } 299 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 300 printf("Error: Unable to get xstats\n"); 301 free(xstats_names); 302 free(xstats); 303 return; 304 } 305 306 /* Display xstats */ 307 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) 308 printf("%s: %"PRIu64"\n", 309 xstats_names[idx_xstat].name, 310 xstats[idx_xstat].value); 311 free(xstats_names); 312 free(xstats); 313 } 314 315 void 316 nic_xstats_clear(portid_t port_id) 317 { 318 rte_eth_xstats_reset(port_id); 319 } 320 321 void 322 nic_stats_mapping_display(portid_t port_id) 323 { 324 struct rte_port *port = &ports[port_id]; 325 uint16_t i; 326 portid_t pid; 327 328 static const char *nic_stats_mapping_border = "########################"; 329 330 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 331 printf("Valid port range is [0"); 332 FOREACH_PORT(pid, ports) 333 printf(", %d", pid); 334 printf("]\n"); 335 return; 336 } 337 338 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 339 printf("Port id %d - either does not support queue statistic mapping or" 340 " no queue statistic mapping set\n", port_id); 341 return; 342 } 343 344 printf("\n %s NIC statistics mapping for port %-2d %s\n", 345 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 346 347 if (port->rx_queue_stats_mapping_enabled) { 348 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 349 if (rx_queue_stats_mappings[i].port_id == port_id) { 350 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 351 rx_queue_stats_mappings[i].queue_id, 352 rx_queue_stats_mappings[i].stats_counter_id); 353 } 354 } 355 printf("\n"); 356 } 357 358 359 if (port->tx_queue_stats_mapping_enabled) { 360 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 361 if (tx_queue_stats_mappings[i].port_id == port_id) { 362 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 363 tx_queue_stats_mappings[i].queue_id, 364 tx_queue_stats_mappings[i].stats_counter_id); 365 } 366 } 367 } 368 369 printf(" %s####################################%s\n", 370 nic_stats_mapping_border, nic_stats_mapping_border); 371 } 372 373 void 374 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 375 { 376 struct rte_eth_rxq_info qinfo; 377 int32_t rc; 378 static const char *info_border = "*********************"; 379 380 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 381 if (rc != 0) { 382 printf("Failed to retrieve information for port: %hhu, " 383 "RX queue: %hu\nerror desc: %s(%d)\n", 384 port_id, queue_id, strerror(-rc), rc); 385 return; 386 } 387 388 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 389 info_border, port_id, queue_id, info_border); 390 391 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 392 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 393 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 394 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 395 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 396 printf("\nRX drop packets: %s", 397 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 398 printf("\nRX deferred start: %s", 399 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 400 printf("\nRX scattered packets: %s", 401 (qinfo.scattered_rx != 0) ? "on" : "off"); 402 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 403 printf("\n"); 404 } 405 406 void 407 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 408 { 409 struct rte_eth_txq_info qinfo; 410 int32_t rc; 411 static const char *info_border = "*********************"; 412 413 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 414 if (rc != 0) { 415 printf("Failed to retrieve information for port: %hhu, " 416 "TX queue: %hu\nerror desc: %s(%d)\n", 417 port_id, queue_id, strerror(-rc), rc); 418 return; 419 } 420 421 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 422 info_border, port_id, queue_id, info_border); 423 424 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 425 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 426 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 427 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 428 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 429 printf("\nTX flags: %#x", qinfo.conf.txq_flags); 430 printf("\nTX deferred start: %s", 431 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 432 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 433 printf("\n"); 434 } 435 436 void 437 port_infos_display(portid_t port_id) 438 { 439 struct rte_port *port; 440 struct ether_addr mac_addr; 441 struct rte_eth_link link; 442 struct rte_eth_dev_info dev_info; 443 int vlan_offload; 444 struct rte_mempool * mp; 445 static const char *info_border = "*********************"; 446 portid_t pid; 447 448 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 449 printf("Valid port range is [0"); 450 FOREACH_PORT(pid, ports) 451 printf(", %d", pid); 452 printf("]\n"); 453 return; 454 } 455 port = &ports[port_id]; 456 rte_eth_link_get_nowait(port_id, &link); 457 printf("\n%s Infos for port %-2d %s\n", 458 info_border, port_id, info_border); 459 rte_eth_macaddr_get(port_id, &mac_addr); 460 print_ethaddr("MAC address: ", &mac_addr); 461 printf("\nConnect to socket: %u", port->socket_id); 462 463 if (port_numa[port_id] != NUMA_NO_CONFIG) { 464 mp = mbuf_pool_find(port_numa[port_id]); 465 if (mp) 466 printf("\nmemory allocation on the socket: %d", 467 port_numa[port_id]); 468 } else 469 printf("\nmemory allocation on the socket: %u",port->socket_id); 470 471 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 472 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 473 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 474 ("full-duplex") : ("half-duplex")); 475 printf("Promiscuous mode: %s\n", 476 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 477 printf("Allmulticast mode: %s\n", 478 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 479 printf("Maximum number of MAC addresses: %u\n", 480 (unsigned int)(port->dev_info.max_mac_addrs)); 481 printf("Maximum number of MAC addresses of hash filtering: %u\n", 482 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 483 484 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 485 if (vlan_offload >= 0){ 486 printf("VLAN offload: \n"); 487 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 488 printf(" strip on \n"); 489 else 490 printf(" strip off \n"); 491 492 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 493 printf(" filter on \n"); 494 else 495 printf(" filter off \n"); 496 497 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 498 printf(" qinq(extend) on \n"); 499 else 500 printf(" qinq(extend) off \n"); 501 } 502 503 memset(&dev_info, 0, sizeof(dev_info)); 504 rte_eth_dev_info_get(port_id, &dev_info); 505 if (dev_info.hash_key_size > 0) 506 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 507 if (dev_info.reta_size > 0) 508 printf("Redirection table size: %u\n", dev_info.reta_size); 509 if (!dev_info.flow_type_rss_offloads) 510 printf("No flow type is supported.\n"); 511 else { 512 uint16_t i; 513 char *p; 514 515 printf("Supported flow types:\n"); 516 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; 517 i++) { 518 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 519 continue; 520 p = flowtype_to_str(i); 521 printf(" %s\n", (p ? p : "unknown")); 522 } 523 } 524 525 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 526 printf("Max possible number of RXDs per queue: %hu\n", 527 dev_info.rx_desc_lim.nb_max); 528 printf("Min possible number of RXDs per queue: %hu\n", 529 dev_info.rx_desc_lim.nb_min); 530 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 531 532 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 533 printf("Max possible number of TXDs per queue: %hu\n", 534 dev_info.tx_desc_lim.nb_max); 535 printf("Min possible number of TXDs per queue: %hu\n", 536 dev_info.tx_desc_lim.nb_min); 537 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 538 } 539 540 int 541 port_id_is_invalid(portid_t port_id, enum print_warning warning) 542 { 543 if (port_id == (portid_t)RTE_PORT_ALL) 544 return 0; 545 546 if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled) 547 return 0; 548 549 if (warning == ENABLED_WARN) 550 printf("Invalid port %d\n", port_id); 551 552 return 1; 553 } 554 555 static int 556 vlan_id_is_invalid(uint16_t vlan_id) 557 { 558 if (vlan_id < 4096) 559 return 0; 560 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 561 return 1; 562 } 563 564 static int 565 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 566 { 567 uint64_t pci_len; 568 569 if (reg_off & 0x3) { 570 printf("Port register offset 0x%X not aligned on a 4-byte " 571 "boundary\n", 572 (unsigned)reg_off); 573 return 1; 574 } 575 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 576 if (reg_off >= pci_len) { 577 printf("Port %d: register offset %u (0x%X) out of port PCI " 578 "resource (length=%"PRIu64")\n", 579 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 580 return 1; 581 } 582 return 0; 583 } 584 585 static int 586 reg_bit_pos_is_invalid(uint8_t bit_pos) 587 { 588 if (bit_pos <= 31) 589 return 0; 590 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 591 return 1; 592 } 593 594 #define display_port_and_reg_off(port_id, reg_off) \ 595 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 596 597 static inline void 598 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 599 { 600 display_port_and_reg_off(port_id, (unsigned)reg_off); 601 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 602 } 603 604 void 605 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 606 { 607 uint32_t reg_v; 608 609 610 if (port_id_is_invalid(port_id, ENABLED_WARN)) 611 return; 612 if (port_reg_off_is_invalid(port_id, reg_off)) 613 return; 614 if (reg_bit_pos_is_invalid(bit_x)) 615 return; 616 reg_v = port_id_pci_reg_read(port_id, reg_off); 617 display_port_and_reg_off(port_id, (unsigned)reg_off); 618 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 619 } 620 621 void 622 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 623 uint8_t bit1_pos, uint8_t bit2_pos) 624 { 625 uint32_t reg_v; 626 uint8_t l_bit; 627 uint8_t h_bit; 628 629 if (port_id_is_invalid(port_id, ENABLED_WARN)) 630 return; 631 if (port_reg_off_is_invalid(port_id, reg_off)) 632 return; 633 if (reg_bit_pos_is_invalid(bit1_pos)) 634 return; 635 if (reg_bit_pos_is_invalid(bit2_pos)) 636 return; 637 if (bit1_pos > bit2_pos) 638 l_bit = bit2_pos, h_bit = bit1_pos; 639 else 640 l_bit = bit1_pos, h_bit = bit2_pos; 641 642 reg_v = port_id_pci_reg_read(port_id, reg_off); 643 reg_v >>= l_bit; 644 if (h_bit < 31) 645 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 646 display_port_and_reg_off(port_id, (unsigned)reg_off); 647 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 648 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 649 } 650 651 void 652 port_reg_display(portid_t port_id, uint32_t reg_off) 653 { 654 uint32_t reg_v; 655 656 if (port_id_is_invalid(port_id, ENABLED_WARN)) 657 return; 658 if (port_reg_off_is_invalid(port_id, reg_off)) 659 return; 660 reg_v = port_id_pci_reg_read(port_id, reg_off); 661 display_port_reg_value(port_id, reg_off, reg_v); 662 } 663 664 void 665 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 666 uint8_t bit_v) 667 { 668 uint32_t reg_v; 669 670 if (port_id_is_invalid(port_id, ENABLED_WARN)) 671 return; 672 if (port_reg_off_is_invalid(port_id, reg_off)) 673 return; 674 if (reg_bit_pos_is_invalid(bit_pos)) 675 return; 676 if (bit_v > 1) { 677 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 678 return; 679 } 680 reg_v = port_id_pci_reg_read(port_id, reg_off); 681 if (bit_v == 0) 682 reg_v &= ~(1 << bit_pos); 683 else 684 reg_v |= (1 << bit_pos); 685 port_id_pci_reg_write(port_id, reg_off, reg_v); 686 display_port_reg_value(port_id, reg_off, reg_v); 687 } 688 689 void 690 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 691 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 692 { 693 uint32_t max_v; 694 uint32_t reg_v; 695 uint8_t l_bit; 696 uint8_t h_bit; 697 698 if (port_id_is_invalid(port_id, ENABLED_WARN)) 699 return; 700 if (port_reg_off_is_invalid(port_id, reg_off)) 701 return; 702 if (reg_bit_pos_is_invalid(bit1_pos)) 703 return; 704 if (reg_bit_pos_is_invalid(bit2_pos)) 705 return; 706 if (bit1_pos > bit2_pos) 707 l_bit = bit2_pos, h_bit = bit1_pos; 708 else 709 l_bit = bit1_pos, h_bit = bit2_pos; 710 711 if ((h_bit - l_bit) < 31) 712 max_v = (1 << (h_bit - l_bit + 1)) - 1; 713 else 714 max_v = 0xFFFFFFFF; 715 716 if (value > max_v) { 717 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 718 (unsigned)value, (unsigned)value, 719 (unsigned)max_v, (unsigned)max_v); 720 return; 721 } 722 reg_v = port_id_pci_reg_read(port_id, reg_off); 723 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 724 reg_v |= (value << l_bit); /* Set changed bits */ 725 port_id_pci_reg_write(port_id, reg_off, reg_v); 726 display_port_reg_value(port_id, reg_off, reg_v); 727 } 728 729 void 730 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 731 { 732 if (port_id_is_invalid(port_id, ENABLED_WARN)) 733 return; 734 if (port_reg_off_is_invalid(port_id, reg_off)) 735 return; 736 port_id_pci_reg_write(port_id, reg_off, reg_v); 737 display_port_reg_value(port_id, reg_off, reg_v); 738 } 739 740 void 741 port_mtu_set(portid_t port_id, uint16_t mtu) 742 { 743 int diag; 744 745 if (port_id_is_invalid(port_id, ENABLED_WARN)) 746 return; 747 diag = rte_eth_dev_set_mtu(port_id, mtu); 748 if (diag == 0) 749 return; 750 printf("Set MTU failed. diag=%d\n", diag); 751 } 752 753 /* 754 * RX/TX ring descriptors display functions. 755 */ 756 int 757 rx_queue_id_is_invalid(queueid_t rxq_id) 758 { 759 if (rxq_id < nb_rxq) 760 return 0; 761 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 762 return 1; 763 } 764 765 int 766 tx_queue_id_is_invalid(queueid_t txq_id) 767 { 768 if (txq_id < nb_txq) 769 return 0; 770 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 771 return 1; 772 } 773 774 static int 775 rx_desc_id_is_invalid(uint16_t rxdesc_id) 776 { 777 if (rxdesc_id < nb_rxd) 778 return 0; 779 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 780 rxdesc_id, nb_rxd); 781 return 1; 782 } 783 784 static int 785 tx_desc_id_is_invalid(uint16_t txdesc_id) 786 { 787 if (txdesc_id < nb_txd) 788 return 0; 789 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 790 txdesc_id, nb_txd); 791 return 1; 792 } 793 794 static const struct rte_memzone * 795 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) 796 { 797 char mz_name[RTE_MEMZONE_NAMESIZE]; 798 const struct rte_memzone *mz; 799 800 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 801 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 802 mz = rte_memzone_lookup(mz_name); 803 if (mz == NULL) 804 printf("%s ring memory zoneof (port %d, queue %d) not" 805 "found (zone name = %s\n", 806 ring_name, port_id, q_id, mz_name); 807 return mz; 808 } 809 810 union igb_ring_dword { 811 uint64_t dword; 812 struct { 813 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 814 uint32_t lo; 815 uint32_t hi; 816 #else 817 uint32_t hi; 818 uint32_t lo; 819 #endif 820 } words; 821 }; 822 823 struct igb_ring_desc_32_bytes { 824 union igb_ring_dword lo_dword; 825 union igb_ring_dword hi_dword; 826 union igb_ring_dword resv1; 827 union igb_ring_dword resv2; 828 }; 829 830 struct igb_ring_desc_16_bytes { 831 union igb_ring_dword lo_dword; 832 union igb_ring_dword hi_dword; 833 }; 834 835 static void 836 ring_rxd_display_dword(union igb_ring_dword dword) 837 { 838 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 839 (unsigned)dword.words.hi); 840 } 841 842 static void 843 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 844 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 845 uint8_t port_id, 846 #else 847 __rte_unused uint8_t port_id, 848 #endif 849 uint16_t desc_id) 850 { 851 struct igb_ring_desc_16_bytes *ring = 852 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 853 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 854 struct rte_eth_dev_info dev_info; 855 856 memset(&dev_info, 0, sizeof(dev_info)); 857 rte_eth_dev_info_get(port_id, &dev_info); 858 if (strstr(dev_info.driver_name, "i40e") != NULL) { 859 /* 32 bytes RX descriptor, i40e only */ 860 struct igb_ring_desc_32_bytes *ring = 861 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 862 ring[desc_id].lo_dword.dword = 863 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 864 ring_rxd_display_dword(ring[desc_id].lo_dword); 865 ring[desc_id].hi_dword.dword = 866 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 867 ring_rxd_display_dword(ring[desc_id].hi_dword); 868 ring[desc_id].resv1.dword = 869 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 870 ring_rxd_display_dword(ring[desc_id].resv1); 871 ring[desc_id].resv2.dword = 872 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 873 ring_rxd_display_dword(ring[desc_id].resv2); 874 875 return; 876 } 877 #endif 878 /* 16 bytes RX descriptor */ 879 ring[desc_id].lo_dword.dword = 880 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 881 ring_rxd_display_dword(ring[desc_id].lo_dword); 882 ring[desc_id].hi_dword.dword = 883 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 884 ring_rxd_display_dword(ring[desc_id].hi_dword); 885 } 886 887 static void 888 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 889 { 890 struct igb_ring_desc_16_bytes *ring; 891 struct igb_ring_desc_16_bytes txd; 892 893 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 894 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 895 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 896 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 897 (unsigned)txd.lo_dword.words.lo, 898 (unsigned)txd.lo_dword.words.hi, 899 (unsigned)txd.hi_dword.words.lo, 900 (unsigned)txd.hi_dword.words.hi); 901 } 902 903 void 904 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 905 { 906 const struct rte_memzone *rx_mz; 907 908 if (port_id_is_invalid(port_id, ENABLED_WARN)) 909 return; 910 if (rx_queue_id_is_invalid(rxq_id)) 911 return; 912 if (rx_desc_id_is_invalid(rxd_id)) 913 return; 914 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 915 if (rx_mz == NULL) 916 return; 917 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 918 } 919 920 void 921 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 922 { 923 const struct rte_memzone *tx_mz; 924 925 if (port_id_is_invalid(port_id, ENABLED_WARN)) 926 return; 927 if (tx_queue_id_is_invalid(txq_id)) 928 return; 929 if (tx_desc_id_is_invalid(txd_id)) 930 return; 931 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 932 if (tx_mz == NULL) 933 return; 934 ring_tx_descriptor_display(tx_mz, txd_id); 935 } 936 937 void 938 fwd_lcores_config_display(void) 939 { 940 lcoreid_t lc_id; 941 942 printf("List of forwarding lcores:"); 943 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 944 printf(" %2u", fwd_lcores_cpuids[lc_id]); 945 printf("\n"); 946 } 947 void 948 rxtx_config_display(void) 949 { 950 printf(" %s packet forwarding%s - CRC stripping %s - " 951 "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, 952 retry_enabled == 0 ? "" : " with retry", 953 rx_mode.hw_strip_crc ? "enabled" : "disabled", 954 nb_pkt_per_burst); 955 956 if (cur_fwd_eng == &tx_only_engine) 957 printf(" packet len=%u - nb packet segments=%d\n", 958 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 959 960 struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; 961 struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; 962 963 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 964 nb_fwd_lcores, nb_fwd_ports); 965 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 966 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 967 printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 968 rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, 969 rx_conf->rx_thresh.wthresh); 970 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 971 nb_txq, nb_txd, tx_conf->tx_free_thresh); 972 printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 973 tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, 974 tx_conf->tx_thresh.wthresh); 975 printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", 976 tx_conf->tx_rs_thresh, tx_conf->txq_flags); 977 } 978 979 void 980 port_rss_reta_info(portid_t port_id, 981 struct rte_eth_rss_reta_entry64 *reta_conf, 982 uint16_t nb_entries) 983 { 984 uint16_t i, idx, shift; 985 int ret; 986 987 if (port_id_is_invalid(port_id, ENABLED_WARN)) 988 return; 989 990 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 991 if (ret != 0) { 992 printf("Failed to get RSS RETA info, return code = %d\n", ret); 993 return; 994 } 995 996 for (i = 0; i < nb_entries; i++) { 997 idx = i / RTE_RETA_GROUP_SIZE; 998 shift = i % RTE_RETA_GROUP_SIZE; 999 if (!(reta_conf[idx].mask & (1ULL << shift))) 1000 continue; 1001 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1002 i, reta_conf[idx].reta[shift]); 1003 } 1004 } 1005 1006 /* 1007 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1008 * key of the port. 1009 */ 1010 void 1011 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1012 { 1013 struct rte_eth_rss_conf rss_conf; 1014 uint8_t rss_key[10 * 4] = ""; 1015 uint64_t rss_hf; 1016 uint8_t i; 1017 int diag; 1018 1019 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1020 return; 1021 1022 rss_conf.rss_hf = 0; 1023 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1024 if (!strcmp(rss_info, rss_type_table[i].str)) 1025 rss_conf.rss_hf = rss_type_table[i].rss_type; 1026 } 1027 1028 /* Get RSS hash key if asked to display it */ 1029 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1030 rss_conf.rss_key_len = sizeof(rss_key); 1031 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1032 if (diag != 0) { 1033 switch (diag) { 1034 case -ENODEV: 1035 printf("port index %d invalid\n", port_id); 1036 break; 1037 case -ENOTSUP: 1038 printf("operation not supported by device\n"); 1039 break; 1040 default: 1041 printf("operation failed - diag=%d\n", diag); 1042 break; 1043 } 1044 return; 1045 } 1046 rss_hf = rss_conf.rss_hf; 1047 if (rss_hf == 0) { 1048 printf("RSS disabled\n"); 1049 return; 1050 } 1051 printf("RSS functions:\n "); 1052 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1053 if (rss_hf & rss_type_table[i].rss_type) 1054 printf("%s ", rss_type_table[i].str); 1055 } 1056 printf("\n"); 1057 if (!show_rss_key) 1058 return; 1059 printf("RSS key:\n"); 1060 for (i = 0; i < sizeof(rss_key); i++) 1061 printf("%02X", rss_key[i]); 1062 printf("\n"); 1063 } 1064 1065 void 1066 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1067 uint hash_key_len) 1068 { 1069 struct rte_eth_rss_conf rss_conf; 1070 int diag; 1071 unsigned int i; 1072 1073 rss_conf.rss_key = NULL; 1074 rss_conf.rss_key_len = hash_key_len; 1075 rss_conf.rss_hf = 0; 1076 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1077 if (!strcmp(rss_type_table[i].str, rss_type)) 1078 rss_conf.rss_hf = rss_type_table[i].rss_type; 1079 } 1080 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1081 if (diag == 0) { 1082 rss_conf.rss_key = hash_key; 1083 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1084 } 1085 if (diag == 0) 1086 return; 1087 1088 switch (diag) { 1089 case -ENODEV: 1090 printf("port index %d invalid\n", port_id); 1091 break; 1092 case -ENOTSUP: 1093 printf("operation not supported by device\n"); 1094 break; 1095 default: 1096 printf("operation failed - diag=%d\n", diag); 1097 break; 1098 } 1099 } 1100 1101 /* 1102 * Setup forwarding configuration for each logical core. 1103 */ 1104 static void 1105 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1106 { 1107 streamid_t nb_fs_per_lcore; 1108 streamid_t nb_fs; 1109 streamid_t sm_id; 1110 lcoreid_t nb_extra; 1111 lcoreid_t nb_fc; 1112 lcoreid_t nb_lc; 1113 lcoreid_t lc_id; 1114 1115 nb_fs = cfg->nb_fwd_streams; 1116 nb_fc = cfg->nb_fwd_lcores; 1117 if (nb_fs <= nb_fc) { 1118 nb_fs_per_lcore = 1; 1119 nb_extra = 0; 1120 } else { 1121 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1122 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1123 } 1124 1125 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1126 sm_id = 0; 1127 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1128 fwd_lcores[lc_id]->stream_idx = sm_id; 1129 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1130 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1131 } 1132 1133 /* 1134 * Assign extra remaining streams, if any. 1135 */ 1136 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1137 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1138 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1139 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1140 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1141 } 1142 } 1143 1144 static void 1145 simple_fwd_config_setup(void) 1146 { 1147 portid_t i; 1148 portid_t j; 1149 portid_t inc = 2; 1150 1151 if (port_topology == PORT_TOPOLOGY_CHAINED || 1152 port_topology == PORT_TOPOLOGY_LOOP) { 1153 inc = 1; 1154 } else if (nb_fwd_ports % 2) { 1155 printf("\nWarning! Cannot handle an odd number of ports " 1156 "with the current port topology. Configuration " 1157 "must be changed to have an even number of ports, " 1158 "or relaunch application with " 1159 "--port-topology=chained\n\n"); 1160 } 1161 1162 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1163 cur_fwd_config.nb_fwd_streams = 1164 (streamid_t) cur_fwd_config.nb_fwd_ports; 1165 1166 /* reinitialize forwarding streams */ 1167 init_fwd_streams(); 1168 1169 /* 1170 * In the simple forwarding test, the number of forwarding cores 1171 * must be lower or equal to the number of forwarding ports. 1172 */ 1173 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1174 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1175 cur_fwd_config.nb_fwd_lcores = 1176 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1177 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1178 1179 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1180 if (port_topology != PORT_TOPOLOGY_LOOP) 1181 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1182 else 1183 j = i; 1184 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1185 fwd_streams[i]->rx_queue = 0; 1186 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1187 fwd_streams[i]->tx_queue = 0; 1188 fwd_streams[i]->peer_addr = j; 1189 fwd_streams[i]->retry_enabled = retry_enabled; 1190 1191 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1192 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1193 fwd_streams[j]->rx_queue = 0; 1194 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1195 fwd_streams[j]->tx_queue = 0; 1196 fwd_streams[j]->peer_addr = i; 1197 fwd_streams[j]->retry_enabled = retry_enabled; 1198 } 1199 } 1200 } 1201 1202 /** 1203 * For the RSS forwarding test all streams distributed over lcores. Each stream 1204 * being composed of a RX queue to poll on a RX port for input messages, 1205 * associated with a TX queue of a TX port where to send forwarded packets. 1206 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1207 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1208 * following rules: 1209 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1210 * - TxQl = RxQj 1211 */ 1212 static void 1213 rss_fwd_config_setup(void) 1214 { 1215 portid_t rxp; 1216 portid_t txp; 1217 queueid_t rxq; 1218 queueid_t nb_q; 1219 streamid_t sm_id; 1220 1221 nb_q = nb_rxq; 1222 if (nb_q > nb_txq) 1223 nb_q = nb_txq; 1224 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1225 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1226 cur_fwd_config.nb_fwd_streams = 1227 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1228 1229 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1230 cur_fwd_config.nb_fwd_lcores = 1231 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1232 1233 /* reinitialize forwarding streams */ 1234 init_fwd_streams(); 1235 1236 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1237 rxp = 0; rxq = 0; 1238 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1239 struct fwd_stream *fs; 1240 1241 fs = fwd_streams[sm_id]; 1242 1243 if ((rxp & 0x1) == 0) 1244 txp = (portid_t) (rxp + 1); 1245 else 1246 txp = (portid_t) (rxp - 1); 1247 /* 1248 * if we are in loopback, simply send stuff out through the 1249 * ingress port 1250 */ 1251 if (port_topology == PORT_TOPOLOGY_LOOP) 1252 txp = rxp; 1253 1254 fs->rx_port = fwd_ports_ids[rxp]; 1255 fs->rx_queue = rxq; 1256 fs->tx_port = fwd_ports_ids[txp]; 1257 fs->tx_queue = rxq; 1258 fs->peer_addr = fs->tx_port; 1259 fs->retry_enabled = retry_enabled; 1260 rxq = (queueid_t) (rxq + 1); 1261 if (rxq < nb_q) 1262 continue; 1263 /* 1264 * rxq == nb_q 1265 * Restart from RX queue 0 on next RX port 1266 */ 1267 rxq = 0; 1268 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1269 rxp = (portid_t) 1270 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1271 else 1272 rxp = (portid_t) (rxp + 1); 1273 } 1274 } 1275 1276 /** 1277 * For the DCB forwarding test, each core is assigned on each traffic class. 1278 * 1279 * Each core is assigned a multi-stream, each stream being composed of 1280 * a RX queue to poll on a RX port for input messages, associated with 1281 * a TX queue of a TX port where to send forwarded packets. All RX and 1282 * TX queues are mapping to the same traffic class. 1283 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 1284 * the same core 1285 */ 1286 static void 1287 dcb_fwd_config_setup(void) 1288 { 1289 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 1290 portid_t txp, rxp = 0; 1291 queueid_t txq, rxq = 0; 1292 lcoreid_t lc_id; 1293 uint16_t nb_rx_queue, nb_tx_queue; 1294 uint16_t i, j, k, sm_id = 0; 1295 uint8_t tc = 0; 1296 1297 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1298 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1299 cur_fwd_config.nb_fwd_streams = 1300 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1301 1302 /* reinitialize forwarding streams */ 1303 init_fwd_streams(); 1304 sm_id = 0; 1305 txp = 1; 1306 /* get the dcb info on the first RX and TX ports */ 1307 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1308 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1309 1310 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1311 fwd_lcores[lc_id]->stream_nb = 0; 1312 fwd_lcores[lc_id]->stream_idx = sm_id; 1313 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 1314 /* if the nb_queue is zero, means this tc is 1315 * not enabled on the POOL 1316 */ 1317 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 1318 break; 1319 k = fwd_lcores[lc_id]->stream_nb + 1320 fwd_lcores[lc_id]->stream_idx; 1321 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 1322 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 1323 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1324 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 1325 for (j = 0; j < nb_rx_queue; j++) { 1326 struct fwd_stream *fs; 1327 1328 fs = fwd_streams[k + j]; 1329 fs->rx_port = fwd_ports_ids[rxp]; 1330 fs->rx_queue = rxq + j; 1331 fs->tx_port = fwd_ports_ids[txp]; 1332 fs->tx_queue = txq + j % nb_tx_queue; 1333 fs->peer_addr = fs->tx_port; 1334 fs->retry_enabled = retry_enabled; 1335 } 1336 fwd_lcores[lc_id]->stream_nb += 1337 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1338 } 1339 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 1340 1341 tc++; 1342 if (tc < rxp_dcb_info.nb_tcs) 1343 continue; 1344 /* Restart from TC 0 on next RX port */ 1345 tc = 0; 1346 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1347 rxp = (portid_t) 1348 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1349 else 1350 rxp++; 1351 if (rxp >= nb_fwd_ports) 1352 return; 1353 /* get the dcb information on next RX and TX ports */ 1354 if ((rxp & 0x1) == 0) 1355 txp = (portid_t) (rxp + 1); 1356 else 1357 txp = (portid_t) (rxp - 1); 1358 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1359 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1360 } 1361 } 1362 1363 static void 1364 icmp_echo_config_setup(void) 1365 { 1366 portid_t rxp; 1367 queueid_t rxq; 1368 lcoreid_t lc_id; 1369 uint16_t sm_id; 1370 1371 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 1372 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 1373 (nb_txq * nb_fwd_ports); 1374 else 1375 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1376 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1377 cur_fwd_config.nb_fwd_streams = 1378 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1379 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1380 cur_fwd_config.nb_fwd_lcores = 1381 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1382 if (verbose_level > 0) { 1383 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 1384 __FUNCTION__, 1385 cur_fwd_config.nb_fwd_lcores, 1386 cur_fwd_config.nb_fwd_ports, 1387 cur_fwd_config.nb_fwd_streams); 1388 } 1389 1390 /* reinitialize forwarding streams */ 1391 init_fwd_streams(); 1392 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1393 rxp = 0; rxq = 0; 1394 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1395 if (verbose_level > 0) 1396 printf(" core=%d: \n", lc_id); 1397 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1398 struct fwd_stream *fs; 1399 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1400 fs->rx_port = fwd_ports_ids[rxp]; 1401 fs->rx_queue = rxq; 1402 fs->tx_port = fs->rx_port; 1403 fs->tx_queue = rxq; 1404 fs->peer_addr = fs->tx_port; 1405 fs->retry_enabled = retry_enabled; 1406 if (verbose_level > 0) 1407 printf(" stream=%d port=%d rxq=%d txq=%d\n", 1408 sm_id, fs->rx_port, fs->rx_queue, 1409 fs->tx_queue); 1410 rxq = (queueid_t) (rxq + 1); 1411 if (rxq == nb_rxq) { 1412 rxq = 0; 1413 rxp = (portid_t) (rxp + 1); 1414 } 1415 } 1416 } 1417 } 1418 1419 void 1420 fwd_config_setup(void) 1421 { 1422 cur_fwd_config.fwd_eng = cur_fwd_eng; 1423 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 1424 icmp_echo_config_setup(); 1425 return; 1426 } 1427 if ((nb_rxq > 1) && (nb_txq > 1)){ 1428 if (dcb_config) 1429 dcb_fwd_config_setup(); 1430 else 1431 rss_fwd_config_setup(); 1432 } 1433 else 1434 simple_fwd_config_setup(); 1435 } 1436 1437 void 1438 pkt_fwd_config_display(struct fwd_config *cfg) 1439 { 1440 struct fwd_stream *fs; 1441 lcoreid_t lc_id; 1442 streamid_t sm_id; 1443 1444 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 1445 "NUMA support %s, MP over anonymous pages %s\n", 1446 cfg->fwd_eng->fwd_mode_name, 1447 retry_enabled == 0 ? "" : " with retry", 1448 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 1449 numa_support == 1 ? "enabled" : "disabled", 1450 mp_anon != 0 ? "enabled" : "disabled"); 1451 1452 if (retry_enabled) 1453 printf("TX retry num: %u, delay between TX retries: %uus\n", 1454 burst_tx_retry_num, burst_tx_delay_time); 1455 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 1456 printf("Logical Core %u (socket %u) forwards packets on " 1457 "%d streams:", 1458 fwd_lcores_cpuids[lc_id], 1459 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 1460 fwd_lcores[lc_id]->stream_nb); 1461 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1462 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1463 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 1464 "P=%d/Q=%d (socket %u) ", 1465 fs->rx_port, fs->rx_queue, 1466 ports[fs->rx_port].socket_id, 1467 fs->tx_port, fs->tx_queue, 1468 ports[fs->tx_port].socket_id); 1469 print_ethaddr("peer=", 1470 &peer_eth_addrs[fs->peer_addr]); 1471 } 1472 printf("\n"); 1473 } 1474 printf("\n"); 1475 } 1476 1477 int 1478 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 1479 { 1480 unsigned int i; 1481 unsigned int lcore_cpuid; 1482 int record_now; 1483 1484 record_now = 0; 1485 again: 1486 for (i = 0; i < nb_lc; i++) { 1487 lcore_cpuid = lcorelist[i]; 1488 if (! rte_lcore_is_enabled(lcore_cpuid)) { 1489 printf("lcore %u not enabled\n", lcore_cpuid); 1490 return -1; 1491 } 1492 if (lcore_cpuid == rte_get_master_lcore()) { 1493 printf("lcore %u cannot be masked on for running " 1494 "packet forwarding, which is the master lcore " 1495 "and reserved for command line parsing only\n", 1496 lcore_cpuid); 1497 return -1; 1498 } 1499 if (record_now) 1500 fwd_lcores_cpuids[i] = lcore_cpuid; 1501 } 1502 if (record_now == 0) { 1503 record_now = 1; 1504 goto again; 1505 } 1506 nb_cfg_lcores = (lcoreid_t) nb_lc; 1507 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 1508 printf("previous number of forwarding cores %u - changed to " 1509 "number of configured cores %u\n", 1510 (unsigned int) nb_fwd_lcores, nb_lc); 1511 nb_fwd_lcores = (lcoreid_t) nb_lc; 1512 } 1513 1514 return 0; 1515 } 1516 1517 int 1518 set_fwd_lcores_mask(uint64_t lcoremask) 1519 { 1520 unsigned int lcorelist[64]; 1521 unsigned int nb_lc; 1522 unsigned int i; 1523 1524 if (lcoremask == 0) { 1525 printf("Invalid NULL mask of cores\n"); 1526 return -1; 1527 } 1528 nb_lc = 0; 1529 for (i = 0; i < 64; i++) { 1530 if (! ((uint64_t)(1ULL << i) & lcoremask)) 1531 continue; 1532 lcorelist[nb_lc++] = i; 1533 } 1534 return set_fwd_lcores_list(lcorelist, nb_lc); 1535 } 1536 1537 void 1538 set_fwd_lcores_number(uint16_t nb_lc) 1539 { 1540 if (nb_lc > nb_cfg_lcores) { 1541 printf("nb fwd cores %u > %u (max. number of configured " 1542 "lcores) - ignored\n", 1543 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 1544 return; 1545 } 1546 nb_fwd_lcores = (lcoreid_t) nb_lc; 1547 printf("Number of forwarding cores set to %u\n", 1548 (unsigned int) nb_fwd_lcores); 1549 } 1550 1551 void 1552 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 1553 { 1554 unsigned int i; 1555 portid_t port_id; 1556 int record_now; 1557 1558 record_now = 0; 1559 again: 1560 for (i = 0; i < nb_pt; i++) { 1561 port_id = (portid_t) portlist[i]; 1562 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1563 return; 1564 if (record_now) 1565 fwd_ports_ids[i] = port_id; 1566 } 1567 if (record_now == 0) { 1568 record_now = 1; 1569 goto again; 1570 } 1571 nb_cfg_ports = (portid_t) nb_pt; 1572 if (nb_fwd_ports != (portid_t) nb_pt) { 1573 printf("previous number of forwarding ports %u - changed to " 1574 "number of configured ports %u\n", 1575 (unsigned int) nb_fwd_ports, nb_pt); 1576 nb_fwd_ports = (portid_t) nb_pt; 1577 } 1578 } 1579 1580 void 1581 set_fwd_ports_mask(uint64_t portmask) 1582 { 1583 unsigned int portlist[64]; 1584 unsigned int nb_pt; 1585 unsigned int i; 1586 1587 if (portmask == 0) { 1588 printf("Invalid NULL mask of ports\n"); 1589 return; 1590 } 1591 nb_pt = 0; 1592 for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) { 1593 if (! ((uint64_t)(1ULL << i) & portmask)) 1594 continue; 1595 portlist[nb_pt++] = i; 1596 } 1597 set_fwd_ports_list(portlist, nb_pt); 1598 } 1599 1600 void 1601 set_fwd_ports_number(uint16_t nb_pt) 1602 { 1603 if (nb_pt > nb_cfg_ports) { 1604 printf("nb fwd ports %u > %u (number of configured " 1605 "ports) - ignored\n", 1606 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 1607 return; 1608 } 1609 nb_fwd_ports = (portid_t) nb_pt; 1610 printf("Number of forwarding ports set to %u\n", 1611 (unsigned int) nb_fwd_ports); 1612 } 1613 1614 int 1615 port_is_forwarding(portid_t port_id) 1616 { 1617 unsigned int i; 1618 1619 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1620 return -1; 1621 1622 for (i = 0; i < nb_fwd_ports; i++) { 1623 if (fwd_ports_ids[i] == port_id) 1624 return 1; 1625 } 1626 1627 return 0; 1628 } 1629 1630 void 1631 set_nb_pkt_per_burst(uint16_t nb) 1632 { 1633 if (nb > MAX_PKT_BURST) { 1634 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 1635 " ignored\n", 1636 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 1637 return; 1638 } 1639 nb_pkt_per_burst = nb; 1640 printf("Number of packets per burst set to %u\n", 1641 (unsigned int) nb_pkt_per_burst); 1642 } 1643 1644 static const char * 1645 tx_split_get_name(enum tx_pkt_split split) 1646 { 1647 uint32_t i; 1648 1649 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 1650 if (tx_split_name[i].split == split) 1651 return tx_split_name[i].name; 1652 } 1653 return NULL; 1654 } 1655 1656 void 1657 set_tx_pkt_split(const char *name) 1658 { 1659 uint32_t i; 1660 1661 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 1662 if (strcmp(tx_split_name[i].name, name) == 0) { 1663 tx_pkt_split = tx_split_name[i].split; 1664 return; 1665 } 1666 } 1667 printf("unknown value: \"%s\"\n", name); 1668 } 1669 1670 void 1671 show_tx_pkt_segments(void) 1672 { 1673 uint32_t i, n; 1674 const char *split; 1675 1676 n = tx_pkt_nb_segs; 1677 split = tx_split_get_name(tx_pkt_split); 1678 1679 printf("Number of segments: %u\n", n); 1680 printf("Segment sizes: "); 1681 for (i = 0; i != n - 1; i++) 1682 printf("%hu,", tx_pkt_seg_lengths[i]); 1683 printf("%hu\n", tx_pkt_seg_lengths[i]); 1684 printf("Split packet: %s\n", split); 1685 } 1686 1687 void 1688 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 1689 { 1690 uint16_t tx_pkt_len; 1691 unsigned i; 1692 1693 if (nb_segs >= (unsigned) nb_txd) { 1694 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 1695 nb_segs, (unsigned int) nb_txd); 1696 return; 1697 } 1698 1699 /* 1700 * Check that each segment length is greater or equal than 1701 * the mbuf data sise. 1702 * Check also that the total packet length is greater or equal than the 1703 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 1704 */ 1705 tx_pkt_len = 0; 1706 for (i = 0; i < nb_segs; i++) { 1707 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 1708 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 1709 i, seg_lengths[i], (unsigned) mbuf_data_size); 1710 return; 1711 } 1712 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 1713 } 1714 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 1715 printf("total packet length=%u < %d - give up\n", 1716 (unsigned) tx_pkt_len, 1717 (int)(sizeof(struct ether_hdr) + 20 + 8)); 1718 return; 1719 } 1720 1721 for (i = 0; i < nb_segs; i++) 1722 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 1723 1724 tx_pkt_length = tx_pkt_len; 1725 tx_pkt_nb_segs = (uint8_t) nb_segs; 1726 } 1727 1728 char* 1729 list_pkt_forwarding_modes(void) 1730 { 1731 static char fwd_modes[128] = ""; 1732 const char *separator = "|"; 1733 struct fwd_engine *fwd_eng; 1734 unsigned i = 0; 1735 1736 if (strlen (fwd_modes) == 0) { 1737 while ((fwd_eng = fwd_engines[i++]) != NULL) { 1738 strncat(fwd_modes, fwd_eng->fwd_mode_name, 1739 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 1740 strncat(fwd_modes, separator, 1741 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 1742 } 1743 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 1744 } 1745 1746 return fwd_modes; 1747 } 1748 1749 char* 1750 list_pkt_forwarding_retry_modes(void) 1751 { 1752 static char fwd_modes[128] = ""; 1753 const char *separator = "|"; 1754 struct fwd_engine *fwd_eng; 1755 unsigned i = 0; 1756 1757 if (strlen(fwd_modes) == 0) { 1758 while ((fwd_eng = fwd_engines[i++]) != NULL) { 1759 if (fwd_eng == &rx_only_engine) 1760 continue; 1761 strncat(fwd_modes, fwd_eng->fwd_mode_name, 1762 sizeof(fwd_modes) - 1763 strlen(fwd_modes) - 1); 1764 strncat(fwd_modes, separator, 1765 sizeof(fwd_modes) - 1766 strlen(fwd_modes) - 1); 1767 } 1768 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 1769 } 1770 1771 return fwd_modes; 1772 } 1773 1774 void 1775 set_pkt_forwarding_mode(const char *fwd_mode_name) 1776 { 1777 struct fwd_engine *fwd_eng; 1778 unsigned i; 1779 1780 i = 0; 1781 while ((fwd_eng = fwd_engines[i]) != NULL) { 1782 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 1783 printf("Set %s packet forwarding mode%s\n", 1784 fwd_mode_name, 1785 retry_enabled == 0 ? "" : " with retry"); 1786 cur_fwd_eng = fwd_eng; 1787 return; 1788 } 1789 i++; 1790 } 1791 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 1792 } 1793 1794 void 1795 set_verbose_level(uint16_t vb_level) 1796 { 1797 printf("Change verbose level from %u to %u\n", 1798 (unsigned int) verbose_level, (unsigned int) vb_level); 1799 verbose_level = vb_level; 1800 } 1801 1802 void 1803 vlan_extend_set(portid_t port_id, int on) 1804 { 1805 int diag; 1806 int vlan_offload; 1807 1808 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1809 return; 1810 1811 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1812 1813 if (on) 1814 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 1815 else 1816 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 1817 1818 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1819 if (diag < 0) 1820 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 1821 "diag=%d\n", port_id, on, diag); 1822 } 1823 1824 void 1825 rx_vlan_strip_set(portid_t port_id, int on) 1826 { 1827 int diag; 1828 int vlan_offload; 1829 1830 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1831 return; 1832 1833 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1834 1835 if (on) 1836 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 1837 else 1838 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 1839 1840 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1841 if (diag < 0) 1842 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 1843 "diag=%d\n", port_id, on, diag); 1844 } 1845 1846 void 1847 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 1848 { 1849 int diag; 1850 1851 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1852 return; 1853 1854 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 1855 if (diag < 0) 1856 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 1857 "diag=%d\n", port_id, queue_id, on, diag); 1858 } 1859 1860 void 1861 rx_vlan_filter_set(portid_t port_id, int on) 1862 { 1863 int diag; 1864 int vlan_offload; 1865 1866 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1867 return; 1868 1869 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1870 1871 if (on) 1872 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 1873 else 1874 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 1875 1876 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1877 if (diag < 0) 1878 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 1879 "diag=%d\n", port_id, on, diag); 1880 } 1881 1882 int 1883 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 1884 { 1885 int diag; 1886 1887 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1888 return 1; 1889 if (vlan_id_is_invalid(vlan_id)) 1890 return 1; 1891 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 1892 if (diag == 0) 1893 return 0; 1894 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 1895 "diag=%d\n", 1896 port_id, vlan_id, on, diag); 1897 return -1; 1898 } 1899 1900 void 1901 rx_vlan_all_filter_set(portid_t port_id, int on) 1902 { 1903 uint16_t vlan_id; 1904 1905 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1906 return; 1907 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 1908 if (rx_vft_set(port_id, vlan_id, on)) 1909 break; 1910 } 1911 } 1912 1913 void 1914 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 1915 { 1916 int diag; 1917 1918 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1919 return; 1920 1921 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 1922 if (diag == 0) 1923 return; 1924 1925 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 1926 "diag=%d\n", 1927 port_id, vlan_type, tp_id, diag); 1928 } 1929 1930 void 1931 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 1932 { 1933 int vlan_offload; 1934 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1935 return; 1936 if (vlan_id_is_invalid(vlan_id)) 1937 return; 1938 1939 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1940 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 1941 printf("Error, as QinQ has been enabled.\n"); 1942 return; 1943 } 1944 1945 tx_vlan_reset(port_id); 1946 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; 1947 ports[port_id].tx_vlan_id = vlan_id; 1948 } 1949 1950 void 1951 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 1952 { 1953 int vlan_offload; 1954 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1955 return; 1956 if (vlan_id_is_invalid(vlan_id)) 1957 return; 1958 if (vlan_id_is_invalid(vlan_id_outer)) 1959 return; 1960 1961 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1962 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 1963 printf("Error, as QinQ hasn't been enabled.\n"); 1964 return; 1965 } 1966 1967 tx_vlan_reset(port_id); 1968 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; 1969 ports[port_id].tx_vlan_id = vlan_id; 1970 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 1971 } 1972 1973 void 1974 tx_vlan_reset(portid_t port_id) 1975 { 1976 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1977 return; 1978 ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | 1979 TESTPMD_TX_OFFLOAD_INSERT_QINQ); 1980 ports[port_id].tx_vlan_id = 0; 1981 ports[port_id].tx_vlan_id_outer = 0; 1982 } 1983 1984 void 1985 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 1986 { 1987 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1988 return; 1989 1990 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 1991 } 1992 1993 void 1994 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 1995 { 1996 uint16_t i; 1997 uint8_t existing_mapping_found = 0; 1998 1999 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2000 return; 2001 2002 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2003 return; 2004 2005 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2006 printf("map_value not in required range 0..%d\n", 2007 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2008 return; 2009 } 2010 2011 if (!is_rx) { /*then tx*/ 2012 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2013 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2014 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2015 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2016 existing_mapping_found = 1; 2017 break; 2018 } 2019 } 2020 if (!existing_mapping_found) { /* A new additional mapping... */ 2021 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2022 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2023 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2024 nb_tx_queue_stats_mappings++; 2025 } 2026 } 2027 else { /*rx*/ 2028 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2029 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2030 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2031 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2032 existing_mapping_found = 1; 2033 break; 2034 } 2035 } 2036 if (!existing_mapping_found) { /* A new additional mapping... */ 2037 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2038 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2039 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2040 nb_rx_queue_stats_mappings++; 2041 } 2042 } 2043 } 2044 2045 static inline void 2046 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2047 { 2048 printf("\n vlan_tci: 0x%04x, ", mask->vlan_tci_mask); 2049 2050 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 2051 printf("mac_addr: 0x%02x", mask->mac_addr_byte_mask); 2052 else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2053 printf("mac_addr: 0x%02x, tunnel_type: 0x%01x, tunnel_id: 0x%08x", 2054 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2055 mask->tunnel_id_mask); 2056 else { 2057 printf("src_ipv4: 0x%08x, dst_ipv4: 0x%08x," 2058 " src_port: 0x%04x, dst_port: 0x%04x", 2059 mask->ipv4_mask.src_ip, mask->ipv4_mask.dst_ip, 2060 mask->src_port_mask, mask->dst_port_mask); 2061 2062 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x," 2063 " dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2064 mask->ipv6_mask.src_ip[0], mask->ipv6_mask.src_ip[1], 2065 mask->ipv6_mask.src_ip[2], mask->ipv6_mask.src_ip[3], 2066 mask->ipv6_mask.dst_ip[0], mask->ipv6_mask.dst_ip[1], 2067 mask->ipv6_mask.dst_ip[2], mask->ipv6_mask.dst_ip[3]); 2068 } 2069 2070 printf("\n"); 2071 } 2072 2073 static inline void 2074 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2075 { 2076 struct rte_eth_flex_payload_cfg *cfg; 2077 uint32_t i, j; 2078 2079 for (i = 0; i < flex_conf->nb_payloads; i++) { 2080 cfg = &flex_conf->flex_set[i]; 2081 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 2082 printf("\n RAW: "); 2083 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 2084 printf("\n L2_PAYLOAD: "); 2085 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 2086 printf("\n L3_PAYLOAD: "); 2087 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 2088 printf("\n L4_PAYLOAD: "); 2089 else 2090 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 2091 for (j = 0; j < num; j++) 2092 printf(" %-5u", cfg->src_offset[j]); 2093 } 2094 printf("\n"); 2095 } 2096 2097 static char * 2098 flowtype_to_str(uint16_t flow_type) 2099 { 2100 struct flow_type_info { 2101 char str[32]; 2102 uint16_t ftype; 2103 }; 2104 2105 uint8_t i; 2106 static struct flow_type_info flowtype_str_table[] = { 2107 {"raw", RTE_ETH_FLOW_RAW}, 2108 {"ipv4", RTE_ETH_FLOW_IPV4}, 2109 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 2110 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 2111 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 2112 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 2113 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 2114 {"ipv6", RTE_ETH_FLOW_IPV6}, 2115 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 2116 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 2117 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 2118 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 2119 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 2120 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 2121 {"port", RTE_ETH_FLOW_PORT}, 2122 {"vxlan", RTE_ETH_FLOW_VXLAN}, 2123 {"geneve", RTE_ETH_FLOW_GENEVE}, 2124 {"nvgre", RTE_ETH_FLOW_NVGRE}, 2125 }; 2126 2127 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 2128 if (flowtype_str_table[i].ftype == flow_type) 2129 return flowtype_str_table[i].str; 2130 } 2131 2132 return NULL; 2133 } 2134 2135 static inline void 2136 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2137 { 2138 struct rte_eth_fdir_flex_mask *mask; 2139 uint32_t i, j; 2140 char *p; 2141 2142 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 2143 mask = &flex_conf->flex_mask[i]; 2144 p = flowtype_to_str(mask->flow_type); 2145 printf("\n %s:\t", p ? p : "unknown"); 2146 for (j = 0; j < num; j++) 2147 printf(" %02x", mask->mask[j]); 2148 } 2149 printf("\n"); 2150 } 2151 2152 static inline void 2153 print_fdir_flow_type(uint32_t flow_types_mask) 2154 { 2155 int i; 2156 char *p; 2157 2158 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 2159 if (!(flow_types_mask & (1 << i))) 2160 continue; 2161 p = flowtype_to_str(i); 2162 if (p) 2163 printf(" %s", p); 2164 else 2165 printf(" unknown"); 2166 } 2167 printf("\n"); 2168 } 2169 2170 void 2171 fdir_get_infos(portid_t port_id) 2172 { 2173 struct rte_eth_fdir_stats fdir_stat; 2174 struct rte_eth_fdir_info fdir_info; 2175 int ret; 2176 2177 static const char *fdir_stats_border = "########################"; 2178 2179 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2180 return; 2181 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 2182 if (ret < 0) { 2183 printf("\n FDIR is not supported on port %-2d\n", 2184 port_id); 2185 return; 2186 } 2187 2188 memset(&fdir_info, 0, sizeof(fdir_info)); 2189 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2190 RTE_ETH_FILTER_INFO, &fdir_info); 2191 memset(&fdir_stat, 0, sizeof(fdir_stat)); 2192 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2193 RTE_ETH_FILTER_STATS, &fdir_stat); 2194 printf("\n %s FDIR infos for port %-2d %s\n", 2195 fdir_stats_border, port_id, fdir_stats_border); 2196 printf(" MODE: "); 2197 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 2198 printf(" PERFECT\n"); 2199 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 2200 printf(" PERFECT-MAC-VLAN\n"); 2201 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2202 printf(" PERFECT-TUNNEL\n"); 2203 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 2204 printf(" SIGNATURE\n"); 2205 else 2206 printf(" DISABLE\n"); 2207 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 2208 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 2209 printf(" SUPPORTED FLOW TYPE: "); 2210 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 2211 } 2212 printf(" FLEX PAYLOAD INFO:\n"); 2213 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 2214 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 2215 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 2216 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 2217 fdir_info.flex_payload_unit, 2218 fdir_info.max_flex_payload_segment_num, 2219 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 2220 printf(" MASK: "); 2221 print_fdir_mask(&fdir_info.mask); 2222 if (fdir_info.flex_conf.nb_payloads > 0) { 2223 printf(" FLEX PAYLOAD SRC OFFSET:"); 2224 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2225 } 2226 if (fdir_info.flex_conf.nb_flexmasks > 0) { 2227 printf(" FLEX MASK CFG:"); 2228 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2229 } 2230 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 2231 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 2232 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 2233 fdir_info.guarant_spc, fdir_info.best_spc); 2234 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 2235 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 2236 " add: %-10"PRIu64" remove: %"PRIu64"\n" 2237 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 2238 fdir_stat.collision, fdir_stat.free, 2239 fdir_stat.maxhash, fdir_stat.maxlen, 2240 fdir_stat.add, fdir_stat.remove, 2241 fdir_stat.f_add, fdir_stat.f_remove); 2242 printf(" %s############################%s\n", 2243 fdir_stats_border, fdir_stats_border); 2244 } 2245 2246 void 2247 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 2248 { 2249 struct rte_port *port; 2250 struct rte_eth_fdir_flex_conf *flex_conf; 2251 int i, idx = 0; 2252 2253 port = &ports[port_id]; 2254 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2255 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 2256 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 2257 idx = i; 2258 break; 2259 } 2260 } 2261 if (i >= RTE_ETH_FLOW_MAX) { 2262 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 2263 idx = flex_conf->nb_flexmasks; 2264 flex_conf->nb_flexmasks++; 2265 } else { 2266 printf("The flex mask table is full. Can not set flex" 2267 " mask for flow_type(%u).", cfg->flow_type); 2268 return; 2269 } 2270 } 2271 (void)rte_memcpy(&flex_conf->flex_mask[idx], 2272 cfg, 2273 sizeof(struct rte_eth_fdir_flex_mask)); 2274 } 2275 2276 void 2277 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 2278 { 2279 struct rte_port *port; 2280 struct rte_eth_fdir_flex_conf *flex_conf; 2281 int i, idx = 0; 2282 2283 port = &ports[port_id]; 2284 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2285 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 2286 if (cfg->type == flex_conf->flex_set[i].type) { 2287 idx = i; 2288 break; 2289 } 2290 } 2291 if (i >= RTE_ETH_PAYLOAD_MAX) { 2292 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 2293 idx = flex_conf->nb_payloads; 2294 flex_conf->nb_payloads++; 2295 } else { 2296 printf("The flex payload table is full. Can not set" 2297 " flex payload for type(%u).", cfg->type); 2298 return; 2299 } 2300 } 2301 (void)rte_memcpy(&flex_conf->flex_set[idx], 2302 cfg, 2303 sizeof(struct rte_eth_flex_payload_cfg)); 2304 2305 } 2306 2307 void 2308 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 2309 { 2310 int diag; 2311 2312 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2313 return; 2314 if (is_rx) 2315 diag = rte_eth_dev_set_vf_rx(port_id,vf,on); 2316 else 2317 diag = rte_eth_dev_set_vf_tx(port_id,vf,on); 2318 if (diag == 0) 2319 return; 2320 if(is_rx) 2321 printf("rte_eth_dev_set_vf_rx for port_id=%d failed " 2322 "diag=%d\n", port_id, diag); 2323 else 2324 printf("rte_eth_dev_set_vf_tx for port_id=%d failed " 2325 "diag=%d\n", port_id, diag); 2326 2327 } 2328 2329 void 2330 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on) 2331 { 2332 int diag; 2333 2334 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2335 return; 2336 if (vlan_id_is_invalid(vlan_id)) 2337 return; 2338 diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on); 2339 if (diag == 0) 2340 return; 2341 printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed " 2342 "diag=%d\n", port_id, diag); 2343 } 2344 2345 int 2346 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 2347 { 2348 int diag; 2349 struct rte_eth_link link; 2350 2351 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2352 return 1; 2353 rte_eth_link_get_nowait(port_id, &link); 2354 if (rate > link.link_speed) { 2355 printf("Invalid rate value:%u bigger than link speed: %u\n", 2356 rate, link.link_speed); 2357 return 1; 2358 } 2359 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 2360 if (diag == 0) 2361 return diag; 2362 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 2363 port_id, diag); 2364 return diag; 2365 } 2366 2367 int 2368 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 2369 { 2370 int diag; 2371 struct rte_eth_link link; 2372 2373 if (q_msk == 0) 2374 return 0; 2375 2376 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2377 return 1; 2378 rte_eth_link_get_nowait(port_id, &link); 2379 if (rate > link.link_speed) { 2380 printf("Invalid rate value:%u bigger than link speed: %u\n", 2381 rate, link.link_speed); 2382 return 1; 2383 } 2384 diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk); 2385 if (diag == 0) 2386 return diag; 2387 printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n", 2388 port_id, diag); 2389 return diag; 2390 } 2391 2392 /* 2393 * Functions to manage the set of filtered Multicast MAC addresses. 2394 * 2395 * A pool of filtered multicast MAC addresses is associated with each port. 2396 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 2397 * The address of the pool and the number of valid multicast MAC addresses 2398 * recorded in the pool are stored in the fields "mc_addr_pool" and 2399 * "mc_addr_nb" of the "rte_port" data structure. 2400 * 2401 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 2402 * to be supplied a contiguous array of multicast MAC addresses. 2403 * To comply with this constraint, the set of multicast addresses recorded 2404 * into the pool are systematically compacted at the beginning of the pool. 2405 * Hence, when a multicast address is removed from the pool, all following 2406 * addresses, if any, are copied back to keep the set contiguous. 2407 */ 2408 #define MCAST_POOL_INC 32 2409 2410 static int 2411 mcast_addr_pool_extend(struct rte_port *port) 2412 { 2413 struct ether_addr *mc_pool; 2414 size_t mc_pool_size; 2415 2416 /* 2417 * If a free entry is available at the end of the pool, just 2418 * increment the number of recorded multicast addresses. 2419 */ 2420 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 2421 port->mc_addr_nb++; 2422 return 0; 2423 } 2424 2425 /* 2426 * [re]allocate a pool with MCAST_POOL_INC more entries. 2427 * The previous test guarantees that port->mc_addr_nb is a multiple 2428 * of MCAST_POOL_INC. 2429 */ 2430 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 2431 MCAST_POOL_INC); 2432 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 2433 mc_pool_size); 2434 if (mc_pool == NULL) { 2435 printf("allocation of pool of %u multicast addresses failed\n", 2436 port->mc_addr_nb + MCAST_POOL_INC); 2437 return -ENOMEM; 2438 } 2439 2440 port->mc_addr_pool = mc_pool; 2441 port->mc_addr_nb++; 2442 return 0; 2443 2444 } 2445 2446 static void 2447 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 2448 { 2449 port->mc_addr_nb--; 2450 if (addr_idx == port->mc_addr_nb) { 2451 /* No need to recompact the set of multicast addressses. */ 2452 if (port->mc_addr_nb == 0) { 2453 /* free the pool of multicast addresses. */ 2454 free(port->mc_addr_pool); 2455 port->mc_addr_pool = NULL; 2456 } 2457 return; 2458 } 2459 memmove(&port->mc_addr_pool[addr_idx], 2460 &port->mc_addr_pool[addr_idx + 1], 2461 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 2462 } 2463 2464 static void 2465 eth_port_multicast_addr_list_set(uint8_t port_id) 2466 { 2467 struct rte_port *port; 2468 int diag; 2469 2470 port = &ports[port_id]; 2471 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 2472 port->mc_addr_nb); 2473 if (diag == 0) 2474 return; 2475 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 2476 port->mc_addr_nb, port_id, -diag); 2477 } 2478 2479 void 2480 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr) 2481 { 2482 struct rte_port *port; 2483 uint32_t i; 2484 2485 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2486 return; 2487 2488 port = &ports[port_id]; 2489 2490 /* 2491 * Check that the added multicast MAC address is not already recorded 2492 * in the pool of multicast addresses. 2493 */ 2494 for (i = 0; i < port->mc_addr_nb; i++) { 2495 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 2496 printf("multicast address already filtered by port\n"); 2497 return; 2498 } 2499 } 2500 2501 if (mcast_addr_pool_extend(port) != 0) 2502 return; 2503 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 2504 eth_port_multicast_addr_list_set(port_id); 2505 } 2506 2507 void 2508 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr) 2509 { 2510 struct rte_port *port; 2511 uint32_t i; 2512 2513 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2514 return; 2515 2516 port = &ports[port_id]; 2517 2518 /* 2519 * Search the pool of multicast MAC addresses for the removed address. 2520 */ 2521 for (i = 0; i < port->mc_addr_nb; i++) { 2522 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 2523 break; 2524 } 2525 if (i == port->mc_addr_nb) { 2526 printf("multicast address not filtered by port %d\n", port_id); 2527 return; 2528 } 2529 2530 mcast_addr_pool_remove(port, i); 2531 eth_port_multicast_addr_list_set(port_id); 2532 } 2533 2534 void 2535 port_dcb_info_display(uint8_t port_id) 2536 { 2537 struct rte_eth_dcb_info dcb_info; 2538 uint16_t i; 2539 int ret; 2540 static const char *border = "================"; 2541 2542 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2543 return; 2544 2545 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 2546 if (ret) { 2547 printf("\n Failed to get dcb infos on port %-2d\n", 2548 port_id); 2549 return; 2550 } 2551 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 2552 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 2553 printf("\n TC : "); 2554 for (i = 0; i < dcb_info.nb_tcs; i++) 2555 printf("\t%4d", i); 2556 printf("\n Priority : "); 2557 for (i = 0; i < dcb_info.nb_tcs; i++) 2558 printf("\t%4d", dcb_info.prio_tc[i]); 2559 printf("\n BW percent :"); 2560 for (i = 0; i < dcb_info.nb_tcs; i++) 2561 printf("\t%4d%%", dcb_info.tc_bws[i]); 2562 printf("\n RXQ base : "); 2563 for (i = 0; i < dcb_info.nb_tcs; i++) 2564 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 2565 printf("\n RXQ number :"); 2566 for (i = 0; i < dcb_info.nb_tcs; i++) 2567 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 2568 printf("\n TXQ base : "); 2569 for (i = 0; i < dcb_info.nb_tcs; i++) 2570 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 2571 printf("\n TXQ number :"); 2572 for (i = 0; i < dcb_info.nb_tcs; i++) 2573 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 2574 printf("\n"); 2575 } 2576