/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*   BSD LICENSE
 *
 *   Copyright 2013-2014 6WIND S.A.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>

#include "testpmd.h"

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

struct rss_type_info {
	char str[32];
	uint64_t rss_type;
};

static const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
};

static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
	portid_t pid;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64
		       " RX-bytes: %-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf: %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64
		       " TX-bytes: %-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	} else {
		printf("  RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf("  RX-errors: %10"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf: %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i],
			       stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id];
	diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id];
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
	printf("\n  Throughput (since last show)\n");
	printf("  Rx-pps: %12"PRIu64"\n  Tx-pps: %12"PRIu64"\n",
	       mpps_rx, mpps_tx);

	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
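/*
 * Illustrative note (not part of upstream testpmd): the Rx/Tx-pps figures
 * above come from the TSC delta between two "show port stats" invocations,
 * as pps = diff_pkts * rte_get_tsc_hz() / diff_cycles. For example,
 * assuming a 2 GHz TSC and two seconds between calls (diff_cycles = 4e9),
 * diff_pkts_rx = 29,760,000 yields 29.76e6 * 2e9 / 4e9 = 14,880,000 pps,
 * i.e. 64-byte line rate on a 10 GbE link.
 */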
void
nic_stats_clear(portid_t port_id)
{
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat, idx_name;
	struct rte_eth_xstat_name *xstats_names;

	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++)
		for (idx_name = 0; idx_name < cnt_xstats; idx_name++)
			if (xstats_names[idx_name].id == xstats[idx_xstat].id) {
				printf("%s: %"PRIu64"\n",
				       xstats_names[idx_name].name,
				       xstats[idx_xstat].value);
				break;
			}
	free(xstats_names);
	free(xstats);
}
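/*
 * Usage sketch (illustrative, not upstream code): the xstats API follows
 * the common "query size, then fetch" idiom used above - a first call with
 * a NULL buffer returns the entry count, the caller allocates, and a
 * second call fills the array:
 *
 *   int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *   struct rte_eth_xstat_name *names = malloc(sizeof(*names) * n);
 *   rte_eth_xstats_get_names(port_id, names, n);
 */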
printf("Error: Unable to get xstats\n"); 302 free(xstats_names); 303 free(xstats); 304 return; 305 } 306 307 /* Display xstats */ 308 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) 309 for (idx_name = 0; idx_name < cnt_xstats; idx_name++) 310 if (xstats_names[idx_name].id == xstats[idx_xstat].id) { 311 printf("%s: %"PRIu64"\n", 312 xstats_names[idx_name].name, 313 xstats[idx_xstat].value); 314 break; 315 } 316 free(xstats_names); 317 free(xstats); 318 } 319 320 void 321 nic_xstats_clear(portid_t port_id) 322 { 323 rte_eth_xstats_reset(port_id); 324 } 325 326 void 327 nic_stats_mapping_display(portid_t port_id) 328 { 329 struct rte_port *port = &ports[port_id]; 330 uint16_t i; 331 portid_t pid; 332 333 static const char *nic_stats_mapping_border = "########################"; 334 335 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 336 printf("Valid port range is [0"); 337 FOREACH_PORT(pid, ports) 338 printf(", %d", pid); 339 printf("]\n"); 340 return; 341 } 342 343 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 344 printf("Port id %d - either does not support queue statistic mapping or" 345 " no queue statistic mapping set\n", port_id); 346 return; 347 } 348 349 printf("\n %s NIC statistics mapping for port %-2d %s\n", 350 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 351 352 if (port->rx_queue_stats_mapping_enabled) { 353 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 354 if (rx_queue_stats_mappings[i].port_id == port_id) { 355 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 356 rx_queue_stats_mappings[i].queue_id, 357 rx_queue_stats_mappings[i].stats_counter_id); 358 } 359 } 360 printf("\n"); 361 } 362 363 364 if (port->tx_queue_stats_mapping_enabled) { 365 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 366 if (tx_queue_stats_mappings[i].port_id == port_id) { 367 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 368 tx_queue_stats_mappings[i].queue_id, 369 tx_queue_stats_mappings[i].stats_counter_id); 370 } 371 } 372 } 373 374 printf(" %s####################################%s\n", 375 nic_stats_mapping_border, nic_stats_mapping_border); 376 } 377 378 void 379 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 380 { 381 struct rte_eth_rxq_info qinfo; 382 int32_t rc; 383 static const char *info_border = "*********************"; 384 385 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 386 if (rc != 0) { 387 printf("Failed to retrieve information for port: %hhu, " 388 "RX queue: %hu\nerror desc: %s(%d)\n", 389 port_id, queue_id, strerror(-rc), rc); 390 return; 391 } 392 393 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 394 info_border, port_id, queue_id, info_border); 395 396 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 397 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 398 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 399 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 400 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 401 printf("\nRX drop packets: %s", 402 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 403 printf("\nRX deferred start: %s", 404 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 405 printf("\nRX scattered packets: %s", 406 (qinfo.scattered_rx != 0) ? 
"on" : "off"); 407 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 408 printf("\n"); 409 } 410 411 void 412 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 413 { 414 struct rte_eth_txq_info qinfo; 415 int32_t rc; 416 static const char *info_border = "*********************"; 417 418 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 419 if (rc != 0) { 420 printf("Failed to retrieve information for port: %hhu, " 421 "TX queue: %hu\nerror desc: %s(%d)\n", 422 port_id, queue_id, strerror(-rc), rc); 423 return; 424 } 425 426 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 427 info_border, port_id, queue_id, info_border); 428 429 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 430 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 431 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 432 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 433 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 434 printf("\nTX flags: %#x", qinfo.conf.txq_flags); 435 printf("\nTX deferred start: %s", 436 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 437 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 438 printf("\n"); 439 } 440 441 void 442 port_infos_display(portid_t port_id) 443 { 444 struct rte_port *port; 445 struct ether_addr mac_addr; 446 struct rte_eth_link link; 447 struct rte_eth_dev_info dev_info; 448 int vlan_offload; 449 struct rte_mempool * mp; 450 static const char *info_border = "*********************"; 451 portid_t pid; 452 453 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 454 printf("Valid port range is [0"); 455 FOREACH_PORT(pid, ports) 456 printf(", %d", pid); 457 printf("]\n"); 458 return; 459 } 460 port = &ports[port_id]; 461 rte_eth_link_get_nowait(port_id, &link); 462 printf("\n%s Infos for port %-2d %s\n", 463 info_border, port_id, info_border); 464 rte_eth_macaddr_get(port_id, &mac_addr); 465 print_ethaddr("MAC address: ", &mac_addr); 466 printf("\nConnect to socket: %u", port->socket_id); 467 468 if (port_numa[port_id] != NUMA_NO_CONFIG) { 469 mp = mbuf_pool_find(port_numa[port_id]); 470 if (mp) 471 printf("\nmemory allocation on the socket: %d", 472 port_numa[port_id]); 473 } else 474 printf("\nmemory allocation on the socket: %u",port->socket_id); 475 476 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 477 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 478 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 479 ("full-duplex") : ("half-duplex")); 480 printf("Promiscuous mode: %s\n", 481 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 482 printf("Allmulticast mode: %s\n", 483 rte_eth_allmulticast_get(port_id) ? 
"enabled" : "disabled"); 484 printf("Maximum number of MAC addresses: %u\n", 485 (unsigned int)(port->dev_info.max_mac_addrs)); 486 printf("Maximum number of MAC addresses of hash filtering: %u\n", 487 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 488 489 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 490 if (vlan_offload >= 0){ 491 printf("VLAN offload: \n"); 492 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 493 printf(" strip on \n"); 494 else 495 printf(" strip off \n"); 496 497 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 498 printf(" filter on \n"); 499 else 500 printf(" filter off \n"); 501 502 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 503 printf(" qinq(extend) on \n"); 504 else 505 printf(" qinq(extend) off \n"); 506 } 507 508 memset(&dev_info, 0, sizeof(dev_info)); 509 rte_eth_dev_info_get(port_id, &dev_info); 510 if (dev_info.hash_key_size > 0) 511 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 512 if (dev_info.reta_size > 0) 513 printf("Redirection table size: %u\n", dev_info.reta_size); 514 if (!dev_info.flow_type_rss_offloads) 515 printf("No flow type is supported.\n"); 516 else { 517 uint16_t i; 518 char *p; 519 520 printf("Supported flow types:\n"); 521 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; 522 i++) { 523 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 524 continue; 525 p = flowtype_to_str(i); 526 printf(" %s\n", (p ? p : "unknown")); 527 } 528 } 529 530 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 531 printf("Max possible number of RXDs per queue: %hu\n", 532 dev_info.rx_desc_lim.nb_max); 533 printf("Min possible number of RXDs per queue: %hu\n", 534 dev_info.rx_desc_lim.nb_min); 535 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 536 537 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 538 printf("Max possible number of TXDs per queue: %hu\n", 539 dev_info.tx_desc_lim.nb_max); 540 printf("Min possible number of TXDs per queue: %hu\n", 541 dev_info.tx_desc_lim.nb_min); 542 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 543 } 544 545 int 546 port_id_is_invalid(portid_t port_id, enum print_warning warning) 547 { 548 if (port_id == (portid_t)RTE_PORT_ALL) 549 return 0; 550 551 if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled) 552 return 0; 553 554 if (warning == ENABLED_WARN) 555 printf("Invalid port %d\n", port_id); 556 557 return 1; 558 } 559 560 static int 561 vlan_id_is_invalid(uint16_t vlan_id) 562 { 563 if (vlan_id < 4096) 564 return 0; 565 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 566 return 1; 567 } 568 569 static int 570 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 571 { 572 uint64_t pci_len; 573 574 if (reg_off & 0x3) { 575 printf("Port register offset 0x%X not aligned on a 4-byte " 576 "boundary\n", 577 (unsigned)reg_off); 578 return 1; 579 } 580 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 581 if (reg_off >= pci_len) { 582 printf("Port %d: register offset %u (0x%X) out of port PCI " 583 "resource (length=%"PRIu64")\n", 584 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 585 return 1; 586 } 587 return 0; 588 } 589 590 static int 591 reg_bit_pos_is_invalid(uint8_t bit_pos) 592 { 593 if (bit_pos <= 31) 594 return 0; 595 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 596 return 1; 597 } 598 599 #define display_port_and_reg_off(port_id, reg_off) \ 600 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 601 602 static inline void 603 
void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
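/*
 * Worked example (illustrative, not upstream code): setting bits [4, 7]
 * of a register to 0xA with port_reg_bit_field_set() gives l_bit = 4,
 * h_bit = 7 and max_v = (1 << 4) - 1 = 0xF, so the read-modify-write is:
 *
 *   reg_v &= ~(0xF << 4);   // clear the old field
 *   reg_v |= (0xA << 4);    // write the new value
 */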
void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}
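/*
 * Illustrative note (the driver name below is an assumption, not upstream
 * output): the memzone name is composed as "<driver>_<ring>_<port>_<queue>",
 * so for a hypothetical driver named "rte_xyz_pmd", RX queue 0 of port 0
 * would be looked up as "rte_xyz_pmd_rx_ring_0_0".
 */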
union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   uint8_t port_id,
#else
			   __rte_unused uint8_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;

		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}
"enabled" : "disabled", 959 nb_pkt_per_burst); 960 961 if (cur_fwd_eng == &tx_only_engine) 962 printf(" packet len=%u - nb packet segments=%d\n", 963 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 964 965 struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; 966 struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; 967 968 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 969 nb_fwd_lcores, nb_fwd_ports); 970 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 971 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 972 printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 973 rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, 974 rx_conf->rx_thresh.wthresh); 975 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 976 nb_txq, nb_txd, tx_conf->tx_free_thresh); 977 printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 978 tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, 979 tx_conf->tx_thresh.wthresh); 980 printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", 981 tx_conf->tx_rs_thresh, tx_conf->txq_flags); 982 } 983 984 void 985 port_rss_reta_info(portid_t port_id, 986 struct rte_eth_rss_reta_entry64 *reta_conf, 987 uint16_t nb_entries) 988 { 989 uint16_t i, idx, shift; 990 int ret; 991 992 if (port_id_is_invalid(port_id, ENABLED_WARN)) 993 return; 994 995 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 996 if (ret != 0) { 997 printf("Failed to get RSS RETA info, return code = %d\n", ret); 998 return; 999 } 1000 1001 for (i = 0; i < nb_entries; i++) { 1002 idx = i / RTE_RETA_GROUP_SIZE; 1003 shift = i % RTE_RETA_GROUP_SIZE; 1004 if (!(reta_conf[idx].mask & (1ULL << shift))) 1005 continue; 1006 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1007 i, reta_conf[idx].reta[shift]); 1008 } 1009 } 1010 1011 /* 1012 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1013 * key of the port. 1014 */ 1015 void 1016 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1017 { 1018 struct rte_eth_rss_conf rss_conf; 1019 uint8_t rss_key[10 * 4] = ""; 1020 uint64_t rss_hf; 1021 uint8_t i; 1022 int diag; 1023 1024 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1025 return; 1026 1027 rss_conf.rss_hf = 0; 1028 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1029 if (!strcmp(rss_info, rss_type_table[i].str)) 1030 rss_conf.rss_hf = rss_type_table[i].rss_type; 1031 } 1032 1033 /* Get RSS hash key if asked to display it */ 1034 rss_conf.rss_key = (show_rss_key) ? 
/*
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
 * key of the port.
 */
void
port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
{
	struct rte_eth_rss_conf rss_conf;
	uint8_t rss_key[10 * 4] = "";
	uint64_t rss_hf;
	uint8_t i;
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rss_conf.rss_hf = 0;
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (!strcmp(rss_info, rss_type_table[i].str))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	rss_conf.rss_key_len = sizeof(rss_key);
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		switch (diag) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		default:
			printf("operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	printf("RSS functions:\n ");
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < sizeof(rss_key); i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}

void
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
			 uint hash_key_len)
{
	struct rte_eth_rss_conf rss_conf;
	int diag;
	unsigned int i;

	rss_conf.rss_key = NULL;
	rss_conf.rss_key_len = hash_key_len;
	rss_conf.rss_hf = 0;
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (!strcmp(rss_type_table[i].str, rss_type))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag == 0) {
		rss_conf.rss_key = hash_key;
		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
	if (diag == 0)
		return;

	switch (diag) {
	case -ENODEV:
		printf("port index %d invalid\n", port_id);
		break;
	case -ENOTSUP:
		printf("operation not supported by device\n");
		break;
	default:
		printf("operation failed - diag=%d\n", diag);
		break;
	}
}

/*
 * Setup forwarding configuration for each logical core.
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t  nb_extra;
	lcoreid_t  nb_fc;
	lcoreid_t  nb_lc;
	lcoreid_t  lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}
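/*
 * Worked example (illustrative, not upstream code): with nb_fs = 10
 * streams over nb_fc = 4 lcores, nb_fs_per_lcore = 2 and nb_extra = 2,
 * so nb_lc = 2. Lcores 0-1 each take 2 streams (indexes 0-1 and 2-3),
 * then the 2 "extra" lcores 2-3 each take 3 (indexes 4-6 and 7-9).
 */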
Configuration " 1162 "must be changed to have an even number of ports, " 1163 "or relaunch application with " 1164 "--port-topology=chained\n\n"); 1165 } 1166 1167 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1168 cur_fwd_config.nb_fwd_streams = 1169 (streamid_t) cur_fwd_config.nb_fwd_ports; 1170 1171 /* reinitialize forwarding streams */ 1172 init_fwd_streams(); 1173 1174 /* 1175 * In the simple forwarding test, the number of forwarding cores 1176 * must be lower or equal to the number of forwarding ports. 1177 */ 1178 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1179 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1180 cur_fwd_config.nb_fwd_lcores = 1181 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1182 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1183 1184 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1185 if (port_topology != PORT_TOPOLOGY_LOOP) 1186 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1187 else 1188 j = i; 1189 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1190 fwd_streams[i]->rx_queue = 0; 1191 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1192 fwd_streams[i]->tx_queue = 0; 1193 fwd_streams[i]->peer_addr = j; 1194 fwd_streams[i]->retry_enabled = retry_enabled; 1195 1196 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1197 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1198 fwd_streams[j]->rx_queue = 0; 1199 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1200 fwd_streams[j]->tx_queue = 0; 1201 fwd_streams[j]->peer_addr = i; 1202 fwd_streams[j]->retry_enabled = retry_enabled; 1203 } 1204 } 1205 } 1206 1207 /** 1208 * For the RSS forwarding test, each core is assigned on every port a transmit 1209 * queue whose index is the index of the core itself. This approach limits the 1210 * maximumm number of processing cores of the RSS test to the maximum number of 1211 * TX queues supported by the devices. 1212 * 1213 * Each core is assigned a single stream, each stream being composed of 1214 * a RX queue to poll on a RX port for input messages, associated with 1215 * a TX queue of a TX port where to send forwarded packets. 
/**
 * For the RSS forwarding test, each core is assigned on every port a transmit
 * queue whose index is the index of the core itself. This approach limits the
 * maximum number of processing cores of the RSS test to the maximum number of
 * TX queues supported by the devices.
 *
 * Each core is assigned a single stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets.
 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
 * following rules:
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl = RxQj
 */
static void
rss_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	lcoreid_t  lc_id;

	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_streams; lc_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[lc_id];

		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		/*
		 * if we are in loopback, simply send stuff out through the
		 * ingress port
		 */
		if (port_topology == PORT_TOPOLOGY_LOOP)
			txp = rxp;

		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		fs->retry_enabled = retry_enabled;
		rxq = (queueid_t) (rxq + 1);
		if (rxq < nb_q)
			continue;
		/*
		 * rxq == nb_q
		 * Restart from RX queue 0 on next RX port
		 */
		rxq = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp = (portid_t) (rxp + 1);
	}
}
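/*
 * Worked example (illustrative, not upstream code): with 2 forwarding
 * ports and nb_q = 2, the 4 streams built above are
 *   (RxP0, Q0) -> (TxP1, Q0),  (RxP0, Q1) -> (TxP1, Q1),
 *   (RxP1, Q0) -> (TxP0, Q0),  (RxP1, Q1) -> (TxP0, Q1)
 * which matches the TxPk = RxPi +/- 1 and TxQl = RxQj rules above.
 */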
/**
 * For the DCB forwarding test, each core is assigned to a traffic class.
 *
 * Each core is assigned a multi-stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets. All RX and
 * TX queues are mapping to the same traffic class.
 * If VMDQ and DCB co-exist, each traffic class on different POOLs shares
 * the same core.
 */
static void
dcb_fwd_config_setup(void)
{
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;
	lcoreid_t  lc_id;
	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;
	uint8_t tc = 0;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();
	sm_id = 0;
	txp = 1;
	/* get the dcb info on the first RX and TX ports */
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		fwd_lcores[lc_id]->stream_nb = 0;
		fwd_lcores[lc_id]->stream_idx = sm_id;
		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* if nb_queue is zero, this tc is not enabled
			 * on the POOL
			 */
			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
				break;
			k = fwd_lcores[lc_id]->stream_nb +
				fwd_lcores[lc_id]->stream_idx;
			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
			for (j = 0; j < nb_rx_queue; j++) {
				struct fwd_stream *fs;

				fs = fwd_streams[k + j];
				fs->rx_port = fwd_ports_ids[rxp];
				fs->rx_queue = rxq + j;
				fs->tx_port = fwd_ports_ids[txp];
				fs->tx_queue = txq + j % nb_tx_queue;
				fs->peer_addr = fs->tx_port;
				fs->retry_enabled = retry_enabled;
			}
			fwd_lcores[lc_id]->stream_nb +=
				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
		}
		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

		tc++;
		if (tc < rxp_dcb_info.nb_tcs)
			continue;
		/* Restart from TC 0 on next RX port */
		tc = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp++;
		if (rxp >= nb_fwd_ports)
			return;
		/* get the dcb information on next RX and TX ports */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
	}
}

static void
icmp_echo_config_setup(void)
{
	portid_t  rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t  sm_id;

	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __FUNCTION__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf("  core=%d:\n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;

			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fs->rx_port;
			fs->tx_queue = rxq;
			fs->peer_addr = fs->tx_port;
			fs->retry_enabled = retry_enabled;
			if (verbose_level > 0)
				printf("  stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}
void
fwd_config_setup(void)
{
	cur_fwd_config.fwd_eng = cur_fwd_eng;
	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
		icmp_echo_config_setup();
		return;
	}
	if ((nb_rxq > 1) && (nb_txq > 1)) {
		if (dcb_config)
			dcb_fwd_config_setup();
		else
			rss_fwd_config_setup();
	} else
		simple_fwd_config_setup();
}

void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t  lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
	       "NUMA support %s, MP over anonymous pages %s\n",
	       cfg->fwd_eng->fwd_mode_name,
	       retry_enabled == 0 ? "" : " with retry",
	       cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
	       numa_support == 1 ? "enabled" : "disabled",
	       mp_anon != 0 ? "enabled" : "disabled");

	if (retry_enabled)
		printf("TX retry num: %u, delay between TX retries: %uus\n",
		       burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}

int
set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
{
	unsigned int i;
	unsigned int lcore_cpuid;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_lc; i++) {
		lcore_cpuid = lcorelist[i];
		if (!rte_lcore_is_enabled(lcore_cpuid)) {
			printf("lcore %u not enabled\n", lcore_cpuid);
			return -1;
		}
		if (lcore_cpuid == rte_get_master_lcore()) {
			printf("lcore %u cannot be masked on for running "
			       "packet forwarding, which is the master lcore "
			       "and reserved for command line parsing only\n",
			       lcore_cpuid);
			return -1;
		}
		if (record_now)
			fwd_lcores_cpuids[i] = lcore_cpuid;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_lcores = (lcoreid_t) nb_lc;
	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
		printf("previous number of forwarding cores %u - changed to "
		       "number of configured cores %u\n",
		       (unsigned int) nb_fwd_lcores, nb_lc);
		nb_fwd_lcores = (lcoreid_t) nb_lc;
	}

	return 0;
}
int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int lcorelist[64];
	unsigned int nb_lc;
	unsigned int i;

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");
		return -1;
	}
	nb_lc = 0;
	for (i = 0; i < 64; i++) {
		if (!((uint64_t)(1ULL << i) & lcoremask))
			continue;
		lcorelist[nb_lc++] = i;
	}
	return set_fwd_lcores_list(lcorelist, nb_lc);
}
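/*
 * Worked example (illustrative, not upstream code): a mask of 0xf0
 * expands to the lcore list {4, 5, 6, 7}, so
 *
 *   set_fwd_lcores_mask(0xf0);
 *
 * is equivalent to set_fwd_lcores_list((unsigned int[]){4, 5, 6, 7}, 4).
 */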
void
set_fwd_lcores_number(uint16_t nb_lc)
{
	if (nb_lc > nb_cfg_lcores) {
		printf("nb fwd cores %u > %u (max. number of configured "
		       "lcores) - ignored\n",
		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
		return;
	}
	nb_fwd_lcores = (lcoreid_t) nb_lc;
	printf("Number of forwarding cores set to %u\n",
	       (unsigned int) nb_fwd_lcores);
}

void
set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
{
	unsigned int i;
	portid_t port_id;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_pt; i++) {
		port_id = (portid_t) portlist[i];
		if (port_id_is_invalid(port_id, ENABLED_WARN))
			return;
		if (record_now)
			fwd_ports_ids[i] = port_id;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_ports = (portid_t) nb_pt;
	if (nb_fwd_ports != (portid_t) nb_pt) {
		printf("previous number of forwarding ports %u - changed to "
		       "number of configured ports %u\n",
		       (unsigned int) nb_fwd_ports, nb_pt);
		nb_fwd_ports = (portid_t) nb_pt;
	}
}

void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int portlist[64];
	unsigned int nb_pt;
	unsigned int i;

	if (portmask == 0) {
		printf("Invalid NULL mask of ports\n");
		return;
	}
	nb_pt = 0;
	for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) {
		if (!((uint64_t)(1ULL << i) & portmask))
			continue;
		portlist[nb_pt++] = i;
	}
	set_fwd_ports_list(portlist, nb_pt);
}

void
set_fwd_ports_number(uint16_t nb_pt)
{
	if (nb_pt > nb_cfg_ports) {
		printf("nb fwd ports %u > %u (number of configured "
		       "ports) - ignored\n",
		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
		return;
	}
	nb_fwd_ports = (portid_t) nb_pt;
	printf("Number of forwarding ports set to %u\n",
	       (unsigned int) nb_fwd_ports);
}

int
port_is_forwarding(portid_t port_id)
{
	unsigned int i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return -1;

	for (i = 0; i < nb_fwd_ports; i++) {
		if (fwd_ports_ids[i] == port_id)
			return 1;
	}

	return 0;
}

void
set_nb_pkt_per_burst(uint16_t nb)
{
	if (nb > MAX_PKT_BURST) {
		printf("nb pkt per burst: %u > %u (maximum packet per burst) "
		       " ignored\n",
		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
		return;
	}
	nb_pkt_per_burst = nb;
	printf("Number of packets per burst set to %u\n",
	       (unsigned int) nb_pkt_per_burst);
}

static const char *
tx_split_get_name(enum tx_pkt_split split)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (tx_split_name[i].split == split)
			return tx_split_name[i].name;
	}
	return NULL;
}

void
set_tx_pkt_split(const char *name)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (strcmp(tx_split_name[i].name, name) == 0) {
			tx_pkt_split = tx_split_name[i].split;
			return;
		}
	}
	printf("unknown value: \"%s\"\n", name);
}

void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}

void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length is lower than or equal to the
	 * mbuf data size.
	 * Check also that the total packet length is greater than or equal
	 * to the size of an empty UDP/IP packet
	 * (sizeof(struct ether_hdr) + 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
		       (unsigned) tx_pkt_len,
		       (int)(sizeof(struct ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length  = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}
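/*
 * Worked example (illustrative, not upstream code): with
 * seg_lengths = {64, 128} and nb_segs = 2, the total is 192 bytes, which
 * passes the minimum of sizeof(struct ether_hdr) + 20 + 8 = 42 bytes
 * (Ethernet + IPv4 + UDP headers), so tx_pkt_length becomes 192 and
 * tx_pkt_nb_segs becomes 2.
 */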
char*
list_pkt_forwarding_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

char*
list_pkt_forwarding_retry_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			if (fwd_eng == &rx_only_engine)
				continue;
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) -
				strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) -
				strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
	struct fwd_engine *fwd_eng;
	unsigned i;

	i = 0;
	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
			printf("Set %s packet forwarding mode%s\n",
			       fwd_mode_name,
			       retry_enabled == 0 ? "" : " with retry");
			cur_fwd_eng = fwd_eng;
			return;
		}
		i++;
	}
	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
}
"" : " with retry"); 1796 cur_fwd_eng = fwd_eng; 1797 return; 1798 } 1799 i++; 1800 } 1801 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 1802 } 1803 1804 void 1805 set_verbose_level(uint16_t vb_level) 1806 { 1807 printf("Change verbose level from %u to %u\n", 1808 (unsigned int) verbose_level, (unsigned int) vb_level); 1809 verbose_level = vb_level; 1810 } 1811 1812 void 1813 vlan_extend_set(portid_t port_id, int on) 1814 { 1815 int diag; 1816 int vlan_offload; 1817 1818 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1819 return; 1820 1821 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1822 1823 if (on) 1824 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 1825 else 1826 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 1827 1828 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1829 if (diag < 0) 1830 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 1831 "diag=%d\n", port_id, on, diag); 1832 } 1833 1834 void 1835 rx_vlan_strip_set(portid_t port_id, int on) 1836 { 1837 int diag; 1838 int vlan_offload; 1839 1840 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1841 return; 1842 1843 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1844 1845 if (on) 1846 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 1847 else 1848 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 1849 1850 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1851 if (diag < 0) 1852 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 1853 "diag=%d\n", port_id, on, diag); 1854 } 1855 1856 void 1857 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 1858 { 1859 int diag; 1860 1861 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1862 return; 1863 1864 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 1865 if (diag < 0) 1866 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 1867 "diag=%d\n", port_id, queue_id, on, diag); 1868 } 1869 1870 void 1871 rx_vlan_filter_set(portid_t port_id, int on) 1872 { 1873 int diag; 1874 int vlan_offload; 1875 1876 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1877 return; 1878 1879 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1880 1881 if (on) 1882 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 1883 else 1884 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 1885 1886 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1887 if (diag < 0) 1888 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 1889 "diag=%d\n", port_id, on, diag); 1890 } 1891 1892 int 1893 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 1894 { 1895 int diag; 1896 1897 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1898 return 1; 1899 if (vlan_id_is_invalid(vlan_id)) 1900 return 1; 1901 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 1902 if (diag == 0) 1903 return 0; 1904 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 1905 "diag=%d\n", 1906 port_id, vlan_id, on, diag); 1907 return -1; 1908 } 1909 1910 void 1911 rx_vlan_all_filter_set(portid_t port_id, int on) 1912 { 1913 uint16_t vlan_id; 1914 1915 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1916 return; 1917 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 1918 if (rx_vft_set(port_id, vlan_id, on)) 1919 break; 1920 } 1921 } 1922 1923 void 1924 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 1925 { 1926 int diag; 1927 1928 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1929 return; 1930 1931 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 1932 if (diag == 0) 1933 return; 1934 1935 
printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 1936 "diag=%d\n", 1937 port_id, vlan_type, tp_id, diag); 1938 } 1939 1940 void 1941 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 1942 { 1943 int vlan_offload; 1944 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1945 return; 1946 if (vlan_id_is_invalid(vlan_id)) 1947 return; 1948 1949 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1950 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 1951 printf("Error, as QinQ has been enabled.\n"); 1952 return; 1953 } 1954 1955 tx_vlan_reset(port_id); 1956 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; 1957 ports[port_id].tx_vlan_id = vlan_id; 1958 } 1959 1960 void 1961 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 1962 { 1963 int vlan_offload; 1964 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1965 return; 1966 if (vlan_id_is_invalid(vlan_id)) 1967 return; 1968 if (vlan_id_is_invalid(vlan_id_outer)) 1969 return; 1970 1971 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1972 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 1973 printf("Error, as QinQ hasn't been enabled.\n"); 1974 return; 1975 } 1976 1977 tx_vlan_reset(port_id); 1978 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; 1979 ports[port_id].tx_vlan_id = vlan_id; 1980 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 1981 } 1982 1983 void 1984 tx_vlan_reset(portid_t port_id) 1985 { 1986 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1987 return; 1988 ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | 1989 TESTPMD_TX_OFFLOAD_INSERT_QINQ); 1990 ports[port_id].tx_vlan_id = 0; 1991 ports[port_id].tx_vlan_id_outer = 0; 1992 } 1993 1994 void 1995 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 1996 { 1997 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1998 return; 1999 2000 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2001 } 2002 2003 void 2004 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2005 { 2006 uint16_t i; 2007 uint8_t existing_mapping_found = 0; 2008 2009 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2010 return; 2011 2012 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2013 return; 2014 2015 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2016 printf("map_value not in required range 0..%d\n", 2017 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2018 return; 2019 } 2020 2021 if (!is_rx) { /*then tx*/ 2022 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2023 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2024 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2025 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2026 existing_mapping_found = 1; 2027 break; 2028 } 2029 } 2030 if (!existing_mapping_found) { /* A new additional mapping... */ 2031 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2032 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2033 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2034 nb_tx_queue_stats_mappings++; 2035 } 2036 } 2037 else { /*rx*/ 2038 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2039 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2040 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2041 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2042 existing_mapping_found = 1; 2043 break; 2044 } 2045 } 2046 if (!existing_mapping_found) { /* A new additional mapping... 
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) :
	    (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* then tx */
		/* Update the mapping in place if this queue already has one. */
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
				tx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	} else { /* rx */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}

static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n    vlan_tci: 0x%04x, ", mask->vlan_tci_mask);

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf("mac_addr: 0x%02x", mask->mac_addr_byte_mask);
	else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf("mac_addr: 0x%02x, tunnel_type: 0x%01x, tunnel_id: 0x%08x",
		       mask->mac_addr_byte_mask, mask->tunnel_type_mask,
		       mask->tunnel_id_mask);
	else {
		printf("src_ipv4: 0x%08x, dst_ipv4: 0x%08x,"
		       " src_port: 0x%04x, dst_port: 0x%04x",
		       mask->ipv4_mask.src_ip, mask->ipv4_mask.dst_ip,
		       mask->src_port_mask, mask->dst_port_mask);

		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x,"
		       " dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       mask->ipv6_mask.src_ip[0], mask->ipv6_mask.src_ip[1],
		       mask->ipv6_mask.src_ip[2], mask->ipv6_mask.src_ip[3],
		       mask->ipv6_mask.dst_ip[0], mask->ipv6_mask.dst_ip[1],
		       mask->ipv6_mask.dst_ip[2], mask->ipv6_mask.dst_ip[3]);
	}

	printf("\n");
}

static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n    RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n    L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n    L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n    L4_PAYLOAD: ");
		else
			printf("\n    UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf("  %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}

static char *
flowtype_to_str(uint16_t flow_type)
{
	struct flow_type_info {
		char str[32];
		uint16_t ftype;
	};

	uint8_t i;
	static struct flow_type_info flowtype_str_table[] = {
		{"raw", RTE_ETH_FLOW_RAW},
		{"ipv4", RTE_ETH_FLOW_IPV4},
		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
		{"ipv6", RTE_ETH_FLOW_IPV6},
		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
		{"port", RTE_ETH_FLOW_PORT},
		{"vxlan", RTE_ETH_FLOW_VXLAN},
		{"geneve", RTE_ETH_FLOW_GENEVE},
		{"nvgre", RTE_ETH_FLOW_NVGRE},
	};

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}
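/*
 * Usage sketch: flowtype_to_str() returns NULL for unknown types, so
 * callers guard the result before printing, e.g.:
 *
 *	char *p = flowtype_to_str(RTE_ETH_FLOW_NONFRAG_IPV4_TCP);
 *	printf("%s\n", p ? p : "unknown");	// prints "ipv4-tcp"
 */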
static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n    %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}

static inline void
print_fdir_flow_type(uint32_t flow_types_mask)
{
	int i;
	char *p;

	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
		if (!(flow_types_mask & (1 << i)))
			continue;
		p = flowtype_to_str(i);
		if (p)
			printf(" %s", p);
		else
			printf(" unknown");
	}
	printf("\n");
}
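/*
 * flow_types_mask packs one bit per RTE_ETH_FLOW_* index, so a mask of
 * (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
 * makes the helper above print " ipv4-tcp ipv4-udp".
 */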
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;
	int ret;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
	if (ret < 0) {
		printf("\n FDIR is not supported on port %-2d\n",
		       port_id);
		return;
	}

	memset(&fdir_info, 0, sizeof(fdir_info));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				RTE_ETH_FILTER_INFO, &fdir_info);
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				RTE_ETH_FILTER_STATS, &fdir_stat);
	printf("\n  %s FDIR infos for port %-2d     %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf("  MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf("  PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf("  PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf("  PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf("  SIGNATURE\n");
	else
		printf("  DISABLE\n");
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf("  SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf("  FLEX PAYLOAD INFO:\n");
	printf("  max_len:      %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
	       "  payload_unit: %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
	       "  bitmask_unit: %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
	       fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
	       fdir_info.flex_payload_unit,
	       fdir_info.max_flex_payload_segment_num,
	       fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf("  MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf("  FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf("  FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
	       "  add:           %-10"PRIu64"  remove:        %"PRIu64"\n"
	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf("  %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}

void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	/* Overwrite an existing mask for this flow type if there is one. */
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Cannot set flex"
			       " mask for flow_type(%u).", cfg->flow_type);
			return;
		}
	}
	(void)rte_memcpy(&flex_conf->flex_mask[idx],
			 cfg,
			 sizeof(struct rte_eth_fdir_flex_mask));
}

void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Cannot set"
			       " flex payload for type(%u).", cfg->type);
			return;
		}
	}
	(void)rte_memcpy(&flex_conf->flex_set[idx],
			 cfg,
			 sizeof(struct rte_eth_flex_payload_cfg));
}

void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (is_rx)
		diag = rte_eth_dev_set_vf_rx(port_id, vf, on);
	else
		diag = rte_eth_dev_set_vf_tx(port_id, vf, on);
	if (diag == 0)
		return;
	if (is_rx)
		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
		       "diag=%d\n", port_id, diag);
	else
		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
		       "diag=%d\n", port_id, diag);
}

void
set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;
	diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
	if (diag == 0)
		return;
	printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
	       "diag=%d\n", port_id, diag);
}
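/*
 * The two rate-limit helpers below take rates in Mbps, the same unit as
 * link.link_speed, which is why a plain ">" comparison against the link
 * speed is a sufficient sanity check. Hypothetical example: capping
 * queue 0 of port 0 at 1000 Mbps on a 10G link:
 *
 *	set_queue_rate_limit(0, 0, 1000);	// returns 0 on success
 */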
int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	rte_eth_link_get_nowait(port_id, &link);
	if (rate > link.link_speed) {
		printf("Invalid rate value:%u bigger than link speed: %u\n",
		       rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}

int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag;
	struct rte_eth_link link;

	if (q_msk == 0)
		return 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	rte_eth_link_get_nowait(port_id, &link);
	if (rate > link.link_speed) {
		printf("Invalid rate value:%u bigger than link speed: %u\n",
		       rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}

/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * the caller to supply a contiguous array of multicast MAC addresses.
 * To comply with this constraint, the set of multicast addresses recorded
 * in the pool is systematically compacted at the beginning of the pool.
 * Hence, when a multicast address is removed from the pool, all following
 * addresses, if any, are copied back to keep the set contiguous.
 */
#define MCAST_POOL_INC 32

static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
						    MCAST_POOL_INC);
	mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
						mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}
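/*
 * Growth arithmetic: with MCAST_POOL_INC == 32, realloc() only runs when
 * mc_addr_nb is a multiple of 32. The very first add reallocates NULL to
 * 32 entries; the 33rd add grows the pool from 32 to 64 entries; adds
 * 34..64 merely increment mc_addr_nb.
 */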
static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
}

static void
eth_port_multicast_addr_list_set(uint8_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag == 0)
		return;
	printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
	       port_id, port->mc_addr_nb, -diag);
}

void
mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	if (mcast_addr_pool_extend(port) != 0)
		return;
	ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
	eth_port_multicast_addr_list_set(port_id);
}

void
mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	eth_port_multicast_addr_list_set(port_id);
}

void
port_dcb_info_display(uint8_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get dcb infos on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n  TC :        ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n  Priority :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n  BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n  RXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n  RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n  TXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n  TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}