1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 /* BSD LICENSE 34 * 35 * Copyright 2013-2014 6WIND S.A. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name of 6WIND S.A. nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 62 */ 63 64 #include <stdarg.h> 65 #include <errno.h> 66 #include <stdio.h> 67 #include <string.h> 68 #include <stdarg.h> 69 #include <stdint.h> 70 #include <inttypes.h> 71 72 #include <sys/queue.h> 73 74 #include <rte_common.h> 75 #include <rte_byteorder.h> 76 #include <rte_debug.h> 77 #include <rte_log.h> 78 #include <rte_memory.h> 79 #include <rte_memcpy.h> 80 #include <rte_memzone.h> 81 #include <rte_launch.h> 82 #include <rte_eal.h> 83 #include <rte_per_lcore.h> 84 #include <rte_lcore.h> 85 #include <rte_atomic.h> 86 #include <rte_branch_prediction.h> 87 #include <rte_mempool.h> 88 #include <rte_mbuf.h> 89 #include <rte_interrupts.h> 90 #include <rte_pci.h> 91 #include <rte_ether.h> 92 #include <rte_ethdev.h> 93 #include <rte_string_fns.h> 94 #include <rte_cycles.h> 95 96 #include "testpmd.h" 97 98 static char *flowtype_to_str(uint16_t flow_type); 99 100 static const struct { 101 enum tx_pkt_split split; 102 const char *name; 103 } tx_split_name[] = { 104 { 105 .split = TX_PKT_SPLIT_OFF, 106 .name = "off", 107 }, 108 { 109 .split = TX_PKT_SPLIT_ON, 110 .name = "on", 111 }, 112 { 113 .split = TX_PKT_SPLIT_RND, 114 .name = "rand", 115 }, 116 }; 117 118 struct rss_type_info { 119 char str[32]; 120 uint64_t rss_type; 121 }; 122 123 static const struct rss_type_info rss_type_table[] = { 124 { "ipv4", ETH_RSS_IPV4 }, 125 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 126 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 127 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 128 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 129 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 130 { "ipv6", ETH_RSS_IPV6 }, 131 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 132 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 133 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 134 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 135 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 136 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 137 { "ipv6-ex", ETH_RSS_IPV6_EX }, 138 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 139 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 140 { "port", ETH_RSS_PORT }, 141 { "vxlan", ETH_RSS_VXLAN }, 142 { "geneve", ETH_RSS_GENEVE }, 143 { "nvgre", ETH_RSS_NVGRE }, 144 145 }; 146 147 static void 148 print_ethaddr(const char *name, struct ether_addr *eth_addr) 149 { 150 char buf[ETHER_ADDR_FMT_SIZE]; 151 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 152 printf("%s%s", name, buf); 153 } 154 155 void 156 nic_stats_display(portid_t port_id) 157 { 158 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 159 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 160 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 161 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 162 uint64_t mpps_rx, mpps_tx; 163 struct rte_eth_stats stats; 164 struct rte_port *port = &ports[port_id]; 165 uint8_t i; 166 portid_t pid; 167 168 static const char *nic_stats_border = "########################"; 169 170 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 171 printf("Valid port range is [0"); 172 FOREACH_PORT(pid, ports) 173 printf(", %d", pid); 174 printf("]\n"); 175 return; 176 } 177 rte_eth_stats_get(port_id, &stats); 178 printf("\n %s NIC statistics for port %-2d %s\n", 179 nic_stats_border, port_id, nic_stats_border); 180 181 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 182 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 183 "%-"PRIu64"\n", 184 stats.ipackets, stats.imissed, stats.ibytes); 185 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 186 printf(" RX-nombuf: %-10"PRIu64"\n", 187 stats.rx_nombuf); 188 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 189 "%-"PRIu64"\n", 190 stats.opackets, stats.oerrors, stats.obytes); 191 } 192 else { 193 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 194 " RX-bytes: %10"PRIu64"\n", 195 stats.ipackets, stats.ierrors, stats.ibytes); 196 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 197 printf(" RX-nombuf: %10"PRIu64"\n", 198 stats.rx_nombuf); 199 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 200 " TX-bytes: %10"PRIu64"\n", 201 stats.opackets, stats.oerrors, stats.obytes); 202 } 203 204 if (port->rx_queue_stats_mapping_enabled) { 205 printf("\n"); 206 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 207 printf(" Stats reg %2d RX-packets: %10"PRIu64 208 " RX-errors: %10"PRIu64 209 " RX-bytes: %10"PRIu64"\n", 210 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 211 } 212 } 213 if (port->tx_queue_stats_mapping_enabled) { 214 printf("\n"); 215 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 216 printf(" Stats reg %2d TX-packets: %10"PRIu64 217 " TX-bytes: %10"PRIu64"\n", 218 i, stats.q_opackets[i], stats.q_obytes[i]); 219 } 220 } 221 222 diff_cycles = prev_cycles[port_id]; 223 prev_cycles[port_id] = rte_rdtsc(); 224 if (diff_cycles > 0) 225 diff_cycles = prev_cycles[port_id] - diff_cycles; 226 227 diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id]; 228 diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id]; 229 prev_pkts_rx[port_id] = stats.ipackets; 230 prev_pkts_tx[port_id] = stats.opackets; 231 mpps_rx = diff_cycles > 0 ? 232 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 233 mpps_tx = diff_cycles > 0 ? 234 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 235 printf("\n Throughput (since last show)\n"); 236 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 237 mpps_rx, mpps_tx); 238 239 printf(" %s############################%s\n", 240 nic_stats_border, nic_stats_border); 241 } 242 243 void 244 nic_stats_clear(portid_t port_id) 245 { 246 portid_t pid; 247 248 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 249 printf("Valid port range is [0"); 250 FOREACH_PORT(pid, ports) 251 printf(", %d", pid); 252 printf("]\n"); 253 return; 254 } 255 rte_eth_stats_reset(port_id); 256 printf("\n NIC statistics for port %d cleared\n", port_id); 257 } 258 259 void 260 nic_xstats_display(portid_t port_id) 261 { 262 struct rte_eth_xstat *xstats; 263 int cnt_xstats, idx_xstat; 264 struct rte_eth_xstat_name *xstats_names; 265 266 printf("###### NIC extended statistics for port %-2d\n", port_id); 267 if (!rte_eth_dev_is_valid_port(port_id)) { 268 printf("Error: Invalid port number %i\n", port_id); 269 return; 270 } 271 272 /* Get count */ 273 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 274 if (cnt_xstats < 0) { 275 printf("Error: Cannot get count of xstats\n"); 276 return; 277 } 278 279 /* Get id-name lookup table */ 280 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 281 if (xstats_names == NULL) { 282 printf("Cannot allocate memory for xstats lookup\n"); 283 return; 284 } 285 if (cnt_xstats != rte_eth_xstats_get_names( 286 port_id, xstats_names, cnt_xstats)) { 287 printf("Error: Cannot get xstats lookup\n"); 288 free(xstats_names); 289 return; 290 } 291 292 /* Get stats themselves */ 293 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 294 if (xstats == NULL) { 295 printf("Cannot allocate memory for xstats\n"); 296 free(xstats_names); 297 return; 298 } 299 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 300 printf("Error: Unable to get xstats\n"); 301 free(xstats_names); 302 free(xstats); 303 return; 304 } 305 306 /* Display xstats */ 307 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) 308 printf("%s: %"PRIu64"\n", 309 xstats_names[idx_xstat].name, 310 xstats[idx_xstat].value); 311 free(xstats_names); 312 free(xstats); 313 } 314 315 void 316 nic_xstats_clear(portid_t port_id) 317 { 318 rte_eth_xstats_reset(port_id); 319 } 320 321 void 322 nic_stats_mapping_display(portid_t port_id) 323 { 324 struct rte_port *port = &ports[port_id]; 325 uint16_t i; 326 portid_t pid; 327 328 static const char *nic_stats_mapping_border = "########################"; 329 330 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 331 printf("Valid port range is [0"); 332 FOREACH_PORT(pid, ports) 333 printf(", %d", pid); 334 printf("]\n"); 335 return; 336 } 337 338 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 339 printf("Port id %d - either does not support queue statistic mapping or" 340 " no queue statistic mapping set\n", port_id); 341 return; 342 } 343 344 printf("\n %s NIC statistics mapping for port %-2d %s\n", 345 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 346 347 if (port->rx_queue_stats_mapping_enabled) { 348 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 349 if (rx_queue_stats_mappings[i].port_id == port_id) { 350 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 351 rx_queue_stats_mappings[i].queue_id, 352 rx_queue_stats_mappings[i].stats_counter_id); 353 } 354 } 355 printf("\n"); 356 } 357 358 359 if (port->tx_queue_stats_mapping_enabled) { 360 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 361 if (tx_queue_stats_mappings[i].port_id == port_id) { 362 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 363 tx_queue_stats_mappings[i].queue_id, 364 tx_queue_stats_mappings[i].stats_counter_id); 365 } 366 } 367 } 368 369 printf(" %s####################################%s\n", 370 nic_stats_mapping_border, nic_stats_mapping_border); 371 } 372 373 void 374 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 375 { 376 struct rte_eth_rxq_info qinfo; 377 int32_t rc; 378 static const char *info_border = "*********************"; 379 380 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 381 if (rc != 0) { 382 printf("Failed to retrieve information for port: %hhu, " 383 "RX queue: %hu\nerror desc: %s(%d)\n", 384 port_id, queue_id, strerror(-rc), rc); 385 return; 386 } 387 388 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 389 info_border, port_id, queue_id, info_border); 390 391 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 392 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 393 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 394 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 395 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 396 printf("\nRX drop packets: %s", 397 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 398 printf("\nRX deferred start: %s", 399 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 400 printf("\nRX scattered packets: %s", 401 (qinfo.scattered_rx != 0) ? "on" : "off"); 402 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 403 printf("\n"); 404 } 405 406 void 407 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 408 { 409 struct rte_eth_txq_info qinfo; 410 int32_t rc; 411 static const char *info_border = "*********************"; 412 413 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 414 if (rc != 0) { 415 printf("Failed to retrieve information for port: %hhu, " 416 "TX queue: %hu\nerror desc: %s(%d)\n", 417 port_id, queue_id, strerror(-rc), rc); 418 return; 419 } 420 421 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 422 info_border, port_id, queue_id, info_border); 423 424 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 425 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 426 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 427 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 428 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 429 printf("\nTX flags: %#x", qinfo.conf.txq_flags); 430 printf("\nTX deferred start: %s", 431 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 432 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 433 printf("\n"); 434 } 435 436 void 437 port_infos_display(portid_t port_id) 438 { 439 struct rte_port *port; 440 struct ether_addr mac_addr; 441 struct rte_eth_link link; 442 struct rte_eth_dev_info dev_info; 443 int vlan_offload; 444 struct rte_mempool * mp; 445 static const char *info_border = "*********************"; 446 portid_t pid; 447 448 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 449 printf("Valid port range is [0"); 450 FOREACH_PORT(pid, ports) 451 printf(", %d", pid); 452 printf("]\n"); 453 return; 454 } 455 port = &ports[port_id]; 456 rte_eth_link_get_nowait(port_id, &link); 457 printf("\n%s Infos for port %-2d %s\n", 458 info_border, port_id, info_border); 459 rte_eth_macaddr_get(port_id, &mac_addr); 460 print_ethaddr("MAC address: ", &mac_addr); 461 printf("\nConnect to socket: %u", port->socket_id); 462 463 if (port_numa[port_id] != NUMA_NO_CONFIG) { 464 mp = mbuf_pool_find(port_numa[port_id]); 465 if (mp) 466 printf("\nmemory allocation on the socket: %d", 467 port_numa[port_id]); 468 } else 469 printf("\nmemory allocation on the socket: %u",port->socket_id); 470 471 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 472 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 473 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 474 ("full-duplex") : ("half-duplex")); 475 printf("Promiscuous mode: %s\n", 476 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 477 printf("Allmulticast mode: %s\n", 478 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 479 printf("Maximum number of MAC addresses: %u\n", 480 (unsigned int)(port->dev_info.max_mac_addrs)); 481 printf("Maximum number of MAC addresses of hash filtering: %u\n", 482 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 483 484 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 485 if (vlan_offload >= 0){ 486 printf("VLAN offload: \n"); 487 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 488 printf(" strip on \n"); 489 else 490 printf(" strip off \n"); 491 492 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 493 printf(" filter on \n"); 494 else 495 printf(" filter off \n"); 496 497 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 498 printf(" qinq(extend) on \n"); 499 else 500 printf(" qinq(extend) off \n"); 501 } 502 503 memset(&dev_info, 0, sizeof(dev_info)); 504 rte_eth_dev_info_get(port_id, &dev_info); 505 if (dev_info.hash_key_size > 0) 506 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 507 if (dev_info.reta_size > 0) 508 printf("Redirection table size: %u\n", dev_info.reta_size); 509 if (!dev_info.flow_type_rss_offloads) 510 printf("No flow type is supported.\n"); 511 else { 512 uint16_t i; 513 char *p; 514 515 printf("Supported flow types:\n"); 516 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; 517 i++) { 518 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 519 continue; 520 p = flowtype_to_str(i); 521 printf(" %s\n", (p ? p : "unknown")); 522 } 523 } 524 525 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 526 printf("Max possible number of RXDs per queue: %hu\n", 527 dev_info.rx_desc_lim.nb_max); 528 printf("Min possible number of RXDs per queue: %hu\n", 529 dev_info.rx_desc_lim.nb_min); 530 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 531 532 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 533 printf("Max possible number of TXDs per queue: %hu\n", 534 dev_info.tx_desc_lim.nb_max); 535 printf("Min possible number of TXDs per queue: %hu\n", 536 dev_info.tx_desc_lim.nb_min); 537 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 538 } 539 540 int 541 port_id_is_invalid(portid_t port_id, enum print_warning warning) 542 { 543 if (port_id == (portid_t)RTE_PORT_ALL) 544 return 0; 545 546 if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled) 547 return 0; 548 549 if (warning == ENABLED_WARN) 550 printf("Invalid port %d\n", port_id); 551 552 return 1; 553 } 554 555 static int 556 vlan_id_is_invalid(uint16_t vlan_id) 557 { 558 if (vlan_id < 4096) 559 return 0; 560 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 561 return 1; 562 } 563 564 static int 565 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 566 { 567 uint64_t pci_len; 568 569 if (reg_off & 0x3) { 570 printf("Port register offset 0x%X not aligned on a 4-byte " 571 "boundary\n", 572 (unsigned)reg_off); 573 return 1; 574 } 575 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 576 if (reg_off >= pci_len) { 577 printf("Port %d: register offset %u (0x%X) out of port PCI " 578 "resource (length=%"PRIu64")\n", 579 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 580 return 1; 581 } 582 return 0; 583 } 584 585 static int 586 reg_bit_pos_is_invalid(uint8_t bit_pos) 587 { 588 if (bit_pos <= 31) 589 return 0; 590 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 591 return 1; 592 } 593 594 #define display_port_and_reg_off(port_id, reg_off) \ 595 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 596 597 static inline void 598 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 599 { 600 display_port_and_reg_off(port_id, (unsigned)reg_off); 601 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 602 } 603 604 void 605 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 606 { 607 uint32_t reg_v; 608 609 610 if (port_id_is_invalid(port_id, ENABLED_WARN)) 611 return; 612 if (port_reg_off_is_invalid(port_id, reg_off)) 613 return; 614 if (reg_bit_pos_is_invalid(bit_x)) 615 return; 616 reg_v = port_id_pci_reg_read(port_id, reg_off); 617 display_port_and_reg_off(port_id, (unsigned)reg_off); 618 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 619 } 620 621 void 622 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 623 uint8_t bit1_pos, uint8_t bit2_pos) 624 { 625 uint32_t reg_v; 626 uint8_t l_bit; 627 uint8_t h_bit; 628 629 if (port_id_is_invalid(port_id, ENABLED_WARN)) 630 return; 631 if (port_reg_off_is_invalid(port_id, reg_off)) 632 return; 633 if (reg_bit_pos_is_invalid(bit1_pos)) 634 return; 635 if (reg_bit_pos_is_invalid(bit2_pos)) 636 return; 637 if (bit1_pos > bit2_pos) 638 l_bit = bit2_pos, h_bit = bit1_pos; 639 else 640 l_bit = bit1_pos, h_bit = bit2_pos; 641 642 reg_v = port_id_pci_reg_read(port_id, reg_off); 643 reg_v >>= l_bit; 644 if (h_bit < 31) 645 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 646 display_port_and_reg_off(port_id, (unsigned)reg_off); 647 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 648 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 649 } 650 651 void 652 port_reg_display(portid_t port_id, uint32_t reg_off) 653 { 654 uint32_t reg_v; 655 656 if (port_id_is_invalid(port_id, ENABLED_WARN)) 657 return; 658 if (port_reg_off_is_invalid(port_id, reg_off)) 659 return; 660 reg_v = port_id_pci_reg_read(port_id, reg_off); 661 display_port_reg_value(port_id, reg_off, reg_v); 662 } 663 664 void 665 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 666 uint8_t bit_v) 667 { 668 uint32_t reg_v; 669 670 if (port_id_is_invalid(port_id, ENABLED_WARN)) 671 return; 672 if (port_reg_off_is_invalid(port_id, reg_off)) 673 return; 674 if (reg_bit_pos_is_invalid(bit_pos)) 675 return; 676 if (bit_v > 1) { 677 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 678 return; 679 } 680 reg_v = port_id_pci_reg_read(port_id, reg_off); 681 if (bit_v == 0) 682 reg_v &= ~(1 << bit_pos); 683 else 684 reg_v |= (1 << bit_pos); 685 port_id_pci_reg_write(port_id, reg_off, reg_v); 686 display_port_reg_value(port_id, reg_off, reg_v); 687 } 688 689 void 690 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 691 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 692 { 693 uint32_t max_v; 694 uint32_t reg_v; 695 uint8_t l_bit; 696 uint8_t h_bit; 697 698 if (port_id_is_invalid(port_id, ENABLED_WARN)) 699 return; 700 if (port_reg_off_is_invalid(port_id, reg_off)) 701 return; 702 if (reg_bit_pos_is_invalid(bit1_pos)) 703 return; 704 if (reg_bit_pos_is_invalid(bit2_pos)) 705 return; 706 if (bit1_pos > bit2_pos) 707 l_bit = bit2_pos, h_bit = bit1_pos; 708 else 709 l_bit = bit1_pos, h_bit = bit2_pos; 710 711 if ((h_bit - l_bit) < 31) 712 max_v = (1 << (h_bit - l_bit + 1)) - 1; 713 else 714 max_v = 0xFFFFFFFF; 715 716 if (value > max_v) { 717 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 718 (unsigned)value, (unsigned)value, 719 (unsigned)max_v, (unsigned)max_v); 720 return; 721 } 722 reg_v = port_id_pci_reg_read(port_id, reg_off); 723 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 724 reg_v |= (value << l_bit); /* Set changed bits */ 725 port_id_pci_reg_write(port_id, reg_off, reg_v); 726 display_port_reg_value(port_id, reg_off, reg_v); 727 } 728 729 void 730 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 731 { 732 if (port_id_is_invalid(port_id, ENABLED_WARN)) 733 return; 734 if (port_reg_off_is_invalid(port_id, reg_off)) 735 return; 736 port_id_pci_reg_write(port_id, reg_off, reg_v); 737 display_port_reg_value(port_id, reg_off, reg_v); 738 } 739 740 void 741 port_mtu_set(portid_t port_id, uint16_t mtu) 742 { 743 int diag; 744 745 if (port_id_is_invalid(port_id, ENABLED_WARN)) 746 return; 747 diag = rte_eth_dev_set_mtu(port_id, mtu); 748 if (diag == 0) 749 return; 750 printf("Set MTU failed. diag=%d\n", diag); 751 } 752 753 /* 754 * RX/TX ring descriptors display functions. 755 */ 756 int 757 rx_queue_id_is_invalid(queueid_t rxq_id) 758 { 759 if (rxq_id < nb_rxq) 760 return 0; 761 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 762 return 1; 763 } 764 765 int 766 tx_queue_id_is_invalid(queueid_t txq_id) 767 { 768 if (txq_id < nb_txq) 769 return 0; 770 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 771 return 1; 772 } 773 774 static int 775 rx_desc_id_is_invalid(uint16_t rxdesc_id) 776 { 777 if (rxdesc_id < nb_rxd) 778 return 0; 779 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 780 rxdesc_id, nb_rxd); 781 return 1; 782 } 783 784 static int 785 tx_desc_id_is_invalid(uint16_t txdesc_id) 786 { 787 if (txdesc_id < nb_txd) 788 return 0; 789 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 790 txdesc_id, nb_txd); 791 return 1; 792 } 793 794 static const struct rte_memzone * 795 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) 796 { 797 char mz_name[RTE_MEMZONE_NAMESIZE]; 798 const struct rte_memzone *mz; 799 800 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 801 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 802 mz = rte_memzone_lookup(mz_name); 803 if (mz == NULL) 804 printf("%s ring memory zoneof (port %d, queue %d) not" 805 "found (zone name = %s\n", 806 ring_name, port_id, q_id, mz_name); 807 return mz; 808 } 809 810 union igb_ring_dword { 811 uint64_t dword; 812 struct { 813 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 814 uint32_t lo; 815 uint32_t hi; 816 #else 817 uint32_t hi; 818 uint32_t lo; 819 #endif 820 } words; 821 }; 822 823 struct igb_ring_desc_32_bytes { 824 union igb_ring_dword lo_dword; 825 union igb_ring_dword hi_dword; 826 union igb_ring_dword resv1; 827 union igb_ring_dword resv2; 828 }; 829 830 struct igb_ring_desc_16_bytes { 831 union igb_ring_dword lo_dword; 832 union igb_ring_dword hi_dword; 833 }; 834 835 static void 836 ring_rxd_display_dword(union igb_ring_dword dword) 837 { 838 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 839 (unsigned)dword.words.hi); 840 } 841 842 static void 843 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 844 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 845 uint8_t port_id, 846 #else 847 __rte_unused uint8_t port_id, 848 #endif 849 uint16_t desc_id) 850 { 851 struct igb_ring_desc_16_bytes *ring = 852 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 853 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 854 struct rte_eth_dev_info dev_info; 855 856 memset(&dev_info, 0, sizeof(dev_info)); 857 rte_eth_dev_info_get(port_id, &dev_info); 858 if (strstr(dev_info.driver_name, "i40e") != NULL) { 859 /* 32 bytes RX descriptor, i40e only */ 860 struct igb_ring_desc_32_bytes *ring = 861 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 862 ring[desc_id].lo_dword.dword = 863 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 864 ring_rxd_display_dword(ring[desc_id].lo_dword); 865 ring[desc_id].hi_dword.dword = 866 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 867 ring_rxd_display_dword(ring[desc_id].hi_dword); 868 ring[desc_id].resv1.dword = 869 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 870 ring_rxd_display_dword(ring[desc_id].resv1); 871 ring[desc_id].resv2.dword = 872 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 873 ring_rxd_display_dword(ring[desc_id].resv2); 874 875 return; 876 } 877 #endif 878 /* 16 bytes RX descriptor */ 879 ring[desc_id].lo_dword.dword = 880 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 881 ring_rxd_display_dword(ring[desc_id].lo_dword); 882 ring[desc_id].hi_dword.dword = 883 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 884 ring_rxd_display_dword(ring[desc_id].hi_dword); 885 } 886 887 static void 888 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 889 { 890 struct igb_ring_desc_16_bytes *ring; 891 struct igb_ring_desc_16_bytes txd; 892 893 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 894 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 895 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 896 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 897 (unsigned)txd.lo_dword.words.lo, 898 (unsigned)txd.lo_dword.words.hi, 899 (unsigned)txd.hi_dword.words.lo, 900 (unsigned)txd.hi_dword.words.hi); 901 } 902 903 void 904 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 905 { 906 const struct rte_memzone *rx_mz; 907 908 if (port_id_is_invalid(port_id, ENABLED_WARN)) 909 return; 910 if (rx_queue_id_is_invalid(rxq_id)) 911 return; 912 if (rx_desc_id_is_invalid(rxd_id)) 913 return; 914 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 915 if (rx_mz == NULL) 916 return; 917 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 918 } 919 920 void 921 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 922 { 923 const struct rte_memzone *tx_mz; 924 925 if (port_id_is_invalid(port_id, ENABLED_WARN)) 926 return; 927 if (tx_queue_id_is_invalid(txq_id)) 928 return; 929 if (tx_desc_id_is_invalid(txd_id)) 930 return; 931 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 932 if (tx_mz == NULL) 933 return; 934 ring_tx_descriptor_display(tx_mz, txd_id); 935 } 936 937 void 938 fwd_lcores_config_display(void) 939 { 940 lcoreid_t lc_id; 941 942 printf("List of forwarding lcores:"); 943 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 944 printf(" %2u", fwd_lcores_cpuids[lc_id]); 945 printf("\n"); 946 } 947 void 948 rxtx_config_display(void) 949 { 950 printf(" %s packet forwarding%s - CRC stripping %s - " 951 "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, 952 retry_enabled == 0 ? "" : " with retry", 953 rx_mode.hw_strip_crc ? "enabled" : "disabled", 954 nb_pkt_per_burst); 955 956 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 957 printf(" packet len=%u - nb packet segments=%d\n", 958 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 959 960 struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; 961 struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; 962 963 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 964 nb_fwd_lcores, nb_fwd_ports); 965 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 966 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 967 printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 968 rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, 969 rx_conf->rx_thresh.wthresh); 970 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 971 nb_txq, nb_txd, tx_conf->tx_free_thresh); 972 printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 973 tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, 974 tx_conf->tx_thresh.wthresh); 975 printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", 976 tx_conf->tx_rs_thresh, tx_conf->txq_flags); 977 } 978 979 void 980 port_rss_reta_info(portid_t port_id, 981 struct rte_eth_rss_reta_entry64 *reta_conf, 982 uint16_t nb_entries) 983 { 984 uint16_t i, idx, shift; 985 int ret; 986 987 if (port_id_is_invalid(port_id, ENABLED_WARN)) 988 return; 989 990 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 991 if (ret != 0) { 992 printf("Failed to get RSS RETA info, return code = %d\n", ret); 993 return; 994 } 995 996 for (i = 0; i < nb_entries; i++) { 997 idx = i / RTE_RETA_GROUP_SIZE; 998 shift = i % RTE_RETA_GROUP_SIZE; 999 if (!(reta_conf[idx].mask & (1ULL << shift))) 1000 continue; 1001 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1002 i, reta_conf[idx].reta[shift]); 1003 } 1004 } 1005 1006 /* 1007 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1008 * key of the port. 1009 */ 1010 void 1011 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1012 { 1013 struct rte_eth_rss_conf rss_conf; 1014 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1015 uint64_t rss_hf; 1016 uint8_t i; 1017 int diag; 1018 struct rte_eth_dev_info dev_info; 1019 uint8_t hash_key_size; 1020 1021 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1022 return; 1023 1024 memset(&dev_info, 0, sizeof(dev_info)); 1025 rte_eth_dev_info_get(port_id, &dev_info); 1026 if (dev_info.hash_key_size > 0 && 1027 dev_info.hash_key_size <= sizeof(rss_key)) 1028 hash_key_size = dev_info.hash_key_size; 1029 else { 1030 printf("dev_info did not provide a valid hash key size\n"); 1031 return; 1032 } 1033 1034 rss_conf.rss_hf = 0; 1035 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1036 if (!strcmp(rss_info, rss_type_table[i].str)) 1037 rss_conf.rss_hf = rss_type_table[i].rss_type; 1038 } 1039 1040 /* Get RSS hash key if asked to display it */ 1041 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1042 rss_conf.rss_key_len = hash_key_size; 1043 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1044 if (diag != 0) { 1045 switch (diag) { 1046 case -ENODEV: 1047 printf("port index %d invalid\n", port_id); 1048 break; 1049 case -ENOTSUP: 1050 printf("operation not supported by device\n"); 1051 break; 1052 default: 1053 printf("operation failed - diag=%d\n", diag); 1054 break; 1055 } 1056 return; 1057 } 1058 rss_hf = rss_conf.rss_hf; 1059 if (rss_hf == 0) { 1060 printf("RSS disabled\n"); 1061 return; 1062 } 1063 printf("RSS functions:\n "); 1064 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1065 if (rss_hf & rss_type_table[i].rss_type) 1066 printf("%s ", rss_type_table[i].str); 1067 } 1068 printf("\n"); 1069 if (!show_rss_key) 1070 return; 1071 printf("RSS key:\n"); 1072 for (i = 0; i < hash_key_size; i++) 1073 printf("%02X", rss_key[i]); 1074 printf("\n"); 1075 } 1076 1077 void 1078 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1079 uint hash_key_len) 1080 { 1081 struct rte_eth_rss_conf rss_conf; 1082 int diag; 1083 unsigned int i; 1084 1085 rss_conf.rss_key = NULL; 1086 rss_conf.rss_key_len = hash_key_len; 1087 rss_conf.rss_hf = 0; 1088 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1089 if (!strcmp(rss_type_table[i].str, rss_type)) 1090 rss_conf.rss_hf = rss_type_table[i].rss_type; 1091 } 1092 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1093 if (diag == 0) { 1094 rss_conf.rss_key = hash_key; 1095 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1096 } 1097 if (diag == 0) 1098 return; 1099 1100 switch (diag) { 1101 case -ENODEV: 1102 printf("port index %d invalid\n", port_id); 1103 break; 1104 case -ENOTSUP: 1105 printf("operation not supported by device\n"); 1106 break; 1107 default: 1108 printf("operation failed - diag=%d\n", diag); 1109 break; 1110 } 1111 } 1112 1113 /* 1114 * Setup forwarding configuration for each logical core. 1115 */ 1116 static void 1117 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1118 { 1119 streamid_t nb_fs_per_lcore; 1120 streamid_t nb_fs; 1121 streamid_t sm_id; 1122 lcoreid_t nb_extra; 1123 lcoreid_t nb_fc; 1124 lcoreid_t nb_lc; 1125 lcoreid_t lc_id; 1126 1127 nb_fs = cfg->nb_fwd_streams; 1128 nb_fc = cfg->nb_fwd_lcores; 1129 if (nb_fs <= nb_fc) { 1130 nb_fs_per_lcore = 1; 1131 nb_extra = 0; 1132 } else { 1133 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1134 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1135 } 1136 1137 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1138 sm_id = 0; 1139 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1140 fwd_lcores[lc_id]->stream_idx = sm_id; 1141 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1142 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1143 } 1144 1145 /* 1146 * Assign extra remaining streams, if any. 1147 */ 1148 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1149 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1150 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1151 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1152 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1153 } 1154 } 1155 1156 static void 1157 simple_fwd_config_setup(void) 1158 { 1159 portid_t i; 1160 portid_t j; 1161 portid_t inc = 2; 1162 1163 if (port_topology == PORT_TOPOLOGY_CHAINED || 1164 port_topology == PORT_TOPOLOGY_LOOP) { 1165 inc = 1; 1166 } else if (nb_fwd_ports % 2) { 1167 printf("\nWarning! Cannot handle an odd number of ports " 1168 "with the current port topology. Configuration " 1169 "must be changed to have an even number of ports, " 1170 "or relaunch application with " 1171 "--port-topology=chained\n\n"); 1172 } 1173 1174 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1175 cur_fwd_config.nb_fwd_streams = 1176 (streamid_t) cur_fwd_config.nb_fwd_ports; 1177 1178 /* reinitialize forwarding streams */ 1179 init_fwd_streams(); 1180 1181 /* 1182 * In the simple forwarding test, the number of forwarding cores 1183 * must be lower or equal to the number of forwarding ports. 1184 */ 1185 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1186 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1187 cur_fwd_config.nb_fwd_lcores = 1188 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1189 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1190 1191 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1192 if (port_topology != PORT_TOPOLOGY_LOOP) 1193 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1194 else 1195 j = i; 1196 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1197 fwd_streams[i]->rx_queue = 0; 1198 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1199 fwd_streams[i]->tx_queue = 0; 1200 fwd_streams[i]->peer_addr = j; 1201 fwd_streams[i]->retry_enabled = retry_enabled; 1202 1203 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1204 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1205 fwd_streams[j]->rx_queue = 0; 1206 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1207 fwd_streams[j]->tx_queue = 0; 1208 fwd_streams[j]->peer_addr = i; 1209 fwd_streams[j]->retry_enabled = retry_enabled; 1210 } 1211 } 1212 } 1213 1214 /** 1215 * For the RSS forwarding test all streams distributed over lcores. Each stream 1216 * being composed of a RX queue to poll on a RX port for input messages, 1217 * associated with a TX queue of a TX port where to send forwarded packets. 1218 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1219 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1220 * following rules: 1221 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1222 * - TxQl = RxQj 1223 */ 1224 static void 1225 rss_fwd_config_setup(void) 1226 { 1227 portid_t rxp; 1228 portid_t txp; 1229 queueid_t rxq; 1230 queueid_t nb_q; 1231 streamid_t sm_id; 1232 1233 nb_q = nb_rxq; 1234 if (nb_q > nb_txq) 1235 nb_q = nb_txq; 1236 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1237 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1238 cur_fwd_config.nb_fwd_streams = 1239 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1240 1241 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1242 cur_fwd_config.nb_fwd_lcores = 1243 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1244 1245 /* reinitialize forwarding streams */ 1246 init_fwd_streams(); 1247 1248 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1249 rxp = 0; rxq = 0; 1250 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1251 struct fwd_stream *fs; 1252 1253 fs = fwd_streams[sm_id]; 1254 1255 if ((rxp & 0x1) == 0) 1256 txp = (portid_t) (rxp + 1); 1257 else 1258 txp = (portid_t) (rxp - 1); 1259 /* 1260 * if we are in loopback, simply send stuff out through the 1261 * ingress port 1262 */ 1263 if (port_topology == PORT_TOPOLOGY_LOOP) 1264 txp = rxp; 1265 1266 fs->rx_port = fwd_ports_ids[rxp]; 1267 fs->rx_queue = rxq; 1268 fs->tx_port = fwd_ports_ids[txp]; 1269 fs->tx_queue = rxq; 1270 fs->peer_addr = fs->tx_port; 1271 fs->retry_enabled = retry_enabled; 1272 rxq = (queueid_t) (rxq + 1); 1273 if (rxq < nb_q) 1274 continue; 1275 /* 1276 * rxq == nb_q 1277 * Restart from RX queue 0 on next RX port 1278 */ 1279 rxq = 0; 1280 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1281 rxp = (portid_t) 1282 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1283 else 1284 rxp = (portid_t) (rxp + 1); 1285 } 1286 } 1287 1288 /** 1289 * For the DCB forwarding test, each core is assigned on each traffic class. 1290 * 1291 * Each core is assigned a multi-stream, each stream being composed of 1292 * a RX queue to poll on a RX port for input messages, associated with 1293 * a TX queue of a TX port where to send forwarded packets. All RX and 1294 * TX queues are mapping to the same traffic class. 1295 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 1296 * the same core 1297 */ 1298 static void 1299 dcb_fwd_config_setup(void) 1300 { 1301 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 1302 portid_t txp, rxp = 0; 1303 queueid_t txq, rxq = 0; 1304 lcoreid_t lc_id; 1305 uint16_t nb_rx_queue, nb_tx_queue; 1306 uint16_t i, j, k, sm_id = 0; 1307 uint8_t tc = 0; 1308 1309 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1310 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1311 cur_fwd_config.nb_fwd_streams = 1312 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1313 1314 /* reinitialize forwarding streams */ 1315 init_fwd_streams(); 1316 sm_id = 0; 1317 txp = 1; 1318 /* get the dcb info on the first RX and TX ports */ 1319 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1320 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1321 1322 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1323 fwd_lcores[lc_id]->stream_nb = 0; 1324 fwd_lcores[lc_id]->stream_idx = sm_id; 1325 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 1326 /* if the nb_queue is zero, means this tc is 1327 * not enabled on the POOL 1328 */ 1329 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 1330 break; 1331 k = fwd_lcores[lc_id]->stream_nb + 1332 fwd_lcores[lc_id]->stream_idx; 1333 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 1334 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 1335 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1336 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 1337 for (j = 0; j < nb_rx_queue; j++) { 1338 struct fwd_stream *fs; 1339 1340 fs = fwd_streams[k + j]; 1341 fs->rx_port = fwd_ports_ids[rxp]; 1342 fs->rx_queue = rxq + j; 1343 fs->tx_port = fwd_ports_ids[txp]; 1344 fs->tx_queue = txq + j % nb_tx_queue; 1345 fs->peer_addr = fs->tx_port; 1346 fs->retry_enabled = retry_enabled; 1347 } 1348 fwd_lcores[lc_id]->stream_nb += 1349 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 1350 } 1351 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 1352 1353 tc++; 1354 if (tc < rxp_dcb_info.nb_tcs) 1355 continue; 1356 /* Restart from TC 0 on next RX port */ 1357 tc = 0; 1358 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1359 rxp = (portid_t) 1360 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1361 else 1362 rxp++; 1363 if (rxp >= nb_fwd_ports) 1364 return; 1365 /* get the dcb information on next RX and TX ports */ 1366 if ((rxp & 0x1) == 0) 1367 txp = (portid_t) (rxp + 1); 1368 else 1369 txp = (portid_t) (rxp - 1); 1370 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 1371 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 1372 } 1373 } 1374 1375 static void 1376 icmp_echo_config_setup(void) 1377 { 1378 portid_t rxp; 1379 queueid_t rxq; 1380 lcoreid_t lc_id; 1381 uint16_t sm_id; 1382 1383 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 1384 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 1385 (nb_txq * nb_fwd_ports); 1386 else 1387 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1388 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1389 cur_fwd_config.nb_fwd_streams = 1390 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1391 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1392 cur_fwd_config.nb_fwd_lcores = 1393 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1394 if (verbose_level > 0) { 1395 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 1396 __FUNCTION__, 1397 cur_fwd_config.nb_fwd_lcores, 1398 cur_fwd_config.nb_fwd_ports, 1399 cur_fwd_config.nb_fwd_streams); 1400 } 1401 1402 /* reinitialize forwarding streams */ 1403 init_fwd_streams(); 1404 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1405 rxp = 0; rxq = 0; 1406 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1407 if (verbose_level > 0) 1408 printf(" core=%d: \n", lc_id); 1409 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1410 struct fwd_stream *fs; 1411 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1412 fs->rx_port = fwd_ports_ids[rxp]; 1413 fs->rx_queue = rxq; 1414 fs->tx_port = fs->rx_port; 1415 fs->tx_queue = rxq; 1416 fs->peer_addr = fs->tx_port; 1417 fs->retry_enabled = retry_enabled; 1418 if (verbose_level > 0) 1419 printf(" stream=%d port=%d rxq=%d txq=%d\n", 1420 sm_id, fs->rx_port, fs->rx_queue, 1421 fs->tx_queue); 1422 rxq = (queueid_t) (rxq + 1); 1423 if (rxq == nb_rxq) { 1424 rxq = 0; 1425 rxp = (portid_t) (rxp + 1); 1426 } 1427 } 1428 } 1429 } 1430 1431 void 1432 fwd_config_setup(void) 1433 { 1434 cur_fwd_config.fwd_eng = cur_fwd_eng; 1435 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 1436 icmp_echo_config_setup(); 1437 return; 1438 } 1439 if ((nb_rxq > 1) && (nb_txq > 1)){ 1440 if (dcb_config) 1441 dcb_fwd_config_setup(); 1442 else 1443 rss_fwd_config_setup(); 1444 } 1445 else 1446 simple_fwd_config_setup(); 1447 } 1448 1449 void 1450 pkt_fwd_config_display(struct fwd_config *cfg) 1451 { 1452 struct fwd_stream *fs; 1453 lcoreid_t lc_id; 1454 streamid_t sm_id; 1455 1456 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 1457 "NUMA support %s, MP over anonymous pages %s\n", 1458 cfg->fwd_eng->fwd_mode_name, 1459 retry_enabled == 0 ? "" : " with retry", 1460 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 1461 numa_support == 1 ? "enabled" : "disabled", 1462 mp_anon != 0 ? "enabled" : "disabled"); 1463 1464 if (retry_enabled) 1465 printf("TX retry num: %u, delay between TX retries: %uus\n", 1466 burst_tx_retry_num, burst_tx_delay_time); 1467 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 1468 printf("Logical Core %u (socket %u) forwards packets on " 1469 "%d streams:", 1470 fwd_lcores_cpuids[lc_id], 1471 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 1472 fwd_lcores[lc_id]->stream_nb); 1473 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1474 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1475 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 1476 "P=%d/Q=%d (socket %u) ", 1477 fs->rx_port, fs->rx_queue, 1478 ports[fs->rx_port].socket_id, 1479 fs->tx_port, fs->tx_queue, 1480 ports[fs->tx_port].socket_id); 1481 print_ethaddr("peer=", 1482 &peer_eth_addrs[fs->peer_addr]); 1483 } 1484 printf("\n"); 1485 } 1486 printf("\n"); 1487 } 1488 1489 int 1490 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 1491 { 1492 unsigned int i; 1493 unsigned int lcore_cpuid; 1494 int record_now; 1495 1496 record_now = 0; 1497 again: 1498 for (i = 0; i < nb_lc; i++) { 1499 lcore_cpuid = lcorelist[i]; 1500 if (! rte_lcore_is_enabled(lcore_cpuid)) { 1501 printf("lcore %u not enabled\n", lcore_cpuid); 1502 return -1; 1503 } 1504 if (lcore_cpuid == rte_get_master_lcore()) { 1505 printf("lcore %u cannot be masked on for running " 1506 "packet forwarding, which is the master lcore " 1507 "and reserved for command line parsing only\n", 1508 lcore_cpuid); 1509 return -1; 1510 } 1511 if (record_now) 1512 fwd_lcores_cpuids[i] = lcore_cpuid; 1513 } 1514 if (record_now == 0) { 1515 record_now = 1; 1516 goto again; 1517 } 1518 nb_cfg_lcores = (lcoreid_t) nb_lc; 1519 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 1520 printf("previous number of forwarding cores %u - changed to " 1521 "number of configured cores %u\n", 1522 (unsigned int) nb_fwd_lcores, nb_lc); 1523 nb_fwd_lcores = (lcoreid_t) nb_lc; 1524 } 1525 1526 return 0; 1527 } 1528 1529 int 1530 set_fwd_lcores_mask(uint64_t lcoremask) 1531 { 1532 unsigned int lcorelist[64]; 1533 unsigned int nb_lc; 1534 unsigned int i; 1535 1536 if (lcoremask == 0) { 1537 printf("Invalid NULL mask of cores\n"); 1538 return -1; 1539 } 1540 nb_lc = 0; 1541 for (i = 0; i < 64; i++) { 1542 if (! ((uint64_t)(1ULL << i) & lcoremask)) 1543 continue; 1544 lcorelist[nb_lc++] = i; 1545 } 1546 return set_fwd_lcores_list(lcorelist, nb_lc); 1547 } 1548 1549 void 1550 set_fwd_lcores_number(uint16_t nb_lc) 1551 { 1552 if (nb_lc > nb_cfg_lcores) { 1553 printf("nb fwd cores %u > %u (max. number of configured " 1554 "lcores) - ignored\n", 1555 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 1556 return; 1557 } 1558 nb_fwd_lcores = (lcoreid_t) nb_lc; 1559 printf("Number of forwarding cores set to %u\n", 1560 (unsigned int) nb_fwd_lcores); 1561 } 1562 1563 void 1564 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 1565 { 1566 unsigned int i; 1567 portid_t port_id; 1568 int record_now; 1569 1570 record_now = 0; 1571 again: 1572 for (i = 0; i < nb_pt; i++) { 1573 port_id = (portid_t) portlist[i]; 1574 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1575 return; 1576 if (record_now) 1577 fwd_ports_ids[i] = port_id; 1578 } 1579 if (record_now == 0) { 1580 record_now = 1; 1581 goto again; 1582 } 1583 nb_cfg_ports = (portid_t) nb_pt; 1584 if (nb_fwd_ports != (portid_t) nb_pt) { 1585 printf("previous number of forwarding ports %u - changed to " 1586 "number of configured ports %u\n", 1587 (unsigned int) nb_fwd_ports, nb_pt); 1588 nb_fwd_ports = (portid_t) nb_pt; 1589 } 1590 } 1591 1592 void 1593 set_fwd_ports_mask(uint64_t portmask) 1594 { 1595 unsigned int portlist[64]; 1596 unsigned int nb_pt; 1597 unsigned int i; 1598 1599 if (portmask == 0) { 1600 printf("Invalid NULL mask of ports\n"); 1601 return; 1602 } 1603 nb_pt = 0; 1604 for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) { 1605 if (! ((uint64_t)(1ULL << i) & portmask)) 1606 continue; 1607 portlist[nb_pt++] = i; 1608 } 1609 set_fwd_ports_list(portlist, nb_pt); 1610 } 1611 1612 void 1613 set_fwd_ports_number(uint16_t nb_pt) 1614 { 1615 if (nb_pt > nb_cfg_ports) { 1616 printf("nb fwd ports %u > %u (number of configured " 1617 "ports) - ignored\n", 1618 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 1619 return; 1620 } 1621 nb_fwd_ports = (portid_t) nb_pt; 1622 printf("Number of forwarding ports set to %u\n", 1623 (unsigned int) nb_fwd_ports); 1624 } 1625 1626 int 1627 port_is_forwarding(portid_t port_id) 1628 { 1629 unsigned int i; 1630 1631 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1632 return -1; 1633 1634 for (i = 0; i < nb_fwd_ports; i++) { 1635 if (fwd_ports_ids[i] == port_id) 1636 return 1; 1637 } 1638 1639 return 0; 1640 } 1641 1642 void 1643 set_nb_pkt_per_burst(uint16_t nb) 1644 { 1645 if (nb > MAX_PKT_BURST) { 1646 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 1647 " ignored\n", 1648 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 1649 return; 1650 } 1651 nb_pkt_per_burst = nb; 1652 printf("Number of packets per burst set to %u\n", 1653 (unsigned int) nb_pkt_per_burst); 1654 } 1655 1656 static const char * 1657 tx_split_get_name(enum tx_pkt_split split) 1658 { 1659 uint32_t i; 1660 1661 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 1662 if (tx_split_name[i].split == split) 1663 return tx_split_name[i].name; 1664 } 1665 return NULL; 1666 } 1667 1668 void 1669 set_tx_pkt_split(const char *name) 1670 { 1671 uint32_t i; 1672 1673 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 1674 if (strcmp(tx_split_name[i].name, name) == 0) { 1675 tx_pkt_split = tx_split_name[i].split; 1676 return; 1677 } 1678 } 1679 printf("unknown value: \"%s\"\n", name); 1680 } 1681 1682 void 1683 show_tx_pkt_segments(void) 1684 { 1685 uint32_t i, n; 1686 const char *split; 1687 1688 n = tx_pkt_nb_segs; 1689 split = tx_split_get_name(tx_pkt_split); 1690 1691 printf("Number of segments: %u\n", n); 1692 printf("Segment sizes: "); 1693 for (i = 0; i != n - 1; i++) 1694 printf("%hu,", tx_pkt_seg_lengths[i]); 1695 printf("%hu\n", tx_pkt_seg_lengths[i]); 1696 printf("Split packet: %s\n", split); 1697 } 1698 1699 void 1700 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 1701 { 1702 uint16_t tx_pkt_len; 1703 unsigned i; 1704 1705 if (nb_segs >= (unsigned) nb_txd) { 1706 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 1707 nb_segs, (unsigned int) nb_txd); 1708 return; 1709 } 1710 1711 /* 1712 * Check that each segment length is greater or equal than 1713 * the mbuf data sise. 1714 * Check also that the total packet length is greater or equal than the 1715 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 1716 */ 1717 tx_pkt_len = 0; 1718 for (i = 0; i < nb_segs; i++) { 1719 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 1720 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 1721 i, seg_lengths[i], (unsigned) mbuf_data_size); 1722 return; 1723 } 1724 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 1725 } 1726 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 1727 printf("total packet length=%u < %d - give up\n", 1728 (unsigned) tx_pkt_len, 1729 (int)(sizeof(struct ether_hdr) + 20 + 8)); 1730 return; 1731 } 1732 1733 for (i = 0; i < nb_segs; i++) 1734 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 1735 1736 tx_pkt_length = tx_pkt_len; 1737 tx_pkt_nb_segs = (uint8_t) nb_segs; 1738 } 1739 1740 char* 1741 list_pkt_forwarding_modes(void) 1742 { 1743 static char fwd_modes[128] = ""; 1744 const char *separator = "|"; 1745 struct fwd_engine *fwd_eng; 1746 unsigned i = 0; 1747 1748 if (strlen (fwd_modes) == 0) { 1749 while ((fwd_eng = fwd_engines[i++]) != NULL) { 1750 strncat(fwd_modes, fwd_eng->fwd_mode_name, 1751 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 1752 strncat(fwd_modes, separator, 1753 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 1754 } 1755 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 1756 } 1757 1758 return fwd_modes; 1759 } 1760 1761 char* 1762 list_pkt_forwarding_retry_modes(void) 1763 { 1764 static char fwd_modes[128] = ""; 1765 const char *separator = "|"; 1766 struct fwd_engine *fwd_eng; 1767 unsigned i = 0; 1768 1769 if (strlen(fwd_modes) == 0) { 1770 while ((fwd_eng = fwd_engines[i++]) != NULL) { 1771 if (fwd_eng == &rx_only_engine) 1772 continue; 1773 strncat(fwd_modes, fwd_eng->fwd_mode_name, 1774 sizeof(fwd_modes) - 1775 strlen(fwd_modes) - 1); 1776 strncat(fwd_modes, separator, 1777 sizeof(fwd_modes) - 1778 strlen(fwd_modes) - 1); 1779 } 1780 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 1781 } 1782 1783 return fwd_modes; 1784 } 1785 1786 void 1787 set_pkt_forwarding_mode(const char *fwd_mode_name) 1788 { 1789 struct fwd_engine *fwd_eng; 1790 unsigned i; 1791 1792 i = 0; 1793 while ((fwd_eng = fwd_engines[i]) != NULL) { 1794 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 1795 printf("Set %s packet forwarding mode%s\n", 1796 fwd_mode_name, 1797 retry_enabled == 0 ? "" : " with retry"); 1798 cur_fwd_eng = fwd_eng; 1799 return; 1800 } 1801 i++; 1802 } 1803 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 1804 } 1805 1806 void 1807 set_verbose_level(uint16_t vb_level) 1808 { 1809 printf("Change verbose level from %u to %u\n", 1810 (unsigned int) verbose_level, (unsigned int) vb_level); 1811 verbose_level = vb_level; 1812 } 1813 1814 void 1815 vlan_extend_set(portid_t port_id, int on) 1816 { 1817 int diag; 1818 int vlan_offload; 1819 1820 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1821 return; 1822 1823 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1824 1825 if (on) 1826 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 1827 else 1828 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 1829 1830 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1831 if (diag < 0) 1832 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 1833 "diag=%d\n", port_id, on, diag); 1834 } 1835 1836 void 1837 rx_vlan_strip_set(portid_t port_id, int on) 1838 { 1839 int diag; 1840 int vlan_offload; 1841 1842 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1843 return; 1844 1845 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1846 1847 if (on) 1848 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 1849 else 1850 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 1851 1852 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1853 if (diag < 0) 1854 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 1855 "diag=%d\n", port_id, on, diag); 1856 } 1857 1858 void 1859 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 1860 { 1861 int diag; 1862 1863 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1864 return; 1865 1866 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 1867 if (diag < 0) 1868 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 1869 "diag=%d\n", port_id, queue_id, on, diag); 1870 } 1871 1872 void 1873 rx_vlan_filter_set(portid_t port_id, int on) 1874 { 1875 int diag; 1876 int vlan_offload; 1877 1878 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1879 return; 1880 1881 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1882 1883 if (on) 1884 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 1885 else 1886 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 1887 1888 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1889 if (diag < 0) 1890 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 1891 "diag=%d\n", port_id, on, diag); 1892 } 1893 1894 int 1895 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 1896 { 1897 int diag; 1898 1899 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1900 return 1; 1901 if (vlan_id_is_invalid(vlan_id)) 1902 return 1; 1903 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 1904 if (diag == 0) 1905 return 0; 1906 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 1907 "diag=%d\n", 1908 port_id, vlan_id, on, diag); 1909 return -1; 1910 } 1911 1912 void 1913 rx_vlan_all_filter_set(portid_t port_id, int on) 1914 { 1915 uint16_t vlan_id; 1916 1917 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1918 return; 1919 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 1920 if (rx_vft_set(port_id, vlan_id, on)) 1921 break; 1922 } 1923 } 1924 1925 void 1926 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 1927 { 1928 int diag; 1929 1930 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1931 return; 1932 1933 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 1934 if (diag == 0) 1935 return; 1936 1937 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 1938 "diag=%d\n", 1939 port_id, vlan_type, tp_id, diag); 1940 } 1941 1942 void 1943 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 1944 { 1945 int vlan_offload; 1946 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1947 return; 1948 if (vlan_id_is_invalid(vlan_id)) 1949 return; 1950 1951 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1952 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 1953 printf("Error, as QinQ has been enabled.\n"); 1954 return; 1955 } 1956 1957 tx_vlan_reset(port_id); 1958 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; 1959 ports[port_id].tx_vlan_id = vlan_id; 1960 } 1961 1962 void 1963 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 1964 { 1965 int vlan_offload; 1966 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1967 return; 1968 if (vlan_id_is_invalid(vlan_id)) 1969 return; 1970 if (vlan_id_is_invalid(vlan_id_outer)) 1971 return; 1972 1973 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1974 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 1975 printf("Error, as QinQ hasn't been enabled.\n"); 1976 return; 1977 } 1978 1979 tx_vlan_reset(port_id); 1980 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; 1981 ports[port_id].tx_vlan_id = vlan_id; 1982 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 1983 } 1984 1985 void 1986 tx_vlan_reset(portid_t port_id) 1987 { 1988 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1989 return; 1990 ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | 1991 TESTPMD_TX_OFFLOAD_INSERT_QINQ); 1992 ports[port_id].tx_vlan_id = 0; 1993 ports[port_id].tx_vlan_id_outer = 0; 1994 } 1995 1996 void 1997 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 1998 { 1999 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2000 return; 2001 2002 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2003 } 2004 2005 void 2006 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2007 { 2008 uint16_t i; 2009 uint8_t existing_mapping_found = 0; 2010 2011 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2012 return; 2013 2014 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2015 return; 2016 2017 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2018 printf("map_value not in required range 0..%d\n", 2019 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2020 return; 2021 } 2022 2023 if (!is_rx) { /*then tx*/ 2024 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2025 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2026 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2027 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2028 existing_mapping_found = 1; 2029 break; 2030 } 2031 } 2032 if (!existing_mapping_found) { /* A new additional mapping... */ 2033 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2034 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2035 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2036 nb_tx_queue_stats_mappings++; 2037 } 2038 } 2039 else { /*rx*/ 2040 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2041 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2042 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2043 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2044 existing_mapping_found = 1; 2045 break; 2046 } 2047 } 2048 if (!existing_mapping_found) { /* A new additional mapping... */ 2049 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2050 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2051 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2052 nb_rx_queue_stats_mappings++; 2053 } 2054 } 2055 } 2056 2057 static inline void 2058 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2059 { 2060 printf("\n vlan_tci: 0x%04x, ", mask->vlan_tci_mask); 2061 2062 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 2063 printf("mac_addr: 0x%02x", mask->mac_addr_byte_mask); 2064 else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2065 printf("mac_addr: 0x%02x, tunnel_type: 0x%01x, tunnel_id: 0x%08x", 2066 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2067 mask->tunnel_id_mask); 2068 else { 2069 printf("src_ipv4: 0x%08x, dst_ipv4: 0x%08x," 2070 " src_port: 0x%04x, dst_port: 0x%04x", 2071 mask->ipv4_mask.src_ip, mask->ipv4_mask.dst_ip, 2072 mask->src_port_mask, mask->dst_port_mask); 2073 2074 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x," 2075 " dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2076 mask->ipv6_mask.src_ip[0], mask->ipv6_mask.src_ip[1], 2077 mask->ipv6_mask.src_ip[2], mask->ipv6_mask.src_ip[3], 2078 mask->ipv6_mask.dst_ip[0], mask->ipv6_mask.dst_ip[1], 2079 mask->ipv6_mask.dst_ip[2], mask->ipv6_mask.dst_ip[3]); 2080 } 2081 2082 printf("\n"); 2083 } 2084 2085 static inline void 2086 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2087 { 2088 struct rte_eth_flex_payload_cfg *cfg; 2089 uint32_t i, j; 2090 2091 for (i = 0; i < flex_conf->nb_payloads; i++) { 2092 cfg = &flex_conf->flex_set[i]; 2093 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 2094 printf("\n RAW: "); 2095 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 2096 printf("\n L2_PAYLOAD: "); 2097 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 2098 printf("\n L3_PAYLOAD: "); 2099 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 2100 printf("\n L4_PAYLOAD: "); 2101 else 2102 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 2103 for (j = 0; j < num; j++) 2104 printf(" %-5u", cfg->src_offset[j]); 2105 } 2106 printf("\n"); 2107 } 2108 2109 static char * 2110 flowtype_to_str(uint16_t flow_type) 2111 { 2112 struct flow_type_info { 2113 char str[32]; 2114 uint16_t ftype; 2115 }; 2116 2117 uint8_t i; 2118 static struct flow_type_info flowtype_str_table[] = { 2119 {"raw", RTE_ETH_FLOW_RAW}, 2120 {"ipv4", RTE_ETH_FLOW_IPV4}, 2121 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 2122 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 2123 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 2124 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 2125 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 2126 {"ipv6", RTE_ETH_FLOW_IPV6}, 2127 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 2128 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 2129 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 2130 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 2131 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 2132 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 2133 {"port", RTE_ETH_FLOW_PORT}, 2134 {"vxlan", RTE_ETH_FLOW_VXLAN}, 2135 {"geneve", RTE_ETH_FLOW_GENEVE}, 2136 {"nvgre", RTE_ETH_FLOW_NVGRE}, 2137 }; 2138 2139 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 2140 if (flowtype_str_table[i].ftype == flow_type) 2141 return flowtype_str_table[i].str; 2142 } 2143 2144 return NULL; 2145 } 2146 2147 static inline void 2148 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2149 { 2150 struct rte_eth_fdir_flex_mask *mask; 2151 uint32_t i, j; 2152 char *p; 2153 2154 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 2155 mask = &flex_conf->flex_mask[i]; 2156 p = flowtype_to_str(mask->flow_type); 2157 printf("\n %s:\t", p ? p : "unknown"); 2158 for (j = 0; j < num; j++) 2159 printf(" %02x", mask->mask[j]); 2160 } 2161 printf("\n"); 2162 } 2163 2164 static inline void 2165 print_fdir_flow_type(uint32_t flow_types_mask) 2166 { 2167 int i; 2168 char *p; 2169 2170 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 2171 if (!(flow_types_mask & (1 << i))) 2172 continue; 2173 p = flowtype_to_str(i); 2174 if (p) 2175 printf(" %s", p); 2176 else 2177 printf(" unknown"); 2178 } 2179 printf("\n"); 2180 } 2181 2182 void 2183 fdir_get_infos(portid_t port_id) 2184 { 2185 struct rte_eth_fdir_stats fdir_stat; 2186 struct rte_eth_fdir_info fdir_info; 2187 int ret; 2188 2189 static const char *fdir_stats_border = "########################"; 2190 2191 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2192 return; 2193 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 2194 if (ret < 0) { 2195 printf("\n FDIR is not supported on port %-2d\n", 2196 port_id); 2197 return; 2198 } 2199 2200 memset(&fdir_info, 0, sizeof(fdir_info)); 2201 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2202 RTE_ETH_FILTER_INFO, &fdir_info); 2203 memset(&fdir_stat, 0, sizeof(fdir_stat)); 2204 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 2205 RTE_ETH_FILTER_STATS, &fdir_stat); 2206 printf("\n %s FDIR infos for port %-2d %s\n", 2207 fdir_stats_border, port_id, fdir_stats_border); 2208 printf(" MODE: "); 2209 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 2210 printf(" PERFECT\n"); 2211 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 2212 printf(" PERFECT-MAC-VLAN\n"); 2213 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2214 printf(" PERFECT-TUNNEL\n"); 2215 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 2216 printf(" SIGNATURE\n"); 2217 else 2218 printf(" DISABLE\n"); 2219 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 2220 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 2221 printf(" SUPPORTED FLOW TYPE: "); 2222 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 2223 } 2224 printf(" FLEX PAYLOAD INFO:\n"); 2225 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 2226 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 2227 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 2228 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 2229 fdir_info.flex_payload_unit, 2230 fdir_info.max_flex_payload_segment_num, 2231 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 2232 printf(" MASK: "); 2233 print_fdir_mask(&fdir_info.mask); 2234 if (fdir_info.flex_conf.nb_payloads > 0) { 2235 printf(" FLEX PAYLOAD SRC OFFSET:"); 2236 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2237 } 2238 if (fdir_info.flex_conf.nb_flexmasks > 0) { 2239 printf(" FLEX MASK CFG:"); 2240 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 2241 } 2242 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 2243 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 2244 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 2245 fdir_info.guarant_spc, fdir_info.best_spc); 2246 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 2247 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 2248 " add: %-10"PRIu64" remove: %"PRIu64"\n" 2249 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 2250 fdir_stat.collision, fdir_stat.free, 2251 fdir_stat.maxhash, fdir_stat.maxlen, 2252 fdir_stat.add, fdir_stat.remove, 2253 fdir_stat.f_add, fdir_stat.f_remove); 2254 printf(" %s############################%s\n", 2255 fdir_stats_border, fdir_stats_border); 2256 } 2257 2258 void 2259 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 2260 { 2261 struct rte_port *port; 2262 struct rte_eth_fdir_flex_conf *flex_conf; 2263 int i, idx = 0; 2264 2265 port = &ports[port_id]; 2266 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2267 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 2268 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 2269 idx = i; 2270 break; 2271 } 2272 } 2273 if (i >= RTE_ETH_FLOW_MAX) { 2274 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 2275 idx = flex_conf->nb_flexmasks; 2276 flex_conf->nb_flexmasks++; 2277 } else { 2278 printf("The flex mask table is full. Can not set flex" 2279 " mask for flow_type(%u).", cfg->flow_type); 2280 return; 2281 } 2282 } 2283 (void)rte_memcpy(&flex_conf->flex_mask[idx], 2284 cfg, 2285 sizeof(struct rte_eth_fdir_flex_mask)); 2286 } 2287 2288 void 2289 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 2290 { 2291 struct rte_port *port; 2292 struct rte_eth_fdir_flex_conf *flex_conf; 2293 int i, idx = 0; 2294 2295 port = &ports[port_id]; 2296 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2297 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 2298 if (cfg->type == flex_conf->flex_set[i].type) { 2299 idx = i; 2300 break; 2301 } 2302 } 2303 if (i >= RTE_ETH_PAYLOAD_MAX) { 2304 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 2305 idx = flex_conf->nb_payloads; 2306 flex_conf->nb_payloads++; 2307 } else { 2308 printf("The flex payload table is full. Can not set" 2309 " flex payload for type(%u).", cfg->type); 2310 return; 2311 } 2312 } 2313 (void)rte_memcpy(&flex_conf->flex_set[idx], 2314 cfg, 2315 sizeof(struct rte_eth_flex_payload_cfg)); 2316 2317 } 2318 2319 void 2320 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 2321 { 2322 int diag; 2323 2324 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2325 return; 2326 if (is_rx) 2327 diag = rte_eth_dev_set_vf_rx(port_id,vf,on); 2328 else 2329 diag = rte_eth_dev_set_vf_tx(port_id,vf,on); 2330 if (diag == 0) 2331 return; 2332 if(is_rx) 2333 printf("rte_eth_dev_set_vf_rx for port_id=%d failed " 2334 "diag=%d\n", port_id, diag); 2335 else 2336 printf("rte_eth_dev_set_vf_tx for port_id=%d failed " 2337 "diag=%d\n", port_id, diag); 2338 2339 } 2340 2341 void 2342 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on) 2343 { 2344 int diag; 2345 2346 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2347 return; 2348 if (vlan_id_is_invalid(vlan_id)) 2349 return; 2350 diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on); 2351 if (diag == 0) 2352 return; 2353 printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed " 2354 "diag=%d\n", port_id, diag); 2355 } 2356 2357 int 2358 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 2359 { 2360 int diag; 2361 struct rte_eth_link link; 2362 2363 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2364 return 1; 2365 rte_eth_link_get_nowait(port_id, &link); 2366 if (rate > link.link_speed) { 2367 printf("Invalid rate value:%u bigger than link speed: %u\n", 2368 rate, link.link_speed); 2369 return 1; 2370 } 2371 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 2372 if (diag == 0) 2373 return diag; 2374 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 2375 port_id, diag); 2376 return diag; 2377 } 2378 2379 int 2380 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 2381 { 2382 int diag; 2383 struct rte_eth_link link; 2384 2385 if (q_msk == 0) 2386 return 0; 2387 2388 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2389 return 1; 2390 rte_eth_link_get_nowait(port_id, &link); 2391 if (rate > link.link_speed) { 2392 printf("Invalid rate value:%u bigger than link speed: %u\n", 2393 rate, link.link_speed); 2394 return 1; 2395 } 2396 diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk); 2397 if (diag == 0) 2398 return diag; 2399 printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n", 2400 port_id, diag); 2401 return diag; 2402 } 2403 2404 /* 2405 * Functions to manage the set of filtered Multicast MAC addresses. 2406 * 2407 * A pool of filtered multicast MAC addresses is associated with each port. 2408 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 2409 * The address of the pool and the number of valid multicast MAC addresses 2410 * recorded in the pool are stored in the fields "mc_addr_pool" and 2411 * "mc_addr_nb" of the "rte_port" data structure. 2412 * 2413 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 2414 * to be supplied a contiguous array of multicast MAC addresses. 2415 * To comply with this constraint, the set of multicast addresses recorded 2416 * into the pool are systematically compacted at the beginning of the pool. 2417 * Hence, when a multicast address is removed from the pool, all following 2418 * addresses, if any, are copied back to keep the set contiguous. 2419 */ 2420 #define MCAST_POOL_INC 32 2421 2422 static int 2423 mcast_addr_pool_extend(struct rte_port *port) 2424 { 2425 struct ether_addr *mc_pool; 2426 size_t mc_pool_size; 2427 2428 /* 2429 * If a free entry is available at the end of the pool, just 2430 * increment the number of recorded multicast addresses. 2431 */ 2432 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 2433 port->mc_addr_nb++; 2434 return 0; 2435 } 2436 2437 /* 2438 * [re]allocate a pool with MCAST_POOL_INC more entries. 2439 * The previous test guarantees that port->mc_addr_nb is a multiple 2440 * of MCAST_POOL_INC. 2441 */ 2442 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 2443 MCAST_POOL_INC); 2444 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 2445 mc_pool_size); 2446 if (mc_pool == NULL) { 2447 printf("allocation of pool of %u multicast addresses failed\n", 2448 port->mc_addr_nb + MCAST_POOL_INC); 2449 return -ENOMEM; 2450 } 2451 2452 port->mc_addr_pool = mc_pool; 2453 port->mc_addr_nb++; 2454 return 0; 2455 2456 } 2457 2458 static void 2459 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 2460 { 2461 port->mc_addr_nb--; 2462 if (addr_idx == port->mc_addr_nb) { 2463 /* No need to recompact the set of multicast addressses. */ 2464 if (port->mc_addr_nb == 0) { 2465 /* free the pool of multicast addresses. */ 2466 free(port->mc_addr_pool); 2467 port->mc_addr_pool = NULL; 2468 } 2469 return; 2470 } 2471 memmove(&port->mc_addr_pool[addr_idx], 2472 &port->mc_addr_pool[addr_idx + 1], 2473 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 2474 } 2475 2476 static void 2477 eth_port_multicast_addr_list_set(uint8_t port_id) 2478 { 2479 struct rte_port *port; 2480 int diag; 2481 2482 port = &ports[port_id]; 2483 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 2484 port->mc_addr_nb); 2485 if (diag == 0) 2486 return; 2487 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 2488 port->mc_addr_nb, port_id, -diag); 2489 } 2490 2491 void 2492 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr) 2493 { 2494 struct rte_port *port; 2495 uint32_t i; 2496 2497 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2498 return; 2499 2500 port = &ports[port_id]; 2501 2502 /* 2503 * Check that the added multicast MAC address is not already recorded 2504 * in the pool of multicast addresses. 2505 */ 2506 for (i = 0; i < port->mc_addr_nb; i++) { 2507 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 2508 printf("multicast address already filtered by port\n"); 2509 return; 2510 } 2511 } 2512 2513 if (mcast_addr_pool_extend(port) != 0) 2514 return; 2515 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 2516 eth_port_multicast_addr_list_set(port_id); 2517 } 2518 2519 void 2520 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr) 2521 { 2522 struct rte_port *port; 2523 uint32_t i; 2524 2525 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2526 return; 2527 2528 port = &ports[port_id]; 2529 2530 /* 2531 * Search the pool of multicast MAC addresses for the removed address. 2532 */ 2533 for (i = 0; i < port->mc_addr_nb; i++) { 2534 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 2535 break; 2536 } 2537 if (i == port->mc_addr_nb) { 2538 printf("multicast address not filtered by port %d\n", port_id); 2539 return; 2540 } 2541 2542 mcast_addr_pool_remove(port, i); 2543 eth_port_multicast_addr_list_set(port_id); 2544 } 2545 2546 void 2547 port_dcb_info_display(uint8_t port_id) 2548 { 2549 struct rte_eth_dcb_info dcb_info; 2550 uint16_t i; 2551 int ret; 2552 static const char *border = "================"; 2553 2554 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2555 return; 2556 2557 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 2558 if (ret) { 2559 printf("\n Failed to get dcb infos on port %-2d\n", 2560 port_id); 2561 return; 2562 } 2563 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 2564 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 2565 printf("\n TC : "); 2566 for (i = 0; i < dcb_info.nb_tcs; i++) 2567 printf("\t%4d", i); 2568 printf("\n Priority : "); 2569 for (i = 0; i < dcb_info.nb_tcs; i++) 2570 printf("\t%4d", dcb_info.prio_tc[i]); 2571 printf("\n BW percent :"); 2572 for (i = 0; i < dcb_info.nb_tcs; i++) 2573 printf("\t%4d%%", dcb_info.tc_bws[i]); 2574 printf("\n RXQ base : "); 2575 for (i = 0; i < dcb_info.nb_tcs; i++) 2576 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 2577 printf("\n RXQ number :"); 2578 for (i = 0; i < dcb_info.nb_tcs; i++) 2579 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 2580 printf("\n TXQ base : "); 2581 for (i = 0; i < dcb_info.nb_tcs; i++) 2582 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 2583 printf("\n TXQ number :"); 2584 for (i = 0; i < dcb_info.nb_tcs; i++) 2585 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 2586 printf("\n"); 2587 } 2588