1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 /* BSD LICENSE 34 * 35 * Copyright 2013-2014 6WIND S.A. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name of 6WIND S.A. nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 62 */ 63 64 #include <stdarg.h> 65 #include <errno.h> 66 #include <stdio.h> 67 #include <string.h> 68 #include <stdarg.h> 69 #include <stdint.h> 70 #include <inttypes.h> 71 72 #include <sys/queue.h> 73 74 #include <rte_common.h> 75 #include <rte_byteorder.h> 76 #include <rte_debug.h> 77 #include <rte_log.h> 78 #include <rte_memory.h> 79 #include <rte_memcpy.h> 80 #include <rte_memzone.h> 81 #include <rte_launch.h> 82 #include <rte_eal.h> 83 #include <rte_per_lcore.h> 84 #include <rte_lcore.h> 85 #include <rte_atomic.h> 86 #include <rte_branch_prediction.h> 87 #include <rte_ring.h> 88 #include <rte_mempool.h> 89 #include <rte_mbuf.h> 90 #include <rte_interrupts.h> 91 #include <rte_pci.h> 92 #include <rte_ether.h> 93 #include <rte_ethdev.h> 94 #include <rte_string_fns.h> 95 96 #include "testpmd.h" 97 98 static char *flowtype_to_str(uint16_t flow_type); 99 100 static void 101 print_ethaddr(const char *name, struct ether_addr *eth_addr) 102 { 103 char buf[ETHER_ADDR_FMT_SIZE]; 104 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 105 printf("%s%s", name, buf); 106 } 107 108 void 109 nic_stats_display(portid_t port_id) 110 { 111 struct rte_eth_stats stats; 112 struct rte_port *port = &ports[port_id]; 113 uint8_t i; 114 portid_t pid; 115 116 static const char *nic_stats_border = "########################"; 117 118 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 119 printf("Valid port range is [0"); 120 FOREACH_PORT(pid, ports) 121 printf(", %d", pid); 122 printf("]\n"); 123 return; 124 } 125 rte_eth_stats_get(port_id, &stats); 126 printf("\n %s NIC statistics for port %-2d %s\n", 127 nic_stats_border, port_id, nic_stats_border); 128 129 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 130 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 131 "%-"PRIu64"\n", 132 stats.ipackets, stats.imissed, stats.ibytes); 133 printf(" RX-badcrc: %-10"PRIu64" RX-badlen: %-10"PRIu64" RX-errors: " 134 "%-"PRIu64"\n", 135 stats.ibadcrc, stats.ibadlen, stats.ierrors); 136 printf(" RX-nombuf: %-10"PRIu64"\n", 137 stats.rx_nombuf); 138 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 139 "%-"PRIu64"\n", 140 stats.opackets, stats.oerrors, stats.obytes); 141 } 142 else { 143 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 144 " RX-bytes: %10"PRIu64"\n", 145 stats.ipackets, stats.ierrors, stats.ibytes); 146 printf(" RX-badcrc: %10"PRIu64" RX-badlen: %10"PRIu64 147 " RX-errors: %10"PRIu64"\n", 148 stats.ibadcrc, stats.ibadlen, stats.ierrors); 149 printf(" RX-nombuf: %10"PRIu64"\n", 150 stats.rx_nombuf); 151 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 152 " TX-bytes: %10"PRIu64"\n", 153 stats.opackets, stats.oerrors, stats.obytes); 154 } 155 156 /* stats fdir */ 157 if (fdir_conf.mode != RTE_FDIR_MODE_NONE) 158 printf(" Fdirmiss: %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n", 159 stats.fdirmiss, 160 stats.fdirmatch); 161 162 if (port->rx_queue_stats_mapping_enabled) { 163 printf("\n"); 164 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 165 printf(" Stats reg %2d RX-packets: %10"PRIu64 166 " RX-errors: %10"PRIu64 167 " RX-bytes: %10"PRIu64"\n", 168 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 169 } 170 } 171 if (port->tx_queue_stats_mapping_enabled) { 172 printf("\n"); 173 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 174 printf(" Stats reg %2d TX-packets: %10"PRIu64 175 " TX-bytes: %10"PRIu64"\n", 176 i, stats.q_opackets[i], stats.q_obytes[i]); 177 } 178 } 179 180 /* Display statistics of XON/XOFF pause frames, if any. */ 181 if ((stats.tx_pause_xon | stats.rx_pause_xon | 182 stats.tx_pause_xoff | stats.rx_pause_xoff) > 0) { 183 printf(" RX-XOFF: %-10"PRIu64" RX-XON: %-10"PRIu64"\n", 184 stats.rx_pause_xoff, stats.rx_pause_xon); 185 printf(" TX-XOFF: %-10"PRIu64" TX-XON: %-10"PRIu64"\n", 186 stats.tx_pause_xoff, stats.tx_pause_xon); 187 } 188 printf(" %s############################%s\n", 189 nic_stats_border, nic_stats_border); 190 } 191 192 void 193 nic_stats_clear(portid_t port_id) 194 { 195 portid_t pid; 196 197 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 198 printf("Valid port range is [0"); 199 FOREACH_PORT(pid, ports) 200 printf(", %d", pid); 201 printf("]\n"); 202 return; 203 } 204 rte_eth_stats_reset(port_id); 205 printf("\n NIC statistics for port %d cleared\n", port_id); 206 } 207 208 void 209 nic_xstats_display(portid_t port_id) 210 { 211 struct rte_eth_xstats *xstats; 212 int len, ret, i; 213 214 printf("###### NIC extended statistics for port %-2d\n", port_id); 215 216 len = rte_eth_xstats_get(port_id, NULL, 0); 217 if (len < 0) { 218 printf("Cannot get xstats count\n"); 219 return; 220 } 221 xstats = malloc(sizeof(xstats[0]) * len); 222 if (xstats == NULL) { 223 printf("Cannot allocate memory for xstats\n"); 224 return; 225 } 226 ret = rte_eth_xstats_get(port_id, xstats, len); 227 if (ret < 0 || ret > len) { 228 printf("Cannot get xstats\n"); 229 free(xstats); 230 return; 231 } 232 for (i = 0; i < len; i++) 233 printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value); 234 free(xstats); 235 } 236 237 void 238 nic_xstats_clear(portid_t port_id) 239 { 240 rte_eth_xstats_reset(port_id); 241 } 242 243 void 244 nic_stats_mapping_display(portid_t port_id) 245 { 246 struct rte_port *port = &ports[port_id]; 247 uint16_t i; 248 portid_t pid; 249 250 static const char *nic_stats_mapping_border = "########################"; 251 252 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 253 printf("Valid port range is [0"); 254 FOREACH_PORT(pid, ports) 255 printf(", %d", pid); 256 printf("]\n"); 257 return; 258 } 259 260 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 261 printf("Port id %d - either does not support queue statistic mapping or" 262 " no queue statistic mapping set\n", port_id); 263 return; 264 } 265 266 printf("\n %s NIC statistics mapping for port %-2d %s\n", 267 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 268 269 if (port->rx_queue_stats_mapping_enabled) { 270 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 271 if (rx_queue_stats_mappings[i].port_id == port_id) { 272 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 273 rx_queue_stats_mappings[i].queue_id, 274 rx_queue_stats_mappings[i].stats_counter_id); 275 } 276 } 277 printf("\n"); 278 } 279 280 281 if (port->tx_queue_stats_mapping_enabled) { 282 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 283 if (tx_queue_stats_mappings[i].port_id == port_id) { 284 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 285 tx_queue_stats_mappings[i].queue_id, 286 tx_queue_stats_mappings[i].stats_counter_id); 287 } 288 } 289 } 290 291 printf(" %s####################################%s\n", 292 nic_stats_mapping_border, nic_stats_mapping_border); 293 } 294 295 void 296 port_infos_display(portid_t port_id) 297 { 298 struct rte_port *port; 299 struct ether_addr mac_addr; 300 struct rte_eth_link link; 301 struct rte_eth_dev_info dev_info; 302 int vlan_offload; 303 struct rte_mempool * mp; 304 static const char *info_border = "*********************"; 305 portid_t pid; 306 307 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 308 printf("Valid port range is [0"); 309 FOREACH_PORT(pid, ports) 310 printf(", %d", pid); 311 printf("]\n"); 312 return; 313 } 314 port = &ports[port_id]; 315 rte_eth_link_get_nowait(port_id, &link); 316 printf("\n%s Infos for port %-2d %s\n", 317 info_border, port_id, info_border); 318 rte_eth_macaddr_get(port_id, &mac_addr); 319 print_ethaddr("MAC address: ", &mac_addr); 320 printf("\nConnect to socket: %u", port->socket_id); 321 322 if (port_numa[port_id] != NUMA_NO_CONFIG) { 323 mp = mbuf_pool_find(port_numa[port_id]); 324 if (mp) 325 printf("\nmemory allocation on the socket: %d", 326 port_numa[port_id]); 327 } else 328 printf("\nmemory allocation on the socket: %u",port->socket_id); 329 330 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 331 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 332 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 333 ("full-duplex") : ("half-duplex")); 334 printf("Promiscuous mode: %s\n", 335 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 336 printf("Allmulticast mode: %s\n", 337 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 338 printf("Maximum number of MAC addresses: %u\n", 339 (unsigned int)(port->dev_info.max_mac_addrs)); 340 printf("Maximum number of MAC addresses of hash filtering: %u\n", 341 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 342 343 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 344 if (vlan_offload >= 0){ 345 printf("VLAN offload: \n"); 346 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 347 printf(" strip on \n"); 348 else 349 printf(" strip off \n"); 350 351 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 352 printf(" filter on \n"); 353 else 354 printf(" filter off \n"); 355 356 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 357 printf(" qinq(extend) on \n"); 358 else 359 printf(" qinq(extend) off \n"); 360 } 361 362 memset(&dev_info, 0, sizeof(dev_info)); 363 rte_eth_dev_info_get(port_id, &dev_info); 364 if (dev_info.hash_key_size > 0) 365 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 366 if (dev_info.reta_size > 0) 367 printf("Redirection table size: %u\n", dev_info.reta_size); 368 if (!dev_info.flow_type_rss_offloads) 369 printf("No flow type is supported.\n"); 370 else { 371 uint16_t i; 372 char *p; 373 374 printf("Supported flow types:\n"); 375 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; 376 i++) { 377 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 378 continue; 379 p = flowtype_to_str(i); 380 printf(" %s\n", (p ? p : "unknown")); 381 } 382 } 383 } 384 385 int 386 port_id_is_invalid(portid_t port_id, enum print_warning warning) 387 { 388 if (port_id == (portid_t)RTE_PORT_ALL) 389 return 0; 390 391 if (ports[port_id].enabled) 392 return 0; 393 394 if (warning == ENABLED_WARN) 395 printf("Invalid port %d\n", port_id); 396 397 return 1; 398 } 399 400 static int 401 vlan_id_is_invalid(uint16_t vlan_id) 402 { 403 if (vlan_id < 4096) 404 return 0; 405 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 406 return 1; 407 } 408 409 static int 410 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 411 { 412 uint64_t pci_len; 413 414 if (reg_off & 0x3) { 415 printf("Port register offset 0x%X not aligned on a 4-byte " 416 "boundary\n", 417 (unsigned)reg_off); 418 return 1; 419 } 420 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 421 if (reg_off >= pci_len) { 422 printf("Port %d: register offset %u (0x%X) out of port PCI " 423 "resource (length=%"PRIu64")\n", 424 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 425 return 1; 426 } 427 return 0; 428 } 429 430 static int 431 reg_bit_pos_is_invalid(uint8_t bit_pos) 432 { 433 if (bit_pos <= 31) 434 return 0; 435 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 436 return 1; 437 } 438 439 #define display_port_and_reg_off(port_id, reg_off) \ 440 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 441 442 static inline void 443 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 444 { 445 display_port_and_reg_off(port_id, (unsigned)reg_off); 446 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 447 } 448 449 void 450 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 451 { 452 uint32_t reg_v; 453 454 455 if (port_id_is_invalid(port_id, ENABLED_WARN)) 456 return; 457 if (port_reg_off_is_invalid(port_id, reg_off)) 458 return; 459 if (reg_bit_pos_is_invalid(bit_x)) 460 return; 461 reg_v = port_id_pci_reg_read(port_id, reg_off); 462 display_port_and_reg_off(port_id, (unsigned)reg_off); 463 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 464 } 465 466 void 467 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 468 uint8_t bit1_pos, uint8_t bit2_pos) 469 { 470 uint32_t reg_v; 471 uint8_t l_bit; 472 uint8_t h_bit; 473 474 if (port_id_is_invalid(port_id, ENABLED_WARN)) 475 return; 476 if (port_reg_off_is_invalid(port_id, reg_off)) 477 return; 478 if (reg_bit_pos_is_invalid(bit1_pos)) 479 return; 480 if (reg_bit_pos_is_invalid(bit2_pos)) 481 return; 482 if (bit1_pos > bit2_pos) 483 l_bit = bit2_pos, h_bit = bit1_pos; 484 else 485 l_bit = bit1_pos, h_bit = bit2_pos; 486 487 reg_v = port_id_pci_reg_read(port_id, reg_off); 488 reg_v >>= l_bit; 489 if (h_bit < 31) 490 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 491 display_port_and_reg_off(port_id, (unsigned)reg_off); 492 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 493 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 494 } 495 496 void 497 port_reg_display(portid_t port_id, uint32_t reg_off) 498 { 499 uint32_t reg_v; 500 501 if (port_id_is_invalid(port_id, ENABLED_WARN)) 502 return; 503 if (port_reg_off_is_invalid(port_id, reg_off)) 504 return; 505 reg_v = port_id_pci_reg_read(port_id, reg_off); 506 display_port_reg_value(port_id, reg_off, reg_v); 507 } 508 509 void 510 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 511 uint8_t bit_v) 512 { 513 uint32_t reg_v; 514 515 if (port_id_is_invalid(port_id, ENABLED_WARN)) 516 return; 517 if (port_reg_off_is_invalid(port_id, reg_off)) 518 return; 519 if (reg_bit_pos_is_invalid(bit_pos)) 520 return; 521 if (bit_v > 1) { 522 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 523 return; 524 } 525 reg_v = port_id_pci_reg_read(port_id, reg_off); 526 if (bit_v == 0) 527 reg_v &= ~(1 << bit_pos); 528 else 529 reg_v |= (1 << bit_pos); 530 port_id_pci_reg_write(port_id, reg_off, reg_v); 531 display_port_reg_value(port_id, reg_off, reg_v); 532 } 533 534 void 535 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 536 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 537 { 538 uint32_t max_v; 539 uint32_t reg_v; 540 uint8_t l_bit; 541 uint8_t h_bit; 542 543 if (port_id_is_invalid(port_id, ENABLED_WARN)) 544 return; 545 if (port_reg_off_is_invalid(port_id, reg_off)) 546 return; 547 if (reg_bit_pos_is_invalid(bit1_pos)) 548 return; 549 if (reg_bit_pos_is_invalid(bit2_pos)) 550 return; 551 if (bit1_pos > bit2_pos) 552 l_bit = bit2_pos, h_bit = bit1_pos; 553 else 554 l_bit = bit1_pos, h_bit = bit2_pos; 555 556 if ((h_bit - l_bit) < 31) 557 max_v = (1 << (h_bit - l_bit + 1)) - 1; 558 else 559 max_v = 0xFFFFFFFF; 560 561 if (value > max_v) { 562 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 563 (unsigned)value, (unsigned)value, 564 (unsigned)max_v, (unsigned)max_v); 565 return; 566 } 567 reg_v = port_id_pci_reg_read(port_id, reg_off); 568 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 569 reg_v |= (value << l_bit); /* Set changed bits */ 570 port_id_pci_reg_write(port_id, reg_off, reg_v); 571 display_port_reg_value(port_id, reg_off, reg_v); 572 } 573 574 void 575 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 576 { 577 if (port_id_is_invalid(port_id, ENABLED_WARN)) 578 return; 579 if (port_reg_off_is_invalid(port_id, reg_off)) 580 return; 581 port_id_pci_reg_write(port_id, reg_off, reg_v); 582 display_port_reg_value(port_id, reg_off, reg_v); 583 } 584 585 void 586 port_mtu_set(portid_t port_id, uint16_t mtu) 587 { 588 int diag; 589 590 if (port_id_is_invalid(port_id, ENABLED_WARN)) 591 return; 592 diag = rte_eth_dev_set_mtu(port_id, mtu); 593 if (diag == 0) 594 return; 595 printf("Set MTU failed. diag=%d\n", diag); 596 } 597 598 /* 599 * RX/TX ring descriptors display functions. 600 */ 601 int 602 rx_queue_id_is_invalid(queueid_t rxq_id) 603 { 604 if (rxq_id < nb_rxq) 605 return 0; 606 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 607 return 1; 608 } 609 610 int 611 tx_queue_id_is_invalid(queueid_t txq_id) 612 { 613 if (txq_id < nb_txq) 614 return 0; 615 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 616 return 1; 617 } 618 619 static int 620 rx_desc_id_is_invalid(uint16_t rxdesc_id) 621 { 622 if (rxdesc_id < nb_rxd) 623 return 0; 624 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 625 rxdesc_id, nb_rxd); 626 return 1; 627 } 628 629 static int 630 tx_desc_id_is_invalid(uint16_t txdesc_id) 631 { 632 if (txdesc_id < nb_txd) 633 return 0; 634 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 635 txdesc_id, nb_txd); 636 return 1; 637 } 638 639 static const struct rte_memzone * 640 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) 641 { 642 char mz_name[RTE_MEMZONE_NAMESIZE]; 643 const struct rte_memzone *mz; 644 645 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 646 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 647 mz = rte_memzone_lookup(mz_name); 648 if (mz == NULL) 649 printf("%s ring memory zoneof (port %d, queue %d) not" 650 "found (zone name = %s\n", 651 ring_name, port_id, q_id, mz_name); 652 return (mz); 653 } 654 655 union igb_ring_dword { 656 uint64_t dword; 657 struct { 658 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 659 uint32_t lo; 660 uint32_t hi; 661 #else 662 uint32_t hi; 663 uint32_t lo; 664 #endif 665 } words; 666 }; 667 668 struct igb_ring_desc_32_bytes { 669 union igb_ring_dword lo_dword; 670 union igb_ring_dword hi_dword; 671 union igb_ring_dword resv1; 672 union igb_ring_dword resv2; 673 }; 674 675 struct igb_ring_desc_16_bytes { 676 union igb_ring_dword lo_dword; 677 union igb_ring_dword hi_dword; 678 }; 679 680 static void 681 ring_rxd_display_dword(union igb_ring_dword dword) 682 { 683 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 684 (unsigned)dword.words.hi); 685 } 686 687 static void 688 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 689 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 690 uint8_t port_id, 691 #else 692 __rte_unused uint8_t port_id, 693 #endif 694 uint16_t desc_id) 695 { 696 struct igb_ring_desc_16_bytes *ring = 697 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 698 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 699 struct rte_eth_dev_info dev_info; 700 701 memset(&dev_info, 0, sizeof(dev_info)); 702 rte_eth_dev_info_get(port_id, &dev_info); 703 if (strstr(dev_info.driver_name, "i40e") != NULL) { 704 /* 32 bytes RX descriptor, i40e only */ 705 struct igb_ring_desc_32_bytes *ring = 706 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 707 ring[desc_id].lo_dword.dword = 708 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 709 ring_rxd_display_dword(ring[desc_id].lo_dword); 710 ring[desc_id].hi_dword.dword = 711 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 712 ring_rxd_display_dword(ring[desc_id].hi_dword); 713 ring[desc_id].resv1.dword = 714 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 715 ring_rxd_display_dword(ring[desc_id].resv1); 716 ring[desc_id].resv2.dword = 717 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 718 ring_rxd_display_dword(ring[desc_id].resv2); 719 720 return; 721 } 722 #endif 723 /* 16 bytes RX descriptor */ 724 ring[desc_id].lo_dword.dword = 725 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 726 ring_rxd_display_dword(ring[desc_id].lo_dword); 727 ring[desc_id].hi_dword.dword = 728 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 729 ring_rxd_display_dword(ring[desc_id].hi_dword); 730 } 731 732 static void 733 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 734 { 735 struct igb_ring_desc_16_bytes *ring; 736 struct igb_ring_desc_16_bytes txd; 737 738 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 739 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 740 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 741 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 742 (unsigned)txd.lo_dword.words.lo, 743 (unsigned)txd.lo_dword.words.hi, 744 (unsigned)txd.hi_dword.words.lo, 745 (unsigned)txd.hi_dword.words.hi); 746 } 747 748 void 749 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 750 { 751 const struct rte_memzone *rx_mz; 752 753 if (port_id_is_invalid(port_id, ENABLED_WARN)) 754 return; 755 if (rx_queue_id_is_invalid(rxq_id)) 756 return; 757 if (rx_desc_id_is_invalid(rxd_id)) 758 return; 759 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 760 if (rx_mz == NULL) 761 return; 762 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 763 } 764 765 void 766 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 767 { 768 const struct rte_memzone *tx_mz; 769 770 if (port_id_is_invalid(port_id, ENABLED_WARN)) 771 return; 772 if (tx_queue_id_is_invalid(txq_id)) 773 return; 774 if (tx_desc_id_is_invalid(txd_id)) 775 return; 776 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 777 if (tx_mz == NULL) 778 return; 779 ring_tx_descriptor_display(tx_mz, txd_id); 780 } 781 782 void 783 fwd_lcores_config_display(void) 784 { 785 lcoreid_t lc_id; 786 787 printf("List of forwarding lcores:"); 788 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 789 printf(" %2u", fwd_lcores_cpuids[lc_id]); 790 printf("\n"); 791 } 792 void 793 rxtx_config_display(void) 794 { 795 printf(" %s packet forwarding - CRC stripping %s - " 796 "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, 797 rx_mode.hw_strip_crc ? "enabled" : "disabled", 798 nb_pkt_per_burst); 799 800 if (cur_fwd_eng == &tx_only_engine) 801 printf(" packet len=%u - nb packet segments=%d\n", 802 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 803 804 struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; 805 struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; 806 807 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 808 nb_fwd_lcores, nb_fwd_ports); 809 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 810 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 811 printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 812 rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, 813 rx_conf->rx_thresh.wthresh); 814 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 815 nb_txq, nb_txd, tx_conf->tx_free_thresh); 816 printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", 817 tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, 818 tx_conf->tx_thresh.wthresh); 819 printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", 820 tx_conf->tx_rs_thresh, tx_conf->txq_flags); 821 } 822 823 void 824 port_rss_reta_info(portid_t port_id, 825 struct rte_eth_rss_reta_entry64 *reta_conf, 826 uint16_t nb_entries) 827 { 828 uint16_t i, idx, shift; 829 int ret; 830 831 if (port_id_is_invalid(port_id, ENABLED_WARN)) 832 return; 833 834 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 835 if (ret != 0) { 836 printf("Failed to get RSS RETA info, return code = %d\n", ret); 837 return; 838 } 839 840 for (i = 0; i < nb_entries; i++) { 841 idx = i / RTE_RETA_GROUP_SIZE; 842 shift = i % RTE_RETA_GROUP_SIZE; 843 if (!(reta_conf[idx].mask & (1ULL << shift))) 844 continue; 845 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 846 i, reta_conf[idx].reta[shift]); 847 } 848 } 849 850 /* 851 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 852 * key of the port. 853 */ 854 void 855 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 856 { 857 struct rss_type_info { 858 char str[32]; 859 uint64_t rss_type; 860 }; 861 static const struct rss_type_info rss_type_table[] = { 862 {"ipv4", ETH_RSS_IPV4}, 863 {"ipv4-frag", ETH_RSS_FRAG_IPV4}, 864 {"ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP}, 865 {"ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP}, 866 {"ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP}, 867 {"ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER}, 868 {"ipv6", ETH_RSS_IPV6}, 869 {"ipv6-frag", ETH_RSS_FRAG_IPV6}, 870 {"ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP}, 871 {"ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP}, 872 {"ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP}, 873 {"ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER}, 874 {"l2-payload", ETH_RSS_L2_PAYLOAD}, 875 {"ipv6-ex", ETH_RSS_IPV6_EX}, 876 {"ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX}, 877 {"ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX}, 878 }; 879 880 struct rte_eth_rss_conf rss_conf; 881 uint8_t rss_key[10 * 4]; 882 uint64_t rss_hf; 883 uint8_t i; 884 int diag; 885 886 if (port_id_is_invalid(port_id, ENABLED_WARN)) 887 return; 888 /* Get RSS hash key if asked to display it */ 889 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 890 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 891 if (diag != 0) { 892 switch (diag) { 893 case -ENODEV: 894 printf("port index %d invalid\n", port_id); 895 break; 896 case -ENOTSUP: 897 printf("operation not supported by device\n"); 898 break; 899 default: 900 printf("operation failed - diag=%d\n", diag); 901 break; 902 } 903 return; 904 } 905 rss_hf = rss_conf.rss_hf; 906 if (rss_hf == 0) { 907 printf("RSS disabled\n"); 908 return; 909 } 910 printf("RSS functions:\n "); 911 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 912 if (rss_hf & rss_type_table[i].rss_type) 913 printf("%s ", rss_type_table[i].str); 914 } 915 printf("\n"); 916 if (!show_rss_key) 917 return; 918 printf("RSS key:\n"); 919 for (i = 0; i < sizeof(rss_key); i++) 920 printf("%02X", rss_key[i]); 921 printf("\n"); 922 } 923 924 void 925 port_rss_hash_key_update(portid_t port_id, uint8_t *hash_key) 926 { 927 struct rte_eth_rss_conf rss_conf; 928 int diag; 929 930 rss_conf.rss_key = NULL; 931 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 932 if (diag == 0) { 933 rss_conf.rss_key = hash_key; 934 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 935 } 936 if (diag == 0) 937 return; 938 939 switch (diag) { 940 case -ENODEV: 941 printf("port index %d invalid\n", port_id); 942 break; 943 case -ENOTSUP: 944 printf("operation not supported by device\n"); 945 break; 946 default: 947 printf("operation failed - diag=%d\n", diag); 948 break; 949 } 950 } 951 952 /* 953 * Setup forwarding configuration for each logical core. 954 */ 955 static void 956 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 957 { 958 streamid_t nb_fs_per_lcore; 959 streamid_t nb_fs; 960 streamid_t sm_id; 961 lcoreid_t nb_extra; 962 lcoreid_t nb_fc; 963 lcoreid_t nb_lc; 964 lcoreid_t lc_id; 965 966 nb_fs = cfg->nb_fwd_streams; 967 nb_fc = cfg->nb_fwd_lcores; 968 if (nb_fs <= nb_fc) { 969 nb_fs_per_lcore = 1; 970 nb_extra = 0; 971 } else { 972 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 973 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 974 } 975 976 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 977 sm_id = 0; 978 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 979 fwd_lcores[lc_id]->stream_idx = sm_id; 980 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 981 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 982 } 983 984 /* 985 * Assign extra remaining streams, if any. 986 */ 987 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 988 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 989 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 990 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 991 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 992 } 993 } 994 995 static void 996 simple_fwd_config_setup(void) 997 { 998 portid_t i; 999 portid_t j; 1000 portid_t inc = 2; 1001 1002 if (port_topology == PORT_TOPOLOGY_CHAINED || 1003 port_topology == PORT_TOPOLOGY_LOOP) { 1004 inc = 1; 1005 } else if (nb_fwd_ports % 2) { 1006 printf("\nWarning! Cannot handle an odd number of ports " 1007 "with the current port topology. Configuration " 1008 "must be changed to have an even number of ports, " 1009 "or relaunch application with " 1010 "--port-topology=chained\n\n"); 1011 } 1012 1013 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1014 cur_fwd_config.nb_fwd_streams = 1015 (streamid_t) cur_fwd_config.nb_fwd_ports; 1016 1017 /* reinitialize forwarding streams */ 1018 init_fwd_streams(); 1019 1020 /* 1021 * In the simple forwarding test, the number of forwarding cores 1022 * must be lower or equal to the number of forwarding ports. 1023 */ 1024 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1025 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1026 cur_fwd_config.nb_fwd_lcores = 1027 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1028 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1029 1030 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { 1031 if (port_topology != PORT_TOPOLOGY_LOOP) 1032 j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); 1033 else 1034 j = i; 1035 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1036 fwd_streams[i]->rx_queue = 0; 1037 fwd_streams[i]->tx_port = fwd_ports_ids[j]; 1038 fwd_streams[i]->tx_queue = 0; 1039 fwd_streams[i]->peer_addr = j; 1040 1041 if (port_topology == PORT_TOPOLOGY_PAIRED) { 1042 fwd_streams[j]->rx_port = fwd_ports_ids[j]; 1043 fwd_streams[j]->rx_queue = 0; 1044 fwd_streams[j]->tx_port = fwd_ports_ids[i]; 1045 fwd_streams[j]->tx_queue = 0; 1046 fwd_streams[j]->peer_addr = i; 1047 } 1048 } 1049 } 1050 1051 /** 1052 * For the RSS forwarding test, each core is assigned on every port a transmit 1053 * queue whose index is the index of the core itself. This approach limits the 1054 * maximumm number of processing cores of the RSS test to the maximum number of 1055 * TX queues supported by the devices. 1056 * 1057 * Each core is assigned a single stream, each stream being composed of 1058 * a RX queue to poll on a RX port for input messages, associated with 1059 * a TX queue of a TX port where to send forwarded packets. 1060 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1061 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1062 * following rules: 1063 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1064 * - TxQl = RxQj 1065 */ 1066 static void 1067 rss_fwd_config_setup(void) 1068 { 1069 portid_t rxp; 1070 portid_t txp; 1071 queueid_t rxq; 1072 queueid_t nb_q; 1073 lcoreid_t lc_id; 1074 1075 nb_q = nb_rxq; 1076 if (nb_q > nb_txq) 1077 nb_q = nb_txq; 1078 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1079 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1080 cur_fwd_config.nb_fwd_streams = 1081 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1082 if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores) 1083 cur_fwd_config.nb_fwd_streams = 1084 (streamid_t)cur_fwd_config.nb_fwd_lcores; 1085 else 1086 cur_fwd_config.nb_fwd_lcores = 1087 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1088 1089 /* reinitialize forwarding streams */ 1090 init_fwd_streams(); 1091 1092 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1093 rxp = 0; rxq = 0; 1094 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1095 struct fwd_stream *fs; 1096 1097 fs = fwd_streams[lc_id]; 1098 1099 if ((rxp & 0x1) == 0) 1100 txp = (portid_t) (rxp + 1); 1101 else 1102 txp = (portid_t) (rxp - 1); 1103 /* 1104 * if we are in loopback, simply send stuff out through the 1105 * ingress port 1106 */ 1107 if (port_topology == PORT_TOPOLOGY_LOOP) 1108 txp = rxp; 1109 1110 fs->rx_port = fwd_ports_ids[rxp]; 1111 fs->rx_queue = rxq; 1112 fs->tx_port = fwd_ports_ids[txp]; 1113 fs->tx_queue = rxq; 1114 fs->peer_addr = fs->tx_port; 1115 rxq = (queueid_t) (rxq + 1); 1116 if (rxq < nb_q) 1117 continue; 1118 /* 1119 * rxq == nb_q 1120 * Restart from RX queue 0 on next RX port 1121 */ 1122 rxq = 0; 1123 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1124 rxp = (portid_t) 1125 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1126 else 1127 rxp = (portid_t) (rxp + 1); 1128 } 1129 } 1130 1131 /* 1132 * In DCB and VT on,the mapping of 128 receive queues to 128 transmit queues. 1133 */ 1134 static void 1135 dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq) 1136 { 1137 if(dcb_q_mapping == DCB_4_TCS_Q_MAPPING) { 1138 1139 if (rxq < 32) 1140 /* tc0: 0-31 */ 1141 *txq = rxq; 1142 else if (rxq < 64) { 1143 /* tc1: 64-95 */ 1144 *txq = (uint16_t)(rxq + 32); 1145 } 1146 else { 1147 /* tc2: 96-111;tc3:112-127 */ 1148 *txq = (uint16_t)(rxq/2 + 64); 1149 } 1150 } 1151 else { 1152 if (rxq < 16) 1153 /* tc0 mapping*/ 1154 *txq = rxq; 1155 else if (rxq < 32) { 1156 /* tc1 mapping*/ 1157 *txq = (uint16_t)(rxq + 16); 1158 } 1159 else if (rxq < 64) { 1160 /*tc2,tc3 mapping */ 1161 *txq = (uint16_t)(rxq + 32); 1162 } 1163 else { 1164 /* tc4,tc5,tc6 and tc7 mapping */ 1165 *txq = (uint16_t)(rxq/2 + 64); 1166 } 1167 } 1168 } 1169 1170 /** 1171 * For the DCB forwarding test, each core is assigned on every port multi-transmit 1172 * queue. 1173 * 1174 * Each core is assigned a multi-stream, each stream being composed of 1175 * a RX queue to poll on a RX port for input messages, associated with 1176 * a TX queue of a TX port where to send forwarded packets. 1177 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1178 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1179 * following rules: 1180 * In VT mode, 1181 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1182 * - TxQl = RxQj 1183 * In non-VT mode, 1184 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1185 * There is a mapping of RxQj to TxQl to be required,and the mapping was implemented 1186 * in dcb_rxq_2_txq_mapping function. 1187 */ 1188 static void 1189 dcb_fwd_config_setup(void) 1190 { 1191 portid_t rxp; 1192 portid_t txp; 1193 queueid_t rxq; 1194 queueid_t nb_q; 1195 lcoreid_t lc_id; 1196 uint16_t sm_id; 1197 1198 nb_q = nb_rxq; 1199 1200 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1201 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1202 cur_fwd_config.nb_fwd_streams = 1203 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1204 1205 /* reinitialize forwarding streams */ 1206 init_fwd_streams(); 1207 1208 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1209 rxp = 0; rxq = 0; 1210 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1211 /* a fwd core can run multi-streams */ 1212 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) 1213 { 1214 struct fwd_stream *fs; 1215 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1216 if ((rxp & 0x1) == 0) 1217 txp = (portid_t) (rxp + 1); 1218 else 1219 txp = (portid_t) (rxp - 1); 1220 fs->rx_port = fwd_ports_ids[rxp]; 1221 fs->rx_queue = rxq; 1222 fs->tx_port = fwd_ports_ids[txp]; 1223 if (dcb_q_mapping == DCB_VT_Q_MAPPING) 1224 fs->tx_queue = rxq; 1225 else 1226 dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue); 1227 fs->peer_addr = fs->tx_port; 1228 rxq = (queueid_t) (rxq + 1); 1229 if (rxq < nb_q) 1230 continue; 1231 rxq = 0; 1232 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 1233 rxp = (portid_t) 1234 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 1235 else 1236 rxp = (portid_t) (rxp + 1); 1237 } 1238 } 1239 } 1240 1241 static void 1242 icmp_echo_config_setup(void) 1243 { 1244 portid_t rxp; 1245 queueid_t rxq; 1246 lcoreid_t lc_id; 1247 uint16_t sm_id; 1248 1249 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 1250 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 1251 (nb_txq * nb_fwd_ports); 1252 else 1253 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1254 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1255 cur_fwd_config.nb_fwd_streams = 1256 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 1257 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1258 cur_fwd_config.nb_fwd_lcores = 1259 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1260 if (verbose_level > 0) { 1261 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 1262 __FUNCTION__, 1263 cur_fwd_config.nb_fwd_lcores, 1264 cur_fwd_config.nb_fwd_ports, 1265 cur_fwd_config.nb_fwd_streams); 1266 } 1267 1268 /* reinitialize forwarding streams */ 1269 init_fwd_streams(); 1270 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1271 rxp = 0; rxq = 0; 1272 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 1273 if (verbose_level > 0) 1274 printf(" core=%d: \n", lc_id); 1275 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1276 struct fwd_stream *fs; 1277 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1278 fs->rx_port = fwd_ports_ids[rxp]; 1279 fs->rx_queue = rxq; 1280 fs->tx_port = fs->rx_port; 1281 fs->tx_queue = lc_id; 1282 fs->peer_addr = fs->tx_port; 1283 if (verbose_level > 0) 1284 printf(" stream=%d port=%d rxq=%d txq=%d\n", 1285 sm_id, fs->rx_port, fs->rx_queue, 1286 fs->tx_queue); 1287 rxq = (queueid_t) (rxq + 1); 1288 if (rxq == nb_rxq) { 1289 rxq = 0; 1290 rxp = (portid_t) (rxp + 1); 1291 } 1292 } 1293 } 1294 } 1295 1296 void 1297 fwd_config_setup(void) 1298 { 1299 cur_fwd_config.fwd_eng = cur_fwd_eng; 1300 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 1301 icmp_echo_config_setup(); 1302 return; 1303 } 1304 if ((nb_rxq > 1) && (nb_txq > 1)){ 1305 if (dcb_config) 1306 dcb_fwd_config_setup(); 1307 else 1308 rss_fwd_config_setup(); 1309 } 1310 else 1311 simple_fwd_config_setup(); 1312 } 1313 1314 static void 1315 pkt_fwd_config_display(struct fwd_config *cfg) 1316 { 1317 struct fwd_stream *fs; 1318 lcoreid_t lc_id; 1319 streamid_t sm_id; 1320 1321 printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - " 1322 "NUMA support %s, MP over anonymous pages %s\n", 1323 cfg->fwd_eng->fwd_mode_name, 1324 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 1325 numa_support == 1 ? "enabled" : "disabled", 1326 mp_anon != 0 ? "enabled" : "disabled"); 1327 1328 if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0) 1329 printf("TX retry num: %u, delay between TX retries: %uus\n", 1330 burst_tx_retry_num, burst_tx_delay_time); 1331 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 1332 printf("Logical Core %u (socket %u) forwards packets on " 1333 "%d streams:", 1334 fwd_lcores_cpuids[lc_id], 1335 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 1336 fwd_lcores[lc_id]->stream_nb); 1337 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 1338 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 1339 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 1340 "P=%d/Q=%d (socket %u) ", 1341 fs->rx_port, fs->rx_queue, 1342 ports[fs->rx_port].socket_id, 1343 fs->tx_port, fs->tx_queue, 1344 ports[fs->tx_port].socket_id); 1345 print_ethaddr("peer=", 1346 &peer_eth_addrs[fs->peer_addr]); 1347 } 1348 printf("\n"); 1349 } 1350 printf("\n"); 1351 } 1352 1353 1354 void 1355 fwd_config_display(void) 1356 { 1357 if((dcb_config) && (nb_fwd_lcores == 1)) { 1358 printf("In DCB mode,the nb forwarding cores should be larger than 1\n"); 1359 return; 1360 } 1361 fwd_config_setup(); 1362 pkt_fwd_config_display(&cur_fwd_config); 1363 } 1364 1365 int 1366 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 1367 { 1368 unsigned int i; 1369 unsigned int lcore_cpuid; 1370 int record_now; 1371 1372 record_now = 0; 1373 again: 1374 for (i = 0; i < nb_lc; i++) { 1375 lcore_cpuid = lcorelist[i]; 1376 if (! rte_lcore_is_enabled(lcore_cpuid)) { 1377 printf("lcore %u not enabled\n", lcore_cpuid); 1378 return -1; 1379 } 1380 if (lcore_cpuid == rte_get_master_lcore()) { 1381 printf("lcore %u cannot be masked on for running " 1382 "packet forwarding, which is the master lcore " 1383 "and reserved for command line parsing only\n", 1384 lcore_cpuid); 1385 return -1; 1386 } 1387 if (record_now) 1388 fwd_lcores_cpuids[i] = lcore_cpuid; 1389 } 1390 if (record_now == 0) { 1391 record_now = 1; 1392 goto again; 1393 } 1394 nb_cfg_lcores = (lcoreid_t) nb_lc; 1395 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 1396 printf("previous number of forwarding cores %u - changed to " 1397 "number of configured cores %u\n", 1398 (unsigned int) nb_fwd_lcores, nb_lc); 1399 nb_fwd_lcores = (lcoreid_t) nb_lc; 1400 } 1401 1402 return 0; 1403 } 1404 1405 int 1406 set_fwd_lcores_mask(uint64_t lcoremask) 1407 { 1408 unsigned int lcorelist[64]; 1409 unsigned int nb_lc; 1410 unsigned int i; 1411 1412 if (lcoremask == 0) { 1413 printf("Invalid NULL mask of cores\n"); 1414 return -1; 1415 } 1416 nb_lc = 0; 1417 for (i = 0; i < 64; i++) { 1418 if (! ((uint64_t)(1ULL << i) & lcoremask)) 1419 continue; 1420 lcorelist[nb_lc++] = i; 1421 } 1422 return set_fwd_lcores_list(lcorelist, nb_lc); 1423 } 1424 1425 void 1426 set_fwd_lcores_number(uint16_t nb_lc) 1427 { 1428 if (nb_lc > nb_cfg_lcores) { 1429 printf("nb fwd cores %u > %u (max. number of configured " 1430 "lcores) - ignored\n", 1431 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 1432 return; 1433 } 1434 nb_fwd_lcores = (lcoreid_t) nb_lc; 1435 printf("Number of forwarding cores set to %u\n", 1436 (unsigned int) nb_fwd_lcores); 1437 } 1438 1439 void 1440 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 1441 { 1442 unsigned int i; 1443 portid_t port_id; 1444 int record_now; 1445 1446 record_now = 0; 1447 again: 1448 for (i = 0; i < nb_pt; i++) { 1449 port_id = (portid_t) portlist[i]; 1450 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1451 return; 1452 if (record_now) 1453 fwd_ports_ids[i] = port_id; 1454 } 1455 if (record_now == 0) { 1456 record_now = 1; 1457 goto again; 1458 } 1459 nb_cfg_ports = (portid_t) nb_pt; 1460 if (nb_fwd_ports != (portid_t) nb_pt) { 1461 printf("previous number of forwarding ports %u - changed to " 1462 "number of configured ports %u\n", 1463 (unsigned int) nb_fwd_ports, nb_pt); 1464 nb_fwd_ports = (portid_t) nb_pt; 1465 } 1466 } 1467 1468 void 1469 set_fwd_ports_mask(uint64_t portmask) 1470 { 1471 unsigned int portlist[64]; 1472 unsigned int nb_pt; 1473 unsigned int i; 1474 1475 if (portmask == 0) { 1476 printf("Invalid NULL mask of ports\n"); 1477 return; 1478 } 1479 nb_pt = 0; 1480 for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) { 1481 if (! ((uint64_t)(1ULL << i) & portmask)) 1482 continue; 1483 portlist[nb_pt++] = i; 1484 } 1485 set_fwd_ports_list(portlist, nb_pt); 1486 } 1487 1488 void 1489 set_fwd_ports_number(uint16_t nb_pt) 1490 { 1491 if (nb_pt > nb_cfg_ports) { 1492 printf("nb fwd ports %u > %u (number of configured " 1493 "ports) - ignored\n", 1494 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 1495 return; 1496 } 1497 nb_fwd_ports = (portid_t) nb_pt; 1498 printf("Number of forwarding ports set to %u\n", 1499 (unsigned int) nb_fwd_ports); 1500 } 1501 1502 void 1503 set_nb_pkt_per_burst(uint16_t nb) 1504 { 1505 if (nb > MAX_PKT_BURST) { 1506 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 1507 " ignored\n", 1508 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 1509 return; 1510 } 1511 nb_pkt_per_burst = nb; 1512 printf("Number of packets per burst set to %u\n", 1513 (unsigned int) nb_pkt_per_burst); 1514 } 1515 1516 void 1517 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 1518 { 1519 uint16_t tx_pkt_len; 1520 unsigned i; 1521 1522 if (nb_segs >= (unsigned) nb_txd) { 1523 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 1524 nb_segs, (unsigned int) nb_txd); 1525 return; 1526 } 1527 1528 /* 1529 * Check that each segment length is greater or equal than 1530 * the mbuf data sise. 1531 * Check also that the total packet length is greater or equal than the 1532 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 1533 */ 1534 tx_pkt_len = 0; 1535 for (i = 0; i < nb_segs; i++) { 1536 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 1537 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 1538 i, seg_lengths[i], (unsigned) mbuf_data_size); 1539 return; 1540 } 1541 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 1542 } 1543 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 1544 printf("total packet length=%u < %d - give up\n", 1545 (unsigned) tx_pkt_len, 1546 (int)(sizeof(struct ether_hdr) + 20 + 8)); 1547 return; 1548 } 1549 1550 for (i = 0; i < nb_segs; i++) 1551 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 1552 1553 tx_pkt_length = tx_pkt_len; 1554 tx_pkt_nb_segs = (uint8_t) nb_segs; 1555 } 1556 1557 char* 1558 list_pkt_forwarding_modes(void) 1559 { 1560 static char fwd_modes[128] = ""; 1561 const char *separator = "|"; 1562 struct fwd_engine *fwd_eng; 1563 unsigned i = 0; 1564 1565 if (strlen (fwd_modes) == 0) { 1566 while ((fwd_eng = fwd_engines[i++]) != NULL) { 1567 strcat(fwd_modes, fwd_eng->fwd_mode_name); 1568 strcat(fwd_modes, separator); 1569 } 1570 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 1571 } 1572 1573 return fwd_modes; 1574 } 1575 1576 void 1577 set_pkt_forwarding_mode(const char *fwd_mode_name) 1578 { 1579 struct fwd_engine *fwd_eng; 1580 unsigned i; 1581 1582 i = 0; 1583 while ((fwd_eng = fwd_engines[i]) != NULL) { 1584 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 1585 printf("Set %s packet forwarding mode\n", 1586 fwd_mode_name); 1587 cur_fwd_eng = fwd_eng; 1588 return; 1589 } 1590 i++; 1591 } 1592 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 1593 } 1594 1595 void 1596 set_verbose_level(uint16_t vb_level) 1597 { 1598 printf("Change verbose level from %u to %u\n", 1599 (unsigned int) verbose_level, (unsigned int) vb_level); 1600 verbose_level = vb_level; 1601 } 1602 1603 void 1604 vlan_extend_set(portid_t port_id, int on) 1605 { 1606 int diag; 1607 int vlan_offload; 1608 1609 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1610 return; 1611 1612 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1613 1614 if (on) 1615 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 1616 else 1617 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 1618 1619 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1620 if (diag < 0) 1621 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 1622 "diag=%d\n", port_id, on, diag); 1623 } 1624 1625 void 1626 rx_vlan_strip_set(portid_t port_id, int on) 1627 { 1628 int diag; 1629 int vlan_offload; 1630 1631 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1632 return; 1633 1634 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1635 1636 if (on) 1637 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 1638 else 1639 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 1640 1641 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1642 if (diag < 0) 1643 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 1644 "diag=%d\n", port_id, on, diag); 1645 } 1646 1647 void 1648 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 1649 { 1650 int diag; 1651 1652 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1653 return; 1654 1655 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 1656 if (diag < 0) 1657 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 1658 "diag=%d\n", port_id, queue_id, on, diag); 1659 } 1660 1661 void 1662 rx_vlan_filter_set(portid_t port_id, int on) 1663 { 1664 int diag; 1665 int vlan_offload; 1666 1667 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1668 return; 1669 1670 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1671 1672 if (on) 1673 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 1674 else 1675 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 1676 1677 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1678 if (diag < 0) 1679 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 1680 "diag=%d\n", port_id, on, diag); 1681 } 1682 1683 int 1684 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 1685 { 1686 int diag; 1687 1688 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1689 return 1; 1690 if (vlan_id_is_invalid(vlan_id)) 1691 return 1; 1692 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 1693 if (diag == 0) 1694 return 0; 1695 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 1696 "diag=%d\n", 1697 port_id, vlan_id, on, diag); 1698 return -1; 1699 } 1700 1701 void 1702 rx_vlan_all_filter_set(portid_t port_id, int on) 1703 { 1704 uint16_t vlan_id; 1705 1706 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1707 return; 1708 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 1709 if (rx_vft_set(port_id, vlan_id, on)) 1710 break; 1711 } 1712 } 1713 1714 void 1715 vlan_tpid_set(portid_t port_id, uint16_t tp_id) 1716 { 1717 int diag; 1718 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1719 return; 1720 1721 diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id); 1722 if (diag == 0) 1723 return; 1724 1725 printf("tx_vlan_tpid_set(port_pi=%d, tpid=%d) failed " 1726 "diag=%d\n", 1727 port_id, tp_id, diag); 1728 } 1729 1730 void 1731 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 1732 { 1733 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1734 return; 1735 if (vlan_id_is_invalid(vlan_id)) 1736 return; 1737 tx_vlan_reset(port_id); 1738 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; 1739 ports[port_id].tx_vlan_id = vlan_id; 1740 } 1741 1742 void 1743 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 1744 { 1745 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1746 return; 1747 if (vlan_id_is_invalid(vlan_id)) 1748 return; 1749 if (vlan_id_is_invalid(vlan_id_outer)) 1750 return; 1751 tx_vlan_reset(port_id); 1752 ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; 1753 ports[port_id].tx_vlan_id = vlan_id; 1754 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 1755 } 1756 1757 void 1758 tx_vlan_reset(portid_t port_id) 1759 { 1760 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1761 return; 1762 ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | 1763 TESTPMD_TX_OFFLOAD_INSERT_QINQ); 1764 ports[port_id].tx_vlan_id = 0; 1765 ports[port_id].tx_vlan_id_outer = 0; 1766 } 1767 1768 void 1769 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 1770 { 1771 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1772 return; 1773 1774 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 1775 } 1776 1777 void 1778 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 1779 { 1780 uint16_t i; 1781 uint8_t existing_mapping_found = 0; 1782 1783 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1784 return; 1785 1786 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 1787 return; 1788 1789 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 1790 printf("map_value not in required range 0..%d\n", 1791 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1792 return; 1793 } 1794 1795 if (!is_rx) { /*then tx*/ 1796 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1797 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1798 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 1799 tx_queue_stats_mappings[i].stats_counter_id = map_value; 1800 existing_mapping_found = 1; 1801 break; 1802 } 1803 } 1804 if (!existing_mapping_found) { /* A new additional mapping... */ 1805 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 1806 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 1807 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 1808 nb_tx_queue_stats_mappings++; 1809 } 1810 } 1811 else { /*rx*/ 1812 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1813 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1814 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 1815 rx_queue_stats_mappings[i].stats_counter_id = map_value; 1816 existing_mapping_found = 1; 1817 break; 1818 } 1819 } 1820 if (!existing_mapping_found) { /* A new additional mapping... */ 1821 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 1822 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 1823 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 1824 nb_rx_queue_stats_mappings++; 1825 } 1826 } 1827 } 1828 1829 static inline void 1830 print_fdir_mask(struct rte_eth_fdir_masks *mask) 1831 { 1832 printf("\n vlan_tci: 0x%04x, src_ipv4: 0x%08x, dst_ipv4: 0x%08x," 1833 " src_port: 0x%04x, dst_port: 0x%04x", 1834 mask->vlan_tci_mask, mask->ipv4_mask.src_ip, 1835 mask->ipv4_mask.dst_ip, 1836 mask->src_port_mask, mask->dst_port_mask); 1837 1838 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x," 1839 " dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 1840 mask->ipv6_mask.src_ip[0], mask->ipv6_mask.src_ip[1], 1841 mask->ipv6_mask.src_ip[2], mask->ipv6_mask.src_ip[3], 1842 mask->ipv6_mask.dst_ip[0], mask->ipv6_mask.dst_ip[1], 1843 mask->ipv6_mask.dst_ip[2], mask->ipv6_mask.dst_ip[3]); 1844 printf("\n"); 1845 } 1846 1847 static inline void 1848 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 1849 { 1850 struct rte_eth_flex_payload_cfg *cfg; 1851 uint32_t i, j; 1852 1853 for (i = 0; i < flex_conf->nb_payloads; i++) { 1854 cfg = &flex_conf->flex_set[i]; 1855 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 1856 printf("\n RAW: "); 1857 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 1858 printf("\n L2_PAYLOAD: "); 1859 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 1860 printf("\n L3_PAYLOAD: "); 1861 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 1862 printf("\n L4_PAYLOAD: "); 1863 else 1864 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 1865 for (j = 0; j < num; j++) 1866 printf(" %-5u", cfg->src_offset[j]); 1867 } 1868 printf("\n"); 1869 } 1870 1871 static char * 1872 flowtype_to_str(uint16_t flow_type) 1873 { 1874 struct flow_type_info { 1875 char str[32]; 1876 uint16_t ftype; 1877 }; 1878 1879 uint8_t i; 1880 static struct flow_type_info flowtype_str_table[] = { 1881 {"raw", RTE_ETH_FLOW_RAW}, 1882 {"ipv4", RTE_ETH_FLOW_IPV4}, 1883 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 1884 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 1885 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 1886 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 1887 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 1888 {"ipv6", RTE_ETH_FLOW_IPV6}, 1889 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 1890 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 1891 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 1892 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 1893 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 1894 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 1895 }; 1896 1897 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 1898 if (flowtype_str_table[i].ftype == flow_type) 1899 return flowtype_str_table[i].str; 1900 } 1901 1902 return NULL; 1903 } 1904 1905 static inline void 1906 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 1907 { 1908 struct rte_eth_fdir_flex_mask *mask; 1909 uint32_t i, j; 1910 char *p; 1911 1912 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 1913 mask = &flex_conf->flex_mask[i]; 1914 p = flowtype_to_str(mask->flow_type); 1915 printf("\n %s:\t", p ? p : "unknown"); 1916 for (j = 0; j < num; j++) 1917 printf(" %02x", mask->mask[j]); 1918 } 1919 printf("\n"); 1920 } 1921 1922 static inline void 1923 print_fdir_flow_type(uint32_t flow_types_mask) 1924 { 1925 int i; 1926 char *p; 1927 1928 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 1929 if (!(flow_types_mask & (1 << i))) 1930 continue; 1931 p = flowtype_to_str(i); 1932 if (p) 1933 printf(" %s", p); 1934 else 1935 printf(" unknown"); 1936 } 1937 printf("\n"); 1938 } 1939 1940 void 1941 fdir_get_infos(portid_t port_id) 1942 { 1943 struct rte_eth_fdir_stats fdir_stat; 1944 struct rte_eth_fdir_info fdir_info; 1945 int ret; 1946 1947 static const char *fdir_stats_border = "########################"; 1948 1949 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1950 return; 1951 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 1952 if (ret < 0) { 1953 printf("\n FDIR is not supported on port %-2d\n", 1954 port_id); 1955 return; 1956 } 1957 1958 memset(&fdir_info, 0, sizeof(fdir_info)); 1959 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 1960 RTE_ETH_FILTER_INFO, &fdir_info); 1961 memset(&fdir_stat, 0, sizeof(fdir_stat)); 1962 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 1963 RTE_ETH_FILTER_STATS, &fdir_stat); 1964 printf("\n %s FDIR infos for port %-2d %s\n", 1965 fdir_stats_border, port_id, fdir_stats_border); 1966 printf(" MODE: "); 1967 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 1968 printf(" PERFECT\n"); 1969 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 1970 printf(" SIGNATURE\n"); 1971 else 1972 printf(" DISABLE\n"); 1973 printf(" SUPPORTED FLOW TYPE: "); 1974 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 1975 printf(" FLEX PAYLOAD INFO:\n"); 1976 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 1977 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 1978 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 1979 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 1980 fdir_info.flex_payload_unit, 1981 fdir_info.max_flex_payload_segment_num, 1982 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 1983 printf(" MASK: "); 1984 print_fdir_mask(&fdir_info.mask); 1985 if (fdir_info.flex_conf.nb_payloads > 0) { 1986 printf(" FLEX PAYLOAD SRC OFFSET:"); 1987 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 1988 } 1989 if (fdir_info.flex_conf.nb_flexmasks > 0) { 1990 printf(" FLEX MASK CFG:"); 1991 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 1992 } 1993 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 1994 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 1995 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 1996 fdir_info.guarant_spc, fdir_info.best_spc); 1997 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 1998 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 1999 " add: %-10"PRIu64" remove: %"PRIu64"\n" 2000 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 2001 fdir_stat.collision, fdir_stat.free, 2002 fdir_stat.maxhash, fdir_stat.maxlen, 2003 fdir_stat.add, fdir_stat.remove, 2004 fdir_stat.f_add, fdir_stat.f_remove); 2005 printf(" %s############################%s\n", 2006 fdir_stats_border, fdir_stats_border); 2007 } 2008 2009 void 2010 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 2011 { 2012 struct rte_port *port; 2013 struct rte_eth_fdir_flex_conf *flex_conf; 2014 int i, idx = 0; 2015 2016 port = &ports[port_id]; 2017 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2018 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 2019 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 2020 idx = i; 2021 break; 2022 } 2023 } 2024 if (i >= RTE_ETH_FLOW_MAX) { 2025 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 2026 idx = flex_conf->nb_flexmasks; 2027 flex_conf->nb_flexmasks++; 2028 } else { 2029 printf("The flex mask table is full. Can not set flex" 2030 " mask for flow_type(%u).", cfg->flow_type); 2031 return; 2032 } 2033 } 2034 (void)rte_memcpy(&flex_conf->flex_mask[idx], 2035 cfg, 2036 sizeof(struct rte_eth_fdir_flex_mask)); 2037 } 2038 2039 void 2040 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 2041 { 2042 struct rte_port *port; 2043 struct rte_eth_fdir_flex_conf *flex_conf; 2044 int i, idx = 0; 2045 2046 port = &ports[port_id]; 2047 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 2048 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 2049 if (cfg->type == flex_conf->flex_set[i].type) { 2050 idx = i; 2051 break; 2052 } 2053 } 2054 if (i >= RTE_ETH_PAYLOAD_MAX) { 2055 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 2056 idx = flex_conf->nb_payloads; 2057 flex_conf->nb_payloads++; 2058 } else { 2059 printf("The flex payload table is full. Can not set" 2060 " flex payload for type(%u).", cfg->type); 2061 return; 2062 } 2063 } 2064 (void)rte_memcpy(&flex_conf->flex_set[idx], 2065 cfg, 2066 sizeof(struct rte_eth_flex_payload_cfg)); 2067 2068 } 2069 2070 void 2071 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 2072 { 2073 int diag; 2074 2075 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2076 return; 2077 if (is_rx) 2078 diag = rte_eth_dev_set_vf_rx(port_id,vf,on); 2079 else 2080 diag = rte_eth_dev_set_vf_tx(port_id,vf,on); 2081 if (diag == 0) 2082 return; 2083 if(is_rx) 2084 printf("rte_eth_dev_set_vf_rx for port_id=%d failed " 2085 "diag=%d\n", port_id, diag); 2086 else 2087 printf("rte_eth_dev_set_vf_tx for port_id=%d failed " 2088 "diag=%d\n", port_id, diag); 2089 2090 } 2091 2092 void 2093 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on) 2094 { 2095 int diag; 2096 2097 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2098 return; 2099 if (vlan_id_is_invalid(vlan_id)) 2100 return; 2101 diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on); 2102 if (diag == 0) 2103 return; 2104 printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed " 2105 "diag=%d\n", port_id, diag); 2106 } 2107 2108 int 2109 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 2110 { 2111 int diag; 2112 struct rte_eth_link link; 2113 2114 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2115 return 1; 2116 rte_eth_link_get_nowait(port_id, &link); 2117 if (rate > link.link_speed) { 2118 printf("Invalid rate value:%u bigger than link speed: %u\n", 2119 rate, link.link_speed); 2120 return 1; 2121 } 2122 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 2123 if (diag == 0) 2124 return diag; 2125 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 2126 port_id, diag); 2127 return diag; 2128 } 2129 2130 int 2131 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 2132 { 2133 int diag; 2134 struct rte_eth_link link; 2135 2136 if (q_msk == 0) 2137 return 0; 2138 2139 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2140 return 1; 2141 rte_eth_link_get_nowait(port_id, &link); 2142 if (rate > link.link_speed) { 2143 printf("Invalid rate value:%u bigger than link speed: %u\n", 2144 rate, link.link_speed); 2145 return 1; 2146 } 2147 diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk); 2148 if (diag == 0) 2149 return diag; 2150 printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n", 2151 port_id, diag); 2152 return diag; 2153 } 2154 2155 /* 2156 * Functions to manage the set of filtered Multicast MAC addresses. 2157 * 2158 * A pool of filtered multicast MAC addresses is associated with each port. 2159 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 2160 * The address of the pool and the number of valid multicast MAC addresses 2161 * recorded in the pool are stored in the fields "mc_addr_pool" and 2162 * "mc_addr_nb" of the "rte_port" data structure. 2163 * 2164 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 2165 * to be supplied a contiguous array of multicast MAC addresses. 2166 * To comply with this constraint, the set of multicast addresses recorded 2167 * into the pool are systematically compacted at the beginning of the pool. 2168 * Hence, when a multicast address is removed from the pool, all following 2169 * addresses, if any, are copied back to keep the set contiguous. 2170 */ 2171 #define MCAST_POOL_INC 32 2172 2173 static int 2174 mcast_addr_pool_extend(struct rte_port *port) 2175 { 2176 struct ether_addr *mc_pool; 2177 size_t mc_pool_size; 2178 2179 /* 2180 * If a free entry is available at the end of the pool, just 2181 * increment the number of recorded multicast addresses. 2182 */ 2183 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 2184 port->mc_addr_nb++; 2185 return 0; 2186 } 2187 2188 /* 2189 * [re]allocate a pool with MCAST_POOL_INC more entries. 2190 * The previous test guarantees that port->mc_addr_nb is a multiple 2191 * of MCAST_POOL_INC. 2192 */ 2193 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 2194 MCAST_POOL_INC); 2195 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 2196 mc_pool_size); 2197 if (mc_pool == NULL) { 2198 printf("allocation of pool of %u multicast addresses failed\n", 2199 port->mc_addr_nb + MCAST_POOL_INC); 2200 return -ENOMEM; 2201 } 2202 2203 port->mc_addr_pool = mc_pool; 2204 port->mc_addr_nb++; 2205 return 0; 2206 2207 } 2208 2209 static void 2210 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 2211 { 2212 port->mc_addr_nb--; 2213 if (addr_idx == port->mc_addr_nb) { 2214 /* No need to recompact the set of multicast addressses. */ 2215 if (port->mc_addr_nb == 0) { 2216 /* free the pool of multicast addresses. */ 2217 free(port->mc_addr_pool); 2218 port->mc_addr_pool = NULL; 2219 } 2220 return; 2221 } 2222 memmove(&port->mc_addr_pool[addr_idx], 2223 &port->mc_addr_pool[addr_idx + 1], 2224 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 2225 } 2226 2227 static void 2228 eth_port_multicast_addr_list_set(uint8_t port_id) 2229 { 2230 struct rte_port *port; 2231 int diag; 2232 2233 port = &ports[port_id]; 2234 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 2235 port->mc_addr_nb); 2236 if (diag == 0) 2237 return; 2238 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 2239 port->mc_addr_nb, port_id, -diag); 2240 } 2241 2242 void 2243 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr) 2244 { 2245 struct rte_port *port; 2246 uint32_t i; 2247 2248 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2249 return; 2250 2251 port = &ports[port_id]; 2252 2253 /* 2254 * Check that the added multicast MAC address is not already recorded 2255 * in the pool of multicast addresses. 2256 */ 2257 for (i = 0; i < port->mc_addr_nb; i++) { 2258 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 2259 printf("multicast address already filtered by port\n"); 2260 return; 2261 } 2262 } 2263 2264 if (mcast_addr_pool_extend(port) != 0) 2265 return; 2266 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 2267 eth_port_multicast_addr_list_set(port_id); 2268 } 2269 2270 void 2271 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr) 2272 { 2273 struct rte_port *port; 2274 uint32_t i; 2275 2276 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2277 return; 2278 2279 port = &ports[port_id]; 2280 2281 /* 2282 * Search the pool of multicast MAC addresses for the removed address. 2283 */ 2284 for (i = 0; i < port->mc_addr_nb; i++) { 2285 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 2286 break; 2287 } 2288 if (i == port->mc_addr_nb) { 2289 printf("multicast address not filtered by port %d\n", port_id); 2290 return; 2291 } 2292 2293 mcast_addr_pool_remove(port, i); 2294 eth_port_multicast_addr_list_set(port_id); 2295 } 2296