/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*   BSD LICENSE
 *
 *   Copyright 2013-2014 6WIND S.A.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "testpmd.h"

static char *flowtype_to_str(uint16_t flow_type);

static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
	portid_t pid;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64
		       " RX-bytes: %-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf("  RX-badcrc:  %-10"PRIu64" RX-badlen: %-10"PRIu64
		       " RX-errors: %-"PRIu64"\n",
		       stats.ibadcrc, stats.ibadlen, stats.ierrors);
		printf("  RX-nombuf:  %-10"PRIu64"\n", stats.rx_nombuf);
		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64
		       " TX-bytes: %-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	} else {
		printf("  RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf("  RX-badcrc:  %10"PRIu64" RX-badlen: %10"PRIu64
		       " RX-errors: %10"PRIu64"\n",
		       stats.ibadcrc, stats.ibadlen, stats.ierrors);
		printf("  RX-nombuf:  %10"PRIu64"\n", stats.rx_nombuf);
		printf("  TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	/* Display flow director statistics, if configured. */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss: %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n",
		       stats.fdirmiss, stats.fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i],
			       stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats.tx_pause_xon | stats.rx_pause_xon |
	     stats.tx_pause_xoff | stats.rx_pause_xoff) > 0) {
		printf("  RX-XOFF: %-10"PRIu64" RX-XON: %-10"PRIu64"\n",
		       stats.rx_pause_xoff, stats.rx_pause_xon);
		printf("  TX-XOFF: %-10"PRIu64" TX-XON: %-10"PRIu64"\n",
		       stats.tx_pause_xoff, stats.tx_pause_xon);
	}
	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstats *xstats;
	int len, ret, i;

	printf("###### NIC extended statistics for port %-2d\n", port_id);

	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len < 0) {
		printf("Cannot get xstats count\n");
		return;
	}
	xstats = malloc(sizeof(xstats[0]) * len);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		return;
	}
	ret = rte_eth_xstats_get(port_id, xstats, len);
	if (ret < 0 || ret > len) {
		printf("Cannot get xstats\n");
		free(xstats);
		return;
	}
	for (i = 0; i < ret; i++)
		printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value);
	free(xstats);
}
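/*
 * Usage sketch: rte_eth_xstats_get() is a count-then-fetch API, as used
 * by nic_xstats_display() above. A NULL array queries the number of
 * entries, which sizes the buffer for the second call:
 *
 *	len = rte_eth_xstats_get(port_id, NULL, 0);     (query count)
 *	xstats = malloc(sizeof(xstats[0]) * len);
 *	ret = rte_eth_xstats_get(port_id, xstats, len); (fetch values)
 *
 * The second call may still fail, or return fewer entries than
 * requested, hence the (ret < 0 || ret > len) check and the loop bound
 * of ret rather than len.
 */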
void
nic_xstats_clear(portid_t port_id)
{
	rte_eth_xstats_reset(port_id);
}

void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;
	portid_t pid;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf("  %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	printf("\nConnected to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload:\n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on\n");
		else
			printf("  strip off\n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on\n");
		else
			printf("  filter off\n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on\n");
		else
			printf("  qinq(extend) off\n");
	}

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			printf("  %s\n", (p ? p : "unknown"));
		}
	}
}
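/*
 * Example: rte_eth_dev_get_vlan_offload() returns a bitmask combining
 * ETH_VLAN_STRIP_OFFLOAD, ETH_VLAN_FILTER_OFFLOAD and
 * ETH_VLAN_EXTEND_OFFLOAD. A value with strip and filter set but extend
 * clear is reported by port_infos_display() above as:
 *
 *	VLAN offload:
 *	  strip on
 *	  filter on
 *	  qinq(extend) off
 */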
int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	if (ports[port_id].enabled)
		return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}
	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}
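/*
 * Example: port_reg_bit_field_display() isolates bits [l_bit, h_bit] by
 * shifting right and masking. For a register value of 0x000000AB and
 * bits [4, 7]:
 *
 *	reg_v >>= 4;            -> 0x0000000A
 *	reg_v &= (1 << 4) - 1;  -> 0x0000000A (mask 0xF)
 *
 * so the function prints "bits[4, 7]=0xA (10)".
 */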
void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}
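/*
 * Example: port_reg_bit_field_set() above uses a clear-then-set
 * sequence. Writing value 0x5 into bits [8, 11] of a register that
 * currently reads 0xFFFFFFFF:
 *
 *	max_v = (1 << 4) - 1;   -> 0xF
 *	reg_v &= ~(0xF << 8);   -> 0xFFFFF0FF (clear the field)
 *	reg_v |= (0x5 << 8);    -> 0xFFFFF5FF (set the new value)
 */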
/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}
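/*
 * Note: the memzone name built above follows the
 * "<driver>_<ring>_<port>_<queue>" convention used by PMDs when they
 * reserve descriptor rings. For instance, assuming the driver name is
 * "rte_ixgbe_pmd", RX queue 0 of port 0 would be looked up as
 * "rte_ixgbe_pmd_rx_ring_0_0". If a driver reserves its rings under a
 * different scheme, the lookup simply fails with the message above.
 */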
union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   uint8_t port_id,
#else
			   __rte_unused uint8_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;

		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}

void
rxtx_config_display(void)
{
	struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf;
	struct rte_eth_txconf *tx_conf = &ports[0].tx_conf;

	printf("  %s packet forwarding - CRC stripping %s - "
	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
		       (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);
	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
	       nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh,
	       rx_conf->rx_thresh.wthresh);
	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
	       nb_txq, nb_txd, tx_conf->tx_free_thresh);
	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh,
	       tx_conf->tx_thresh.wthresh);
	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
	       tx_conf->tx_rs_thresh, tx_conf->txq_flags);
}

void
port_rss_reta_info(portid_t port_id,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t nb_entries)
{
	uint16_t i, idx, shift;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	for (i = 0; i < nb_entries; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
		       i, reta_conf[idx].reta[shift]);
	}
}
/*
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
 * key of the port.
 */
void
port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
{
	struct rss_type_info {
		char str[32];
		uint64_t rss_type;
	};
	static const struct rss_type_info rss_type_table[] = {
		{"ipv4", ETH_RSS_IPV4},
		{"ipv4-frag", ETH_RSS_FRAG_IPV4},
		{"ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP},
		{"ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP},
		{"ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER},
		{"ipv6", ETH_RSS_IPV6},
		{"ipv6-frag", ETH_RSS_FRAG_IPV6},
		{"ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP},
		{"ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP},
		{"ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER},
		{"l2-payload", ETH_RSS_L2_PAYLOAD},
		{"ipv6-ex", ETH_RSS_IPV6_EX},
		{"ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX},
		{"ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX},
	};

	struct rte_eth_rss_conf rss_conf;
	uint8_t rss_key[10 * 4];
	uint64_t rss_hf;
	uint8_t i;
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		switch (diag) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		default:
			printf("operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	printf("RSS functions:\n ");
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < sizeof(rss_key); i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}

void
port_rss_hash_key_update(portid_t port_id, uint8_t *hash_key)
{
	struct rte_eth_rss_conf rss_conf;
	int diag;

	rss_conf.rss_key = NULL;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag == 0) {
		rss_conf.rss_key = hash_key;
		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
	if (diag == 0)
		return;

	switch (diag) {
	case -ENODEV:
		printf("port index %d invalid\n", port_id);
		break;
	case -ENOTSUP:
		printf("operation not supported by device\n");
		break;
	default:
		printf("operation failed - diag=%d\n", diag);
		break;
	}
}

/*
 * Setup forwarding configuration for each logical core.
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t  nb_extra;
	lcoreid_t  nb_fc;
	lcoreid_t  nb_lc;
	lcoreid_t  lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}
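/*
 * Example: with nb_fwd_streams = 10 and nb_fwd_lcores = 4,
 * setup_fwd_config_of_each_lcore() computes nb_fs_per_lcore = 2 and
 * nb_extra = 2, so the first nb_lc = 2 lcores get 2 streams each and
 * the 2 remaining lcores get 3 each:
 *
 *	lcore 0: stream_idx = 0, stream_nb = 2
 *	lcore 1: stream_idx = 2, stream_nb = 2
 *	lcore 2: stream_idx = 4, stream_nb = 3
 *	lcore 3: stream_idx = 7, stream_nb = 3
 */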
static void
simple_fwd_config_setup(void)
{
	portid_t i;
	portid_t j;
	portid_t inc = 2;

	if (port_topology == PORT_TOPOLOGY_CHAINED ||
	    port_topology == PORT_TOPOLOGY_LOOP) {
		inc = 1;
	} else if (nb_fwd_ports % 2) {
		printf("\nWarning! Cannot handle an odd number of ports "
		       "with the current port topology. Configuration "
		       "must be changed to have an even number of ports, "
		       "or relaunch application with "
		       "--port-topology=chained\n\n");
	}

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
		if (port_topology != PORT_TOPOLOGY_LOOP)
			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
		else
			j = i;
		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue  = 0;
		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
		fwd_streams[i]->tx_queue  = 0;
		fwd_streams[i]->peer_addr = j;

		if (port_topology == PORT_TOPOLOGY_PAIRED) {
			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
			fwd_streams[j]->rx_queue  = 0;
			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
			fwd_streams[j]->tx_queue  = 0;
			fwd_streams[j]->peer_addr = i;
		}
	}
}

/**
 * For the RSS forwarding test, each core is assigned on every port a transmit
 * queue whose index is the index of the core itself. This approach limits the
 * maximum number of processing cores of the RSS test to the maximum number of
 * TX queues supported by the devices.
 *
 * Each core is assigned a single stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets.
 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
 * following rules:
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl = RxQj
 */
static void
rss_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	lcoreid_t  lc_id;

	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_streams =
			(streamid_t)cur_fwd_config.nb_fwd_lcores;
	else
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[lc_id];

		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		/*
		 * if we are in loopback, simply send stuff out through the
		 * ingress port
		 */
		if (port_topology == PORT_TOPOLOGY_LOOP)
			txp = rxp;

		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		rxq = (queueid_t) (rxq + 1);
		if (rxq < nb_q)
			continue;
		/*
		 * rxq == nb_q
		 * Restart from RX queue 0 on next RX port
		 */
		rxq = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp = (portid_t) (rxp + 1);
	}
}
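/*
 * Example: with 2 forwarding ports and nb_q = 2, rss_fwd_config_setup()
 * builds 4 streams following the TxPk/TxQl rules documented above:
 *
 *	(RxP=0, RxQ=0) -> (TxP=1, TxQ=0)
 *	(RxP=0, RxQ=1) -> (TxP=1, TxQ=1)
 *	(RxP=1, RxQ=0) -> (TxP=0, TxQ=0)
 *	(RxP=1, RxQ=1) -> (TxP=0, TxQ=1)
 */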
/*
 * In DCB non-VT mode, the mapping of the 128 receive queues to the
 * 128 transmit queues.
 */
static void
dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq)
{
	if (dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {
		if (rxq < 32)
			/* tc0: txq 0-31 */
			*txq = rxq;
		else if (rxq < 64) {
			/* tc1: txq 64-95 */
			*txq = (uint16_t)(rxq + 32);
		} else {
			/* tc2: txq 96-111; tc3: txq 112-127 */
			*txq = (uint16_t)(rxq / 2 + 64);
		}
	} else {
		if (rxq < 16)
			/* tc0 mapping */
			*txq = rxq;
		else if (rxq < 32) {
			/* tc1 mapping */
			*txq = (uint16_t)(rxq + 16);
		} else if (rxq < 64) {
			/* tc2, tc3 mapping */
			*txq = (uint16_t)(rxq + 32);
		} else {
			/* tc4, tc5, tc6 and tc7 mapping */
			*txq = (uint16_t)(rxq / 2 + 64);
		}
	}
}

/**
 * For the DCB forwarding test, each core is assigned, on every port, several
 * transmit queues.
 *
 * Each core is assigned several streams, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets.
 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
 * following rules:
 * In VT mode,
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl = RxQj
 * In non-VT mode,
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl is derived from RxQj by the mapping implemented in the
 *      dcb_rxq_2_txq_mapping() function above.
 */
static void
dcb_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	lcoreid_t  lc_id;
	uint16_t   sm_id;

	nb_q = nb_rxq;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		/* a fwd core can run multiple streams */
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;

			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			if ((rxp & 0x1) == 0)
				txp = (portid_t) (rxp + 1);
			else
				txp = (portid_t) (rxp - 1);
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fwd_ports_ids[txp];
			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
				fs->tx_queue = rxq;
			else
				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
			fs->peer_addr = fs->tx_port;
			rxq = (queueid_t) (rxq + 1);
			if (rxq < nb_q)
				continue;
			rxq = 0;
			if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
				rxp = (portid_t)
					(rxp + ((nb_ports >> 1) / nb_fwd_ports));
			else
				rxp = (portid_t) (rxp + 1);
		}
	}
}
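/*
 * Examples for dcb_rxq_2_txq_mapping() above: with DCB_4_TCS_Q_MAPPING,
 * rxq 40 falls in the second range and maps to txq 40 + 32 = 72, while
 * rxq 100 maps to 100/2 + 64 = 114. In the 8-TC case, rxq 20 maps to
 * 20 + 16 = 36 and rxq 80 to 80/2 + 64 = 104.
 */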
static void
icmp_echo_config_setup(void)
{
	portid_t  rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t  sm_id;

	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __FUNCTION__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf("  core=%d:\n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;

			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fs->rx_port;
			fs->tx_queue = lc_id;
			fs->peer_addr = fs->tx_port;
			if (verbose_level > 0)
				printf("  stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}

void
fwd_config_setup(void)
{
	cur_fwd_config.fwd_eng = cur_fwd_eng;
	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
		icmp_echo_config_setup();
		return;
	}
	if ((nb_rxq > 1) && (nb_txq > 1)) {
		if (dcb_config)
			dcb_fwd_config_setup();
		else
			rss_fwd_config_setup();
	} else
		simple_fwd_config_setup();
}

static void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t  lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
	       "NUMA support %s, MP over anonymous pages %s\n",
	       cfg->fwd_eng->fwd_mode_name,
	       cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
	       numa_support == 1 ? "enabled" : "disabled",
	       mp_anon != 0 ? "enabled" : "disabled");

	if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0)
		printf("TX retry num: %u, delay between TX retries: %uus\n",
		       burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}

void
fwd_config_display(void)
{
	if ((dcb_config) && (nb_fwd_lcores == 1)) {
		printf("In DCB mode, the number of forwarding cores must be larger than 1\n");
		return;
	}
	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
}

int
set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
{
	unsigned int i;
	unsigned int lcore_cpuid;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_lc; i++) {
		lcore_cpuid = lcorelist[i];
		if (!rte_lcore_is_enabled(lcore_cpuid)) {
			printf("lcore %u not enabled\n", lcore_cpuid);
			return -1;
		}
		if (lcore_cpuid == rte_get_master_lcore()) {
			printf("lcore %u cannot be used for packet "
			       "forwarding: it is the master lcore, "
			       "reserved for command line parsing only\n",
			       lcore_cpuid);
			return -1;
		}
		if (record_now)
			fwd_lcores_cpuids[i] = lcore_cpuid;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_lcores = (lcoreid_t) nb_lc;
	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
		printf("previous number of forwarding cores %u - changed to "
		       "number of configured cores %u\n",
		       (unsigned int) nb_fwd_lcores, nb_lc);
		nb_fwd_lcores = (lcoreid_t) nb_lc;
	}

	return 0;
}
int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int lcorelist[64];
	unsigned int nb_lc;
	unsigned int i;

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");
		return -1;
	}
	nb_lc = 0;
	for (i = 0; i < 64; i++) {
		if (!((uint64_t)(1ULL << i) & lcoremask))
			continue;
		lcorelist[nb_lc++] = i;
	}
	return set_fwd_lcores_list(lcorelist, nb_lc);
}
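/*
 * Example: set_fwd_lcores_mask(0x26) selects bits 1, 2 and 5 and is
 * therefore equivalent to set_fwd_lcores_list({1, 2, 5}, 3). The list
 * variant validates every lcore in a first pass before recording any of
 * them (the record_now/goto again idiom above), so an invalid entry
 * leaves the previous configuration untouched.
 */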
void
set_fwd_lcores_number(uint16_t nb_lc)
{
	if (nb_lc > nb_cfg_lcores) {
		printf("nb fwd cores %u > %u (max. number of configured "
		       "lcores) - ignored\n",
		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
		return;
	}
	nb_fwd_lcores = (lcoreid_t) nb_lc;
	printf("Number of forwarding cores set to %u\n",
	       (unsigned int) nb_fwd_lcores);
}

void
set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
{
	unsigned int i;
	portid_t port_id;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_pt; i++) {
		port_id = (portid_t) portlist[i];
		if (port_id_is_invalid(port_id, ENABLED_WARN))
			return;
		if (record_now)
			fwd_ports_ids[i] = port_id;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_ports = (portid_t) nb_pt;
	if (nb_fwd_ports != (portid_t) nb_pt) {
		printf("previous number of forwarding ports %u - changed to "
		       "number of configured ports %u\n",
		       (unsigned int) nb_fwd_ports, nb_pt);
		nb_fwd_ports = (portid_t) nb_pt;
	}
}

void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int portlist[64];
	unsigned int nb_pt;
	unsigned int i;

	if (portmask == 0) {
		printf("Invalid NULL mask of ports\n");
		return;
	}
	nb_pt = 0;
	for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) {
		if (!((uint64_t)(1ULL << i) & portmask))
			continue;
		portlist[nb_pt++] = i;
	}
	set_fwd_ports_list(portlist, nb_pt);
}

void
set_fwd_ports_number(uint16_t nb_pt)
{
	if (nb_pt > nb_cfg_ports) {
		printf("nb fwd ports %u > %u (number of configured "
		       "ports) - ignored\n",
		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
		return;
	}
	nb_fwd_ports = (portid_t) nb_pt;
	printf("Number of forwarding ports set to %u\n",
	       (unsigned int) nb_fwd_ports);
}

void
set_nb_pkt_per_burst(uint16_t nb)
{
	if (nb > MAX_PKT_BURST) {
		printf("nb pkt per burst: %u > %u (maximum packet per burst)"
		       " - ignored\n",
		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
		return;
	}
	nb_pkt_per_burst = nb;
	printf("Number of packets per burst set to %u\n",
	       (unsigned int) nb_pkt_per_burst);
}

void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length is lower or equal than the mbuf
	 * data size.
	 * Check also that the total packet length is greater or equal than the
	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
		       (unsigned) tx_pkt_len,
		       (int)(sizeof(struct ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length  = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}
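/*
 * Example: the minimum total length accepted by set_tx_pkt_segments()
 * is sizeof(struct ether_hdr) + 20 + 8 = 14 + 20 + 8 = 42 bytes, i.e.
 * an empty UDP/IPv4 packet. Segment lengths {64, 64} are accepted
 * (tx_pkt_length = 128), while {20, 20} are rejected (40 < 42).
 */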
char *
list_pkt_forwarding_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			strcat(fwd_modes, fwd_eng->fwd_mode_name);
			strcat(fwd_modes, separator);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
	struct fwd_engine *fwd_eng;
	unsigned i;

	i = 0;
	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
			printf("Set %s packet forwarding mode\n",
			       fwd_mode_name);
			cur_fwd_eng = fwd_eng;
			return;
		}
		i++;
	}
	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
}

void
set_verbose_level(uint16_t vb_level)
{
	printf("Change verbose level from %u to %u\n",
	       (unsigned int) verbose_level, (unsigned int) vb_level);
	verbose_level = vb_level;
}

void
vlan_extend_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
	else
		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
	else
		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
	else
		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
}

int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id);
	if (diag == 0)
		return;

	printf("tx_vlan_tpid_set(port_id=%d, tpid=%d) failed "
	       "diag=%d\n",
	       port_id, tp_id, diag);
}

void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;
	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
	ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_vlan_reset(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_INSERT_VLAN;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}
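/*
 * Usage sketch (backing the "set stat_qmap" testpmd command): mapping
 * RX queue 2 of port 0 onto statistics register 5,
 *
 *	set_qmap(0, 1, 2, 5);   (port 0, is_rx = 1, queue 2, counter 5)
 *
 * makes nic_stats_display() report that queue's counters under
 * "Stats reg  5" once the mapping registers are programmed. map_value
 * must stay below RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */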
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) :
	    (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* TX */
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
				tx_queue_stats_mappings[i].stats_counter_id =
					map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	} else { /* RX */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id =
					map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}

static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n    vlan_tci: 0x%04x, src_ipv4: 0x%08x, dst_ipv4: 0x%08x,"
	       " src_port: 0x%04x, dst_port: 0x%04x",
	       mask->vlan_tci_mask, mask->ipv4_mask.src_ip,
	       mask->ipv4_mask.dst_ip,
	       mask->src_port_mask, mask->dst_port_mask);

	printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x,"
	       " dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
	       mask->ipv6_mask.src_ip[0], mask->ipv6_mask.src_ip[1],
	       mask->ipv6_mask.src_ip[2], mask->ipv6_mask.src_ip[3],
	       mask->ipv6_mask.dst_ip[0], mask->ipv6_mask.dst_ip[1],
	       mask->ipv6_mask.dst_ip[2], mask->ipv6_mask.dst_ip[3]);
	printf("\n");
}

static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n    RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n    L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n    L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n    L4_PAYLOAD: ");
		else
			printf("\n    UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf("  %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}

static char *
flowtype_to_str(uint16_t flow_type)
{
	struct flow_type_info {
		char str[32];
		uint16_t ftype;
	};

	uint8_t i;
	static struct flow_type_info flowtype_str_table[] = {
		{"raw", RTE_ETH_FLOW_RAW},
		{"ipv4", RTE_ETH_FLOW_IPV4},
		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
		{"ipv6", RTE_ETH_FLOW_IPV6},
		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
	};

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}
static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n    %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}

static inline void
print_fdir_flow_type(uint32_t flow_types_mask)
{
	int i;
	char *p;

	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
		if (!(flow_types_mask & (1 << i)))
			continue;
		p = flowtype_to_str(i);
		if (p)
			printf(" %s", p);
		else
			printf(" unknown");
	}
	printf("\n");
}

void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;
	int ret;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
	if (ret < 0) {
		printf("\n FDIR is not supported on port %-2d\n",
		       port_id);
		return;
	}

	memset(&fdir_info, 0, sizeof(fdir_info));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				RTE_ETH_FILTER_INFO, &fdir_info);
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				RTE_ETH_FILTER_STATS, &fdir_stat);
	printf("\n  %s FDIR infos for port %-2d %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf("  MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf("  PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf("  SIGNATURE\n");
	else
		printf("  DISABLE\n");
	printf("  SUPPORTED FLOW TYPE: ");
	print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	printf("  FLEX PAYLOAD INFO:\n");
	printf("  max_len:      %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
	       "  payload_unit: %-10"PRIu32" payload_seg:   %-10"PRIu32"\n"
	       "  bitmask_unit: %-10"PRIu32" bitmask_num:   %-10"PRIu32"\n",
	       fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
	       fdir_info.flex_payload_unit,
	       fdir_info.max_flex_payload_segment_num,
	       fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf("  MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf("  FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf,
					fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf("  FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf,
				     fdir_info.max_flexpayload);
	}
	printf("  guarant_count: %-10"PRIu32" best_count:    %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf("  guarant_space: %-10"PRIu32" best_space:    %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf("  collision:     %-10"PRIu32" free:          %"PRIu32"\n"
	       "  maxhash:       %-10"PRIu32" maxlen:        %"PRIu32"\n"
	       "  add:           %-10"PRIu64" remove:        %"PRIu64"\n"
	       "  f_add:         %-10"PRIu64" f_remove:      %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf("  %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}
void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Can not set flex"
			       " mask for flow_type(%u).", cfg->flow_type);
			return;
		}
	}
	(void)rte_memcpy(&flex_conf->flex_mask[idx],
			 cfg,
			 sizeof(struct rte_eth_fdir_flex_mask));
}

void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Can not set"
			       " flex payload for type(%u).", cfg->type);
			return;
		}
	}
	(void)rte_memcpy(&flex_conf->flex_set[idx],
			 cfg,
			 sizeof(struct rte_eth_flex_payload_cfg));
}

void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (is_rx)
		diag = rte_eth_dev_set_vf_rx(port_id, vf, on);
	else
		diag = rte_eth_dev_set_vf_tx(port_id, vf, on);
	if (diag == 0)
		return;
	if (is_rx)
		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
		       "diag=%d\n", port_id, diag);
	else
		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
		       "diag=%d\n", port_id, diag);
}

void
set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;
	diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
	if (diag == 0)
		return;
	printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
	       "diag=%d\n", port_id, diag);
}

int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	rte_eth_link_get_nowait(port_id, &link);
	if (rate > link.link_speed) {
		printf("Invalid rate value: %u bigger than link speed: %u\n",
		       rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}
int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag;
	struct rte_eth_link link;

	if (q_msk == 0)
		return 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	rte_eth_link_get_nowait(port_id, &link);
	if (rate > link.link_speed) {
		printf("Invalid rate value: %u bigger than link speed: %u\n",
		       rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}