1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 /* BSD LICENSE 34 * 35 * Copyright(c) 2013 6WIND. 
36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name of 6WIND S.A. nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "testpmd.h"

/* Print "name" followed by a MAC address as XX:XX:XX:XX:XX:XX (no newline). */
static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
	       eth_addr->addr_bytes[0],
	       eth_addr->addr_bytes[1],
	       eth_addr->addr_bytes[2],
	       eth_addr->addr_bytes[3],
	       eth_addr->addr_bytes[4],
	       eth_addr->addr_bytes[5]);
}

/*
 * Display the NIC statistics (packets/errors/bytes, plus flow director and
 * optional per-queue stats-register counters) of port "port_id".
 */
void
nic_stats_display(portid_t port_id)
{
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;

	static const char *nic_stats_border = "########################";

	if (port_id >= nb_ports) {
		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	/* Two formats: left-aligned when no queue-stats mapping is active. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64"RX-bytes: "
		       "%-"PRIu64"\n"
		       "  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64"TX-bytes: "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes,
		       stats.opackets, stats.oerrors, stats.obytes);
	}
	else {
		printf("  RX-packets:             %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n"
		       "  TX-packets:             %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes,
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	/* stats fdir: only meaningful when flow director is configured. */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss: %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n",
		       stats.fdirmiss,
		       stats.fdirmatch);

	/* Per-stats-register RX counters, if RX queue mapping is enabled. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
		}
	}
	/* Per-stats-register TX counters, if TX queue mapping is enabled. */
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

/* Reset the hardware statistics of port "port_id" to zero. */
void
nic_stats_clear(portid_t port_id)
{
	if (port_id >= nb_ports) {
		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}


/*
 * Display the RX/TX queue -> statistics-register mappings configured
 * for port "port_id", if queue-stats mapping is enabled on that port.
 */
void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id >= nb_ports) {
		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}


	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf("  %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}

/*
 * Display general information about port "port_id": MAC address, socket,
 * link status/speed/duplex, promiscuous/allmulticast modes, MAC address
 * limits and VLAN offload configuration.
 */
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_eth_link link;
	int vlan_offload;
	int socket_id;
	struct rte_mempool * mp;
	static const char *info_border = "*********************";

	if (port_id >= nb_ports) {
		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
		return;
	}
	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link);
	socket_id = rte_eth_dev_socket_id(port_id);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	print_ethaddr("MAC address: ", &port->eth_addr);
	printf("\nConnect to socket: %d",socket_id);

	/* Report the NUMA socket where mbufs are allocated, if configured. */
	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %d",socket_id);

	printf("\nLink status: %s\n", (link.link_status) ?
	       ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	/* A negative return means VLAN offload query is unsupported. */
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0){
		printf("VLAN offload: \n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on \n");
		else
			printf("  strip off \n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on \n");
		else
			printf("  filter off \n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on \n");
		else
			printf("  qinq(extend) off \n");
	}
}

/* Return 1 (and print an error) if "port_id" is out of range, 0 otherwise. */
static int
port_id_is_invalid(portid_t port_id)
{
	if (port_id < nb_ports)
		return 0;
	printf("Invalid port %d (must be < nb_ports=%d)\n", port_id, nb_ports);
	return 1;
}

/* Return 1 (and print an error) if "vlan_id" is >= 4096, 0 otherwise. */
static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

/*
 * Return 1 (and print an error) if "reg_off" is not 4-byte aligned or lies
 * beyond the port's PCI memory resource, 0 otherwise.
 */
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}
	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

/* Return 1 (and print an error) if "bit_pos" exceeds 31, 0 otherwise. */
static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

/* Print a 32-bit register value in both hexadecimal and decimal. */
static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

/* Display the value of bit "bit_x" of the port register at "reg_off". */
void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;


	if (port_id_is_invalid(port_id))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

/*
 * Display the value of the bit field [bit1_pos, bit2_pos] (order of the two
 * positions does not matter) of the port register at "reg_off".
 */
void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	/* Mask only when the field is narrower than the full register. */
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

401 void 402 port_reg_display(portid_t port_id, uint32_t reg_off) 403 { 404 uint32_t reg_v; 405 406 if (port_id_is_invalid(port_id)) 407 return; 408 if (port_reg_off_is_invalid(port_id, reg_off)) 409 return; 410 reg_v = port_id_pci_reg_read(port_id, reg_off); 411 display_port_reg_value(port_id, reg_off, reg_v); 412 } 413 414 void 415 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 416 uint8_t bit_v) 417 { 418 uint32_t reg_v; 419 420 if (port_id_is_invalid(port_id)) 421 return; 422 if (port_reg_off_is_invalid(port_id, reg_off)) 423 return; 424 if (reg_bit_pos_is_invalid(bit_pos)) 425 return; 426 if (bit_v > 1) { 427 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 428 return; 429 } 430 reg_v = port_id_pci_reg_read(port_id, reg_off); 431 if (bit_v == 0) 432 reg_v &= ~(1 << bit_pos); 433 else 434 reg_v |= (1 << bit_pos); 435 port_id_pci_reg_write(port_id, reg_off, reg_v); 436 display_port_reg_value(port_id, reg_off, reg_v); 437 } 438 439 void 440 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 441 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 442 { 443 uint32_t max_v; 444 uint32_t reg_v; 445 uint8_t l_bit; 446 uint8_t h_bit; 447 448 if (port_id_is_invalid(port_id)) 449 return; 450 if (port_reg_off_is_invalid(port_id, reg_off)) 451 return; 452 if (reg_bit_pos_is_invalid(bit1_pos)) 453 return; 454 if (reg_bit_pos_is_invalid(bit2_pos)) 455 return; 456 if (bit1_pos > bit2_pos) 457 l_bit = bit2_pos, h_bit = bit1_pos; 458 else 459 l_bit = bit1_pos, h_bit = bit2_pos; 460 461 if ((h_bit - l_bit) < 31) 462 max_v = (1 << (h_bit - l_bit + 1)) - 1; 463 else 464 max_v = 0xFFFFFFFF; 465 466 if (value > max_v) { 467 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 468 (unsigned)value, (unsigned)value, 469 (unsigned)max_v, (unsigned)max_v); 470 return; 471 } 472 reg_v = port_id_pci_reg_read(port_id, reg_off); 473 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 474 reg_v |= (value << l_bit); /* Set changed bits */ 475 
port_id_pci_reg_write(port_id, reg_off, reg_v); 476 display_port_reg_value(port_id, reg_off, reg_v); 477 } 478 479 void 480 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 481 { 482 if (port_id_is_invalid(port_id)) 483 return; 484 if (port_reg_off_is_invalid(port_id, reg_off)) 485 return; 486 port_id_pci_reg_write(port_id, reg_off, reg_v); 487 display_port_reg_value(port_id, reg_off, reg_v); 488 } 489 490 /* 491 * RX/TX ring descriptors display functions. 492 */ 493 static int 494 rx_queue_id_is_invalid(queueid_t rxq_id) 495 { 496 if (rxq_id < nb_rxq) 497 return 0; 498 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 499 return 1; 500 } 501 502 static int 503 tx_queue_id_is_invalid(queueid_t txq_id) 504 { 505 if (txq_id < nb_txq) 506 return 0; 507 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 508 return 1; 509 } 510 511 static int 512 rx_desc_id_is_invalid(uint16_t rxdesc_id) 513 { 514 if (rxdesc_id < nb_rxd) 515 return 0; 516 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 517 rxdesc_id, nb_rxd); 518 return 1; 519 } 520 521 static int 522 tx_desc_id_is_invalid(uint16_t txdesc_id) 523 { 524 if (txdesc_id < nb_txd) 525 return 0; 526 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 527 txdesc_id, nb_txd); 528 return 1; 529 } 530 531 static const struct rte_memzone * 532 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) 533 { 534 char mz_name[RTE_MEMZONE_NAMESIZE]; 535 const struct rte_memzone *mz; 536 537 rte_snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 538 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 539 mz = rte_memzone_lookup(mz_name); 540 if (mz == NULL) 541 printf("%s ring memory zoneof (port %d, queue %d) not" 542 "found (zone name = %s\n", 543 ring_name, port_id, q_id, mz_name); 544 return (mz); 545 } 546 547 union igb_ring_dword { 548 uint64_t dword; 549 struct { 550 uint32_t hi; 551 uint32_t lo; 552 } words; 553 }; 554 555 
/* A HW ring descriptor: two 64-bit words in little-endian layout. */
struct igb_ring_desc {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

/*
 * Display descriptor "desc_id" of the ring stored in memzone "ring_mz",
 * byte-swapped from little-endian to CPU order, as four 32-bit words.
 */
static void
ring_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc *ring;
	struct igb_ring_desc rd;

	ring = (struct igb_ring_desc *) ring_mz->addr;
	rd.lo_dword = rte_le_to_cpu_64(ring[desc_id].lo_dword);
	rd.hi_dword = rte_le_to_cpu_64(ring[desc_id].hi_dword);
	printf("  0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)rd.lo_dword.words.lo, (unsigned)rd.lo_dword.words.hi,
	       (unsigned)rd.hi_dword.words.lo, (unsigned)rd.hi_dword.words.hi);
}

/* Display RX descriptor "rxd_id" of RX queue "rxq_id" of port "port_id". */
void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_descriptor_display(rx_mz, rxd_id);
}

/* Display TX descriptor "txd_id" of TX queue "txq_id" of port "port_id". */
void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_descriptor_display(tx_mz, txd_id);
}

/* Display the CPU ids of the configured forwarding lcores. */
void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}
/* Display the current RX/TX configuration (queues, descriptors, thresholds). */
void
rxtx_config_display(void)
{
	printf("  %s packet forwarding - CRC stripping %s - "
	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
		       (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);
	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
	       nb_rxq, nb_rxd, rx_free_thresh);
	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh);
	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
	       nb_txq, nb_txd, tx_free_thresh);
	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
	       tx_rs_thresh, txq_flags);
}

/*
 * Query and display the RSS redirection table of port "port_id".
 * "mask_lo"/"mask_hi" select which of the two halves of the
 * ETH_RSS_RETA_NUM_ENTRIES entries are displayed.
 */
void
port_rss_reta_info(portid_t port_id,struct rte_eth_rss_reta *reta_conf)
{
	uint8_t i,j;
	int ret;

	if (port_id_is_invalid(port_id))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	if (reta_conf->mask_lo != 0) {
		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			if (reta_conf->mask_lo & (uint64_t)(1ULL << i))
				printf("RSS RETA configuration: hash index=%d,"
				       "queue=%d\n",i,reta_conf->reta[i]);
		}
	}

	if (reta_conf->mask_hi != 0) {
		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			if(reta_conf->mask_hi & (uint64_t)(1ULL << i)) {
				/* Upper half: entry index is offset by half the table. */
				j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
				printf("RSS RETA configuration: hash index=%d,"
				       "queue=%d\n",j,reta_conf->reta[j]);
			}
		}
	}
}

/*
 * Setup forwarding configuration for each logical core.
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t  nb_extra;
	lcoreid_t  nb_fc;
	lcoreid_t  nb_lc;
	lcoreid_t  lc_id;

	/* Distribute nb_fs streams over nb_fc lcores; the first nb_lc lcores
	 * get nb_fs_per_lcore streams each, the remaining nb_extra lcores
	 * get one extra stream each. */
	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb  = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb  = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}

/*
 * Set up one forwarding stream per port: port i forwards to port i+1 in
 * paired topology (and back), or to the next port in chained topology.
 */
static void
simple_fwd_config_setup(void)
{
	portid_t i;
	portid_t j;
	portid_t inc = 2;

	if (port_topology == PORT_TOPOLOGY_CHAINED) {
		inc = 1;
	} else if (nb_fwd_ports % 2) {
		printf("\nWarning! Cannot handle an odd number of ports "
		       "with the current port topology. Configuration "
		       "must be changed to have an even number of ports, "
		       "or relaunch application with "
		       "--port-topology=chained\n\n");
	}

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
		j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue  = 0;
		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
		fwd_streams[i]->tx_queue  = 0;
		fwd_streams[i]->peer_addr = j;

		if (port_topology == PORT_TOPOLOGY_PAIRED) {
			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
			fwd_streams[j]->rx_queue  = 0;
			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
			fwd_streams[j]->tx_queue  = 0;
			fwd_streams[j]->peer_addr = i;
		}
	}
}

/**
 * For the RSS forwarding test, each core is assigned on every port a transmit
 * queue whose index is the index of the core itself. This approach limits the
 * maximum number of processing cores of the RSS test to the maximum number of
 * TX queues supported by the devices.
 *
 * Each core is assigned a single stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets.
 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
 * following rules:
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl = RxQj
 */
static void
rss_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	lcoreid_t  lc_id;

	/* Use the smaller of nb_rxq/nb_txq as the per-port queue count. */
	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_streams =
			(streamid_t)cur_fwd_config.nb_fwd_lcores;
	else
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[lc_id];
		/* Even ports forward to the next port, odd to the previous. */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		rxq = (queueid_t) (rxq + 1);
		if (rxq < nb_q)
			continue;
		/*
		 * rxq == nb_q
		 * Restart from RX queue 0 on next RX port
		 */
		rxq = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp = (portid_t) (rxp + 1);
	}
}

/*
 * In DCB and VT on,the mapping of 128 receive queues to 128 transmit queues.
 */
static void
dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq)
{
	if(dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {

		if (rxq < 32)
			/* tc0: 0-31 */
			*txq = rxq;
		else if (rxq < 64) {
			/* tc1: 64-95 */
			*txq =  (uint16_t)(rxq + 32);
		}
		else {
			/* tc2: 96-111;tc3:112-127 */
			*txq =  (uint16_t)(rxq/2 + 64);
		}
	}
	else {
		if (rxq < 16)
			/* tc0 mapping*/
			*txq = rxq;
		else if (rxq < 32) {
			/* tc1 mapping*/
			 *txq = (uint16_t)(rxq + 16);
		}
		else if (rxq < 64) {
			/*tc2,tc3 mapping */
			*txq =  (uint16_t)(rxq + 32);
		}
		else {
			/* tc4,tc5,tc6 and tc7 mapping */
			*txq =  (uint16_t)(rxq/2 + 64);
		}
	}
}

/**
 * For the DCB forwarding test, each core is assigned on every port multi-transmit
 * queue.
 *
 * Each core is assigned a multi-stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets.
 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
 * following rules:
 * In VT mode,
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl = RxQj
 * In non-VT mode,
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 * There is a mapping of RxQj to TxQl to be required,and the mapping was implemented
 * in dcb_rxq_2_txq_mapping function.
 */
static void
dcb_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	lcoreid_t  lc_id;
	uint16_t sm_id;

	nb_q = nb_rxq;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		/* a fwd core can run multi-streams */
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++)
		{
			struct fwd_stream *fs;
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			if ((rxp & 0x1) == 0)
				txp = (portid_t) (rxp + 1);
			else
				txp = (portid_t) (rxp - 1);
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fwd_ports_ids[txp];
			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
				fs->tx_queue = rxq;
			else
				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
			fs->peer_addr = fs->tx_port;
			rxq = (queueid_t) (rxq + 1);
			if (rxq < nb_q)
				continue;
			rxq = 0;
			if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
				rxp = (portid_t)
					(rxp + ((nb_ports >> 1) / nb_fwd_ports));
			else
				rxp = (portid_t) (rxp + 1);
		}
	}
}

/*
 * Select and run the forwarding configuration matching the current setup:
 * DCB or RSS when multiple RX and TX queues are used, simple otherwise.
 */
void
fwd_config_setup(void)
{
	cur_fwd_config.fwd_eng = cur_fwd_eng;
	if ((nb_rxq > 1) && (nb_txq > 1)){
		if (dcb_config)
			dcb_fwd_config_setup();
		else
			rss_fwd_config_setup();
	}
	else
		simple_fwd_config_setup();
}

/* Display the streams assigned to each forwarding lcore of "cfg". */
static void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t  lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
	       "NUMA support %s\n",
	       cfg->fwd_eng->fwd_mode_name,
	       cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
	       numa_support == 1 ? "enabled" : "disabled");
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}


/* Recompute and then display the current forwarding configuration. */
void
fwd_config_display(void)
{
	if((dcb_config) && (nb_fwd_lcores == 1)) {
		printf("In DCB mode,the nb forwarding cores should be larger than 1\n");
		return;
	}
	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
}

/*
 * Set the list of forwarding lcores from "lcorelist". The list is scanned
 * twice: a first validation pass, then a recording pass, so that nothing
 * is modified if any entry is invalid. Returns 0 on success, -1 on error.
 */
int
set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
{
	unsigned int i;
	unsigned int lcore_cpuid;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_lc; i++) {
		lcore_cpuid = lcorelist[i];
		if (! rte_lcore_is_enabled(lcore_cpuid)) {
			printf("lcore %u not enabled\n", lcore_cpuid);
			return -1;
		}
		if (lcore_cpuid == rte_get_master_lcore()) {
			printf("lcore %u cannot be masked on for running "
			       "packet forwarding, which is the master lcore "
			       "and reserved for command line parsing only\n",
			       lcore_cpuid);
			return -1;
		}
		if (record_now)
			fwd_lcores_cpuids[i] = lcore_cpuid;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_lcores = (lcoreid_t) nb_lc;
	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
		printf("previous number of forwarding cores %u - changed to "
		       "number of configured cores %u\n",
		       (unsigned int) nb_fwd_lcores, nb_lc);
		nb_fwd_lcores = (lcoreid_t) nb_lc;
	}

	return 0;
}

/*
 * Set the forwarding lcores from bit mask "lcoremask": each set bit i
 * selects lcore i. Returns 0 on success, -1 on error.
 */
int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int lcorelist[64];
	unsigned int nb_lc;
	unsigned int i;

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");
		return -1;
	}
	nb_lc = 0;
	for (i = 0; i < 64; i++) {
		if (! ((uint64_t)(1ULL << i) & lcoremask))
			continue;
		lcorelist[nb_lc++] = i;
	}
	return set_fwd_lcores_list(lcorelist, nb_lc);
}

/* Set the number of forwarding lcores, bounded by the configured count. */
void
set_fwd_lcores_number(uint16_t nb_lc)
{
	if (nb_lc > nb_cfg_lcores) {
		printf("nb fwd cores %u > %u (max. number of configured "
		       "lcores) - ignored\n",
		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
		return;
	}
	nb_fwd_lcores = (lcoreid_t) nb_lc;
	printf("Number of forwarding cores set to %u\n",
	       (unsigned int) nb_fwd_lcores);
}

/*
 * Set the list of forwarding ports from "portlist". Like
 * set_fwd_lcores_list(), uses a validate-then-record double pass.
 */
void
set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
{
	unsigned int i;
	portid_t port_id;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_pt; i++) {
		port_id = (portid_t) portlist[i];
		if (port_id >= nb_ports) {
			printf("Invalid port id %u >= %u\n",
			       (unsigned int) port_id,
			       (unsigned int) nb_ports);
			return;
		}
		if (record_now)
			fwd_ports_ids[i] = port_id;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_ports = (portid_t) nb_pt;
	if (nb_fwd_ports != (portid_t) nb_pt) {
		printf("previous number of forwarding ports %u - changed to "
		       "number of configured ports %u\n",
		       (unsigned int) nb_fwd_ports, nb_pt);
		nb_fwd_ports = (portid_t) nb_pt;
	}
}

/*
 * Set the forwarding ports from bit mask "portmask": each set bit i
 * selects port i.
 */
void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int portlist[64];
	unsigned int nb_pt;
	unsigned int i;

	if (portmask == 0) {
		printf("Invalid NULL mask of ports\n");
		return;
	}
	nb_pt = 0;
	for (i = 0; i < 64; i++) {
		if (! ((uint64_t)(1ULL << i) & portmask))
			continue;
		portlist[nb_pt++] = i;
	}
	set_fwd_ports_list(portlist, nb_pt);
}

/* Set the number of forwarding ports, bounded by the configured count. */
void
set_fwd_ports_number(uint16_t nb_pt)
{
	if (nb_pt > nb_cfg_ports) {
		printf("nb fwd ports %u > %u (number of configured "
		       "ports) - ignored\n",
		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
		return;
	}
	nb_fwd_ports = (portid_t) nb_pt;
	printf("Number of forwarding ports set to %u\n",
	       (unsigned int) nb_fwd_ports);
}

/* Set the number of packets per RX/TX burst, bounded by MAX_PKT_BURST. */
void
set_nb_pkt_per_burst(uint16_t nb)
{
	if (nb > MAX_PKT_BURST) {
		printf("nb pkt per burst: %u > %u (maximum packet per burst) "
		       " ignored\n",
		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
		return;
	}
	nb_pkt_per_burst = nb;
	printf("Number of packets per burst set to %u\n",
	       (unsigned int) nb_pkt_per_burst);
}

/*
 * Set the segment lengths of TX packets built by the tx-only engine.
 * NOTE(review): definition continues past this chunk.
 */
void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length is greater or equal than
	 * the mbuf data size.
	 * Check also that the total packet length is greater or equal than the
	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
1187 */ 1188 tx_pkt_len = 0; 1189 for (i = 0; i < nb_segs; i++) { 1190 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 1191 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 1192 i, seg_lengths[i], (unsigned) mbuf_data_size); 1193 return; 1194 } 1195 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 1196 } 1197 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 1198 printf("total packet length=%u < %d - give up\n", 1199 (unsigned) tx_pkt_len, 1200 (int)(sizeof(struct ether_hdr) + 20 + 8)); 1201 return; 1202 } 1203 1204 for (i = 0; i < nb_segs; i++) 1205 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 1206 1207 tx_pkt_length = tx_pkt_len; 1208 tx_pkt_nb_segs = (uint8_t) nb_segs; 1209 } 1210 1211 void 1212 set_pkt_forwarding_mode(const char *fwd_mode_name) 1213 { 1214 struct fwd_engine *fwd_eng; 1215 unsigned i; 1216 1217 i = 0; 1218 while ((fwd_eng = fwd_engines[i]) != NULL) { 1219 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 1220 printf("Set %s packet forwarding mode\n", 1221 fwd_mode_name); 1222 cur_fwd_eng = fwd_eng; 1223 return; 1224 } 1225 i++; 1226 } 1227 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 1228 } 1229 1230 void 1231 set_verbose_level(uint16_t vb_level) 1232 { 1233 printf("Change verbose level from %u to %u\n", 1234 (unsigned int) verbose_level, (unsigned int) vb_level); 1235 verbose_level = vb_level; 1236 } 1237 1238 void 1239 vlan_extend_set(portid_t port_id, int on) 1240 { 1241 int diag; 1242 int vlan_offload; 1243 1244 if (port_id_is_invalid(port_id)) 1245 return; 1246 1247 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1248 1249 if (on) 1250 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 1251 else 1252 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 1253 1254 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1255 if (diag < 0) 1256 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 1257 "diag=%d\n", port_id, on, diag); 1258 } 1259 1260 void 1261 rx_vlan_strip_set(portid_t port_id, int on) 
1262 { 1263 int diag; 1264 int vlan_offload; 1265 1266 if (port_id_is_invalid(port_id)) 1267 return; 1268 1269 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1270 1271 if (on) 1272 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 1273 else 1274 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 1275 1276 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1277 if (diag < 0) 1278 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 1279 "diag=%d\n", port_id, on, diag); 1280 } 1281 1282 void 1283 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 1284 { 1285 int diag; 1286 1287 if (port_id_is_invalid(port_id)) 1288 return; 1289 1290 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 1291 if (diag < 0) 1292 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 1293 "diag=%d\n", port_id, queue_id, on, diag); 1294 } 1295 1296 void 1297 rx_vlan_filter_set(portid_t port_id, int on) 1298 { 1299 int diag; 1300 int vlan_offload; 1301 1302 if (port_id_is_invalid(port_id)) 1303 return; 1304 1305 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 1306 1307 if (on) 1308 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 1309 else 1310 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 1311 1312 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 1313 if (diag < 0) 1314 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 1315 "diag=%d\n", port_id, on, diag); 1316 } 1317 1318 void 1319 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 1320 { 1321 int diag; 1322 1323 if (port_id_is_invalid(port_id)) 1324 return; 1325 if (vlan_id_is_invalid(vlan_id)) 1326 return; 1327 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 1328 if (diag == 0) 1329 return; 1330 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 1331 "diag=%d\n", 1332 port_id, vlan_id, on, diag); 1333 } 1334 1335 void 1336 rx_vlan_all_filter_set(portid_t port_id, int on) 1337 { 1338 uint16_t vlan_id; 1339 1340 if (port_id_is_invalid(port_id)) 
1341 return; 1342 for (vlan_id = 0; vlan_id < 4096; vlan_id++) 1343 rx_vft_set(port_id, vlan_id, on); 1344 } 1345 1346 void 1347 vlan_tpid_set(portid_t port_id, uint16_t tp_id) 1348 { 1349 int diag; 1350 if (port_id_is_invalid(port_id)) 1351 return; 1352 1353 diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id); 1354 if (diag == 0) 1355 return; 1356 1357 printf("tx_vlan_tpid_set(port_pi=%d, tpid=%d) failed " 1358 "diag=%d\n", 1359 port_id, tp_id, diag); 1360 } 1361 1362 void 1363 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 1364 { 1365 if (port_id_is_invalid(port_id)) 1366 return; 1367 if (vlan_id_is_invalid(vlan_id)) 1368 return; 1369 ports[port_id].tx_ol_flags |= PKT_TX_VLAN_PKT; 1370 ports[port_id].tx_vlan_id = vlan_id; 1371 } 1372 1373 void 1374 tx_vlan_reset(portid_t port_id) 1375 { 1376 if (port_id_is_invalid(port_id)) 1377 return; 1378 ports[port_id].tx_ol_flags &= ~PKT_TX_VLAN_PKT; 1379 } 1380 1381 void 1382 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 1383 { 1384 uint16_t i; 1385 uint8_t existing_mapping_found = 0; 1386 1387 if (port_id_is_invalid(port_id)) 1388 return; 1389 1390 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 1391 return; 1392 1393 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 1394 printf("map_value not in required range 0..%d\n", 1395 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1396 return; 1397 } 1398 1399 if (!is_rx) { /*then tx*/ 1400 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1401 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1402 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 1403 tx_queue_stats_mappings[i].stats_counter_id = map_value; 1404 existing_mapping_found = 1; 1405 break; 1406 } 1407 } 1408 if (!existing_mapping_found) { /* A new additional mapping... 
*/ 1409 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 1410 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 1411 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 1412 nb_tx_queue_stats_mappings++; 1413 } 1414 } 1415 else { /*rx*/ 1416 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1417 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1418 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 1419 rx_queue_stats_mappings[i].stats_counter_id = map_value; 1420 existing_mapping_found = 1; 1421 break; 1422 } 1423 } 1424 if (!existing_mapping_found) { /* A new additional mapping... */ 1425 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 1426 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 1427 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 1428 nb_rx_queue_stats_mappings++; 1429 } 1430 } 1431 } 1432 1433 void 1434 tx_cksum_set(portid_t port_id, uint8_t cksum_mask) 1435 { 1436 uint16_t tx_ol_flags; 1437 if (port_id_is_invalid(port_id)) 1438 return; 1439 /* Clear last 4 bits and then set L3/4 checksum mask again */ 1440 tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0); 1441 ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags); 1442 } 1443 1444 void 1445 fdir_add_signature_filter(portid_t port_id, uint8_t queue_id, 1446 struct rte_fdir_filter *fdir_filter) 1447 { 1448 int diag; 1449 1450 if (port_id_is_invalid(port_id)) 1451 return; 1452 1453 diag = rte_eth_dev_fdir_add_signature_filter(port_id, fdir_filter, 1454 queue_id); 1455 if (diag == 0) 1456 return; 1457 1458 printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed " 1459 "diag=%d\n", port_id, diag); 1460 } 1461 1462 void 1463 fdir_update_signature_filter(portid_t port_id, uint8_t queue_id, 1464 struct rte_fdir_filter *fdir_filter) 1465 { 1466 int diag; 1467 1468 if (port_id_is_invalid(port_id)) 1469 
return; 1470 1471 diag = rte_eth_dev_fdir_update_signature_filter(port_id, fdir_filter, 1472 queue_id); 1473 if (diag == 0) 1474 return; 1475 1476 printf("rte_eth_dev_fdir_update_signature_filter for port_id=%d failed " 1477 "diag=%d\n", port_id, diag); 1478 } 1479 1480 void 1481 fdir_remove_signature_filter(portid_t port_id, 1482 struct rte_fdir_filter *fdir_filter) 1483 { 1484 int diag; 1485 1486 if (port_id_is_invalid(port_id)) 1487 return; 1488 1489 diag = rte_eth_dev_fdir_remove_signature_filter(port_id, fdir_filter); 1490 if (diag == 0) 1491 return; 1492 1493 printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed " 1494 "diag=%d\n", port_id, diag); 1495 1496 } 1497 1498 void 1499 fdir_get_infos(portid_t port_id) 1500 { 1501 struct rte_eth_fdir fdir_infos; 1502 1503 static const char *fdir_stats_border = "########################"; 1504 1505 if (port_id_is_invalid(port_id)) 1506 return; 1507 1508 rte_eth_dev_fdir_get_infos(port_id, &fdir_infos); 1509 1510 printf("\n %s FDIR infos for port %-2d %s\n", 1511 fdir_stats_border, port_id, fdir_stats_border); 1512 1513 printf(" collision: %-10"PRIu64" free: %"PRIu64"\n" 1514 " maxhash: %-10"PRIu64" maxlen: %"PRIu64"\n" 1515 " add: %-10"PRIu64" remove: %"PRIu64"\n" 1516 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 1517 (uint64_t)(fdir_infos.collision), (uint64_t)(fdir_infos.free), 1518 (uint64_t)(fdir_infos.maxhash), (uint64_t)(fdir_infos.maxlen), 1519 fdir_infos.add, fdir_infos.remove, 1520 fdir_infos.f_add, fdir_infos.f_remove); 1521 printf(" %s############################%s\n", 1522 fdir_stats_border, fdir_stats_border); 1523 } 1524 1525 void 1526 fdir_add_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id, 1527 uint8_t drop, struct rte_fdir_filter *fdir_filter) 1528 { 1529 int diag; 1530 1531 if (port_id_is_invalid(port_id)) 1532 return; 1533 1534 diag = rte_eth_dev_fdir_add_perfect_filter(port_id, fdir_filter, 1535 soft_id, queue_id, drop); 1536 if (diag == 0) 1537 return; 1538 1539 
printf("rte_eth_dev_fdir_add_perfect_filter for port_id=%d failed " 1540 "diag=%d\n", port_id, diag); 1541 } 1542 1543 void 1544 fdir_update_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id, 1545 uint8_t drop, struct rte_fdir_filter *fdir_filter) 1546 { 1547 int diag; 1548 1549 if (port_id_is_invalid(port_id)) 1550 return; 1551 1552 diag = rte_eth_dev_fdir_update_perfect_filter(port_id, fdir_filter, 1553 soft_id, queue_id, drop); 1554 if (diag == 0) 1555 return; 1556 1557 printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed " 1558 "diag=%d\n", port_id, diag); 1559 } 1560 1561 void 1562 fdir_remove_perfect_filter(portid_t port_id, uint16_t soft_id, 1563 struct rte_fdir_filter *fdir_filter) 1564 { 1565 int diag; 1566 1567 if (port_id_is_invalid(port_id)) 1568 return; 1569 1570 diag = rte_eth_dev_fdir_remove_perfect_filter(port_id, fdir_filter, 1571 soft_id); 1572 if (diag == 0) 1573 return; 1574 1575 printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed " 1576 "diag=%d\n", port_id, diag); 1577 } 1578 1579 void 1580 fdir_set_masks(portid_t port_id, struct rte_fdir_masks *fdir_masks) 1581 { 1582 int diag; 1583 1584 if (port_id_is_invalid(port_id)) 1585 return; 1586 1587 diag = rte_eth_dev_fdir_set_masks(port_id, fdir_masks); 1588 if (diag == 0) 1589 return; 1590 1591 printf("rte_eth_dev_set_masks_filter for port_id=%d failed " 1592 "diag=%d\n", port_id, diag); 1593 } 1594 1595 void 1596 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 1597 { 1598 int diag; 1599 1600 if (port_id_is_invalid(port_id)) 1601 return; 1602 if (is_rx) 1603 diag = rte_eth_dev_set_vf_rx(port_id,vf,on); 1604 else 1605 diag = rte_eth_dev_set_vf_tx(port_id,vf,on); 1606 if (diag == 0) 1607 return; 1608 if(is_rx) 1609 printf("rte_eth_dev_set_vf_rx for port_id=%d failed " 1610 "diag=%d\n", port_id, diag); 1611 else 1612 printf("rte_eth_dev_set_vf_tx for port_id=%d failed " 1613 "diag=%d\n", port_id, diag); 1614 1615 } 1616 1617 
void 1618 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on) 1619 { 1620 int diag; 1621 1622 if (port_id_is_invalid(port_id)) 1623 return; 1624 if (vlan_id_is_invalid(vlan_id)) 1625 return; 1626 diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on); 1627 if (diag == 0) 1628 return; 1629 printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed " 1630 "diag=%d\n", port_id, diag); 1631 } 1632 1633