/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <time.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_mtr.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <rte_hexdump.h>

#include "testpmd.h"
#include "cmdline_mtr.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
	{ "none", 0 },
	{ "eth", ETH_RSS_ETH },
	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
	{ "vlan", ETH_RSS_VLAN },
	{ "s-vlan", ETH_RSS_S_VLAN },
	{ "c-vlan", ETH_RSS_C_VLAN },
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
	{ "esp", ETH_RSS_ESP },
	{ "ah", ETH_RSS_AH },
	{ "l2tpv3", ETH_RSS_L2TPV3 },
	{ "pfcp", ETH_RSS_PFCP },
	{ "pppoe", ETH_RSS_PPPOE },
	{ "gtpu", ETH_RSS_GTPU },
	{ "ecpri", ETH_RSS_ECPRI },
	{ "mpls", ETH_RSS_MPLS },
	{ NULL, 0 },
};

static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{
		.mode = RTE_ETH_FEC_NOFEC,
		.name = "off",
	},
	{
		.mode = RTE_ETH_FEC_AUTO,
		.name = "auto",
	},
	{
		.mode = RTE_ETH_FEC_BASER,
		.name = "baser",
	},
	{
		.mode = RTE_ETH_FEC_RS,
		.name = "rs",
	},
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
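/*
 * Basic port statistics, typically reached from the testpmd CLI via
 * "show port stats <port_id>". Rates are computed from the delta between
 * two consecutive invocations over CLOCK_MONOTONIC(_RAW) time: e.g. with
 * 1,000,000 packets received in a 0.5 s interval, diff_ns = 5e8 and
 * Rx-pps = 1e6 / 5e8 * 1e9 = 2,000,000. Byte rates are printed in bits
 * per second (mbps_* multiplied by 8 below).
 */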
void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
		 diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
	printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		printf("%s: Error: failed to reset stats (port %u): %s\n",
		       __func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		printf("%s: Error: failed to get stats (port %u): %s\n",
		       __func__, port_id, strerror(ret));
		return;
	}
	printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		printf("%s: Error: failed to reset xstats (port %u): %s\n",
		       __func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		printf("%s: Error: failed to get stats (port %u): %s\n",
		       __func__, port_id, strerror(ret));
		return;
	}
}
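/*
 * Per-queue information, typically reached from the testpmd CLI via
 * "show rxq info <port_id> <queue_id>" and "show txq info <port_id>
 * <queue_id>". The queue state printed by get_queue_state_name() below
 * comes from the RTE_ETH_QUEUE_STATE_* value filled in by
 * rte_eth_rx/tx_queue_info_get().
 */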
static const char *
get_queue_state_name(uint8_t queue_state)
{
	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
		return "stopped";
	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
		return "started";
	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return "hairpin";
	else
		return "unknown";
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "RX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
	if (qinfo.rx_buf_size != 0)
		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "TX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}
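/*
 * Note: rte_bus_find() treats its comparison callback as a predicate
 * returning 0 on a match, so always returning 0 here makes every bus
 * match. device_infos_display() below uses this to walk all buses; in
 * the testpmd CLI it is typically reached via
 * "show device info <identifier>|all".
 */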
458 " (per queue)" : ""); 459 460 printf("\n"); 461 } 462 463 static int bus_match_all(const struct rte_bus *bus, const void *data) 464 { 465 RTE_SET_USED(bus); 466 RTE_SET_USED(data); 467 return 0; 468 } 469 470 static void 471 device_infos_display_speeds(uint32_t speed_capa) 472 { 473 printf("\n\tDevice speed capability:"); 474 if (speed_capa == ETH_LINK_SPEED_AUTONEG) 475 printf(" Autonegotiate (all speeds)"); 476 if (speed_capa & ETH_LINK_SPEED_FIXED) 477 printf(" Disable autonegotiate (fixed speed) "); 478 if (speed_capa & ETH_LINK_SPEED_10M_HD) 479 printf(" 10 Mbps half-duplex "); 480 if (speed_capa & ETH_LINK_SPEED_10M) 481 printf(" 10 Mbps full-duplex "); 482 if (speed_capa & ETH_LINK_SPEED_100M_HD) 483 printf(" 100 Mbps half-duplex "); 484 if (speed_capa & ETH_LINK_SPEED_100M) 485 printf(" 100 Mbps full-duplex "); 486 if (speed_capa & ETH_LINK_SPEED_1G) 487 printf(" 1 Gbps "); 488 if (speed_capa & ETH_LINK_SPEED_2_5G) 489 printf(" 2.5 Gbps "); 490 if (speed_capa & ETH_LINK_SPEED_5G) 491 printf(" 5 Gbps "); 492 if (speed_capa & ETH_LINK_SPEED_10G) 493 printf(" 10 Gbps "); 494 if (speed_capa & ETH_LINK_SPEED_20G) 495 printf(" 20 Gbps "); 496 if (speed_capa & ETH_LINK_SPEED_25G) 497 printf(" 25 Gbps "); 498 if (speed_capa & ETH_LINK_SPEED_40G) 499 printf(" 40 Gbps "); 500 if (speed_capa & ETH_LINK_SPEED_50G) 501 printf(" 50 Gbps "); 502 if (speed_capa & ETH_LINK_SPEED_56G) 503 printf(" 56 Gbps "); 504 if (speed_capa & ETH_LINK_SPEED_100G) 505 printf(" 100 Gbps "); 506 if (speed_capa & ETH_LINK_SPEED_200G) 507 printf(" 200 Gbps "); 508 } 509 510 void 511 device_infos_display(const char *identifier) 512 { 513 static const char *info_border = "*********************"; 514 struct rte_bus *start = NULL, *next; 515 struct rte_dev_iterator dev_iter; 516 char name[RTE_ETH_NAME_MAX_LEN]; 517 struct rte_ether_addr mac_addr; 518 struct rte_device *dev; 519 struct rte_devargs da; 520 portid_t port_id; 521 struct rte_eth_dev_info dev_info; 522 char devstr[128]; 523 524 memset(&da, 0, sizeof(da)); 525 if (!identifier) 526 goto skip_parse; 527 528 if (rte_devargs_parsef(&da, "%s", identifier)) { 529 printf("cannot parse identifier\n"); 530 return; 531 } 532 533 skip_parse: 534 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 535 536 start = next; 537 if (identifier && da.bus != next) 538 continue; 539 540 /* Skip buses that don't have iterate method */ 541 if (!next->dev_iterate) 542 continue; 543 544 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 545 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 546 547 if (!dev->driver) 548 continue; 549 /* Check for matching device if identifier is present */ 550 if (identifier && 551 strncmp(da.name, dev->name, strlen(dev->name))) 552 continue; 553 printf("\n%s Infos for device %s %s\n", 554 info_border, dev->name, info_border); 555 printf("Bus name: %s", dev->bus->name); 556 printf("\nDriver name: %s", dev->driver->name); 557 printf("\nDevargs: %s", 558 dev->devargs ? 
dev->devargs->args : ""); 559 printf("\nConnect to socket: %d", dev->numa_node); 560 printf("\n"); 561 562 /* List ports with matching device name */ 563 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 564 printf("\n\tPort id: %-2d", port_id); 565 if (eth_macaddr_get_print_err(port_id, 566 &mac_addr) == 0) 567 print_ethaddr("\n\tMAC address: ", 568 &mac_addr); 569 rte_eth_dev_get_name_by_port(port_id, name); 570 printf("\n\tDevice name: %s", name); 571 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 572 device_infos_display_speeds(dev_info.speed_capa); 573 printf("\n"); 574 } 575 } 576 }; 577 rte_devargs_reset(&da); 578 } 579 580 void 581 port_infos_display(portid_t port_id) 582 { 583 struct rte_port *port; 584 struct rte_ether_addr mac_addr; 585 struct rte_eth_link link; 586 struct rte_eth_dev_info dev_info; 587 int vlan_offload; 588 struct rte_mempool * mp; 589 static const char *info_border = "*********************"; 590 uint16_t mtu; 591 char name[RTE_ETH_NAME_MAX_LEN]; 592 int ret; 593 char fw_version[ETHDEV_FWVERS_LEN]; 594 595 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 596 print_valid_ports(); 597 return; 598 } 599 port = &ports[port_id]; 600 ret = eth_link_get_nowait_print_err(port_id, &link); 601 if (ret < 0) 602 return; 603 604 ret = eth_dev_info_get_print_err(port_id, &dev_info); 605 if (ret != 0) 606 return; 607 608 printf("\n%s Infos for port %-2d %s\n", 609 info_border, port_id, info_border); 610 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 611 print_ethaddr("MAC address: ", &mac_addr); 612 rte_eth_dev_get_name_by_port(port_id, name); 613 printf("\nDevice name: %s", name); 614 printf("\nDriver name: %s", dev_info.driver_name); 615 616 if (rte_eth_dev_fw_version_get(port_id, fw_version, 617 ETHDEV_FWVERS_LEN) == 0) 618 printf("\nFirmware-version: %s", fw_version); 619 else 620 printf("\nFirmware-version: %s", "not available"); 621 622 if (dev_info.device->devargs && dev_info.device->devargs->args) 623 printf("\nDevargs: %s", dev_info.device->devargs->args); 624 printf("\nConnect to socket: %u", port->socket_id); 625 626 if (port_numa[port_id] != NUMA_NO_CONFIG) { 627 mp = mbuf_pool_find(port_numa[port_id], 0); 628 if (mp) 629 printf("\nmemory allocation on the socket: %d", 630 port_numa[port_id]); 631 } else 632 printf("\nmemory allocation on the socket: %u",port->socket_id); 633 634 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 635 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 636 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 637 ("full-duplex") : ("half-duplex")); 638 printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ? 639 ("On") : ("Off")); 640 641 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 642 printf("MTU: %u\n", mtu); 643 644 printf("Promiscuous mode: %s\n", 645 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 646 printf("Allmulticast mode: %s\n", 647 rte_eth_allmulticast_get(port_id) ? 
"enabled" : "disabled"); 648 printf("Maximum number of MAC addresses: %u\n", 649 (unsigned int)(port->dev_info.max_mac_addrs)); 650 printf("Maximum number of MAC addresses of hash filtering: %u\n", 651 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 652 653 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 654 if (vlan_offload >= 0){ 655 printf("VLAN offload: \n"); 656 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 657 printf(" strip on, "); 658 else 659 printf(" strip off, "); 660 661 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 662 printf("filter on, "); 663 else 664 printf("filter off, "); 665 666 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 667 printf("extend on, "); 668 else 669 printf("extend off, "); 670 671 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 672 printf("qinq strip on\n"); 673 else 674 printf("qinq strip off\n"); 675 } 676 677 if (dev_info.hash_key_size > 0) 678 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 679 if (dev_info.reta_size > 0) 680 printf("Redirection table size: %u\n", dev_info.reta_size); 681 if (!dev_info.flow_type_rss_offloads) 682 printf("No RSS offload flow type is supported.\n"); 683 else { 684 uint16_t i; 685 char *p; 686 687 printf("Supported RSS offload flow types:\n"); 688 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 689 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 690 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 691 continue; 692 p = flowtype_to_str(i); 693 if (p) 694 printf(" %s\n", p); 695 else 696 printf(" user defined %d\n", i); 697 } 698 } 699 700 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 701 printf("Maximum configurable length of RX packet: %u\n", 702 dev_info.max_rx_pktlen); 703 printf("Maximum configurable size of LRO aggregated packet: %u\n", 704 dev_info.max_lro_pkt_size); 705 if (dev_info.max_vfs) 706 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 707 if (dev_info.max_vmdq_pools) 708 printf("Maximum number of VMDq pools: %u\n", 709 dev_info.max_vmdq_pools); 710 711 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 712 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 713 printf("Max possible number of RXDs per queue: %hu\n", 714 dev_info.rx_desc_lim.nb_max); 715 printf("Min possible number of RXDs per queue: %hu\n", 716 dev_info.rx_desc_lim.nb_min); 717 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 718 719 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 720 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 721 printf("Max possible number of TXDs per queue: %hu\n", 722 dev_info.tx_desc_lim.nb_max); 723 printf("Min possible number of TXDs per queue: %hu\n", 724 dev_info.tx_desc_lim.nb_min); 725 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 726 printf("Max segment number per packet: %hu\n", 727 dev_info.tx_desc_lim.nb_seg_max); 728 printf("Max segment number per MTU/TSO: %hu\n", 729 dev_info.tx_desc_lim.nb_mtu_seg_max); 730 731 /* Show switch info only if valid switch domain and port id is set */ 732 if (dev_info.switch_info.domain_id != 733 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 734 if (dev_info.switch_info.name) 735 printf("Switch name: %s\n", dev_info.switch_info.name); 736 737 printf("Switch domain Id: %u\n", 738 dev_info.switch_info.domain_id); 739 printf("Switch Port Id: %u\n", 740 dev_info.switch_info.port_id); 741 } 742 } 743 744 void 745 port_summary_header_display(void) 746 { 747 uint16_t port_number; 748 749 port_number = 
void
port_eeprom_display(portid_t port_id)
{
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
	if (len_eeprom < 0) {
		switch (len_eeprom) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get EEPROM: %d\n", len_eeprom);
			break;
		}
		return;
	}

	char buf[len_eeprom];
	einfo.offset = 0;
	einfo.length = len_eeprom;
	einfo.data = buf;

	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get EEPROM: %d\n", ret);
			break;
		}
		return;
	}
	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
}

void
port_module_eeprom_display(portid_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get module EEPROM: %d\n", ret);
			break;
		}
		return;
	}

	char buf[minfo.eeprom_len];
	einfo.offset = 0;
	einfo.length = minfo.eeprom_len;
	einfo.data = buf;

	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get module EEPROM: %d\n", ret);
			break;
		}
		return;
	}

	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}
device\n"); 891 break; 892 case -EIO: 893 printf("device is removed\n"); 894 break; 895 default: 896 printf("Unable to get module EEPROM: %d\n", ret); 897 break; 898 } 899 return; 900 } 901 902 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 903 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 904 } 905 906 int 907 port_id_is_invalid(portid_t port_id, enum print_warning warning) 908 { 909 uint16_t pid; 910 911 if (port_id == (portid_t)RTE_PORT_ALL) 912 return 0; 913 914 RTE_ETH_FOREACH_DEV(pid) 915 if (port_id == pid) 916 return 0; 917 918 if (warning == ENABLED_WARN) 919 printf("Invalid port %d\n", port_id); 920 921 return 1; 922 } 923 924 void print_valid_ports(void) 925 { 926 portid_t pid; 927 928 printf("The valid ports array is ["); 929 RTE_ETH_FOREACH_DEV(pid) { 930 printf(" %d", pid); 931 } 932 printf(" ]\n"); 933 } 934 935 static int 936 vlan_id_is_invalid(uint16_t vlan_id) 937 { 938 if (vlan_id < 4096) 939 return 0; 940 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 941 return 1; 942 } 943 944 static int 945 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 946 { 947 const struct rte_pci_device *pci_dev; 948 const struct rte_bus *bus; 949 uint64_t pci_len; 950 951 if (reg_off & 0x3) { 952 printf("Port register offset 0x%X not aligned on a 4-byte " 953 "boundary\n", 954 (unsigned)reg_off); 955 return 1; 956 } 957 958 if (!ports[port_id].dev_info.device) { 959 printf("Invalid device\n"); 960 return 0; 961 } 962 963 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 964 if (bus && !strcmp(bus->name, "pci")) { 965 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 966 } else { 967 printf("Not a PCI device\n"); 968 return 1; 969 } 970 971 pci_len = pci_dev->mem_resource[0].len; 972 if (reg_off >= pci_len) { 973 printf("Port %d: register offset %u (0x%X) out of port PCI " 974 "resource (length=%"PRIu64")\n", 975 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 976 return 1; 977 } 978 return 0; 979 } 980 981 static int 982 reg_bit_pos_is_invalid(uint8_t bit_pos) 983 { 984 if (bit_pos <= 31) 985 return 0; 986 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 987 return 1; 988 } 989 990 #define display_port_and_reg_off(port_id, reg_off) \ 991 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 992 993 static inline void 994 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 995 { 996 display_port_and_reg_off(port_id, (unsigned)reg_off); 997 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 998 } 999 1000 void 1001 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1002 { 1003 uint32_t reg_v; 1004 1005 1006 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1007 return; 1008 if (port_reg_off_is_invalid(port_id, reg_off)) 1009 return; 1010 if (reg_bit_pos_is_invalid(bit_x)) 1011 return; 1012 reg_v = port_id_pci_reg_read(port_id, reg_off); 1013 display_port_and_reg_off(port_id, (unsigned)reg_off); 1014 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1015 } 1016 1017 void 1018 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1019 uint8_t bit1_pos, uint8_t bit2_pos) 1020 { 1021 uint32_t reg_v; 1022 uint8_t l_bit; 1023 uint8_t h_bit; 1024 1025 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1026 return; 1027 if (port_reg_off_is_invalid(port_id, reg_off)) 1028 return; 1029 if (reg_bit_pos_is_invalid(bit1_pos)) 1030 return; 1031 if (reg_bit_pos_is_invalid(bit2_pos)) 1032 return; 1033 if 
void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;
	struct rte_port *rte_port = &ports[port_id];
	struct rte_eth_dev_info dev_info;
	uint16_t eth_overhead;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
		printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
		       mtu, dev_info.min_mtu, dev_info.max_mtu);
		return;
	}
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag)
		printf("Set MTU failed. diag=%d\n", diag);
	else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		/*
		 * Ether overhead in driver is equal to the difference of
		 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
		 * device supports jumbo frame.
		 */
		eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
		if (mtu > RTE_ETHER_MTU) {
			rte_port->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
			rte_port->dev_conf.rxmode.max_rx_pkt_len =
						mtu + eth_overhead;
		} else
			rte_port->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;
	}
}

/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type;

	switch (tunnel->type) {
	default:
		type = "unknown";
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	}

	return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
		       port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
		       port_id, tunnel_id);
	}
}

void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else {
		printf("cannot offload \"%s\" tunnel type\n", ops->type);
		return;
	}
	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->tunnel.type == type)
			break;
	}
	if (!flt) {
		flt = calloc(1, sizeof(*flt));
		if (!flt) {
			printf("failed to allocate port flt object\n");
			return;
		}
		flt->tunnel.type = type;
		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
			  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
	}
	printf("port %d: flow tunnel #%u type %s\n",
	       port_id, flt->id, ops->type);
}
/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)",
	       rte_strerror(err));
	return -err;
}
error->message : "(no stated reason)", 1358 rte_strerror(err)); 1359 return -err; 1360 } 1361 1362 static void 1363 rss_config_display(struct rte_flow_action_rss *rss_conf) 1364 { 1365 uint8_t i; 1366 1367 if (rss_conf == NULL) { 1368 printf("Invalid rule\n"); 1369 return; 1370 } 1371 1372 printf("RSS:\n" 1373 " queues:"); 1374 if (rss_conf->queue_num == 0) 1375 printf(" none"); 1376 for (i = 0; i < rss_conf->queue_num; i++) 1377 printf(" %d", rss_conf->queue[i]); 1378 printf("\n"); 1379 1380 printf(" function: "); 1381 switch (rss_conf->func) { 1382 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1383 printf("default\n"); 1384 break; 1385 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1386 printf("toeplitz\n"); 1387 break; 1388 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1389 printf("simple_xor\n"); 1390 break; 1391 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1392 printf("symmetric_toeplitz\n"); 1393 break; 1394 default: 1395 printf("Unknown function\n"); 1396 return; 1397 } 1398 1399 printf(" types:\n"); 1400 if (rss_conf->types == 0) { 1401 printf(" none\n"); 1402 return; 1403 } 1404 for (i = 0; rss_type_table[i].str; i++) { 1405 if ((rss_conf->types & 1406 rss_type_table[i].rss_type) == 1407 rss_type_table[i].rss_type && 1408 rss_type_table[i].rss_type != 0) 1409 printf(" %s\n", rss_type_table[i].str); 1410 } 1411 } 1412 1413 static struct port_indirect_action * 1414 action_get_by_id(portid_t port_id, uint32_t id) 1415 { 1416 struct rte_port *port; 1417 struct port_indirect_action **ppia; 1418 struct port_indirect_action *pia = NULL; 1419 1420 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1421 port_id == (portid_t)RTE_PORT_ALL) 1422 return NULL; 1423 port = &ports[port_id]; 1424 ppia = &port->actions_list; 1425 while (*ppia) { 1426 if ((*ppia)->id == id) { 1427 pia = *ppia; 1428 break; 1429 } 1430 ppia = &(*ppia)->next; 1431 } 1432 if (!pia) 1433 printf("Failed to find indirect action #%u on port %u\n", 1434 id, port_id); 1435 return pia; 1436 } 1437 1438 static int 1439 action_alloc(portid_t port_id, uint32_t id, 1440 struct port_indirect_action **action) 1441 { 1442 struct rte_port *port; 1443 struct port_indirect_action **ppia; 1444 struct port_indirect_action *pia = NULL; 1445 1446 *action = NULL; 1447 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1448 port_id == (portid_t)RTE_PORT_ALL) 1449 return -EINVAL; 1450 port = &ports[port_id]; 1451 if (id == UINT32_MAX) { 1452 /* taking first available ID */ 1453 if (port->actions_list) { 1454 if (port->actions_list->id == UINT32_MAX - 1) { 1455 printf("Highest indirect action ID is already" 1456 " assigned, delete it first\n"); 1457 return -ENOMEM; 1458 } 1459 id = port->actions_list->id + 1; 1460 } else { 1461 id = 0; 1462 } 1463 } 1464 pia = calloc(1, sizeof(*pia)); 1465 if (!pia) { 1466 printf("Allocation of port %u indirect action failed\n", 1467 port_id); 1468 return -ENOMEM; 1469 } 1470 ppia = &port->actions_list; 1471 while (*ppia && (*ppia)->id > id) 1472 ppia = &(*ppia)->next; 1473 if (*ppia && (*ppia)->id == id) { 1474 printf("Indirect action #%u is already assigned," 1475 " delete it first\n", id); 1476 free(pia); 1477 return -EINVAL; 1478 } 1479 pia->next = *ppia; 1480 pia->id = id; 1481 *ppia = pia; 1482 *action = pia; 1483 return 0; 1484 } 1485 1486 /** Create indirect action */ 1487 int 1488 port_action_handle_create(portid_t port_id, uint32_t id, 1489 const struct rte_flow_indir_action_conf *conf, 1490 const struct rte_flow_action *action) 1491 { 1492 struct port_indirect_action *pia; 1493 int ret; 1494 struct rte_flow_error error; 1495 
/** Create indirect action */
int
port_action_handle_create(portid_t port_id, uint32_t id,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action)
{
	struct port_indirect_action *pia;
	int ret;
	struct rte_flow_error error;

	ret = action_alloc(port_id, id, &pia);
	if (ret)
		return ret;
	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
		struct rte_flow_action_age *age =
			(struct rte_flow_action_age *)(uintptr_t)(action->conf);

		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
		age->context = &pia->age_type;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
		struct rte_flow_action_conntrack *ct =
			(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);

		memcpy(ct, &conntrack_context, sizeof(*ct));
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
						    &error);
	if (!pia->handle) {
		uint32_t destroy_id = pia->id;
		port_action_handle_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	pia->type = action->type;
	printf("Indirect action #%u created\n", pia->id);
	return 0;
}

/** Destroy indirect action */
int
port_action_handle_destroy(portid_t port_id,
			   uint32_t n,
			   const uint32_t *actions)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_indirect_action *pia = *tmp;

			if (actions[i] != pia->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));

			if (pia->handle && rte_flow_action_handle_destroy(
					port_id, pia->handle, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			*tmp = pia->next;
			printf("Indirect action #%u destroyed\n", pia->id);
			free(pia);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}
/** Get indirect action by port + id */
struct rte_flow_action_handle *
port_action_handle_get_by_id(portid_t port_id, uint32_t id)
{
	struct port_indirect_action *pia = action_get_by_id(port_id, id);

	return (pia) ? pia->handle : NULL;
}

/** Update indirect action */
int
port_action_handle_update(portid_t port_id, uint32_t id,
			  const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_flow_action_handle *action_handle;
	struct port_indirect_action *pia;
	const void *update;

	action_handle = port_action_handle_get_by_id(port_id, id);
	if (!action_handle)
		return -EINVAL;
	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		update = action->conf;
		break;
	default:
		update = action;
		break;
	}
	if (rte_flow_action_handle_update(port_id, action_handle, update,
					  &error)) {
		return port_flow_complain(&error);
	}
	printf("Indirect action #%u updated\n", id);
	return 0;
}

int
port_action_handle_query(portid_t port_id, uint32_t id)
{
	struct rte_flow_error error;
	struct port_indirect_action *pia;
	uint64_t default_data;
	void *data = NULL;
	int ret = 0;

	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
		data = &default_data;
		break;
	default:
		printf("Indirect action %u (type: %d) on port %u doesn't"
		       " support query\n", id, pia->type, port_id);
		return -1;
	}
	if (rte_flow_action_handle_query(port_id, pia->handle, data, &error))
		ret = port_flow_complain(&error);
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
		if (!ret) {
			struct rte_flow_query_age *resp = data;

			printf("AGE:\n"
			       " aged: %u\n"
			       " sec_since_last_hit_valid: %u\n"
			       " sec_since_last_hit: %" PRIu32 "\n",
			       resp->aged,
			       resp->sec_since_last_hit_valid,
			       resp->sec_since_last_hit);
		}
		data = NULL;
		break;
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		if (!ret) {
			struct rte_flow_action_conntrack *ct = data;

			printf("Conntrack Context:\n"
			       " Peer: %u, Flow dir: %s, Enable: %u\n"
			       " Live: %u, SACK: %u, CACK: %u\n"
			       " Packet dir: %s, Liberal: %u, State: %u\n"
			       " Factor: %u, Retrans: %u, TCP flags: %u\n"
			       " Last Seq: %u, Last ACK: %u\n"
			       " Last Win: %u, Last End: %u\n",
			       ct->peer_port,
			       ct->is_original_dir ? "Original" : "Reply",
			       ct->enable, ct->live_connection,
			       ct->selective_ack, ct->challenge_ack_passed,
			       ct->last_direction ? "Original" : "Reply",
			       ct->liberal_mode, ct->state,
			       ct->max_ack_window, ct->retransmission_limit,
			       ct->last_index, ct->last_seq, ct->last_ack,
			       ct->last_window, ct->last_end);
			printf(" Original Dir:\n"
			       " scale: %u, fin: %u, ack seen: %u\n"
			       " unacked data: %u\n Sent end: %u,"
			       " Reply end: %u, Max win: %u, Max ACK: %u\n",
			       ct->original_dir.scale,
			       ct->original_dir.close_initiated,
			       ct->original_dir.last_ack_seen,
			       ct->original_dir.data_unacked,
			       ct->original_dir.sent_end,
			       ct->original_dir.reply_end,
			       ct->original_dir.max_win,
			       ct->original_dir.max_ack);
			printf(" Reply Dir:\n"
			       " scale: %u, fin: %u, ack seen: %u\n"
			       " unacked data: %u\n Sent end: %u,"
			       " Reply end: %u, Max win: %u, Max ACK: %u\n",
			       ct->reply_dir.scale,
			       ct->reply_dir.close_initiated,
			       ct->reply_dir.last_ack_seen,
			       ct->reply_dir.data_unacked,
			       ct->reply_dir.sent_end, ct->reply_dir.reply_end,
			       ct->reply_dir.max_win, ct->reply_dir.max_ack);
		}
		data = NULL;
		break;
	default:
		printf("Indirect action %u (type: %d) on port %u doesn't"
		       " support query\n", id, pia->type, port_id);
		ret = -1;
	}
	return ret;
}
"Original" : "Reply", 1670 ct->liberal_mode, ct->state, 1671 ct->max_ack_window, ct->retransmission_limit, 1672 ct->last_index, ct->last_seq, ct->last_ack, 1673 ct->last_window, ct->last_end); 1674 printf(" Original Dir:\n" 1675 " scale: %u, fin: %u, ack seen: %u\n" 1676 " unacked data: %u\n Sent end: %u," 1677 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1678 ct->original_dir.scale, 1679 ct->original_dir.close_initiated, 1680 ct->original_dir.last_ack_seen, 1681 ct->original_dir.data_unacked, 1682 ct->original_dir.sent_end, 1683 ct->original_dir.reply_end, 1684 ct->original_dir.max_win, 1685 ct->original_dir.max_ack); 1686 printf(" Reply Dir:\n" 1687 " scale: %u, fin: %u, ack seen: %u\n" 1688 " unacked data: %u\n Sent end: %u," 1689 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1690 ct->reply_dir.scale, 1691 ct->reply_dir.close_initiated, 1692 ct->reply_dir.last_ack_seen, 1693 ct->reply_dir.data_unacked, 1694 ct->reply_dir.sent_end, ct->reply_dir.reply_end, 1695 ct->reply_dir.max_win, ct->reply_dir.max_ack); 1696 } 1697 data = NULL; 1698 break; 1699 default: 1700 printf("Indirect action %u (type: %d) on port %u doesn't" 1701 " support query\n", id, pia->type, port_id); 1702 ret = -1; 1703 } 1704 return ret; 1705 } 1706 1707 static struct port_flow_tunnel * 1708 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1709 const struct rte_flow_item *pattern, 1710 const struct rte_flow_action *actions, 1711 const struct tunnel_ops *tunnel_ops) 1712 { 1713 int ret; 1714 struct rte_port *port; 1715 struct port_flow_tunnel *pft; 1716 struct rte_flow_error error; 1717 1718 port = &ports[port_id]; 1719 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 1720 if (!pft) { 1721 printf("failed to locate port flow tunnel #%u\n", 1722 tunnel_ops->id); 1723 return NULL; 1724 } 1725 if (tunnel_ops->actions) { 1726 uint32_t num_actions; 1727 const struct rte_flow_action *aptr; 1728 1729 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 1730 &pft->pmd_actions, 1731 &pft->num_pmd_actions, 1732 &error); 1733 if (ret) { 1734 port_flow_complain(&error); 1735 return NULL; 1736 } 1737 for (aptr = actions, num_actions = 1; 1738 aptr->type != RTE_FLOW_ACTION_TYPE_END; 1739 aptr++, num_actions++); 1740 pft->actions = malloc( 1741 (num_actions + pft->num_pmd_actions) * 1742 sizeof(actions[0])); 1743 if (!pft->actions) { 1744 rte_flow_tunnel_action_decap_release( 1745 port_id, pft->actions, 1746 pft->num_pmd_actions, &error); 1747 return NULL; 1748 } 1749 rte_memcpy(pft->actions, pft->pmd_actions, 1750 pft->num_pmd_actions * sizeof(actions[0])); 1751 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 1752 num_actions * sizeof(actions[0])); 1753 } 1754 if (tunnel_ops->items) { 1755 uint32_t num_items; 1756 const struct rte_flow_item *iptr; 1757 1758 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 1759 &pft->pmd_items, 1760 &pft->num_pmd_items, 1761 &error); 1762 if (ret) { 1763 port_flow_complain(&error); 1764 return NULL; 1765 } 1766 for (iptr = pattern, num_items = 1; 1767 iptr->type != RTE_FLOW_ITEM_TYPE_END; 1768 iptr++, num_items++); 1769 pft->items = malloc((num_items + pft->num_pmd_items) * 1770 sizeof(pattern[0])); 1771 if (!pft->items) { 1772 rte_flow_tunnel_item_release( 1773 port_id, pft->pmd_items, 1774 pft->num_pmd_items, &error); 1775 return NULL; 1776 } 1777 rte_memcpy(pft->items, pft->pmd_items, 1778 pft->num_pmd_items * sizeof(pattern[0])); 1779 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 1780 num_items * sizeof(pattern[0])); 1781 } 1782 1783 return pft; 1784 } 1785 1786 
/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions,
		   const struct tunnel_ops *tunnel_ops)
{
	struct rte_flow_error error;
	struct port_flow_tunnel *pft = NULL;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (tunnel_ops->enabled) {
		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
							actions, tunnel_ops);
		if (!pft)
			return -ENOENT;
		if (pft->items)
			pattern = pft->items;
		if (pft->actions)
			actions = pft->actions;
	}
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	if (tunnel_ops->enabled)
		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
	printf("Flow rule validated\n");
	return 0;
}

/** Return age action structure if exists, otherwise NULL. */
static struct rte_flow_action_age *
age_action_get(const struct rte_flow_action *actions)
{
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_AGE:
			return (struct rte_flow_action_age *)
				(uintptr_t)actions->conf;
		default:
			break;
		}
	}
	return NULL;
}
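/*
 * Create a flow rule, e.g. from the testpmd CLI:
 *   testpmd> flow create 0 ingress pattern eth / ipv4 / end
 *            actions queue index 1 / end
 * Rule IDs are allocated from the list head (newest rule first), so the
 * next ID is simply the previous head's ID + 1.
 */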
/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions,
		 const struct tunnel_ops *tunnel_ops)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id = 0;
	struct rte_flow_error error;
	struct port_flow_tunnel *pft = NULL;
	struct rte_flow_action_age *age = age_action_get(actions);

	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first\n");
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	}
	if (tunnel_ops->enabled) {
		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
							actions, tunnel_ops);
		if (!pft)
			return -ENOENT;
		if (pft->items)
			pattern = pft->items;
		if (pft->actions)
			actions = pft->actions;
	}
	pf = port_flow_new(attr, pattern, actions, &error);
	if (!pf)
		return port_flow_complain(&error);
	if (age) {
		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
		age->context = &pf->age_type;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow) {
		free(pf);
		return port_flow_complain(&error);
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	if (tunnel_ops->enabled)
		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	port = &ports[port_id];

	if (port->flow_list == NULL)
		return ret;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		port_flow_complain(&error);
	}

	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}
	 */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error))
		port_flow_complain(&error);

	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

/** Dump flow rules. */
int
port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id,
	       const char *file_name)
{
	int ret = 0;
	FILE *file = stdout;
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pflow;
	struct rte_flow *tmp_flow = NULL;
	bool found = false;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	if (!dump_all) {
		port = &ports[port_id];
		pflow = port->flow_list;
		while (pflow) {
			if (rule_id != pflow->id) {
				pflow = pflow->next;
			} else {
				tmp_flow = pflow->flow;
				if (tmp_flow)
					found = true;
				break;
			}
		}
		if (!found) {
			printf("Failed to dump flow %u: rule not found\n",
			       rule_id);
			return -EINVAL;
		}
	}

	if (file_name && strlen(file_name)) {
		file = fopen(file_name, "w");
		if (!file) {
			printf("Failed to create file %s: %s\n", file_name,
			       strerror(errno));
			return -errno;
		}
	}

	if (!dump_all)
		ret = rte_flow_dev_dump(port_id, tmp_flow, file, &error);
	else
		ret = rte_flow_dev_dump(port_id, NULL, file, &error);
	if (ret) {
		port_flow_complain(&error);
		printf("Failed to dump flow: %s\n", strerror(-ret));
	} else
		printf("Flow dump finished\n");
	if (file_name && strlen(file_name))
		fclose(file);
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
		struct rte_flow_action_rss rss_conf;
		struct rte_flow_query_age age;
	} query;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
			    &name, sizeof(name),
			    (void *)(uintptr_t)action->type, &error);
	if (ret < 0)
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
	case RTE_FLOW_ACTION_TYPE_RSS:
	case RTE_FLOW_ACTION_TYPE_AGE:
		break;
	default:
		printf("Cannot query action type %d (%s)\n",
		       action->type, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error.
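	 * The anonymous 'query' union above is zeroed below for a similar
	 * reason: COUNT, RSS and AGE results share its storage and only the
	 * branch matching action->type is read back. From the testpmd CLI
	 * this path is typically reached by, e.g. (hypothetical port and
	 * rule IDs):
	 *
	 *	testpmd> flow query 0 0 count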
	 */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss_config_display(&query.rss_conf);
		break;
	case RTE_FLOW_ACTION_TYPE_AGE:
		printf("%s:\n"
		       " aged: %u\n"
		       " sec_since_last_hit_valid: %u\n"
		       " sec_since_last_hit: %" PRIu32 "\n",
		       name,
		       query.age.aged,
		       query.age.sec_since_last_hit_valid,
		       query.age.sec_since_last_hit);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action->type, name);
		break;
	}
	return 0;
}

/** List and optionally destroy aged flows. */
void
port_flow_aged(portid_t port_id, uint8_t destroy)
{
	void **contexts;
	int nb_context, total = 0, idx;
	struct rte_flow_error error;
	enum age_action_context_type *type;
	union {
		struct port_flow *pf;
		struct port_indirect_action *pia;
	} ctx;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	if (total < 0) {
		port_flow_complain(&error);
		return;
	}
	printf("Port %u total aged flows: %d\n", port_id, total);
	if (total == 0)
		return;
	contexts = malloc(sizeof(void *) * total);
	if (contexts == NULL) {
		printf("Cannot allocate contexts for aged flow\n");
		return;
	}
	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
	nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
	if (nb_context != total) {
		printf("Port:%d get aged flows count(%d) != total(%d)\n",
		       port_id, nb_context, total);
		free(contexts);
		return;
	}
	total = 0;
	for (idx = 0; idx < nb_context; idx++) {
		if (!contexts[idx]) {
			printf("Error: got NULL context on port %u\n",
			       port_id);
			continue;
		}
		type = (enum age_action_context_type *)contexts[idx];
		switch (*type) {
		case ACTION_AGE_CONTEXT_TYPE_FLOW:
			ctx.pf = container_of(type, struct port_flow, age_type);
			printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32
			       "\t%c%c%c\t\n",
			       "Flow",
			       ctx.pf->id,
			       ctx.pf->rule.attr->group,
			       ctx.pf->rule.attr->priority,
			       ctx.pf->rule.attr->ingress ? 'i' : '-',
			       ctx.pf->rule.attr->egress ? 'e' : '-',
			       ctx.pf->rule.attr->transfer ? 't' : '-');
			if (destroy && !port_flow_destroy(port_id, 1,
							  &ctx.pf->id))
				total++;
			break;
		case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
			ctx.pia = container_of(type,
					struct port_indirect_action, age_type);
			printf("%-20s\t%" PRIu32 "\n", "Indirect action",
			       ctx.pia->id);
			break;
		default:
			printf("Error: invalid context type on port %u\n",
			       port_id);
			break;
		}
	}
	printf("\n%d flows destroyed\n", total);
	free(contexts);
}

/** List flow rules.
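 *
 * Output is one line per rule, matching the header printed below, e.g.
 * (illustrative values only):
 *
 *	ID	Group	Prio	Attr	Rule
 *	0	0	0	i--	ETH IPV4 => QUEUE
 *	1	0	1	i--	ETH IPV6 => DROP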
*/ 2239 void 2240 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2241 { 2242 struct rte_port *port; 2243 struct port_flow *pf; 2244 struct port_flow *list = NULL; 2245 uint32_t i; 2246 2247 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2248 port_id == (portid_t)RTE_PORT_ALL) 2249 return; 2250 port = &ports[port_id]; 2251 if (!port->flow_list) 2252 return; 2253 /* Sort flows by group, priority and ID. */ 2254 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2255 struct port_flow **tmp; 2256 const struct rte_flow_attr *curr = pf->rule.attr; 2257 2258 if (n) { 2259 /* Filter out unwanted groups. */ 2260 for (i = 0; i != n; ++i) 2261 if (curr->group == group[i]) 2262 break; 2263 if (i == n) 2264 continue; 2265 } 2266 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2267 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2268 2269 if (curr->group > comp->group || 2270 (curr->group == comp->group && 2271 curr->priority > comp->priority) || 2272 (curr->group == comp->group && 2273 curr->priority == comp->priority && 2274 pf->id > (*tmp)->id)) 2275 continue; 2276 break; 2277 } 2278 pf->tmp = *tmp; 2279 *tmp = pf; 2280 } 2281 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2282 for (pf = list; pf != NULL; pf = pf->tmp) { 2283 const struct rte_flow_item *item = pf->rule.pattern; 2284 const struct rte_flow_action *action = pf->rule.actions; 2285 const char *name; 2286 2287 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2288 pf->id, 2289 pf->rule.attr->group, 2290 pf->rule.attr->priority, 2291 pf->rule.attr->ingress ? 'i' : '-', 2292 pf->rule.attr->egress ? 'e' : '-', 2293 pf->rule.attr->transfer ? 't' : '-'); 2294 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2295 if ((uint32_t)item->type > INT_MAX) 2296 name = "PMD_INTERNAL"; 2297 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2298 &name, sizeof(name), 2299 (void *)(uintptr_t)item->type, 2300 NULL) <= 0) 2301 name = "[UNKNOWN]"; 2302 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2303 printf("%s ", name); 2304 ++item; 2305 } 2306 printf("=>"); 2307 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2308 if ((uint32_t)action->type > INT_MAX) 2309 name = "PMD_INTERNAL"; 2310 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2311 &name, sizeof(name), 2312 (void *)(uintptr_t)action->type, 2313 NULL) <= 0) 2314 name = "[UNKNOWN]"; 2315 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2316 printf(" %s", name); 2317 ++action; 2318 } 2319 printf("\n"); 2320 } 2321 } 2322 2323 /** Restrict ingress traffic to the defined flow rules. */ 2324 int 2325 port_flow_isolate(portid_t port_id, int set) 2326 { 2327 struct rte_flow_error error; 2328 2329 /* Poisoning to make sure PMDs update it in case of error. */ 2330 memset(&error, 0x66, sizeof(error)); 2331 if (rte_flow_isolate(port_id, set, &error)) 2332 return port_flow_complain(&error); 2333 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2334 port_id, 2335 set ? "now restricted" : "not restricted anymore"); 2336 return 0; 2337 } 2338 2339 /* 2340 * RX/TX ring descriptors display functions. 
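 *
 * These helpers back the testpmd "read rxd" and "read txd" commands: the
 * descriptor ring is located through its memzone (see ring_dma_zone_lookup()
 * below) and the selected entry is converted from little-endian and dumped
 * as raw 32-bit words. Example session (hypothetical port, queue and
 * descriptor IDs; the values shown are illustrative only):
 *
 *	testpmd> read txd 0 0 32
 *	 0x00000000 - 0x00000000 / 0x00000000 - 0xB5000010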
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
{
	struct rte_port *port = &ports[port_id];
	struct rte_eth_rxq_info rx_qinfo;
	int ret;

	ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
	if (ret == 0) {
		*ring_size = rx_qinfo.nb_desc;
		return ret;
	}

	if (ret != -ENOTSUP)
		return ret;
	/*
	 * If rte_eth_rx_queue_info_get() is not supported by this PMD, the
	 * ring_size stored in testpmd is used for validity verification.
	 * When the rxq is configured by rte_eth_rx_queue_setup() with
	 * nb_rx_desc being 0, the default value provided by the PMD is used
	 * to set up this rxq. If that default value is also 0,
	 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE is used to set up this rxq.
	 */
	if (port->nb_rx_desc[rxq_id])
		*ring_size = port->nb_rx_desc[rxq_id];
	else if (port->dev_info.default_rxportconf.ring_size)
		*ring_size = port->dev_info.default_rxportconf.ring_size;
	else
		*ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	return 0;
}

static int
get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
{
	struct rte_port *port = &ports[port_id];
	struct rte_eth_txq_info tx_qinfo;
	int ret;

	ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
	if (ret == 0) {
		*ring_size = tx_qinfo.nb_desc;
		return ret;
	}

	if (ret != -ENOTSUP)
		return ret;
	/*
	 * If rte_eth_tx_queue_info_get() is not supported by this PMD, the
	 * ring_size stored in testpmd is used for validity verification.
	 * When the txq is configured by rte_eth_tx_queue_setup() with
	 * nb_tx_desc being 0, the default value provided by the PMD is used
	 * to set up this txq. If that default value is also 0,
	 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE is used to set up this txq.
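	 *
	 * The precedence is therefore: testpmd's own nb_tx_desc, then the
	 * PMD default, then the ethdev fallback. For example (hypothetical
	 * values): with nb_tx_desc[txq_id] == 0 and
	 * default_txportconf.ring_size == 256, *ring_size becomes 256; if
	 * both are 0, it becomes RTE_ETH_DEV_FALLBACK_TX_RINGSIZE (512 in
	 * current ethdev headers).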
	 */
	if (port->nb_tx_desc[txq_id])
		*ring_size = port->nb_tx_desc[txq_id];
	else if (port->dev_info.default_txportconf.ring_size)
		*ring_size = port->dev_info.default_txportconf.ring_size;
	else
		*ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	return 0;
}

static int
rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
{
	uint16_t ring_size;
	int ret;

	ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
	if (ret)
		return 1;

	if (rxdesc_id < ring_size)
		return 0;

	printf("Invalid RX descriptor %u (must be < ring_size=%u)\n",
	       rxdesc_id, ring_size);
	return 1;
}

static int
tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
{
	uint16_t ring_size;
	int ret;

	ret = get_tx_ring_size(port_id, txq_id, &ring_size);
	if (ret)
		return 1;

	if (txdesc_id < ring_size)
		return 0;

	printf("Invalid TX descriptor %u (must be < ring_size=%u)\n",
	       txdesc_id, ring_size);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
		 port_id, q_id, ring_name);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	int ret;
	struct rte_eth_dev_info dev_info;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
2541 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2542 ring_rxd_display_dword(ring[desc_id].resv2); 2543 2544 return; 2545 } 2546 #endif 2547 /* 16 bytes RX descriptor */ 2548 ring[desc_id].lo_dword.dword = 2549 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2550 ring_rxd_display_dword(ring[desc_id].lo_dword); 2551 ring[desc_id].hi_dword.dword = 2552 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2553 ring_rxd_display_dword(ring[desc_id].hi_dword); 2554 } 2555 2556 static void 2557 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2558 { 2559 struct igb_ring_desc_16_bytes *ring; 2560 struct igb_ring_desc_16_bytes txd; 2561 2562 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2563 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2564 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2565 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2566 (unsigned)txd.lo_dword.words.lo, 2567 (unsigned)txd.lo_dword.words.hi, 2568 (unsigned)txd.hi_dword.words.lo, 2569 (unsigned)txd.hi_dword.words.hi); 2570 } 2571 2572 void 2573 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2574 { 2575 const struct rte_memzone *rx_mz; 2576 2577 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2578 return; 2579 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2580 if (rx_mz == NULL) 2581 return; 2582 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2583 } 2584 2585 void 2586 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2587 { 2588 const struct rte_memzone *tx_mz; 2589 2590 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2591 return; 2592 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2593 if (tx_mz == NULL) 2594 return; 2595 ring_tx_descriptor_display(tx_mz, txd_id); 2596 } 2597 2598 void 2599 fwd_lcores_config_display(void) 2600 { 2601 lcoreid_t lc_id; 2602 2603 printf("List of forwarding lcores:"); 2604 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2605 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2606 printf("\n"); 2607 } 2608 void 2609 rxtx_config_display(void) 2610 { 2611 portid_t pid; 2612 queueid_t qid; 2613 2614 printf(" %s packet forwarding%s packets/burst=%d\n", 2615 cur_fwd_eng->fwd_mode_name, 2616 retry_enabled == 0 ? 
"" : " with retry", 2617 nb_pkt_per_burst); 2618 2619 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2620 printf(" packet len=%u - nb packet segments=%d\n", 2621 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2622 2623 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2624 nb_fwd_lcores, nb_fwd_ports); 2625 2626 RTE_ETH_FOREACH_DEV(pid) { 2627 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2628 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2629 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2630 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2631 struct rte_eth_rxq_info rx_qinfo; 2632 struct rte_eth_txq_info tx_qinfo; 2633 uint16_t rx_free_thresh_tmp; 2634 uint16_t tx_free_thresh_tmp; 2635 uint16_t tx_rs_thresh_tmp; 2636 uint16_t nb_rx_desc_tmp; 2637 uint16_t nb_tx_desc_tmp; 2638 uint64_t offloads_tmp; 2639 uint8_t pthresh_tmp; 2640 uint8_t hthresh_tmp; 2641 uint8_t wthresh_tmp; 2642 int32_t rc; 2643 2644 /* per port config */ 2645 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2646 (unsigned int)pid, nb_rxq, nb_txq); 2647 2648 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2649 ports[pid].dev_conf.rxmode.offloads, 2650 ports[pid].dev_conf.txmode.offloads); 2651 2652 /* per rx queue config only for first queue to be less verbose */ 2653 for (qid = 0; qid < 1; qid++) { 2654 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2655 if (rc) { 2656 nb_rx_desc_tmp = nb_rx_desc[qid]; 2657 rx_free_thresh_tmp = 2658 rx_conf[qid].rx_free_thresh; 2659 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2660 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2661 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2662 offloads_tmp = rx_conf[qid].offloads; 2663 } else { 2664 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2665 rx_free_thresh_tmp = 2666 rx_qinfo.conf.rx_free_thresh; 2667 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2668 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2669 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2670 offloads_tmp = rx_qinfo.conf.offloads; 2671 } 2672 2673 printf(" RX queue: %d\n", qid); 2674 printf(" RX desc=%d - RX free threshold=%d\n", 2675 nb_rx_desc_tmp, rx_free_thresh_tmp); 2676 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2677 " wthresh=%d\n", 2678 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2679 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); 2680 } 2681 2682 /* per tx queue config only for first queue to be less verbose */ 2683 for (qid = 0; qid < 1; qid++) { 2684 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2685 if (rc) { 2686 nb_tx_desc_tmp = nb_tx_desc[qid]; 2687 tx_free_thresh_tmp = 2688 tx_conf[qid].tx_free_thresh; 2689 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2690 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2691 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2692 offloads_tmp = tx_conf[qid].offloads; 2693 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2694 } else { 2695 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2696 tx_free_thresh_tmp = 2697 tx_qinfo.conf.tx_free_thresh; 2698 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2699 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2700 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2701 offloads_tmp = tx_qinfo.conf.offloads; 2702 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2703 } 2704 2705 printf(" TX queue: %d\n", qid); 2706 printf(" TX desc=%d - TX free threshold=%d\n", 2707 nb_tx_desc_tmp, tx_free_thresh_tmp); 2708 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2709 " wthresh=%d\n", 2710 pthresh_tmp, 
			       hthresh_tmp, wthresh_tmp);
			printf("  TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
			       offloads_tmp, tx_rs_thresh_tmp);
		}
	}
}

void
port_rss_reta_info(portid_t port_id,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t nb_entries)
{
	uint16_t i, idx, shift;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	for (i = 0; i < nb_entries; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
		       i, reta_conf[idx].reta[shift]);
	}
}

/*
 * Displays the RSS hash functions of a port and, optionally, the RSS hash
 * key of the port.
 */
void
port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
{
	struct rte_eth_rss_conf rss_conf = {0};
	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
	uint64_t rss_hf;
	uint8_t i;
	int diag;
	struct rte_eth_dev_info dev_info;
	uint8_t hash_key_size;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (dev_info.hash_key_size > 0 &&
	    dev_info.hash_key_size <= sizeof(rss_key))
		hash_key_size = dev_info.hash_key_size;
	else {
		printf("dev_info did not provide a valid hash key size\n");
		return;
	}

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ?
rss_key : NULL; 2777 rss_conf.rss_key_len = hash_key_size; 2778 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2779 if (diag != 0) { 2780 switch (diag) { 2781 case -ENODEV: 2782 printf("port index %d invalid\n", port_id); 2783 break; 2784 case -ENOTSUP: 2785 printf("operation not supported by device\n"); 2786 break; 2787 default: 2788 printf("operation failed - diag=%d\n", diag); 2789 break; 2790 } 2791 return; 2792 } 2793 rss_hf = rss_conf.rss_hf; 2794 if (rss_hf == 0) { 2795 printf("RSS disabled\n"); 2796 return; 2797 } 2798 printf("RSS functions:\n "); 2799 for (i = 0; rss_type_table[i].str; i++) { 2800 if (rss_hf & rss_type_table[i].rss_type) 2801 printf("%s ", rss_type_table[i].str); 2802 } 2803 printf("\n"); 2804 if (!show_rss_key) 2805 return; 2806 printf("RSS key:\n"); 2807 for (i = 0; i < hash_key_size; i++) 2808 printf("%02X", rss_key[i]); 2809 printf("\n"); 2810 } 2811 2812 void 2813 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2814 uint8_t hash_key_len) 2815 { 2816 struct rte_eth_rss_conf rss_conf; 2817 int diag; 2818 unsigned int i; 2819 2820 rss_conf.rss_key = NULL; 2821 rss_conf.rss_key_len = hash_key_len; 2822 rss_conf.rss_hf = 0; 2823 for (i = 0; rss_type_table[i].str; i++) { 2824 if (!strcmp(rss_type_table[i].str, rss_type)) 2825 rss_conf.rss_hf = rss_type_table[i].rss_type; 2826 } 2827 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2828 if (diag == 0) { 2829 rss_conf.rss_key = hash_key; 2830 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2831 } 2832 if (diag == 0) 2833 return; 2834 2835 switch (diag) { 2836 case -ENODEV: 2837 printf("port index %d invalid\n", port_id); 2838 break; 2839 case -ENOTSUP: 2840 printf("operation not supported by device\n"); 2841 break; 2842 default: 2843 printf("operation failed - diag=%d\n", diag); 2844 break; 2845 } 2846 } 2847 2848 /* 2849 * Setup forwarding configuration for each logical core. 2850 */ 2851 static void 2852 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2853 { 2854 streamid_t nb_fs_per_lcore; 2855 streamid_t nb_fs; 2856 streamid_t sm_id; 2857 lcoreid_t nb_extra; 2858 lcoreid_t nb_fc; 2859 lcoreid_t nb_lc; 2860 lcoreid_t lc_id; 2861 2862 nb_fs = cfg->nb_fwd_streams; 2863 nb_fc = cfg->nb_fwd_lcores; 2864 if (nb_fs <= nb_fc) { 2865 nb_fs_per_lcore = 1; 2866 nb_extra = 0; 2867 } else { 2868 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2869 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2870 } 2871 2872 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2873 sm_id = 0; 2874 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2875 fwd_lcores[lc_id]->stream_idx = sm_id; 2876 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2877 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2878 } 2879 2880 /* 2881 * Assign extra remaining streams, if any. 2882 */ 2883 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2884 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2885 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2886 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2887 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2888 } 2889 } 2890 2891 static portid_t 2892 fwd_topology_tx_port_get(portid_t rxp) 2893 { 2894 static int warning_once = 1; 2895 2896 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2897 2898 switch (port_topology) { 2899 default: 2900 case PORT_TOPOLOGY_PAIRED: 2901 if ((rxp & 0x1) == 0) { 2902 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2903 return rxp + 1; 2904 if (warning_once) { 2905 printf("\nWarning! 
port-topology=paired" 2906 " and odd forward ports number," 2907 " the last port will pair with" 2908 " itself.\n\n"); 2909 warning_once = 0; 2910 } 2911 return rxp; 2912 } 2913 return rxp - 1; 2914 case PORT_TOPOLOGY_CHAINED: 2915 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2916 case PORT_TOPOLOGY_LOOP: 2917 return rxp; 2918 } 2919 } 2920 2921 static void 2922 simple_fwd_config_setup(void) 2923 { 2924 portid_t i; 2925 2926 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2927 cur_fwd_config.nb_fwd_streams = 2928 (streamid_t) cur_fwd_config.nb_fwd_ports; 2929 2930 /* reinitialize forwarding streams */ 2931 init_fwd_streams(); 2932 2933 /* 2934 * In the simple forwarding test, the number of forwarding cores 2935 * must be lower or equal to the number of forwarding ports. 2936 */ 2937 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2938 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2939 cur_fwd_config.nb_fwd_lcores = 2940 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2941 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2942 2943 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2944 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2945 fwd_streams[i]->rx_queue = 0; 2946 fwd_streams[i]->tx_port = 2947 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2948 fwd_streams[i]->tx_queue = 0; 2949 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2950 fwd_streams[i]->retry_enabled = retry_enabled; 2951 } 2952 } 2953 2954 /** 2955 * For the RSS forwarding test all streams distributed over lcores. Each stream 2956 * being composed of a RX queue to poll on a RX port for input messages, 2957 * associated with a TX queue of a TX port where to send forwarded packets. 2958 */ 2959 static void 2960 rss_fwd_config_setup(void) 2961 { 2962 portid_t rxp; 2963 portid_t txp; 2964 queueid_t rxq; 2965 queueid_t nb_q; 2966 streamid_t sm_id; 2967 2968 nb_q = nb_rxq; 2969 if (nb_q > nb_txq) 2970 nb_q = nb_txq; 2971 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2972 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2973 cur_fwd_config.nb_fwd_streams = 2974 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2975 2976 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2977 cur_fwd_config.nb_fwd_lcores = 2978 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2979 2980 /* reinitialize forwarding streams */ 2981 init_fwd_streams(); 2982 2983 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2984 rxp = 0; rxq = 0; 2985 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2986 struct fwd_stream *fs; 2987 2988 fs = fwd_streams[sm_id]; 2989 txp = fwd_topology_tx_port_get(rxp); 2990 fs->rx_port = fwd_ports_ids[rxp]; 2991 fs->rx_queue = rxq; 2992 fs->tx_port = fwd_ports_ids[txp]; 2993 fs->tx_queue = rxq; 2994 fs->peer_addr = fs->tx_port; 2995 fs->retry_enabled = retry_enabled; 2996 rxp++; 2997 if (rxp < nb_fwd_ports) 2998 continue; 2999 rxp = 0; 3000 rxq++; 3001 } 3002 } 3003 3004 static uint16_t 3005 get_fwd_port_total_tc_num(void) 3006 { 3007 struct rte_eth_dcb_info dcb_info; 3008 uint16_t total_tc_num = 0; 3009 unsigned int i; 3010 3011 for (i = 0; i < nb_fwd_ports; i++) { 3012 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 3013 total_tc_num += dcb_info.nb_tcs; 3014 } 3015 3016 return total_tc_num; 3017 } 3018 3019 /** 3020 * For the DCB forwarding test, each core is assigned on each traffic class. 
3021 * 3022 * Each core is assigned a multi-stream, each stream being composed of 3023 * a RX queue to poll on a RX port for input messages, associated with 3024 * a TX queue of a TX port where to send forwarded packets. All RX and 3025 * TX queues are mapping to the same traffic class. 3026 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 3027 * the same core 3028 */ 3029 static void 3030 dcb_fwd_config_setup(void) 3031 { 3032 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3033 portid_t txp, rxp = 0; 3034 queueid_t txq, rxq = 0; 3035 lcoreid_t lc_id; 3036 uint16_t nb_rx_queue, nb_tx_queue; 3037 uint16_t i, j, k, sm_id = 0; 3038 uint16_t total_tc_num; 3039 struct rte_port *port; 3040 uint8_t tc = 0; 3041 portid_t pid; 3042 int ret; 3043 3044 /* 3045 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 3046 * or RTE_PORT_STOPPED. 3047 * 3048 * Re-configure ports to get updated mapping between tc and queue in 3049 * case the queue number of the port is changed. Skip for started ports 3050 * since modifying queue number and calling dev_configure need to stop 3051 * ports first. 3052 */ 3053 for (pid = 0; pid < nb_fwd_ports; pid++) { 3054 if (port_is_started(pid) == 1) 3055 continue; 3056 3057 port = &ports[pid]; 3058 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 3059 &port->dev_conf); 3060 if (ret < 0) { 3061 printf("Failed to re-configure port %d, ret = %d.\n", 3062 pid, ret); 3063 return; 3064 } 3065 } 3066 3067 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3068 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3069 cur_fwd_config.nb_fwd_streams = 3070 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3071 total_tc_num = get_fwd_port_total_tc_num(); 3072 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 3073 cur_fwd_config.nb_fwd_lcores = total_tc_num; 3074 3075 /* reinitialize forwarding streams */ 3076 init_fwd_streams(); 3077 sm_id = 0; 3078 txp = 1; 3079 /* get the dcb info on the first RX and TX ports */ 3080 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3081 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3082 3083 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3084 fwd_lcores[lc_id]->stream_nb = 0; 3085 fwd_lcores[lc_id]->stream_idx = sm_id; 3086 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 3087 /* if the nb_queue is zero, means this tc is 3088 * not enabled on the POOL 3089 */ 3090 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3091 break; 3092 k = fwd_lcores[lc_id]->stream_nb + 3093 fwd_lcores[lc_id]->stream_idx; 3094 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3095 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3096 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3097 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3098 for (j = 0; j < nb_rx_queue; j++) { 3099 struct fwd_stream *fs; 3100 3101 fs = fwd_streams[k + j]; 3102 fs->rx_port = fwd_ports_ids[rxp]; 3103 fs->rx_queue = rxq + j; 3104 fs->tx_port = fwd_ports_ids[txp]; 3105 fs->tx_queue = txq + j % nb_tx_queue; 3106 fs->peer_addr = fs->tx_port; 3107 fs->retry_enabled = retry_enabled; 3108 } 3109 fwd_lcores[lc_id]->stream_nb += 3110 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3111 } 3112 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3113 3114 tc++; 3115 if (tc < rxp_dcb_info.nb_tcs) 3116 continue; 3117 /* Restart from TC 0 on next RX port */ 3118 tc = 0; 3119 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3120 rxp = (portid_t) 3121 (rxp + ((nb_ports >> 1) / 
nb_fwd_ports)); 3122 else 3123 rxp++; 3124 if (rxp >= nb_fwd_ports) 3125 return; 3126 /* get the dcb information on next RX and TX ports */ 3127 if ((rxp & 0x1) == 0) 3128 txp = (portid_t) (rxp + 1); 3129 else 3130 txp = (portid_t) (rxp - 1); 3131 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3132 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3133 } 3134 } 3135 3136 static void 3137 icmp_echo_config_setup(void) 3138 { 3139 portid_t rxp; 3140 queueid_t rxq; 3141 lcoreid_t lc_id; 3142 uint16_t sm_id; 3143 3144 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3145 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3146 (nb_txq * nb_fwd_ports); 3147 else 3148 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3149 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3150 cur_fwd_config.nb_fwd_streams = 3151 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3152 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3153 cur_fwd_config.nb_fwd_lcores = 3154 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3155 if (verbose_level > 0) { 3156 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 3157 __FUNCTION__, 3158 cur_fwd_config.nb_fwd_lcores, 3159 cur_fwd_config.nb_fwd_ports, 3160 cur_fwd_config.nb_fwd_streams); 3161 } 3162 3163 /* reinitialize forwarding streams */ 3164 init_fwd_streams(); 3165 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3166 rxp = 0; rxq = 0; 3167 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3168 if (verbose_level > 0) 3169 printf(" core=%d: \n", lc_id); 3170 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3171 struct fwd_stream *fs; 3172 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3173 fs->rx_port = fwd_ports_ids[rxp]; 3174 fs->rx_queue = rxq; 3175 fs->tx_port = fs->rx_port; 3176 fs->tx_queue = rxq; 3177 fs->peer_addr = fs->tx_port; 3178 fs->retry_enabled = retry_enabled; 3179 if (verbose_level > 0) 3180 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3181 sm_id, fs->rx_port, fs->rx_queue, 3182 fs->tx_queue); 3183 rxq = (queueid_t) (rxq + 1); 3184 if (rxq == nb_rxq) { 3185 rxq = 0; 3186 rxp = (portid_t) (rxp + 1); 3187 } 3188 } 3189 } 3190 } 3191 3192 void 3193 fwd_config_setup(void) 3194 { 3195 struct rte_port *port; 3196 portid_t pt_id; 3197 unsigned int i; 3198 3199 cur_fwd_config.fwd_eng = cur_fwd_eng; 3200 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3201 icmp_echo_config_setup(); 3202 return; 3203 } 3204 3205 if ((nb_rxq > 1) && (nb_txq > 1)){ 3206 if (dcb_config) { 3207 for (i = 0; i < nb_fwd_ports; i++) { 3208 pt_id = fwd_ports_ids[i]; 3209 port = &ports[pt_id]; 3210 if (!port->dcb_flag) { 3211 printf("In DCB mode, all forwarding ports must " 3212 "be configured in this mode.\n"); 3213 return; 3214 } 3215 } 3216 if (nb_fwd_lcores == 1) { 3217 printf("In DCB mode,the nb forwarding cores " 3218 "should be larger than 1.\n"); 3219 return; 3220 } 3221 3222 dcb_fwd_config_setup(); 3223 } else 3224 rss_fwd_config_setup(); 3225 } 3226 else 3227 simple_fwd_config_setup(); 3228 } 3229 3230 static const char * 3231 mp_alloc_to_str(uint8_t mode) 3232 { 3233 switch (mode) { 3234 case MP_ALLOC_NATIVE: 3235 return "native"; 3236 case MP_ALLOC_ANON: 3237 return "anon"; 3238 case MP_ALLOC_XMEM: 3239 return "xmem"; 3240 case MP_ALLOC_XMEM_HUGE: 3241 return "xmemhuge"; 3242 case MP_ALLOC_XBUF: 3243 return "xbuf"; 3244 default: 3245 return "invalid"; 3246 } 3247 } 3248 3249 void 3250 pkt_fwd_config_display(struct fwd_config *cfg) 3251 { 3252 struct fwd_stream *fs; 3253 lcoreid_t lc_id; 3254 
streamid_t sm_id; 3255 3256 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3257 "NUMA support %s, MP allocation mode: %s\n", 3258 cfg->fwd_eng->fwd_mode_name, 3259 retry_enabled == 0 ? "" : " with retry", 3260 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3261 numa_support == 1 ? "enabled" : "disabled", 3262 mp_alloc_to_str(mp_alloc_type)); 3263 3264 if (retry_enabled) 3265 printf("TX retry num: %u, delay between TX retries: %uus\n", 3266 burst_tx_retry_num, burst_tx_delay_time); 3267 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3268 printf("Logical Core %u (socket %u) forwards packets on " 3269 "%d streams:", 3270 fwd_lcores_cpuids[lc_id], 3271 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3272 fwd_lcores[lc_id]->stream_nb); 3273 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3274 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3275 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3276 "P=%d/Q=%d (socket %u) ", 3277 fs->rx_port, fs->rx_queue, 3278 ports[fs->rx_port].socket_id, 3279 fs->tx_port, fs->tx_queue, 3280 ports[fs->tx_port].socket_id); 3281 print_ethaddr("peer=", 3282 &peer_eth_addrs[fs->peer_addr]); 3283 } 3284 printf("\n"); 3285 } 3286 printf("\n"); 3287 } 3288 3289 void 3290 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3291 { 3292 struct rte_ether_addr new_peer_addr; 3293 if (!rte_eth_dev_is_valid_port(port_id)) { 3294 printf("Error: Invalid port number %i\n", port_id); 3295 return; 3296 } 3297 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3298 printf("Error: Invalid ethernet address: %s\n", peer_addr); 3299 return; 3300 } 3301 peer_eth_addrs[port_id] = new_peer_addr; 3302 } 3303 3304 int 3305 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3306 { 3307 unsigned int i; 3308 unsigned int lcore_cpuid; 3309 int record_now; 3310 3311 record_now = 0; 3312 again: 3313 for (i = 0; i < nb_lc; i++) { 3314 lcore_cpuid = lcorelist[i]; 3315 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3316 printf("lcore %u not enabled\n", lcore_cpuid); 3317 return -1; 3318 } 3319 if (lcore_cpuid == rte_get_main_lcore()) { 3320 printf("lcore %u cannot be masked on for running " 3321 "packet forwarding, which is the main lcore " 3322 "and reserved for command line parsing only\n", 3323 lcore_cpuid); 3324 return -1; 3325 } 3326 if (record_now) 3327 fwd_lcores_cpuids[i] = lcore_cpuid; 3328 } 3329 if (record_now == 0) { 3330 record_now = 1; 3331 goto again; 3332 } 3333 nb_cfg_lcores = (lcoreid_t) nb_lc; 3334 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3335 printf("previous number of forwarding cores %u - changed to " 3336 "number of configured cores %u\n", 3337 (unsigned int) nb_fwd_lcores, nb_lc); 3338 nb_fwd_lcores = (lcoreid_t) nb_lc; 3339 } 3340 3341 return 0; 3342 } 3343 3344 int 3345 set_fwd_lcores_mask(uint64_t lcoremask) 3346 { 3347 unsigned int lcorelist[64]; 3348 unsigned int nb_lc; 3349 unsigned int i; 3350 3351 if (lcoremask == 0) { 3352 printf("Invalid NULL mask of cores\n"); 3353 return -1; 3354 } 3355 nb_lc = 0; 3356 for (i = 0; i < 64; i++) { 3357 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3358 continue; 3359 lcorelist[nb_lc++] = i; 3360 } 3361 return set_fwd_lcores_list(lcorelist, nb_lc); 3362 } 3363 3364 void 3365 set_fwd_lcores_number(uint16_t nb_lc) 3366 { 3367 if (test_done == 0) { 3368 printf("Please stop forwarding first\n"); 3369 return; 3370 } 3371 if (nb_lc > nb_cfg_lcores) { 3372 printf("nb fwd cores %u > %u (max. 
number of configured " 3373 "lcores) - ignored\n", 3374 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3375 return; 3376 } 3377 nb_fwd_lcores = (lcoreid_t) nb_lc; 3378 printf("Number of forwarding cores set to %u\n", 3379 (unsigned int) nb_fwd_lcores); 3380 } 3381 3382 void 3383 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3384 { 3385 unsigned int i; 3386 portid_t port_id; 3387 int record_now; 3388 3389 record_now = 0; 3390 again: 3391 for (i = 0; i < nb_pt; i++) { 3392 port_id = (portid_t) portlist[i]; 3393 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3394 return; 3395 if (record_now) 3396 fwd_ports_ids[i] = port_id; 3397 } 3398 if (record_now == 0) { 3399 record_now = 1; 3400 goto again; 3401 } 3402 nb_cfg_ports = (portid_t) nb_pt; 3403 if (nb_fwd_ports != (portid_t) nb_pt) { 3404 printf("previous number of forwarding ports %u - changed to " 3405 "number of configured ports %u\n", 3406 (unsigned int) nb_fwd_ports, nb_pt); 3407 nb_fwd_ports = (portid_t) nb_pt; 3408 } 3409 } 3410 3411 /** 3412 * Parse the user input and obtain the list of forwarding ports 3413 * 3414 * @param[in] list 3415 * String containing the user input. User can specify 3416 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3417 * For example, if the user wants to use all the available 3418 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3419 * If the user wants to use only the ports 1,2 then the input 3420 * is 1,2. 3421 * valid characters are '-' and ',' 3422 * @param[out] values 3423 * This array will be filled with a list of port IDs 3424 * based on the user input 3425 * Note that duplicate entries are discarded and only the first 3426 * count entries in this array are port IDs and all the rest 3427 * will contain default values 3428 * @param[in] maxsize 3429 * This parameter denotes 2 things 3430 * 1) Number of elements in the values array 3431 * 2) Maximum value of each element in the values array 3432 * @return 3433 * On success, returns total count of parsed port IDs 3434 * On failure, returns 0 3435 */ 3436 static unsigned int 3437 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3438 { 3439 unsigned int count = 0; 3440 char *end = NULL; 3441 int min, max; 3442 int value, i; 3443 unsigned int marked[maxsize]; 3444 3445 if (list == NULL || values == NULL) 3446 return 0; 3447 3448 for (i = 0; i < (int)maxsize; i++) 3449 marked[i] = 0; 3450 3451 min = INT_MAX; 3452 3453 do { 3454 /*Remove the blank spaces if any*/ 3455 while (isblank(*list)) 3456 list++; 3457 if (*list == '\0') 3458 break; 3459 errno = 0; 3460 value = strtol(list, &end, 10); 3461 if (errno || end == NULL) 3462 return 0; 3463 if (value < 0 || value >= (int)maxsize) 3464 return 0; 3465 while (isblank(*end)) 3466 end++; 3467 if (*end == '-' && min == INT_MAX) { 3468 min = value; 3469 } else if ((*end == ',') || (*end == '\0')) { 3470 max = value; 3471 if (min == INT_MAX) 3472 min = value; 3473 for (i = min; i <= max; i++) { 3474 if (count < maxsize) { 3475 if (marked[i]) 3476 continue; 3477 values[count] = i; 3478 marked[i] = 1; 3479 count++; 3480 } 3481 } 3482 min = INT_MAX; 3483 } else 3484 return 0; 3485 list = end + 1; 3486 } while (*end != '\0'); 3487 3488 return count; 3489 } 3490 3491 void 3492 parse_fwd_portlist(const char *portlist) 3493 { 3494 unsigned int portcount; 3495 unsigned int portindex[RTE_MAX_ETHPORTS]; 3496 unsigned int i, valid_port_count = 0; 3497 3498 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3499 if (!portcount) 3500 
rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3501 3502 /* 3503 * Here we verify the validity of the ports 3504 * and thereby calculate the total number of 3505 * valid ports 3506 */ 3507 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3508 if (rte_eth_dev_is_valid_port(portindex[i])) { 3509 portindex[valid_port_count] = portindex[i]; 3510 valid_port_count++; 3511 } 3512 } 3513 3514 set_fwd_ports_list(portindex, valid_port_count); 3515 } 3516 3517 void 3518 set_fwd_ports_mask(uint64_t portmask) 3519 { 3520 unsigned int portlist[64]; 3521 unsigned int nb_pt; 3522 unsigned int i; 3523 3524 if (portmask == 0) { 3525 printf("Invalid NULL mask of ports\n"); 3526 return; 3527 } 3528 nb_pt = 0; 3529 RTE_ETH_FOREACH_DEV(i) { 3530 if (! ((uint64_t)(1ULL << i) & portmask)) 3531 continue; 3532 portlist[nb_pt++] = i; 3533 } 3534 set_fwd_ports_list(portlist, nb_pt); 3535 } 3536 3537 void 3538 set_fwd_ports_number(uint16_t nb_pt) 3539 { 3540 if (nb_pt > nb_cfg_ports) { 3541 printf("nb fwd ports %u > %u (number of configured " 3542 "ports) - ignored\n", 3543 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3544 return; 3545 } 3546 nb_fwd_ports = (portid_t) nb_pt; 3547 printf("Number of forwarding ports set to %u\n", 3548 (unsigned int) nb_fwd_ports); 3549 } 3550 3551 int 3552 port_is_forwarding(portid_t port_id) 3553 { 3554 unsigned int i; 3555 3556 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3557 return -1; 3558 3559 for (i = 0; i < nb_fwd_ports; i++) { 3560 if (fwd_ports_ids[i] == port_id) 3561 return 1; 3562 } 3563 3564 return 0; 3565 } 3566 3567 void 3568 set_nb_pkt_per_burst(uint16_t nb) 3569 { 3570 if (nb > MAX_PKT_BURST) { 3571 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 3572 " ignored\n", 3573 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3574 return; 3575 } 3576 nb_pkt_per_burst = nb; 3577 printf("Number of packets per burst set to %u\n", 3578 (unsigned int) nb_pkt_per_burst); 3579 } 3580 3581 static const char * 3582 tx_split_get_name(enum tx_pkt_split split) 3583 { 3584 uint32_t i; 3585 3586 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3587 if (tx_split_name[i].split == split) 3588 return tx_split_name[i].name; 3589 } 3590 return NULL; 3591 } 3592 3593 void 3594 set_tx_pkt_split(const char *name) 3595 { 3596 uint32_t i; 3597 3598 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3599 if (strcmp(tx_split_name[i].name, name) == 0) { 3600 tx_pkt_split = tx_split_name[i].split; 3601 return; 3602 } 3603 } 3604 printf("unknown value: \"%s\"\n", name); 3605 } 3606 3607 int 3608 parse_fec_mode(const char *name, uint32_t *mode) 3609 { 3610 uint8_t i; 3611 3612 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3613 if (strcmp(fec_mode_name[i].name, name) == 0) { 3614 *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3615 return 0; 3616 } 3617 } 3618 return -1; 3619 } 3620 3621 void 3622 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3623 { 3624 unsigned int i, j; 3625 3626 printf("FEC capabilities:\n"); 3627 3628 for (i = 0; i < num; i++) { 3629 printf("%s : ", 3630 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3631 3632 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 3633 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3634 speed_fec_capa[i].capa) 3635 printf("%s ", fec_mode_name[j].name); 3636 } 3637 printf("\n"); 3638 } 3639 } 3640 3641 void 3642 show_rx_pkt_offsets(void) 3643 { 3644 uint32_t i, n; 3645 3646 n = rx_pkt_nb_offs; 3647 printf("Number of offsets: %u\n", n); 3648 if (n) { 3649 printf("Segment offsets: "); 3650 for 
(i = 0; i != n - 1; i++)
			printf("%hu,", rx_pkt_seg_offsets[i]);
		printf("%hu\n", rx_pkt_seg_offsets[i]);
	}
}

void
set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
{
	unsigned int i;

	if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
		printf("nb segments per RX packet=%u >= "
		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
		return;
	}

	/*
	 * No extra check here, the segment length will be checked by PMD
	 * in the extended queue setup.
	 */
	for (i = 0; i < nb_offs; i++) {
		if (seg_offsets[i] >= UINT16_MAX) {
			printf("offset[%u]=%u > UINT16_MAX - give up\n",
			       i, seg_offsets[i]);
			return;
		}
	}

	for (i = 0; i < nb_offs; i++)
		rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];

	rx_pkt_nb_offs = (uint8_t) nb_offs;
}

void
show_rx_pkt_segments(void)
{
	uint32_t i, n;

	n = rx_pkt_nb_segs;
	printf("Number of segments: %u\n", n);
	if (n) {
		printf("Segment sizes: ");
		for (i = 0; i != n - 1; i++)
			printf("%hu,", rx_pkt_seg_lengths[i]);
		printf("%hu\n", rx_pkt_seg_lengths[i]);
	}
}

void
set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
{
	unsigned int i;

	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
		printf("nb segments per RX packet=%u >= "
		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
		return;
	}

	/*
	 * No extra check here, the segment length will be checked by PMD
	 * in the extended queue setup.
	 */
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] >= UINT16_MAX) {
			printf("length[%u]=%u > UINT16_MAX - give up\n",
			       i, seg_lengths[i]);
			return;
		}
	}

	for (i = 0; i < nb_segs; i++)
		rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	rx_pkt_nb_segs = (uint8_t) nb_segs;
}

void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}

static bool
nb_segs_is_invalid(unsigned int nb_segs)
{
	uint16_t ring_size;
	uint16_t queue_id;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
			if (ret) {
				/* Port may not be initialized yet, can't say
				 * the port is invalid in this stage.
				 */
				continue;
			}
			if (ring_size < nb_segs) {
				printf("nb segments per TX packet=%u > TX "
				       "queue(%u) ring_size=%u - txpkts ignored\n",
				       nb_segs, queue_id, ring_size);
				return true;
			}
		}
	}

	return false;
}

void
set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned int i;

	/*
	 * For single-segment settings, a failed ring-size check is ignored:
	 * sending single-segment packets is such a basic capability that it
	 * is assumed to be always supported.
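	 *
	 * Worked example of the length checks below (hypothetical
	 * "set txpkts 64,128" input): each of the two segments must fit in
	 * mbuf_data_size[0], and their sum, 192 bytes, must be at least
	 * sizeof(struct rte_ether_hdr) + 20 + 8 = 14 + 20 + 8 = 42 bytes,
	 * the size of an empty UDP/IPv4 packet.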
3785 */ 3786 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 3787 printf("Tx segment size(%u) is not supported - txpkts ignored\n", 3788 nb_segs); 3789 return; 3790 } 3791 3792 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 3793 printf("Tx segment size(%u) is bigger than max number of segment(%u)\n", 3794 nb_segs, RTE_MAX_SEGS_PER_PKT); 3795 return; 3796 } 3797 3798 /* 3799 * Check that each segment length is greater or equal than 3800 * the mbuf data size. 3801 * Check also that the total packet length is greater or equal than the 3802 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 3803 * 20 + 8). 3804 */ 3805 tx_pkt_len = 0; 3806 for (i = 0; i < nb_segs; i++) { 3807 if (seg_lengths[i] > mbuf_data_size[0]) { 3808 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 3809 i, seg_lengths[i], mbuf_data_size[0]); 3810 return; 3811 } 3812 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 3813 } 3814 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 3815 printf("total packet length=%u < %d - give up\n", 3816 (unsigned) tx_pkt_len, 3817 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 3818 return; 3819 } 3820 3821 for (i = 0; i < nb_segs; i++) 3822 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3823 3824 tx_pkt_length = tx_pkt_len; 3825 tx_pkt_nb_segs = (uint8_t) nb_segs; 3826 } 3827 3828 void 3829 show_tx_pkt_times(void) 3830 { 3831 printf("Interburst gap: %u\n", tx_pkt_times_inter); 3832 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 3833 } 3834 3835 void 3836 set_tx_pkt_times(unsigned int *tx_times) 3837 { 3838 tx_pkt_times_inter = tx_times[0]; 3839 tx_pkt_times_intra = tx_times[1]; 3840 } 3841 3842 void 3843 setup_gro(const char *onoff, portid_t port_id) 3844 { 3845 if (!rte_eth_dev_is_valid_port(port_id)) { 3846 printf("invalid port id %u\n", port_id); 3847 return; 3848 } 3849 if (test_done == 0) { 3850 printf("Before enable/disable GRO," 3851 " please stop forwarding first\n"); 3852 return; 3853 } 3854 if (strcmp(onoff, "on") == 0) { 3855 if (gro_ports[port_id].enable != 0) { 3856 printf("Port %u has enabled GRO. Please" 3857 " disable GRO first\n", port_id); 3858 return; 3859 } 3860 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3861 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 3862 gro_ports[port_id].param.max_flow_num = 3863 GRO_DEFAULT_FLOW_NUM; 3864 gro_ports[port_id].param.max_item_per_flow = 3865 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 3866 } 3867 gro_ports[port_id].enable = 1; 3868 } else { 3869 if (gro_ports[port_id].enable == 0) { 3870 printf("Port %u has disabled GRO\n", port_id); 3871 return; 3872 } 3873 gro_ports[port_id].enable = 0; 3874 } 3875 } 3876 3877 void 3878 setup_gro_flush_cycles(uint8_t cycles) 3879 { 3880 if (test_done == 0) { 3881 printf("Before change flush interval for GRO," 3882 " please stop forwarding first.\n"); 3883 return; 3884 } 3885 3886 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 3887 GRO_DEFAULT_FLUSH_CYCLES) { 3888 printf("The flushing cycle be in the range" 3889 " of 1 to %u. 
Revert to the default" 3890 " value %u.\n", 3891 GRO_MAX_FLUSH_CYCLES, 3892 GRO_DEFAULT_FLUSH_CYCLES); 3893 cycles = GRO_DEFAULT_FLUSH_CYCLES; 3894 } 3895 3896 gro_flush_cycles = cycles; 3897 } 3898 3899 void 3900 show_gro(portid_t port_id) 3901 { 3902 struct rte_gro_param *param; 3903 uint32_t max_pkts_num; 3904 3905 param = &gro_ports[port_id].param; 3906 3907 if (!rte_eth_dev_is_valid_port(port_id)) { 3908 printf("Invalid port id %u.\n", port_id); 3909 return; 3910 } 3911 if (gro_ports[port_id].enable) { 3912 printf("GRO type: TCP/IPv4\n"); 3913 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3914 max_pkts_num = param->max_flow_num * 3915 param->max_item_per_flow; 3916 } else 3917 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 3918 printf("Max number of packets to perform GRO: %u\n", 3919 max_pkts_num); 3920 printf("Flushing cycles: %u\n", gro_flush_cycles); 3921 } else 3922 printf("Port %u doesn't enable GRO.\n", port_id); 3923 } 3924 3925 void 3926 setup_gso(const char *mode, portid_t port_id) 3927 { 3928 if (!rte_eth_dev_is_valid_port(port_id)) { 3929 printf("invalid port id %u\n", port_id); 3930 return; 3931 } 3932 if (strcmp(mode, "on") == 0) { 3933 if (test_done == 0) { 3934 printf("before enabling GSO," 3935 " please stop forwarding first\n"); 3936 return; 3937 } 3938 gso_ports[port_id].enable = 1; 3939 } else if (strcmp(mode, "off") == 0) { 3940 if (test_done == 0) { 3941 printf("before disabling GSO," 3942 " please stop forwarding first\n"); 3943 return; 3944 } 3945 gso_ports[port_id].enable = 0; 3946 } 3947 } 3948 3949 char* 3950 list_pkt_forwarding_modes(void) 3951 { 3952 static char fwd_modes[128] = ""; 3953 const char *separator = "|"; 3954 struct fwd_engine *fwd_eng; 3955 unsigned i = 0; 3956 3957 if (strlen (fwd_modes) == 0) { 3958 while ((fwd_eng = fwd_engines[i++]) != NULL) { 3959 strncat(fwd_modes, fwd_eng->fwd_mode_name, 3960 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 3961 strncat(fwd_modes, separator, 3962 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 3963 } 3964 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 3965 } 3966 3967 return fwd_modes; 3968 } 3969 3970 char* 3971 list_pkt_forwarding_retry_modes(void) 3972 { 3973 static char fwd_modes[128] = ""; 3974 const char *separator = "|"; 3975 struct fwd_engine *fwd_eng; 3976 unsigned i = 0; 3977 3978 if (strlen(fwd_modes) == 0) { 3979 while ((fwd_eng = fwd_engines[i++]) != NULL) { 3980 if (fwd_eng == &rx_only_engine) 3981 continue; 3982 strncat(fwd_modes, fwd_eng->fwd_mode_name, 3983 sizeof(fwd_modes) - 3984 strlen(fwd_modes) - 1); 3985 strncat(fwd_modes, separator, 3986 sizeof(fwd_modes) - 3987 strlen(fwd_modes) - 1); 3988 } 3989 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 3990 } 3991 3992 return fwd_modes; 3993 } 3994 3995 void 3996 set_pkt_forwarding_mode(const char *fwd_mode_name) 3997 { 3998 struct fwd_engine *fwd_eng; 3999 unsigned i; 4000 4001 i = 0; 4002 while ((fwd_eng = fwd_engines[i]) != NULL) { 4003 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 4004 printf("Set %s packet forwarding mode%s\n", 4005 fwd_mode_name, 4006 retry_enabled == 0 ? 
"" : " with retry"); 4007 cur_fwd_eng = fwd_eng; 4008 return; 4009 } 4010 i++; 4011 } 4012 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 4013 } 4014 4015 void 4016 add_rx_dump_callbacks(portid_t portid) 4017 { 4018 struct rte_eth_dev_info dev_info; 4019 uint16_t queue; 4020 int ret; 4021 4022 if (port_id_is_invalid(portid, ENABLED_WARN)) 4023 return; 4024 4025 ret = eth_dev_info_get_print_err(portid, &dev_info); 4026 if (ret != 0) 4027 return; 4028 4029 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4030 if (!ports[portid].rx_dump_cb[queue]) 4031 ports[portid].rx_dump_cb[queue] = 4032 rte_eth_add_rx_callback(portid, queue, 4033 dump_rx_pkts, NULL); 4034 } 4035 4036 void 4037 add_tx_dump_callbacks(portid_t portid) 4038 { 4039 struct rte_eth_dev_info dev_info; 4040 uint16_t queue; 4041 int ret; 4042 4043 if (port_id_is_invalid(portid, ENABLED_WARN)) 4044 return; 4045 4046 ret = eth_dev_info_get_print_err(portid, &dev_info); 4047 if (ret != 0) 4048 return; 4049 4050 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4051 if (!ports[portid].tx_dump_cb[queue]) 4052 ports[portid].tx_dump_cb[queue] = 4053 rte_eth_add_tx_callback(portid, queue, 4054 dump_tx_pkts, NULL); 4055 } 4056 4057 void 4058 remove_rx_dump_callbacks(portid_t portid) 4059 { 4060 struct rte_eth_dev_info dev_info; 4061 uint16_t queue; 4062 int ret; 4063 4064 if (port_id_is_invalid(portid, ENABLED_WARN)) 4065 return; 4066 4067 ret = eth_dev_info_get_print_err(portid, &dev_info); 4068 if (ret != 0) 4069 return; 4070 4071 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4072 if (ports[portid].rx_dump_cb[queue]) { 4073 rte_eth_remove_rx_callback(portid, queue, 4074 ports[portid].rx_dump_cb[queue]); 4075 ports[portid].rx_dump_cb[queue] = NULL; 4076 } 4077 } 4078 4079 void 4080 remove_tx_dump_callbacks(portid_t portid) 4081 { 4082 struct rte_eth_dev_info dev_info; 4083 uint16_t queue; 4084 int ret; 4085 4086 if (port_id_is_invalid(portid, ENABLED_WARN)) 4087 return; 4088 4089 ret = eth_dev_info_get_print_err(portid, &dev_info); 4090 if (ret != 0) 4091 return; 4092 4093 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4094 if (ports[portid].tx_dump_cb[queue]) { 4095 rte_eth_remove_tx_callback(portid, queue, 4096 ports[portid].tx_dump_cb[queue]); 4097 ports[portid].tx_dump_cb[queue] = NULL; 4098 } 4099 } 4100 4101 void 4102 configure_rxtx_dump_callbacks(uint16_t verbose) 4103 { 4104 portid_t portid; 4105 4106 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4107 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4108 return; 4109 #endif 4110 4111 RTE_ETH_FOREACH_DEV(portid) 4112 { 4113 if (verbose == 1 || verbose > 2) 4114 add_rx_dump_callbacks(portid); 4115 else 4116 remove_rx_dump_callbacks(portid); 4117 if (verbose >= 2) 4118 add_tx_dump_callbacks(portid); 4119 else 4120 remove_tx_dump_callbacks(portid); 4121 } 4122 } 4123 4124 void 4125 set_verbose_level(uint16_t vb_level) 4126 { 4127 printf("Change verbose level from %u to %u\n", 4128 (unsigned int) verbose_level, (unsigned int) vb_level); 4129 verbose_level = vb_level; 4130 configure_rxtx_dump_callbacks(verbose_level); 4131 } 4132 4133 void 4134 vlan_extend_set(portid_t port_id, int on) 4135 { 4136 int diag; 4137 int vlan_offload; 4138 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4139 4140 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4141 return; 4142 4143 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4144 4145 if (on) { 4146 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 4147 port_rx_offloads |= 
void
vlan_extend_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	} else {
		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("vlan_extend_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	} else {
		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_qinq_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
	} else {
		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("%s(port_id=%d, on=%d) failed "
		       "diag=%d\n", __func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
	if (diag == 0)
		return;

	printf("vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_type, tp_id, diag);
}

void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;

	if (ports[port_id].dev_conf.txmode.offloads &
	    DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Error: cannot set single VLAN insert while QinQ insert is enabled\n");
		return;
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
		printf("Error: vlan insert is not supported by port %d\n",
		       port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;
	if (vlan_id_is_invalid(vlan_id_outer))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
		printf("Error: qinq insert not supported by port %d\n",
		       port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
						    DEV_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = vlan_id;
	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}

void
tx_vlan_reset(portid_t port_id)
{
	ports[port_id].dev_conf.txmode.offloads &=
		~(DEV_TX_OFFLOAD_VLAN_INSERT |
		  DEV_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = 0;
	ports[port_id].tx_vlan_id_outer = 0;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) :
		    (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* tx */
		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
							     map_value);
		if (ret) {
			printf("failed to set tx queue stats mapping.\n");
			return;
		}
	} else { /* rx */
		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
							     map_value);
		if (ret) {
			printf("failed to set rx queue stats mapping.\n");
			return;
		}
	}
}

void
set_xstats_hide_zero(uint8_t on_off)
{
	xstats_hide_zero = on_off;
}

void
set_record_core_cycles(uint8_t on_off)
{
	record_core_cycles = on_off;
}

void
set_record_burst_stats(uint8_t on_off)
{
	record_burst_stats = on_off;
}

static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
		       " tunnel_id: 0x%08x",
		       mask->mac_addr_byte_mask, mask->tunnel_type_mask,
		       rte_be_to_cpu_32(mask->tunnel_id_mask));
	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
		       rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
		       rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

		printf("\n src_port: 0x%04x, dst_port: 0x%04x",
		       rte_be_to_cpu_16(mask->src_port_mask),
		       rte_be_to_cpu_16(mask->dst_port_mask));

		printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

		printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
	}

	printf("\n");
}

static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n L4_PAYLOAD: ");
		else
			printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf(" %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}
static char *
flowtype_to_str(uint16_t flow_type)
{
	struct flow_type_info {
		char str[32];
		uint16_t ftype;
	};

	uint8_t i;
	static struct flow_type_info flowtype_str_table[] = {
		{"raw", RTE_ETH_FLOW_RAW},
		{"ipv4", RTE_ETH_FLOW_IPV4},
		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
		{"ipv6", RTE_ETH_FLOW_IPV6},
		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
		{"port", RTE_ETH_FLOW_PORT},
		{"vxlan", RTE_ETH_FLOW_VXLAN},
		{"geneve", RTE_ETH_FLOW_GENEVE},
		{"nvgre", RTE_ETH_FLOW_NVGRE},
		{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
	};

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}

#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)

static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}

static inline void
print_fdir_flow_type(uint32_t flow_types_mask)
{
	int i;
	char *p;

	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
		if (!(flow_types_mask & (1 << i)))
			continue;
		p = flowtype_to_str(i);
		if (p)
			printf(" %s", p);
		else
			printf(" unknown");
	}
	printf("\n");
}

static int
get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
	      struct rte_eth_fdir_stats *fdir_stat)
{
	int ret = -ENOTSUP;

#ifdef RTE_NET_I40E
	if (ret == -ENOTSUP) {
		ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
		if (!ret)
			ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
	}
#endif
#ifdef RTE_NET_IXGBE
	if (ret == -ENOTSUP) {
		ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
		if (!ret)
			ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
	}
#endif
	switch (ret) {
	case 0:
		break;
	case -ENOTSUP:
		printf("\n FDIR is not supported on port %-2d\n",
		       port_id);
		break;
	default:
		printf("programming error: (%s)\n", strerror(-ret));
		break;
	}
	return ret;
}
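/*
 * Illustration only: get_fdir_info() above chains PMD-specific getters,
 * each attempted only while the result is still -ENOTSUP. Support for a
 * further driver would follow the same shape; the RTE_NET_SOME_PMD macro
 * and rte_pmd_some_* functions below are hypothetical.
 */
#if 0
#ifdef RTE_NET_SOME_PMD
	if (ret == -ENOTSUP) {
		ret = rte_pmd_some_get_fdir_info(port_id, fdir_info);
		if (!ret)
			ret = rte_pmd_some_get_fdir_stats(port_id, fdir_stat);
	}
#endif
#endif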
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	memset(&fdir_info, 0, sizeof(fdir_info));
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
		return;

	printf("\n %s FDIR infos for port %-2d %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf(" MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf(" PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf(" PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(" PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf(" SIGNATURE\n");
	else
		printf(" DISABLE\n");
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
	    fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf(" SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf(" FLEX PAYLOAD INFO:\n");
	printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
	       " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
	       " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
	       fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
	       fdir_info.flex_payload_unit,
	       fdir_info.max_flex_payload_segment_num,
	       fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf(" MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf(" FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf,
					fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf(" FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf,
				     fdir_info.max_flexpayload);
	}
	printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
	       " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
	       " add: %-10"PRIu64" remove: %"PRIu64"\n"
	       " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf(" %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}

#endif /* RTE_NET_I40E || RTE_NET_IXGBE */

void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Cannot set the"
			       " flex mask for flow_type (%u).\n",
			       cfg->flow_type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_mask[idx],
		   cfg,
		   sizeof(struct rte_eth_fdir_flex_mask));
}
void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Cannot set"
			       " the flex payload for type (%u).\n", cfg->type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_set[idx],
		   cfg,
		   sizeof(struct rte_eth_flex_payload_cfg));
}

void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
#ifdef RTE_NET_IXGBE
	int diag;

	if (is_rx)
		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
	else
		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);

	if (diag == 0)
		return;
	printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
	       is_rx ? "rx" : "tx", port_id, diag);
	return;
#endif
	printf("VF %s setting not supported for port %d\n",
	       is_rx ? "Rx" : "Tx", port_id);
	RTE_SET_USED(vf);
	RTE_SET_USED(on);
}

int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return 1;
	if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
	    rate > link.link_speed) {
		printf("Invalid rate value: %u, bigger than link speed: %u\n",
		       rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}

int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag = -ENOTSUP;

	RTE_SET_USED(vf);
	RTE_SET_USED(rate);
	RTE_SET_USED(q_msk);

#ifdef RTE_NET_IXGBE
	if (diag == -ENOTSUP)
		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
						       q_msk);
#endif
#ifdef RTE_NET_BNXT
	if (diag == -ENOTSUP)
		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
#endif
	if (diag == 0)
		return diag;

	printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}

/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * that it be supplied a contiguous array of multicast MAC addresses.
 * To comply with this constraint, the set of multicast addresses recorded
 * in the pool is systematically kept compacted at the beginning of the
 * pool. Hence, when a multicast address is removed from the pool, all
 * following addresses, if any, are copied back to keep the set contiguous.
 */
#define MCAST_POOL_INC 32
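/*
 * Worked example of the growth arithmetic: with MCAST_POOL_INC == 32, the
 * pool capacity grows in steps of 32 entries, and mcast_addr_pool_extend()
 * below reallocates only when mc_addr_nb is a multiple of MCAST_POOL_INC:
 *
 *	mc_addr_nb before append =  0      -> realloc to 32 entries
 *	mc_addr_nb before append =  1..31  -> free entry available, no realloc
 *	mc_addr_nb before append = 32      -> realloc to 64 entries
 */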
static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct rte_ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
							MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
						    mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}

static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}

static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
		       port_id, port->mc_addr_nb, diag);

	return diag;
}

void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/*
		 * Rollback on failure: remove the address from the pool;
		 * "i" still indexes the entry just appended.
		 */
		mcast_addr_pool_remove(port, i);
}
void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure: add the address back into the pool */
		mcast_addr_pool_append(port, mc_addr);
}

void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get dcb infos on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n TC : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		printf("%s: Failed to malloc memory\n", __func__);
		return buf;
	}

	ret = read(fd, buf, pkg_size);
	if (ret < 0) {
		close(fd);
		printf("%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}
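/*
 * Illustration only: a typical round trip through the file helpers above
 * and close_file() below. The file paths are hypothetical.
 */
#if 0
static int
copy_pkg_file(void)
{
	uint32_t size = 0;
	uint8_t *buf = open_file("/tmp/pkg.bin", &size); /* hypothetical */
	int ret;

	if (buf == NULL)
		return -1;
	ret = save_file("/tmp/pkg.copy", buf, size); /* hypothetical */
	close_file(buf);
	return ret;
}
#endif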
int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}

void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_NET_I40E
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("no queue region has been set before\n");

	printf("\n %s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u\n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u\n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}

void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	struct rte_ether_addr *addr;
	uint32_t i, num_macs = 0;
	struct rte_eth_dev *dev;

	dev = &rte_eth_devices[port_id];

	if (eth_dev_info_get_print_err(port_id, &dev_info))
		return;

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %u\n", num_macs);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}

void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of multicast MAC addresses added: %u\n",
	       port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf("  %s\n", buf);
	}
}