1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_mtr.h> 42 #include <rte_errno.h> 43 #ifdef RTE_NET_IXGBE 44 #include <rte_pmd_ixgbe.h> 45 #endif 46 #ifdef RTE_NET_I40E 47 #include <rte_pmd_i40e.h> 48 #endif 49 #ifdef RTE_NET_BNXT 50 #include <rte_pmd_bnxt.h> 51 #endif 52 #include <rte_gro.h> 53 #include <rte_hexdump.h> 54 55 #include "testpmd.h" 56 #include "cmdline_mtr.h" 57 58 #define ETHDEV_FWVERS_LEN 32 59 60 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 62 #else 63 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 64 #endif 65 66 #define NS_PER_SEC 1E9 67 68 static char *flowtype_to_str(uint16_t flow_type); 69 70 static const struct { 71 enum tx_pkt_split split; 72 const char *name; 73 } tx_split_name[] = { 74 { 75 .split = TX_PKT_SPLIT_OFF, 76 .name = "off", 77 }, 78 { 79 .split = TX_PKT_SPLIT_ON, 80 .name = "on", 81 }, 82 { 83 .split = TX_PKT_SPLIT_RND, 84 .name = "rand", 85 }, 86 }; 87 88 const struct rss_type_info rss_type_table[] = { 89 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP | 90 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD | 91 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP | 92 ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS}, 93 { "none", 0 }, 94 { "eth", ETH_RSS_ETH }, 95 { "l2-src-only", ETH_RSS_L2_SRC_ONLY }, 96 { "l2-dst-only", ETH_RSS_L2_DST_ONLY }, 97 { "vlan", ETH_RSS_VLAN }, 98 { "s-vlan", ETH_RSS_S_VLAN }, 99 { "c-vlan", ETH_RSS_C_VLAN }, 100 { "ipv4", ETH_RSS_IPV4 }, 101 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 102 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 103 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 104 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 105 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 106 { "ipv6", ETH_RSS_IPV6 }, 107 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 108 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 109 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 110 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 111 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 112 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 113 { "ipv6-ex", ETH_RSS_IPV6_EX }, 114 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 115 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 116 { "port", ETH_RSS_PORT }, 117 { "vxlan", ETH_RSS_VXLAN }, 118 { "geneve", ETH_RSS_GENEVE }, 119 { "nvgre", ETH_RSS_NVGRE }, 120 { "ip", ETH_RSS_IP }, 121 { "udp", ETH_RSS_UDP }, 122 { "tcp", ETH_RSS_TCP }, 123 { "sctp", ETH_RSS_SCTP }, 124 { "tunnel", ETH_RSS_TUNNEL }, 125 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 126 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 127 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 128 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 129 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 130 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 131 { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, 132 { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, 133 { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, 134 { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, 135 { "esp", ETH_RSS_ESP }, 136 { "ah", ETH_RSS_AH }, 137 { "l2tpv3", ETH_RSS_L2TPV3 }, 138 { "pfcp", ETH_RSS_PFCP }, 139 { "pppoe", ETH_RSS_PPPOE }, 140 { "gtpu", ETH_RSS_GTPU }, 141 { "ecpri", ETH_RSS_ECPRI }, 142 { "mpls", ETH_RSS_MPLS }, 143 { "ipv4-chksum", ETH_RSS_IPV4_CHKSUM }, 144 { "l4-chksum", ETH_RSS_L4_CHKSUM }, 145 { NULL, 0 }, 146 }; 147 148 static const struct { 149 enum rte_eth_fec_mode mode; 150 const char *name; 151 } fec_mode_name[] = { 152 { 153 .mode = RTE_ETH_FEC_NOFEC, 154 .name = "off", 155 }, 156 { 157 .mode = RTE_ETH_FEC_AUTO, 158 .name = "auto", 159 }, 160 { 161 .mode = RTE_ETH_FEC_BASER, 162 .name = "baser", 163 }, 164 { 165 .mode = RTE_ETH_FEC_RS, 166 .name = "rs", 167 }, 168 }; 169 170 static void 171 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 172 { 173 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 174 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 175 printf("%s%s", name, buf); 176 } 177 178 void 179 nic_stats_display(portid_t port_id) 180 { 181 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 182 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 183 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 184 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 185 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 186 struct timespec cur_time; 187 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 188 diff_ns; 189 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 190 struct rte_eth_stats stats; 191 192 static const char *nic_stats_border = "########################"; 193 194 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 195 print_valid_ports(); 196 return; 197 } 198 rte_eth_stats_get(port_id, &stats); 199 printf("\n %s NIC statistics for port %-2d %s\n", 200 nic_stats_border, port_id, nic_stats_border); 201 202 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 203 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 204 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 205 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 206 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 207 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 208 209 diff_ns = 0; 210 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 211 uint64_t ns; 212 213 ns = cur_time.tv_sec * NS_PER_SEC; 214 ns += cur_time.tv_nsec; 215 216 if (prev_ns[port_id] != 0) 217 diff_ns = ns - prev_ns[port_id]; 218 prev_ns[port_id] = ns; 219 } 220 221 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 222 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 223 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 224 (stats.opackets - prev_pkts_tx[port_id]) : 0; 225 prev_pkts_rx[port_id] = stats.ipackets; 226 prev_pkts_tx[port_id] = stats.opackets; 227 mpps_rx = diff_ns > 0 ? 228 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 229 mpps_tx = diff_ns > 0 ? 230 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 231 232 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 233 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 234 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 235 (stats.obytes - prev_bytes_tx[port_id]) : 0; 236 prev_bytes_rx[port_id] = stats.ibytes; 237 prev_bytes_tx[port_id] = stats.obytes; 238 mbps_rx = diff_ns > 0 ? 239 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 240 mbps_tx = diff_ns > 0 ? 241 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 242 243 printf("\n Throughput (since last show)\n"); 244 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 245 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 246 mpps_tx, mbps_tx * 8); 247 248 printf(" %s############################%s\n", 249 nic_stats_border, nic_stats_border); 250 } 251 252 void 253 nic_stats_clear(portid_t port_id) 254 { 255 int ret; 256 257 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 258 print_valid_ports(); 259 return; 260 } 261 262 ret = rte_eth_stats_reset(port_id); 263 if (ret != 0) { 264 fprintf(stderr, 265 "%s: Error: failed to reset stats (port %u): %s", 266 __func__, port_id, strerror(-ret)); 267 return; 268 } 269 270 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 271 if (ret != 0) { 272 if (ret < 0) 273 ret = -ret; 274 fprintf(stderr, 275 "%s: Error: failed to get stats (port %u): %s", 276 __func__, port_id, strerror(ret)); 277 return; 278 } 279 printf("\n NIC statistics for port %d cleared\n", port_id); 280 } 281 282 void 283 nic_xstats_display(portid_t port_id) 284 { 285 struct rte_eth_xstat *xstats; 286 int cnt_xstats, idx_xstat; 287 struct rte_eth_xstat_name *xstats_names; 288 289 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 290 print_valid_ports(); 291 return; 292 } 293 printf("###### NIC extended statistics for port %-2d\n", port_id); 294 if (!rte_eth_dev_is_valid_port(port_id)) { 295 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 296 return; 297 } 298 299 /* Get count */ 300 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 301 if (cnt_xstats < 0) { 302 fprintf(stderr, "Error: Cannot get count of xstats\n"); 303 return; 304 } 305 306 /* Get id-name lookup table */ 307 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 308 if (xstats_names == NULL) { 309 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 310 return; 311 } 312 if (cnt_xstats != rte_eth_xstats_get_names( 313 port_id, xstats_names, cnt_xstats)) { 314 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 315 free(xstats_names); 316 return; 317 } 318 319 /* Get stats themselves */ 320 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 321 if (xstats == NULL) { 322 fprintf(stderr, "Cannot allocate memory for xstats\n"); 323 free(xstats_names); 324 return; 325 } 326 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 327 fprintf(stderr, "Error: Unable to get xstats\n"); 328 free(xstats_names); 329 free(xstats); 330 return; 331 } 332 333 /* Display xstats */ 334 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 335 if (xstats_hide_zero && !xstats[idx_xstat].value) 336 continue; 337 printf("%s: %"PRIu64"\n", 338 xstats_names[idx_xstat].name, 339 xstats[idx_xstat].value); 340 } 341 free(xstats_names); 342 free(xstats); 343 } 344 345 void 346 nic_xstats_clear(portid_t port_id) 347 { 348 int ret; 349 350 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 351 print_valid_ports(); 352 return; 353 } 354 355 ret = rte_eth_xstats_reset(port_id); 356 if (ret != 0) { 357 fprintf(stderr, 358 "%s: Error: failed to reset xstats (port %u): %s\n", 359 __func__, port_id, strerror(-ret)); 360 return; 361 } 362 363 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 364 if (ret != 0) { 365 if (ret < 0) 366 ret = -ret; 367 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 368 __func__, port_id, strerror(ret)); 369 return; 370 } 371 } 372 373 static const char * 374 get_queue_state_name(uint8_t queue_state) 375 { 376 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 377 return "stopped"; 378 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 379 return "started"; 380 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 381 return "hairpin"; 382 else 383 return "unknown"; 384 } 385 386 void 387 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 388 { 389 struct rte_eth_burst_mode mode; 390 struct rte_eth_rxq_info qinfo; 391 int32_t rc; 392 static const char *info_border = "*********************"; 393 394 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 395 if (rc != 0) { 396 fprintf(stderr, 397 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 398 port_id, queue_id, strerror(-rc), rc); 399 return; 400 } 401 402 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 403 info_border, port_id, queue_id, info_border); 404 405 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 406 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 407 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 408 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 409 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 410 printf("\nRX drop packets: %s", 411 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 412 printf("\nRX deferred start: %s", 413 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 414 printf("\nRX scattered packets: %s", 415 (qinfo.scattered_rx != 0) ? "on" : "off"); 416 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 417 if (qinfo.rx_buf_size != 0) 418 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 419 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 420 421 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 422 printf("\nBurst mode: %s%s", 423 mode.info, 424 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 425 " (per queue)" : ""); 426 427 printf("\n"); 428 } 429 430 void 431 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 432 { 433 struct rte_eth_burst_mode mode; 434 struct rte_eth_txq_info qinfo; 435 int32_t rc; 436 static const char *info_border = "*********************"; 437 438 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 439 if (rc != 0) { 440 fprintf(stderr, 441 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 442 port_id, queue_id, strerror(-rc), rc); 443 return; 444 } 445 446 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 447 info_border, port_id, queue_id, info_border); 448 449 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 450 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 451 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 452 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 453 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 454 printf("\nTX deferred start: %s", 455 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 456 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 457 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 458 459 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 460 printf("\nBurst mode: %s%s", 461 mode.info, 462 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 463 " (per queue)" : ""); 464 465 printf("\n"); 466 } 467 468 static int bus_match_all(const struct rte_bus *bus, const void *data) 469 { 470 RTE_SET_USED(bus); 471 RTE_SET_USED(data); 472 return 0; 473 } 474 475 static void 476 device_infos_display_speeds(uint32_t speed_capa) 477 { 478 printf("\n\tDevice speed capability:"); 479 if (speed_capa == ETH_LINK_SPEED_AUTONEG) 480 printf(" Autonegotiate (all speeds)"); 481 if (speed_capa & ETH_LINK_SPEED_FIXED) 482 printf(" Disable autonegotiate (fixed speed) "); 483 if (speed_capa & ETH_LINK_SPEED_10M_HD) 484 printf(" 10 Mbps half-duplex "); 485 if (speed_capa & ETH_LINK_SPEED_10M) 486 printf(" 10 Mbps full-duplex "); 487 if (speed_capa & ETH_LINK_SPEED_100M_HD) 488 printf(" 100 Mbps half-duplex "); 489 if (speed_capa & ETH_LINK_SPEED_100M) 490 printf(" 100 Mbps full-duplex "); 491 if (speed_capa & ETH_LINK_SPEED_1G) 492 printf(" 1 Gbps "); 493 if (speed_capa & ETH_LINK_SPEED_2_5G) 494 printf(" 2.5 Gbps "); 495 if (speed_capa & ETH_LINK_SPEED_5G) 496 printf(" 5 Gbps "); 497 if (speed_capa & ETH_LINK_SPEED_10G) 498 printf(" 10 Gbps "); 499 if (speed_capa & ETH_LINK_SPEED_20G) 500 printf(" 20 Gbps "); 501 if (speed_capa & ETH_LINK_SPEED_25G) 502 printf(" 25 Gbps "); 503 if (speed_capa & ETH_LINK_SPEED_40G) 504 printf(" 40 Gbps "); 505 if (speed_capa & ETH_LINK_SPEED_50G) 506 printf(" 50 Gbps "); 507 if (speed_capa & ETH_LINK_SPEED_56G) 508 printf(" 56 Gbps "); 509 if (speed_capa & ETH_LINK_SPEED_100G) 510 printf(" 100 Gbps "); 511 if (speed_capa & ETH_LINK_SPEED_200G) 512 printf(" 200 Gbps "); 513 } 514 515 void 516 device_infos_display(const char *identifier) 517 { 518 static const char *info_border = "*********************"; 519 struct rte_bus *start = NULL, *next; 520 struct rte_dev_iterator dev_iter; 521 char name[RTE_ETH_NAME_MAX_LEN]; 522 struct rte_ether_addr mac_addr; 523 struct rte_device *dev; 524 struct rte_devargs da; 525 portid_t port_id; 526 struct rte_eth_dev_info dev_info; 527 char devstr[128]; 528 529 memset(&da, 0, sizeof(da)); 530 if (!identifier) 531 goto skip_parse; 532 533 if (rte_devargs_parsef(&da, "%s", identifier)) { 534 fprintf(stderr, "cannot parse identifier\n"); 535 return; 536 } 537 538 skip_parse: 539 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 540 541 start = next; 542 if (identifier && da.bus != next) 543 continue; 544 545 /* Skip buses that don't have iterate method */ 546 if (!next->dev_iterate) 547 continue; 548 549 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 550 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 551 552 if (!dev->driver) 553 continue; 554 /* Check for matching device if identifier is present */ 555 if (identifier && 556 strncmp(da.name, dev->name, strlen(dev->name))) 557 continue; 558 printf("\n%s Infos for device %s %s\n", 559 info_border, dev->name, info_border); 560 printf("Bus name: %s", dev->bus->name); 561 printf("\nDriver name: %s", dev->driver->name); 562 printf("\nDevargs: %s", 563 dev->devargs ? dev->devargs->args : ""); 564 printf("\nConnect to socket: %d", dev->numa_node); 565 printf("\n"); 566 567 /* List ports with matching device name */ 568 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 569 printf("\n\tPort id: %-2d", port_id); 570 if (eth_macaddr_get_print_err(port_id, 571 &mac_addr) == 0) 572 print_ethaddr("\n\tMAC address: ", 573 &mac_addr); 574 rte_eth_dev_get_name_by_port(port_id, name); 575 printf("\n\tDevice name: %s", name); 576 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 577 device_infos_display_speeds(dev_info.speed_capa); 578 printf("\n"); 579 } 580 } 581 }; 582 rte_devargs_reset(&da); 583 } 584 585 void 586 port_infos_display(portid_t port_id) 587 { 588 struct rte_port *port; 589 struct rte_ether_addr mac_addr; 590 struct rte_eth_link link; 591 struct rte_eth_dev_info dev_info; 592 int vlan_offload; 593 struct rte_mempool * mp; 594 static const char *info_border = "*********************"; 595 uint16_t mtu; 596 char name[RTE_ETH_NAME_MAX_LEN]; 597 int ret; 598 char fw_version[ETHDEV_FWVERS_LEN]; 599 600 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 601 print_valid_ports(); 602 return; 603 } 604 port = &ports[port_id]; 605 ret = eth_link_get_nowait_print_err(port_id, &link); 606 if (ret < 0) 607 return; 608 609 ret = eth_dev_info_get_print_err(port_id, &dev_info); 610 if (ret != 0) 611 return; 612 613 printf("\n%s Infos for port %-2d %s\n", 614 info_border, port_id, info_border); 615 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 616 print_ethaddr("MAC address: ", &mac_addr); 617 rte_eth_dev_get_name_by_port(port_id, name); 618 printf("\nDevice name: %s", name); 619 printf("\nDriver name: %s", dev_info.driver_name); 620 621 if (rte_eth_dev_fw_version_get(port_id, fw_version, 622 ETHDEV_FWVERS_LEN) == 0) 623 printf("\nFirmware-version: %s", fw_version); 624 else 625 printf("\nFirmware-version: %s", "not available"); 626 627 if (dev_info.device->devargs && dev_info.device->devargs->args) 628 printf("\nDevargs: %s", dev_info.device->devargs->args); 629 printf("\nConnect to socket: %u", port->socket_id); 630 631 if (port_numa[port_id] != NUMA_NO_CONFIG) { 632 mp = mbuf_pool_find(port_numa[port_id], 0); 633 if (mp) 634 printf("\nmemory allocation on the socket: %d", 635 port_numa[port_id]); 636 } else 637 printf("\nmemory allocation on the socket: %u",port->socket_id); 638 639 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 640 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 641 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 642 ("full-duplex") : ("half-duplex")); 643 printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ? 644 ("On") : ("Off")); 645 646 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 647 printf("MTU: %u\n", mtu); 648 649 printf("Promiscuous mode: %s\n", 650 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 651 printf("Allmulticast mode: %s\n", 652 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 653 printf("Maximum number of MAC addresses: %u\n", 654 (unsigned int)(port->dev_info.max_mac_addrs)); 655 printf("Maximum number of MAC addresses of hash filtering: %u\n", 656 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 657 658 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 659 if (vlan_offload >= 0){ 660 printf("VLAN offload: \n"); 661 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 662 printf(" strip on, "); 663 else 664 printf(" strip off, "); 665 666 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 667 printf("filter on, "); 668 else 669 printf("filter off, "); 670 671 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 672 printf("extend on, "); 673 else 674 printf("extend off, "); 675 676 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 677 printf("qinq strip on\n"); 678 else 679 printf("qinq strip off\n"); 680 } 681 682 if (dev_info.hash_key_size > 0) 683 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 684 if (dev_info.reta_size > 0) 685 printf("Redirection table size: %u\n", dev_info.reta_size); 686 if (!dev_info.flow_type_rss_offloads) 687 printf("No RSS offload flow type is supported.\n"); 688 else { 689 uint16_t i; 690 char *p; 691 692 printf("Supported RSS offload flow types:\n"); 693 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 694 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 695 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 696 continue; 697 p = flowtype_to_str(i); 698 if (p) 699 printf(" %s\n", p); 700 else 701 printf(" user defined %d\n", i); 702 } 703 } 704 705 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 706 printf("Maximum configurable length of RX packet: %u\n", 707 dev_info.max_rx_pktlen); 708 printf("Maximum configurable size of LRO aggregated packet: %u\n", 709 dev_info.max_lro_pkt_size); 710 if (dev_info.max_vfs) 711 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 712 if (dev_info.max_vmdq_pools) 713 printf("Maximum number of VMDq pools: %u\n", 714 dev_info.max_vmdq_pools); 715 716 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 717 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 718 printf("Max possible number of RXDs per queue: %hu\n", 719 dev_info.rx_desc_lim.nb_max); 720 printf("Min possible number of RXDs per queue: %hu\n", 721 dev_info.rx_desc_lim.nb_min); 722 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 723 724 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 725 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 726 printf("Max possible number of TXDs per queue: %hu\n", 727 dev_info.tx_desc_lim.nb_max); 728 printf("Min possible number of TXDs per queue: %hu\n", 729 dev_info.tx_desc_lim.nb_min); 730 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 731 printf("Max segment number per packet: %hu\n", 732 dev_info.tx_desc_lim.nb_seg_max); 733 printf("Max segment number per MTU/TSO: %hu\n", 734 dev_info.tx_desc_lim.nb_mtu_seg_max); 735 736 /* Show switch info only if valid switch domain and port id is set */ 737 if (dev_info.switch_info.domain_id != 738 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 739 if (dev_info.switch_info.name) 740 printf("Switch name: %s\n", dev_info.switch_info.name); 741 742 printf("Switch domain Id: %u\n", 743 dev_info.switch_info.domain_id); 744 printf("Switch Port Id: %u\n", 745 dev_info.switch_info.port_id); 746 } 747 } 748 749 void 750 port_summary_header_display(void) 751 { 752 uint16_t port_number; 753 754 port_number = rte_eth_dev_count_avail(); 755 printf("Number of available ports: %i\n", port_number); 756 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 757 "Driver", "Status", "Link"); 758 } 759 760 void 761 port_summary_display(portid_t port_id) 762 { 763 struct rte_ether_addr mac_addr; 764 struct rte_eth_link link; 765 struct rte_eth_dev_info dev_info; 766 char name[RTE_ETH_NAME_MAX_LEN]; 767 int ret; 768 769 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 770 print_valid_ports(); 771 return; 772 } 773 774 ret = eth_link_get_nowait_print_err(port_id, &link); 775 if (ret < 0) 776 return; 777 778 ret = eth_dev_info_get_print_err(port_id, &dev_info); 779 if (ret != 0) 780 return; 781 782 rte_eth_dev_get_name_by_port(port_id, name); 783 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 784 if (ret != 0) 785 return; 786 787 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 788 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 789 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 790 rte_eth_link_speed_to_str(link.link_speed)); 791 } 792 793 void 794 port_eeprom_display(portid_t port_id) 795 { 796 struct rte_dev_eeprom_info einfo; 797 int ret; 798 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 799 print_valid_ports(); 800 return; 801 } 802 803 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 804 if (len_eeprom < 0) { 805 switch (len_eeprom) { 806 case -ENODEV: 807 fprintf(stderr, "port index %d invalid\n", port_id); 808 break; 809 case -ENOTSUP: 810 fprintf(stderr, "operation not supported by device\n"); 811 break; 812 case -EIO: 813 fprintf(stderr, "device is removed\n"); 814 break; 815 default: 816 fprintf(stderr, "Unable to get EEPROM: %d\n", 817 len_eeprom); 818 break; 819 } 820 return; 821 } 822 823 char buf[len_eeprom]; 824 einfo.offset = 0; 825 einfo.length = len_eeprom; 826 einfo.data = buf; 827 828 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 829 if (ret != 0) { 830 switch (ret) { 831 case -ENODEV: 832 fprintf(stderr, "port index %d invalid\n", port_id); 833 break; 834 case -ENOTSUP: 835 fprintf(stderr, "operation not supported by device\n"); 836 break; 837 case -EIO: 838 fprintf(stderr, "device is removed\n"); 839 break; 840 default: 841 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 842 break; 843 } 844 return; 845 } 846 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 847 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 848 } 849 850 void 851 port_module_eeprom_display(portid_t port_id) 852 { 853 struct rte_eth_dev_module_info minfo; 854 struct rte_dev_eeprom_info einfo; 855 int ret; 856 857 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 858 print_valid_ports(); 859 return; 860 } 861 862 863 ret = rte_eth_dev_get_module_info(port_id, &minfo); 864 if (ret != 0) { 865 switch (ret) { 866 case -ENODEV: 867 fprintf(stderr, "port index %d invalid\n", port_id); 868 break; 869 case -ENOTSUP: 870 fprintf(stderr, "operation not supported by device\n"); 871 break; 872 case -EIO: 873 fprintf(stderr, "device is removed\n"); 874 break; 875 default: 876 fprintf(stderr, "Unable to get module EEPROM: %d\n", 877 ret); 878 break; 879 } 880 return; 881 } 882 883 char buf[minfo.eeprom_len]; 884 einfo.offset = 0; 885 einfo.length = minfo.eeprom_len; 886 einfo.data = buf; 887 888 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 889 if (ret != 0) { 890 switch (ret) { 891 case -ENODEV: 892 fprintf(stderr, "port index %d invalid\n", port_id); 893 break; 894 case -ENOTSUP: 895 fprintf(stderr, "operation not supported by device\n"); 896 break; 897 case -EIO: 898 fprintf(stderr, "device is removed\n"); 899 break; 900 default: 901 fprintf(stderr, "Unable to get module EEPROM: %d\n", 902 ret); 903 break; 904 } 905 return; 906 } 907 908 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 909 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 910 } 911 912 int 913 port_id_is_invalid(portid_t port_id, enum print_warning warning) 914 { 915 uint16_t pid; 916 917 if (port_id == (portid_t)RTE_PORT_ALL) 918 return 0; 919 920 RTE_ETH_FOREACH_DEV(pid) 921 if (port_id == pid) 922 return 0; 923 924 if (warning == ENABLED_WARN) 925 fprintf(stderr, "Invalid port %d\n", port_id); 926 927 return 1; 928 } 929 930 void print_valid_ports(void) 931 { 932 portid_t pid; 933 934 printf("The valid ports array is ["); 935 RTE_ETH_FOREACH_DEV(pid) { 936 printf(" %d", pid); 937 } 938 printf(" ]\n"); 939 } 940 941 static int 942 vlan_id_is_invalid(uint16_t vlan_id) 943 { 944 if (vlan_id < 4096) 945 return 0; 946 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 947 return 1; 948 } 949 950 static int 951 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 952 { 953 const struct rte_pci_device *pci_dev; 954 const struct rte_bus *bus; 955 uint64_t pci_len; 956 957 if (reg_off & 0x3) { 958 fprintf(stderr, 959 "Port register offset 0x%X not aligned on a 4-byte boundary\n", 960 (unsigned int)reg_off); 961 return 1; 962 } 963 964 if (!ports[port_id].dev_info.device) { 965 fprintf(stderr, "Invalid device\n"); 966 return 0; 967 } 968 969 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 970 if (bus && !strcmp(bus->name, "pci")) { 971 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 972 } else { 973 fprintf(stderr, "Not a PCI device\n"); 974 return 1; 975 } 976 977 pci_len = pci_dev->mem_resource[0].len; 978 if (reg_off >= pci_len) { 979 fprintf(stderr, 980 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", 981 port_id, (unsigned int)reg_off, (unsigned int)reg_off, 982 pci_len); 983 return 1; 984 } 985 return 0; 986 } 987 988 static int 989 reg_bit_pos_is_invalid(uint8_t bit_pos) 990 { 991 if (bit_pos <= 31) 992 return 0; 993 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); 994 return 1; 995 } 996 997 #define display_port_and_reg_off(port_id, reg_off) \ 998 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 999 1000 static inline void 1001 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1002 { 1003 display_port_and_reg_off(port_id, (unsigned)reg_off); 1004 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1005 } 1006 1007 void 1008 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1009 { 1010 uint32_t reg_v; 1011 1012 1013 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1014 return; 1015 if (port_reg_off_is_invalid(port_id, reg_off)) 1016 return; 1017 if (reg_bit_pos_is_invalid(bit_x)) 1018 return; 1019 reg_v = port_id_pci_reg_read(port_id, reg_off); 1020 display_port_and_reg_off(port_id, (unsigned)reg_off); 1021 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1022 } 1023 1024 void 1025 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1026 uint8_t bit1_pos, uint8_t bit2_pos) 1027 { 1028 uint32_t reg_v; 1029 uint8_t l_bit; 1030 uint8_t h_bit; 1031 1032 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1033 return; 1034 if (port_reg_off_is_invalid(port_id, reg_off)) 1035 return; 1036 if (reg_bit_pos_is_invalid(bit1_pos)) 1037 return; 1038 if (reg_bit_pos_is_invalid(bit2_pos)) 1039 return; 1040 if (bit1_pos > bit2_pos) 1041 l_bit = bit2_pos, h_bit = bit1_pos; 1042 else 1043 l_bit = bit1_pos, h_bit = bit2_pos; 1044 1045 reg_v = port_id_pci_reg_read(port_id, reg_off); 1046 reg_v >>= l_bit; 1047 if (h_bit < 31) 1048 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1049 display_port_and_reg_off(port_id, (unsigned)reg_off); 1050 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1051 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1052 } 1053 1054 void 1055 port_reg_display(portid_t port_id, uint32_t reg_off) 1056 { 1057 uint32_t reg_v; 1058 1059 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1060 return; 1061 if (port_reg_off_is_invalid(port_id, reg_off)) 1062 return; 1063 reg_v = port_id_pci_reg_read(port_id, reg_off); 1064 display_port_reg_value(port_id, reg_off, reg_v); 1065 } 1066 1067 void 1068 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1069 uint8_t bit_v) 1070 { 1071 uint32_t reg_v; 1072 1073 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1074 return; 1075 if (port_reg_off_is_invalid(port_id, reg_off)) 1076 return; 1077 if (reg_bit_pos_is_invalid(bit_pos)) 1078 return; 1079 if (bit_v > 1) { 1080 fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n", 1081 (int) bit_v); 1082 return; 1083 } 1084 reg_v = port_id_pci_reg_read(port_id, reg_off); 1085 if (bit_v == 0) 1086 reg_v &= ~(1 << bit_pos); 1087 else 1088 reg_v |= (1 << bit_pos); 1089 port_id_pci_reg_write(port_id, reg_off, reg_v); 1090 display_port_reg_value(port_id, reg_off, reg_v); 1091 } 1092 1093 void 1094 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1095 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1096 { 1097 uint32_t max_v; 1098 uint32_t reg_v; 1099 uint8_t l_bit; 1100 uint8_t h_bit; 1101 1102 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1103 return; 1104 if (port_reg_off_is_invalid(port_id, reg_off)) 1105 return; 1106 if (reg_bit_pos_is_invalid(bit1_pos)) 1107 return; 1108 if (reg_bit_pos_is_invalid(bit2_pos)) 1109 return; 1110 if (bit1_pos > bit2_pos) 1111 l_bit = bit2_pos, h_bit = bit1_pos; 1112 else 1113 l_bit = bit1_pos, h_bit = bit2_pos; 1114 1115 if ((h_bit - l_bit) < 31) 1116 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1117 else 1118 max_v = 0xFFFFFFFF; 1119 1120 if (value > max_v) { 1121 fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n", 1122 (unsigned)value, (unsigned)value, 1123 (unsigned)max_v, (unsigned)max_v); 1124 return; 1125 } 1126 reg_v = port_id_pci_reg_read(port_id, reg_off); 1127 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1128 reg_v |= (value << l_bit); /* Set changed bits */ 1129 port_id_pci_reg_write(port_id, reg_off, reg_v); 1130 display_port_reg_value(port_id, reg_off, reg_v); 1131 } 1132 1133 void 1134 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1135 { 1136 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1137 return; 1138 if (port_reg_off_is_invalid(port_id, reg_off)) 1139 return; 1140 port_id_pci_reg_write(port_id, reg_off, reg_v); 1141 display_port_reg_value(port_id, reg_off, reg_v); 1142 } 1143 1144 void 1145 port_mtu_set(portid_t port_id, uint16_t mtu) 1146 { 1147 int diag; 1148 struct rte_port *rte_port = &ports[port_id]; 1149 struct rte_eth_dev_info dev_info; 1150 uint16_t eth_overhead; 1151 int ret; 1152 1153 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1154 return; 1155 1156 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1157 if (ret != 0) 1158 return; 1159 1160 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1161 fprintf(stderr, 1162 "Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1163 mtu, dev_info.min_mtu, dev_info.max_mtu); 1164 return; 1165 } 1166 diag = rte_eth_dev_set_mtu(port_id, mtu); 1167 if (diag) 1168 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1169 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1170 /* 1171 * Ether overhead in driver is equal to the difference of 1172 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the 1173 * device supports jumbo frame. 1174 */ 1175 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; 1176 if (mtu > RTE_ETHER_MTU) { 1177 rte_port->dev_conf.rxmode.offloads |= 1178 DEV_RX_OFFLOAD_JUMBO_FRAME; 1179 rte_port->dev_conf.rxmode.max_rx_pkt_len = 1180 mtu + eth_overhead; 1181 } else 1182 rte_port->dev_conf.rxmode.offloads &= 1183 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1184 } 1185 } 1186 1187 /* Generic flow management functions. */ 1188 1189 static struct port_flow_tunnel * 1190 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1191 { 1192 struct port_flow_tunnel *flow_tunnel; 1193 1194 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1195 if (flow_tunnel->id == port_tunnel_id) 1196 goto out; 1197 } 1198 flow_tunnel = NULL; 1199 1200 out: 1201 return flow_tunnel; 1202 } 1203 1204 const char * 1205 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1206 { 1207 const char *type; 1208 switch (tunnel->type) { 1209 default: 1210 type = "unknown"; 1211 break; 1212 case RTE_FLOW_ITEM_TYPE_VXLAN: 1213 type = "vxlan"; 1214 break; 1215 } 1216 1217 return type; 1218 } 1219 1220 struct port_flow_tunnel * 1221 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1222 { 1223 struct rte_port *port = &ports[port_id]; 1224 struct port_flow_tunnel *flow_tunnel; 1225 1226 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1227 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1228 goto out; 1229 } 1230 flow_tunnel = NULL; 1231 1232 out: 1233 return flow_tunnel; 1234 } 1235 1236 void port_flow_tunnel_list(portid_t port_id) 1237 { 1238 struct rte_port *port = &ports[port_id]; 1239 struct port_flow_tunnel *flt; 1240 1241 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1242 printf("port %u tunnel #%u type=%s", 1243 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1244 if (flt->tunnel.tun_id) 1245 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1246 printf("\n"); 1247 } 1248 } 1249 1250 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1251 { 1252 struct rte_port *port = &ports[port_id]; 1253 struct port_flow_tunnel *flt; 1254 1255 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1256 if (flt->id == tunnel_id) 1257 break; 1258 } 1259 if (flt) { 1260 LIST_REMOVE(flt, chain); 1261 free(flt); 1262 printf("port %u: flow tunnel #%u destroyed\n", 1263 port_id, tunnel_id); 1264 } 1265 } 1266 1267 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1268 { 1269 struct rte_port *port = &ports[port_id]; 1270 enum rte_flow_item_type type; 1271 struct port_flow_tunnel *flt; 1272 1273 if (!strcmp(ops->type, "vxlan")) 1274 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1275 else { 1276 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1277 ops->type); 1278 return; 1279 } 1280 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1281 if (flt->tunnel.type == type) 1282 break; 1283 } 1284 if (!flt) { 1285 flt = calloc(1, sizeof(*flt)); 1286 if (!flt) { 1287 fprintf(stderr, "failed to allocate port flt object\n"); 1288 return; 1289 } 1290 flt->tunnel.type = type; 1291 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1292 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1293 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1294 } 1295 printf("port %d: flow tunnel #%u type %s\n", 1296 port_id, flt->id, ops->type); 1297 } 1298 1299 /** Generate a port_flow entry from attributes/pattern/actions. */ 1300 static struct port_flow * 1301 port_flow_new(const struct rte_flow_attr *attr, 1302 const struct rte_flow_item *pattern, 1303 const struct rte_flow_action *actions, 1304 struct rte_flow_error *error) 1305 { 1306 const struct rte_flow_conv_rule rule = { 1307 .attr_ro = attr, 1308 .pattern_ro = pattern, 1309 .actions_ro = actions, 1310 }; 1311 struct port_flow *pf; 1312 int ret; 1313 1314 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1315 if (ret < 0) 1316 return NULL; 1317 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1318 if (!pf) { 1319 rte_flow_error_set 1320 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1321 "calloc() failed"); 1322 return NULL; 1323 } 1324 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1325 error) >= 0) 1326 return pf; 1327 free(pf); 1328 return NULL; 1329 } 1330 1331 /** Print a message out of a flow error. */ 1332 static int 1333 port_flow_complain(struct rte_flow_error *error) 1334 { 1335 static const char *const errstrlist[] = { 1336 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1337 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1338 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1339 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1340 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1341 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1342 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1343 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1344 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1345 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1346 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1347 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1348 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1349 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1350 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1351 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1352 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1353 }; 1354 const char *errstr; 1355 char buf[32]; 1356 int err = rte_errno; 1357 1358 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1359 !errstrlist[error->type]) 1360 errstr = "unknown type"; 1361 else 1362 errstr = errstrlist[error->type]; 1363 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1364 __func__, error->type, errstr, 1365 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1366 error->cause), buf) : "", 1367 error->message ? error->message : "(no stated reason)", 1368 rte_strerror(err)); 1369 return -err; 1370 } 1371 1372 static void 1373 rss_config_display(struct rte_flow_action_rss *rss_conf) 1374 { 1375 uint8_t i; 1376 1377 if (rss_conf == NULL) { 1378 fprintf(stderr, "Invalid rule\n"); 1379 return; 1380 } 1381 1382 printf("RSS:\n" 1383 " queues:"); 1384 if (rss_conf->queue_num == 0) 1385 printf(" none"); 1386 for (i = 0; i < rss_conf->queue_num; i++) 1387 printf(" %d", rss_conf->queue[i]); 1388 printf("\n"); 1389 1390 printf(" function: "); 1391 switch (rss_conf->func) { 1392 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1393 printf("default\n"); 1394 break; 1395 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1396 printf("toeplitz\n"); 1397 break; 1398 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1399 printf("simple_xor\n"); 1400 break; 1401 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1402 printf("symmetric_toeplitz\n"); 1403 break; 1404 default: 1405 printf("Unknown function\n"); 1406 return; 1407 } 1408 1409 printf(" types:\n"); 1410 if (rss_conf->types == 0) { 1411 printf(" none\n"); 1412 return; 1413 } 1414 for (i = 0; rss_type_table[i].str; i++) { 1415 if ((rss_conf->types & 1416 rss_type_table[i].rss_type) == 1417 rss_type_table[i].rss_type && 1418 rss_type_table[i].rss_type != 0) 1419 printf(" %s\n", rss_type_table[i].str); 1420 } 1421 } 1422 1423 static struct port_indirect_action * 1424 action_get_by_id(portid_t port_id, uint32_t id) 1425 { 1426 struct rte_port *port; 1427 struct port_indirect_action **ppia; 1428 struct port_indirect_action *pia = NULL; 1429 1430 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1431 port_id == (portid_t)RTE_PORT_ALL) 1432 return NULL; 1433 port = &ports[port_id]; 1434 ppia = &port->actions_list; 1435 while (*ppia) { 1436 if ((*ppia)->id == id) { 1437 pia = *ppia; 1438 break; 1439 } 1440 ppia = &(*ppia)->next; 1441 } 1442 if (!pia) 1443 fprintf(stderr, 1444 "Failed to find indirect action #%u on port %u\n", 1445 id, port_id); 1446 return pia; 1447 } 1448 1449 static int 1450 action_alloc(portid_t port_id, uint32_t id, 1451 struct port_indirect_action **action) 1452 { 1453 struct rte_port *port; 1454 struct port_indirect_action **ppia; 1455 struct port_indirect_action *pia = NULL; 1456 1457 *action = NULL; 1458 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1459 port_id == (portid_t)RTE_PORT_ALL) 1460 return -EINVAL; 1461 port = &ports[port_id]; 1462 if (id == UINT32_MAX) { 1463 /* taking first available ID */ 1464 if (port->actions_list) { 1465 if (port->actions_list->id == UINT32_MAX - 1) { 1466 fprintf(stderr, 1467 "Highest indirect action ID is already assigned, delete it first\n"); 1468 return -ENOMEM; 1469 } 1470 id = port->actions_list->id + 1; 1471 } else { 1472 id = 0; 1473 } 1474 } 1475 pia = calloc(1, sizeof(*pia)); 1476 if (!pia) { 1477 fprintf(stderr, 1478 "Allocation of port %u indirect action failed\n", 1479 port_id); 1480 return -ENOMEM; 1481 } 1482 ppia = &port->actions_list; 1483 while (*ppia && (*ppia)->id > id) 1484 ppia = &(*ppia)->next; 1485 if (*ppia && (*ppia)->id == id) { 1486 fprintf(stderr, 1487 "Indirect action #%u is already assigned, delete it first\n", 1488 id); 1489 free(pia); 1490 return -EINVAL; 1491 } 1492 pia->next = *ppia; 1493 pia->id = id; 1494 *ppia = pia; 1495 *action = pia; 1496 return 0; 1497 } 1498 1499 /** Create indirect action */ 1500 int 1501 port_action_handle_create(portid_t port_id, uint32_t id, 1502 const struct rte_flow_indir_action_conf *conf, 1503 const struct rte_flow_action *action) 1504 { 1505 struct port_indirect_action *pia; 1506 int ret; 1507 struct rte_flow_error error; 1508 1509 ret = action_alloc(port_id, id, &pia); 1510 if (ret) 1511 return ret; 1512 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1513 struct rte_flow_action_age *age = 1514 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1515 1516 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1517 age->context = &pia->age_type; 1518 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1519 struct rte_flow_action_conntrack *ct = 1520 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1521 1522 memcpy(ct, &conntrack_context, sizeof(*ct)); 1523 } 1524 /* Poisoning to make sure PMDs update it in case of error. */ 1525 memset(&error, 0x22, sizeof(error)); 1526 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1527 &error); 1528 if (!pia->handle) { 1529 uint32_t destroy_id = pia->id; 1530 port_action_handle_destroy(port_id, 1, &destroy_id); 1531 return port_flow_complain(&error); 1532 } 1533 pia->type = action->type; 1534 printf("Indirect action #%u created\n", pia->id); 1535 return 0; 1536 } 1537 1538 /** Destroy indirect action */ 1539 int 1540 port_action_handle_destroy(portid_t port_id, 1541 uint32_t n, 1542 const uint32_t *actions) 1543 { 1544 struct rte_port *port; 1545 struct port_indirect_action **tmp; 1546 uint32_t c = 0; 1547 int ret = 0; 1548 1549 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1550 port_id == (portid_t)RTE_PORT_ALL) 1551 return -EINVAL; 1552 port = &ports[port_id]; 1553 tmp = &port->actions_list; 1554 while (*tmp) { 1555 uint32_t i; 1556 1557 for (i = 0; i != n; ++i) { 1558 struct rte_flow_error error; 1559 struct port_indirect_action *pia = *tmp; 1560 1561 if (actions[i] != pia->id) 1562 continue; 1563 /* 1564 * Poisoning to make sure PMDs update it in case 1565 * of error. 1566 */ 1567 memset(&error, 0x33, sizeof(error)); 1568 1569 if (pia->handle && rte_flow_action_handle_destroy( 1570 port_id, pia->handle, &error)) { 1571 ret = port_flow_complain(&error); 1572 continue; 1573 } 1574 *tmp = pia->next; 1575 printf("Indirect action #%u destroyed\n", pia->id); 1576 free(pia); 1577 break; 1578 } 1579 if (i == n) 1580 tmp = &(*tmp)->next; 1581 ++c; 1582 } 1583 return ret; 1584 } 1585 1586 1587 /** Get indirect action by port + id */ 1588 struct rte_flow_action_handle * 1589 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1590 { 1591 1592 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1593 1594 return (pia) ? pia->handle : NULL; 1595 } 1596 1597 /** Update indirect action */ 1598 int 1599 port_action_handle_update(portid_t port_id, uint32_t id, 1600 const struct rte_flow_action *action) 1601 { 1602 struct rte_flow_error error; 1603 struct rte_flow_action_handle *action_handle; 1604 struct port_indirect_action *pia; 1605 const void *update; 1606 1607 action_handle = port_action_handle_get_by_id(port_id, id); 1608 if (!action_handle) 1609 return -EINVAL; 1610 pia = action_get_by_id(port_id, id); 1611 if (!pia) 1612 return -EINVAL; 1613 switch (pia->type) { 1614 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1615 update = action->conf; 1616 break; 1617 default: 1618 update = action; 1619 break; 1620 } 1621 if (rte_flow_action_handle_update(port_id, action_handle, update, 1622 &error)) { 1623 return port_flow_complain(&error); 1624 } 1625 printf("Indirect action #%u updated\n", id); 1626 return 0; 1627 } 1628 1629 int 1630 port_action_handle_query(portid_t port_id, uint32_t id) 1631 { 1632 struct rte_flow_error error; 1633 struct port_indirect_action *pia; 1634 union { 1635 struct rte_flow_query_count count; 1636 struct rte_flow_query_age age; 1637 struct rte_flow_action_conntrack ct; 1638 } query; 1639 1640 pia = action_get_by_id(port_id, id); 1641 if (!pia) 1642 return -EINVAL; 1643 switch (pia->type) { 1644 case RTE_FLOW_ACTION_TYPE_AGE: 1645 case RTE_FLOW_ACTION_TYPE_COUNT: 1646 break; 1647 default: 1648 fprintf(stderr, 1649 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1650 id, pia->type, port_id); 1651 return -ENOTSUP; 1652 } 1653 /* Poisoning to make sure PMDs update it in case of error. */ 1654 memset(&error, 0x55, sizeof(error)); 1655 memset(&query, 0, sizeof(query)); 1656 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 1657 return port_flow_complain(&error); 1658 switch (pia->type) { 1659 case RTE_FLOW_ACTION_TYPE_AGE: 1660 printf("Indirect AGE action:\n" 1661 " aged: %u\n" 1662 " sec_since_last_hit_valid: %u\n" 1663 " sec_since_last_hit: %" PRIu32 "\n", 1664 query.age.aged, 1665 query.age.sec_since_last_hit_valid, 1666 query.age.sec_since_last_hit); 1667 break; 1668 case RTE_FLOW_ACTION_TYPE_COUNT: 1669 printf("Indirect COUNT action:\n" 1670 " hits_set: %u\n" 1671 " bytes_set: %u\n" 1672 " hits: %" PRIu64 "\n" 1673 " bytes: %" PRIu64 "\n", 1674 query.count.hits_set, 1675 query.count.bytes_set, 1676 query.count.hits, 1677 query.count.bytes); 1678 break; 1679 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1680 printf("Conntrack Context:\n" 1681 " Peer: %u, Flow dir: %s, Enable: %u\n" 1682 " Live: %u, SACK: %u, CACK: %u\n" 1683 " Packet dir: %s, Liberal: %u, State: %u\n" 1684 " Factor: %u, Retrans: %u, TCP flags: %u\n" 1685 " Last Seq: %u, Last ACK: %u\n" 1686 " Last Win: %u, Last End: %u\n", 1687 query.ct.peer_port, 1688 query.ct.is_original_dir ? "Original" : "Reply", 1689 query.ct.enable, query.ct.live_connection, 1690 query.ct.selective_ack, query.ct.challenge_ack_passed, 1691 query.ct.last_direction ? "Original" : "Reply", 1692 query.ct.liberal_mode, query.ct.state, 1693 query.ct.max_ack_window, query.ct.retransmission_limit, 1694 query.ct.last_index, query.ct.last_seq, 1695 query.ct.last_ack, query.ct.last_window, 1696 query.ct.last_end); 1697 printf(" Original Dir:\n" 1698 " scale: %u, fin: %u, ack seen: %u\n" 1699 " unacked data: %u\n Sent end: %u," 1700 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1701 query.ct.original_dir.scale, 1702 query.ct.original_dir.close_initiated, 1703 query.ct.original_dir.last_ack_seen, 1704 query.ct.original_dir.data_unacked, 1705 query.ct.original_dir.sent_end, 1706 query.ct.original_dir.reply_end, 1707 query.ct.original_dir.max_win, 1708 query.ct.original_dir.max_ack); 1709 printf(" Reply Dir:\n" 1710 " scale: %u, fin: %u, ack seen: %u\n" 1711 " unacked data: %u\n Sent end: %u," 1712 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1713 query.ct.reply_dir.scale, 1714 query.ct.reply_dir.close_initiated, 1715 query.ct.reply_dir.last_ack_seen, 1716 query.ct.reply_dir.data_unacked, 1717 query.ct.reply_dir.sent_end, 1718 query.ct.reply_dir.reply_end, 1719 query.ct.reply_dir.max_win, 1720 query.ct.reply_dir.max_ack); 1721 break; 1722 default: 1723 fprintf(stderr, 1724 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1725 id, pia->type, port_id); 1726 break; 1727 } 1728 return 0; 1729 } 1730 1731 static struct port_flow_tunnel * 1732 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1733 const struct rte_flow_item *pattern, 1734 const struct rte_flow_action *actions, 1735 const struct tunnel_ops *tunnel_ops) 1736 { 1737 int ret; 1738 struct rte_port *port; 1739 struct port_flow_tunnel *pft; 1740 struct rte_flow_error error; 1741 1742 port = &ports[port_id]; 1743 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 1744 if (!pft) { 1745 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 1746 tunnel_ops->id); 1747 return NULL; 1748 } 1749 if (tunnel_ops->actions) { 1750 uint32_t num_actions; 1751 const struct rte_flow_action *aptr; 1752 1753 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 1754 &pft->pmd_actions, 1755 &pft->num_pmd_actions, 1756 &error); 1757 if (ret) { 1758 port_flow_complain(&error); 1759 return NULL; 1760 } 1761 for (aptr = actions, num_actions = 1; 1762 aptr->type != RTE_FLOW_ACTION_TYPE_END; 1763 aptr++, num_actions++); 1764 pft->actions = malloc( 1765 (num_actions + pft->num_pmd_actions) * 1766 sizeof(actions[0])); 1767 if (!pft->actions) { 1768 rte_flow_tunnel_action_decap_release( 1769 port_id, pft->actions, 1770 pft->num_pmd_actions, &error); 1771 return NULL; 1772 } 1773 rte_memcpy(pft->actions, pft->pmd_actions, 1774 pft->num_pmd_actions * sizeof(actions[0])); 1775 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 1776 num_actions * sizeof(actions[0])); 1777 } 1778 if (tunnel_ops->items) { 1779 uint32_t num_items; 1780 const struct rte_flow_item *iptr; 1781 1782 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 1783 &pft->pmd_items, 1784 &pft->num_pmd_items, 1785 &error); 1786 if (ret) { 1787 port_flow_complain(&error); 1788 return NULL; 1789 } 1790 for (iptr = pattern, num_items = 1; 1791 iptr->type != RTE_FLOW_ITEM_TYPE_END; 1792 iptr++, num_items++); 1793 pft->items = malloc((num_items + pft->num_pmd_items) * 1794 sizeof(pattern[0])); 1795 if (!pft->items) { 1796 rte_flow_tunnel_item_release( 1797 port_id, pft->pmd_items, 1798 pft->num_pmd_items, &error); 1799 return NULL; 1800 } 1801 rte_memcpy(pft->items, pft->pmd_items, 1802 pft->num_pmd_items * sizeof(pattern[0])); 1803 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 1804 num_items * sizeof(pattern[0])); 1805 } 1806 1807 return pft; 1808 } 1809 1810 static void 1811 port_flow_tunnel_offload_cmd_release(portid_t port_id, 1812 const struct tunnel_ops *tunnel_ops, 1813 struct port_flow_tunnel *pft) 1814 { 1815 struct rte_flow_error error; 1816 1817 if (tunnel_ops->actions) { 1818 free(pft->actions); 1819 rte_flow_tunnel_action_decap_release( 1820 port_id, pft->pmd_actions, 1821 pft->num_pmd_actions, &error); 1822 pft->actions = NULL; 1823 pft->pmd_actions = NULL; 1824 } 1825 if (tunnel_ops->items) { 1826 free(pft->items); 1827 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 1828 pft->num_pmd_items, 1829 &error); 1830 pft->items = NULL; 1831 pft->pmd_items = NULL; 1832 } 1833 } 1834 1835 /** Add port meter policy */ 1836 int 1837 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 1838 const struct rte_flow_action *actions) 1839 { 1840 struct rte_mtr_error error; 1841 const struct rte_flow_action *act = actions; 1842 const struct rte_flow_action *start; 1843 struct rte_mtr_meter_policy_params policy; 1844 uint32_t i = 0, act_n; 1845 int ret; 1846 1847 for (i = 0; i < RTE_COLORS; i++) { 1848 for (act_n = 0, start = act; 1849 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 1850 act_n++; 1851 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 1852 policy.actions[i] = start; 1853 else 1854 policy.actions[i] = NULL; 1855 act++; 1856 } 1857 ret = rte_mtr_meter_policy_add(port_id, 1858 policy_id, 1859 &policy, &error); 1860 if (ret) 1861 print_mtr_err_msg(&error); 1862 return ret; 1863 } 1864 1865 /** Validate flow rule. */ 1866 int 1867 port_flow_validate(portid_t port_id, 1868 const struct rte_flow_attr *attr, 1869 const struct rte_flow_item *pattern, 1870 const struct rte_flow_action *actions, 1871 const struct tunnel_ops *tunnel_ops) 1872 { 1873 struct rte_flow_error error; 1874 struct port_flow_tunnel *pft = NULL; 1875 1876 /* Poisoning to make sure PMDs update it in case of error. */ 1877 memset(&error, 0x11, sizeof(error)); 1878 if (tunnel_ops->enabled) { 1879 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 1880 actions, tunnel_ops); 1881 if (!pft) 1882 return -ENOENT; 1883 if (pft->items) 1884 pattern = pft->items; 1885 if (pft->actions) 1886 actions = pft->actions; 1887 } 1888 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1889 return port_flow_complain(&error); 1890 if (tunnel_ops->enabled) 1891 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 1892 printf("Flow rule validated\n"); 1893 return 0; 1894 } 1895 1896 /** Return age action structure if exists, otherwise NULL. */ 1897 static struct rte_flow_action_age * 1898 age_action_get(const struct rte_flow_action *actions) 1899 { 1900 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1901 switch (actions->type) { 1902 case RTE_FLOW_ACTION_TYPE_AGE: 1903 return (struct rte_flow_action_age *) 1904 (uintptr_t)actions->conf; 1905 default: 1906 break; 1907 } 1908 } 1909 return NULL; 1910 } 1911 1912 /** Create flow rule. */ 1913 int 1914 port_flow_create(portid_t port_id, 1915 const struct rte_flow_attr *attr, 1916 const struct rte_flow_item *pattern, 1917 const struct rte_flow_action *actions, 1918 const struct tunnel_ops *tunnel_ops) 1919 { 1920 struct rte_flow *flow; 1921 struct rte_port *port; 1922 struct port_flow *pf; 1923 uint32_t id = 0; 1924 struct rte_flow_error error; 1925 struct port_flow_tunnel *pft = NULL; 1926 struct rte_flow_action_age *age = age_action_get(actions); 1927 1928 port = &ports[port_id]; 1929 if (port->flow_list) { 1930 if (port->flow_list->id == UINT32_MAX) { 1931 fprintf(stderr, 1932 "Highest rule ID is already assigned, delete it first"); 1933 return -ENOMEM; 1934 } 1935 id = port->flow_list->id + 1; 1936 } 1937 if (tunnel_ops->enabled) { 1938 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 1939 actions, tunnel_ops); 1940 if (!pft) 1941 return -ENOENT; 1942 if (pft->items) 1943 pattern = pft->items; 1944 if (pft->actions) 1945 actions = pft->actions; 1946 } 1947 pf = port_flow_new(attr, pattern, actions, &error); 1948 if (!pf) 1949 return port_flow_complain(&error); 1950 if (age) { 1951 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 1952 age->context = &pf->age_type; 1953 } 1954 /* Poisoning to make sure PMDs update it in case of error. */ 1955 memset(&error, 0x22, sizeof(error)); 1956 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1957 if (!flow) { 1958 if (tunnel_ops->enabled) 1959 port_flow_tunnel_offload_cmd_release(port_id, 1960 tunnel_ops, pft); 1961 free(pf); 1962 return port_flow_complain(&error); 1963 } 1964 pf->next = port->flow_list; 1965 pf->id = id; 1966 pf->flow = flow; 1967 port->flow_list = pf; 1968 if (tunnel_ops->enabled) 1969 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 1970 printf("Flow rule #%u created\n", pf->id); 1971 return 0; 1972 } 1973 1974 /** Destroy a number of flow rules. */ 1975 int 1976 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1977 { 1978 struct rte_port *port; 1979 struct port_flow **tmp; 1980 uint32_t c = 0; 1981 int ret = 0; 1982 1983 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1984 port_id == (portid_t)RTE_PORT_ALL) 1985 return -EINVAL; 1986 port = &ports[port_id]; 1987 tmp = &port->flow_list; 1988 while (*tmp) { 1989 uint32_t i; 1990 1991 for (i = 0; i != n; ++i) { 1992 struct rte_flow_error error; 1993 struct port_flow *pf = *tmp; 1994 1995 if (rule[i] != pf->id) 1996 continue; 1997 /* 1998 * Poisoning to make sure PMDs update it in case 1999 * of error. 2000 */ 2001 memset(&error, 0x33, sizeof(error)); 2002 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2003 ret = port_flow_complain(&error); 2004 continue; 2005 } 2006 printf("Flow rule #%u destroyed\n", pf->id); 2007 *tmp = pf->next; 2008 free(pf); 2009 break; 2010 } 2011 if (i == n) 2012 tmp = &(*tmp)->next; 2013 ++c; 2014 } 2015 return ret; 2016 } 2017 2018 /** Remove all flow rules. */ 2019 int 2020 port_flow_flush(portid_t port_id) 2021 { 2022 struct rte_flow_error error; 2023 struct rte_port *port; 2024 int ret = 0; 2025 2026 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2027 port_id == (portid_t)RTE_PORT_ALL) 2028 return -EINVAL; 2029 2030 port = &ports[port_id]; 2031 2032 if (port->flow_list == NULL) 2033 return ret; 2034 2035 /* Poisoning to make sure PMDs update it in case of error. */ 2036 memset(&error, 0x44, sizeof(error)); 2037 if (rte_flow_flush(port_id, &error)) { 2038 port_flow_complain(&error); 2039 } 2040 2041 while (port->flow_list) { 2042 struct port_flow *pf = port->flow_list->next; 2043 2044 free(port->flow_list); 2045 port->flow_list = pf; 2046 } 2047 return ret; 2048 } 2049 2050 /** Dump flow rules. */ 2051 int 2052 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 2053 const char *file_name) 2054 { 2055 int ret = 0; 2056 FILE *file = stdout; 2057 struct rte_flow_error error; 2058 struct rte_port *port; 2059 struct port_flow *pflow; 2060 struct rte_flow *tmpFlow = NULL; 2061 bool found = false; 2062 2063 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2064 port_id == (portid_t)RTE_PORT_ALL) 2065 return -EINVAL; 2066 2067 if (!dump_all) { 2068 port = &ports[port_id]; 2069 pflow = port->flow_list; 2070 while (pflow) { 2071 if (rule_id != pflow->id) { 2072 pflow = pflow->next; 2073 } else { 2074 tmpFlow = pflow->flow; 2075 if (tmpFlow) 2076 found = true; 2077 break; 2078 } 2079 } 2080 if (found == false) { 2081 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 2082 return -EINVAL; 2083 } 2084 } 2085 2086 if (file_name && strlen(file_name)) { 2087 file = fopen(file_name, "w"); 2088 if (!file) { 2089 fprintf(stderr, "Failed to create file %s: %s\n", 2090 file_name, strerror(errno)); 2091 return -errno; 2092 } 2093 } 2094 2095 if (!dump_all) 2096 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 2097 else 2098 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 2099 if (ret) { 2100 port_flow_complain(&error); 2101 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 2102 } else 2103 printf("Flow dump finished\n"); 2104 if (file_name && strlen(file_name)) 2105 fclose(file); 2106 return ret; 2107 } 2108 2109 /** Query a flow rule. */ 2110 int 2111 port_flow_query(portid_t port_id, uint32_t rule, 2112 const struct rte_flow_action *action) 2113 { 2114 struct rte_flow_error error; 2115 struct rte_port *port; 2116 struct port_flow *pf; 2117 const char *name; 2118 union { 2119 struct rte_flow_query_count count; 2120 struct rte_flow_action_rss rss_conf; 2121 struct rte_flow_query_age age; 2122 } query; 2123 int ret; 2124 2125 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2126 port_id == (portid_t)RTE_PORT_ALL) 2127 return -EINVAL; 2128 port = &ports[port_id]; 2129 for (pf = port->flow_list; pf; pf = pf->next) 2130 if (pf->id == rule) 2131 break; 2132 if (!pf) { 2133 fprintf(stderr, "Flow rule #%u not found\n", rule); 2134 return -ENOENT; 2135 } 2136 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2137 &name, sizeof(name), 2138 (void *)(uintptr_t)action->type, &error); 2139 if (ret < 0) 2140 return port_flow_complain(&error); 2141 switch (action->type) { 2142 case RTE_FLOW_ACTION_TYPE_COUNT: 2143 case RTE_FLOW_ACTION_TYPE_RSS: 2144 case RTE_FLOW_ACTION_TYPE_AGE: 2145 break; 2146 default: 2147 fprintf(stderr, "Cannot query action type %d (%s)\n", 2148 action->type, name); 2149 return -ENOTSUP; 2150 } 2151 /* Poisoning to make sure PMDs update it in case of error. */ 2152 memset(&error, 0x55, sizeof(error)); 2153 memset(&query, 0, sizeof(query)); 2154 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2155 return port_flow_complain(&error); 2156 switch (action->type) { 2157 case RTE_FLOW_ACTION_TYPE_COUNT: 2158 printf("%s:\n" 2159 " hits_set: %u\n" 2160 " bytes_set: %u\n" 2161 " hits: %" PRIu64 "\n" 2162 " bytes: %" PRIu64 "\n", 2163 name, 2164 query.count.hits_set, 2165 query.count.bytes_set, 2166 query.count.hits, 2167 query.count.bytes); 2168 break; 2169 case RTE_FLOW_ACTION_TYPE_RSS: 2170 rss_config_display(&query.rss_conf); 2171 break; 2172 case RTE_FLOW_ACTION_TYPE_AGE: 2173 printf("%s:\n" 2174 " aged: %u\n" 2175 " sec_since_last_hit_valid: %u\n" 2176 " sec_since_last_hit: %" PRIu32 "\n", 2177 name, 2178 query.age.aged, 2179 query.age.sec_since_last_hit_valid, 2180 query.age.sec_since_last_hit); 2181 break; 2182 default: 2183 fprintf(stderr, 2184 "Cannot display result for action type %d (%s)\n", 2185 action->type, name); 2186 break; 2187 } 2188 return 0; 2189 } 2190 2191 /** List simply and destroy all aged flows. */ 2192 void 2193 port_flow_aged(portid_t port_id, uint8_t destroy) 2194 { 2195 void **contexts; 2196 int nb_context, total = 0, idx; 2197 struct rte_flow_error error; 2198 enum age_action_context_type *type; 2199 union { 2200 struct port_flow *pf; 2201 struct port_indirect_action *pia; 2202 } ctx; 2203 2204 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2205 port_id == (portid_t)RTE_PORT_ALL) 2206 return; 2207 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2208 printf("Port %u total aged flows: %d\n", port_id, total); 2209 if (total < 0) { 2210 port_flow_complain(&error); 2211 return; 2212 } 2213 if (total == 0) 2214 return; 2215 contexts = malloc(sizeof(void *) * total); 2216 if (contexts == NULL) { 2217 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 2218 return; 2219 } 2220 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 2221 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2222 if (nb_context != total) { 2223 fprintf(stderr, 2224 "Port:%d get aged flows count(%d) != total(%d)\n", 2225 port_id, nb_context, total); 2226 free(contexts); 2227 return; 2228 } 2229 total = 0; 2230 for (idx = 0; idx < nb_context; idx++) { 2231 if (!contexts[idx]) { 2232 fprintf(stderr, "Error: get Null context in port %u\n", 2233 port_id); 2234 continue; 2235 } 2236 type = (enum age_action_context_type *)contexts[idx]; 2237 switch (*type) { 2238 case ACTION_AGE_CONTEXT_TYPE_FLOW: 2239 ctx.pf = container_of(type, struct port_flow, age_type); 2240 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 2241 "\t%c%c%c\t\n", 2242 "Flow", 2243 ctx.pf->id, 2244 ctx.pf->rule.attr->group, 2245 ctx.pf->rule.attr->priority, 2246 ctx.pf->rule.attr->ingress ? 'i' : '-', 2247 ctx.pf->rule.attr->egress ? 'e' : '-', 2248 ctx.pf->rule.attr->transfer ? 't' : '-'); 2249 if (destroy && !port_flow_destroy(port_id, 1, 2250 &ctx.pf->id)) 2251 total++; 2252 break; 2253 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 2254 ctx.pia = container_of(type, 2255 struct port_indirect_action, age_type); 2256 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 2257 ctx.pia->id); 2258 break; 2259 default: 2260 fprintf(stderr, "Error: invalid context type %u\n", 2261 port_id); 2262 break; 2263 } 2264 } 2265 printf("\n%d flows destroyed\n", total); 2266 free(contexts); 2267 } 2268 2269 /** List flow rules. */ 2270 void 2271 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2272 { 2273 struct rte_port *port; 2274 struct port_flow *pf; 2275 struct port_flow *list = NULL; 2276 uint32_t i; 2277 2278 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2279 port_id == (portid_t)RTE_PORT_ALL) 2280 return; 2281 port = &ports[port_id]; 2282 if (!port->flow_list) 2283 return; 2284 /* Sort flows by group, priority and ID. */ 2285 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2286 struct port_flow **tmp; 2287 const struct rte_flow_attr *curr = pf->rule.attr; 2288 2289 if (n) { 2290 /* Filter out unwanted groups. */ 2291 for (i = 0; i != n; ++i) 2292 if (curr->group == group[i]) 2293 break; 2294 if (i == n) 2295 continue; 2296 } 2297 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2298 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2299 2300 if (curr->group > comp->group || 2301 (curr->group == comp->group && 2302 curr->priority > comp->priority) || 2303 (curr->group == comp->group && 2304 curr->priority == comp->priority && 2305 pf->id > (*tmp)->id)) 2306 continue; 2307 break; 2308 } 2309 pf->tmp = *tmp; 2310 *tmp = pf; 2311 } 2312 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2313 for (pf = list; pf != NULL; pf = pf->tmp) { 2314 const struct rte_flow_item *item = pf->rule.pattern; 2315 const struct rte_flow_action *action = pf->rule.actions; 2316 const char *name; 2317 2318 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2319 pf->id, 2320 pf->rule.attr->group, 2321 pf->rule.attr->priority, 2322 pf->rule.attr->ingress ? 'i' : '-', 2323 pf->rule.attr->egress ? 'e' : '-', 2324 pf->rule.attr->transfer ? 't' : '-'); 2325 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2326 if ((uint32_t)item->type > INT_MAX) 2327 name = "PMD_INTERNAL"; 2328 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2329 &name, sizeof(name), 2330 (void *)(uintptr_t)item->type, 2331 NULL) <= 0) 2332 name = "[UNKNOWN]"; 2333 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2334 printf("%s ", name); 2335 ++item; 2336 } 2337 printf("=>"); 2338 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2339 if ((uint32_t)action->type > INT_MAX) 2340 name = "PMD_INTERNAL"; 2341 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2342 &name, sizeof(name), 2343 (void *)(uintptr_t)action->type, 2344 NULL) <= 0) 2345 name = "[UNKNOWN]"; 2346 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2347 printf(" %s", name); 2348 ++action; 2349 } 2350 printf("\n"); 2351 } 2352 } 2353 2354 /** Restrict ingress traffic to the defined flow rules. */ 2355 int 2356 port_flow_isolate(portid_t port_id, int set) 2357 { 2358 struct rte_flow_error error; 2359 2360 /* Poisoning to make sure PMDs update it in case of error. */ 2361 memset(&error, 0x66, sizeof(error)); 2362 if (rte_flow_isolate(port_id, set, &error)) 2363 return port_flow_complain(&error); 2364 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2365 port_id, 2366 set ? "now restricted" : "not restricted anymore"); 2367 return 0; 2368 } 2369 2370 /* 2371 * RX/TX ring descriptors display functions. 2372 */ 2373 int 2374 rx_queue_id_is_invalid(queueid_t rxq_id) 2375 { 2376 if (rxq_id < nb_rxq) 2377 return 0; 2378 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 2379 rxq_id, nb_rxq); 2380 return 1; 2381 } 2382 2383 int 2384 tx_queue_id_is_invalid(queueid_t txq_id) 2385 { 2386 if (txq_id < nb_txq) 2387 return 0; 2388 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 2389 txq_id, nb_txq); 2390 return 1; 2391 } 2392 2393 static int 2394 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2395 { 2396 struct rte_port *port = &ports[port_id]; 2397 struct rte_eth_rxq_info rx_qinfo; 2398 int ret; 2399 2400 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2401 if (ret == 0) { 2402 *ring_size = rx_qinfo.nb_desc; 2403 return ret; 2404 } 2405 2406 if (ret != -ENOTSUP) 2407 return ret; 2408 /* 2409 * If the rte_eth_rx_queue_info_get is not support for this PMD, 2410 * ring_size stored in testpmd will be used for validity verification. 2411 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 2412 * being 0, it will use a default value provided by PMDs to setup this 2413 * rxq. If the default value is 0, it will use the 2414 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 2415 */ 2416 if (port->nb_rx_desc[rxq_id]) 2417 *ring_size = port->nb_rx_desc[rxq_id]; 2418 else if (port->dev_info.default_rxportconf.ring_size) 2419 *ring_size = port->dev_info.default_rxportconf.ring_size; 2420 else 2421 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2422 return 0; 2423 } 2424 2425 static int 2426 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2427 { 2428 struct rte_port *port = &ports[port_id]; 2429 struct rte_eth_txq_info tx_qinfo; 2430 int ret; 2431 2432 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2433 if (ret == 0) { 2434 *ring_size = tx_qinfo.nb_desc; 2435 return ret; 2436 } 2437 2438 if (ret != -ENOTSUP) 2439 return ret; 2440 /* 2441 * If the rte_eth_tx_queue_info_get is not support for this PMD, 2442 * ring_size stored in testpmd will be used for validity verification. 2443 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 2444 * being 0, it will use a default value provided by PMDs to setup this 2445 * txq. If the default value is 0, it will use the 2446 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 2447 */ 2448 if (port->nb_tx_desc[txq_id]) 2449 *ring_size = port->nb_tx_desc[txq_id]; 2450 else if (port->dev_info.default_txportconf.ring_size) 2451 *ring_size = port->dev_info.default_txportconf.ring_size; 2452 else 2453 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2454 return 0; 2455 } 2456 2457 static int 2458 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2459 { 2460 uint16_t ring_size; 2461 int ret; 2462 2463 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2464 if (ret) 2465 return 1; 2466 2467 if (rxdesc_id < ring_size) 2468 return 0; 2469 2470 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 2471 rxdesc_id, ring_size); 2472 return 1; 2473 } 2474 2475 static int 2476 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2477 { 2478 uint16_t ring_size; 2479 int ret; 2480 2481 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2482 if (ret) 2483 return 1; 2484 2485 if (txdesc_id < ring_size) 2486 return 0; 2487 2488 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 2489 txdesc_id, ring_size); 2490 return 1; 2491 } 2492 2493 static const struct rte_memzone * 2494 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2495 { 2496 char mz_name[RTE_MEMZONE_NAMESIZE]; 2497 const struct rte_memzone *mz; 2498 2499 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2500 port_id, q_id, ring_name); 2501 mz = rte_memzone_lookup(mz_name); 2502 if (mz == NULL) 2503 fprintf(stderr, 2504 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 2505 ring_name, port_id, q_id, mz_name); 2506 return mz; 2507 } 2508 2509 union igb_ring_dword { 2510 uint64_t dword; 2511 struct { 2512 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2513 uint32_t lo; 2514 uint32_t hi; 2515 #else 2516 uint32_t hi; 2517 uint32_t lo; 2518 #endif 2519 } words; 2520 }; 2521 2522 struct igb_ring_desc_32_bytes { 2523 union igb_ring_dword lo_dword; 2524 union igb_ring_dword hi_dword; 2525 union igb_ring_dword resv1; 2526 union igb_ring_dword resv2; 2527 }; 2528 2529 struct igb_ring_desc_16_bytes { 2530 union igb_ring_dword lo_dword; 2531 union igb_ring_dword hi_dword; 2532 }; 2533 2534 static void 2535 ring_rxd_display_dword(union igb_ring_dword dword) 2536 { 2537 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2538 (unsigned)dword.words.hi); 2539 } 2540 2541 static void 2542 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2543 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2544 portid_t port_id, 2545 #else 2546 __rte_unused portid_t port_id, 2547 #endif 2548 uint16_t desc_id) 2549 { 2550 struct igb_ring_desc_16_bytes *ring = 2551 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2552 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2553 int ret; 2554 struct rte_eth_dev_info dev_info; 2555 2556 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2557 if (ret != 0) 2558 return; 2559 2560 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2561 /* 32 bytes RX descriptor, i40e only */ 2562 struct igb_ring_desc_32_bytes *ring = 2563 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2564 ring[desc_id].lo_dword.dword = 2565 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2566 ring_rxd_display_dword(ring[desc_id].lo_dword); 2567 ring[desc_id].hi_dword.dword = 2568 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2569 ring_rxd_display_dword(ring[desc_id].hi_dword); 2570 ring[desc_id].resv1.dword = 2571 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2572 ring_rxd_display_dword(ring[desc_id].resv1); 2573 ring[desc_id].resv2.dword = 2574 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2575 ring_rxd_display_dword(ring[desc_id].resv2); 2576 2577 return; 2578 } 2579 #endif 2580 /* 16 bytes RX descriptor */ 2581 ring[desc_id].lo_dword.dword = 2582 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2583 ring_rxd_display_dword(ring[desc_id].lo_dword); 2584 ring[desc_id].hi_dword.dword = 2585 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2586 ring_rxd_display_dword(ring[desc_id].hi_dword); 2587 } 2588 2589 static void 2590 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2591 { 2592 struct igb_ring_desc_16_bytes *ring; 2593 struct igb_ring_desc_16_bytes txd; 2594 2595 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2596 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2597 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2598 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2599 (unsigned)txd.lo_dword.words.lo, 2600 (unsigned)txd.lo_dword.words.hi, 2601 (unsigned)txd.hi_dword.words.lo, 2602 (unsigned)txd.hi_dword.words.hi); 2603 } 2604 2605 void 2606 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2607 { 2608 const struct rte_memzone *rx_mz; 2609 2610 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2611 return; 2612 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2613 if (rx_mz == NULL) 2614 return; 2615 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2616 } 2617 2618 void 2619 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2620 { 2621 const struct rte_memzone *tx_mz; 2622 2623 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2624 return; 2625 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2626 if (tx_mz == NULL) 2627 return; 2628 ring_tx_descriptor_display(tx_mz, txd_id); 2629 } 2630 2631 void 2632 fwd_lcores_config_display(void) 2633 { 2634 lcoreid_t lc_id; 2635 2636 printf("List of forwarding lcores:"); 2637 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2638 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2639 printf("\n"); 2640 } 2641 void 2642 rxtx_config_display(void) 2643 { 2644 portid_t pid; 2645 queueid_t qid; 2646 2647 printf(" %s packet forwarding%s packets/burst=%d\n", 2648 cur_fwd_eng->fwd_mode_name, 2649 retry_enabled == 0 ? "" : " with retry", 2650 nb_pkt_per_burst); 2651 2652 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2653 printf(" packet len=%u - nb packet segments=%d\n", 2654 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2655 2656 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2657 nb_fwd_lcores, nb_fwd_ports); 2658 2659 RTE_ETH_FOREACH_DEV(pid) { 2660 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2661 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2662 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2663 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2664 struct rte_eth_rxq_info rx_qinfo; 2665 struct rte_eth_txq_info tx_qinfo; 2666 uint16_t rx_free_thresh_tmp; 2667 uint16_t tx_free_thresh_tmp; 2668 uint16_t tx_rs_thresh_tmp; 2669 uint16_t nb_rx_desc_tmp; 2670 uint16_t nb_tx_desc_tmp; 2671 uint64_t offloads_tmp; 2672 uint8_t pthresh_tmp; 2673 uint8_t hthresh_tmp; 2674 uint8_t wthresh_tmp; 2675 int32_t rc; 2676 2677 /* per port config */ 2678 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2679 (unsigned int)pid, nb_rxq, nb_txq); 2680 2681 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2682 ports[pid].dev_conf.rxmode.offloads, 2683 ports[pid].dev_conf.txmode.offloads); 2684 2685 /* per rx queue config only for first queue to be less verbose */ 2686 for (qid = 0; qid < 1; qid++) { 2687 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2688 if (rc) { 2689 nb_rx_desc_tmp = nb_rx_desc[qid]; 2690 rx_free_thresh_tmp = 2691 rx_conf[qid].rx_free_thresh; 2692 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2693 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2694 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2695 offloads_tmp = rx_conf[qid].offloads; 2696 } else { 2697 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2698 rx_free_thresh_tmp = 2699 rx_qinfo.conf.rx_free_thresh; 2700 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2701 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2702 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2703 offloads_tmp = rx_qinfo.conf.offloads; 2704 } 2705 2706 printf(" RX queue: %d\n", qid); 2707 printf(" RX desc=%d - RX free threshold=%d\n", 2708 nb_rx_desc_tmp, rx_free_thresh_tmp); 2709 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2710 " wthresh=%d\n", 2711 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2712 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); 2713 } 2714 2715 /* per tx queue config only for first queue to be less verbose */ 2716 for (qid = 0; qid < 1; qid++) { 2717 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2718 if (rc) { 2719 nb_tx_desc_tmp = nb_tx_desc[qid]; 2720 tx_free_thresh_tmp = 2721 tx_conf[qid].tx_free_thresh; 2722 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2723 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2724 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2725 offloads_tmp = tx_conf[qid].offloads; 2726 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2727 } else { 2728 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2729 tx_free_thresh_tmp = 2730 tx_qinfo.conf.tx_free_thresh; 2731 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2732 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2733 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2734 offloads_tmp = tx_qinfo.conf.offloads; 2735 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2736 } 2737 2738 printf(" TX queue: %d\n", qid); 2739 printf(" TX desc=%d - TX free threshold=%d\n", 2740 nb_tx_desc_tmp, tx_free_thresh_tmp); 2741 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2742 " wthresh=%d\n", 2743 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2744 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2745 offloads_tmp, tx_rs_thresh_tmp); 2746 } 2747 } 2748 } 2749 2750 void 2751 port_rss_reta_info(portid_t port_id, 2752 struct rte_eth_rss_reta_entry64 *reta_conf, 2753 uint16_t nb_entries) 2754 { 2755 uint16_t i, idx, shift; 2756 int ret; 2757 2758 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2759 return; 2760 2761 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2762 if (ret != 0) { 2763 fprintf(stderr, 2764 "Failed to get RSS RETA info, return code = %d\n", 2765 ret); 2766 return; 2767 } 2768 2769 for (i = 0; i < nb_entries; i++) { 2770 idx = i / RTE_RETA_GROUP_SIZE; 2771 shift = i % RTE_RETA_GROUP_SIZE; 2772 if (!(reta_conf[idx].mask & (1ULL << shift))) 2773 continue; 2774 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2775 i, reta_conf[idx].reta[shift]); 2776 } 2777 } 2778 2779 /* 2780 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 2781 * key of the port. 2782 */ 2783 void 2784 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2785 { 2786 struct rte_eth_rss_conf rss_conf = {0}; 2787 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2788 uint64_t rss_hf; 2789 uint8_t i; 2790 int diag; 2791 struct rte_eth_dev_info dev_info; 2792 uint8_t hash_key_size; 2793 int ret; 2794 2795 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2796 return; 2797 2798 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2799 if (ret != 0) 2800 return; 2801 2802 if (dev_info.hash_key_size > 0 && 2803 dev_info.hash_key_size <= sizeof(rss_key)) 2804 hash_key_size = dev_info.hash_key_size; 2805 else { 2806 fprintf(stderr, 2807 "dev_info did not provide a valid hash key size\n"); 2808 return; 2809 } 2810 2811 /* Get RSS hash key if asked to display it */ 2812 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 2813 rss_conf.rss_key_len = hash_key_size; 2814 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2815 if (diag != 0) { 2816 switch (diag) { 2817 case -ENODEV: 2818 fprintf(stderr, "port index %d invalid\n", port_id); 2819 break; 2820 case -ENOTSUP: 2821 fprintf(stderr, "operation not supported by device\n"); 2822 break; 2823 default: 2824 fprintf(stderr, "operation failed - diag=%d\n", diag); 2825 break; 2826 } 2827 return; 2828 } 2829 rss_hf = rss_conf.rss_hf; 2830 if (rss_hf == 0) { 2831 printf("RSS disabled\n"); 2832 return; 2833 } 2834 printf("RSS functions:\n "); 2835 for (i = 0; rss_type_table[i].str; i++) { 2836 if (rss_hf & rss_type_table[i].rss_type) 2837 printf("%s ", rss_type_table[i].str); 2838 } 2839 printf("\n"); 2840 if (!show_rss_key) 2841 return; 2842 printf("RSS key:\n"); 2843 for (i = 0; i < hash_key_size; i++) 2844 printf("%02X", rss_key[i]); 2845 printf("\n"); 2846 } 2847 2848 void 2849 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2850 uint8_t hash_key_len) 2851 { 2852 struct rte_eth_rss_conf rss_conf; 2853 int diag; 2854 unsigned int i; 2855 2856 rss_conf.rss_key = NULL; 2857 rss_conf.rss_key_len = hash_key_len; 2858 rss_conf.rss_hf = 0; 2859 for (i = 0; rss_type_table[i].str; i++) { 2860 if (!strcmp(rss_type_table[i].str, rss_type)) 2861 rss_conf.rss_hf = rss_type_table[i].rss_type; 2862 } 2863 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2864 if (diag == 0) { 2865 rss_conf.rss_key = hash_key; 2866 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2867 } 2868 if (diag == 0) 2869 return; 2870 2871 switch (diag) { 2872 case -ENODEV: 2873 fprintf(stderr, "port index %d invalid\n", port_id); 2874 break; 2875 case -ENOTSUP: 2876 fprintf(stderr, "operation not supported by device\n"); 2877 break; 2878 default: 2879 fprintf(stderr, "operation failed - diag=%d\n", diag); 2880 break; 2881 } 2882 } 2883 2884 /* 2885 * Setup forwarding configuration for each logical core. 2886 */ 2887 static void 2888 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2889 { 2890 streamid_t nb_fs_per_lcore; 2891 streamid_t nb_fs; 2892 streamid_t sm_id; 2893 lcoreid_t nb_extra; 2894 lcoreid_t nb_fc; 2895 lcoreid_t nb_lc; 2896 lcoreid_t lc_id; 2897 2898 nb_fs = cfg->nb_fwd_streams; 2899 nb_fc = cfg->nb_fwd_lcores; 2900 if (nb_fs <= nb_fc) { 2901 nb_fs_per_lcore = 1; 2902 nb_extra = 0; 2903 } else { 2904 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2905 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2906 } 2907 2908 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2909 sm_id = 0; 2910 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2911 fwd_lcores[lc_id]->stream_idx = sm_id; 2912 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2913 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2914 } 2915 2916 /* 2917 * Assign extra remaining streams, if any. 2918 */ 2919 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2920 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2921 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2922 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2923 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2924 } 2925 } 2926 2927 static portid_t 2928 fwd_topology_tx_port_get(portid_t rxp) 2929 { 2930 static int warning_once = 1; 2931 2932 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2933 2934 switch (port_topology) { 2935 default: 2936 case PORT_TOPOLOGY_PAIRED: 2937 if ((rxp & 0x1) == 0) { 2938 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2939 return rxp + 1; 2940 if (warning_once) { 2941 fprintf(stderr, 2942 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 2943 warning_once = 0; 2944 } 2945 return rxp; 2946 } 2947 return rxp - 1; 2948 case PORT_TOPOLOGY_CHAINED: 2949 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2950 case PORT_TOPOLOGY_LOOP: 2951 return rxp; 2952 } 2953 } 2954 2955 static void 2956 simple_fwd_config_setup(void) 2957 { 2958 portid_t i; 2959 2960 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2961 cur_fwd_config.nb_fwd_streams = 2962 (streamid_t) cur_fwd_config.nb_fwd_ports; 2963 2964 /* reinitialize forwarding streams */ 2965 init_fwd_streams(); 2966 2967 /* 2968 * In the simple forwarding test, the number of forwarding cores 2969 * must be lower or equal to the number of forwarding ports. 2970 */ 2971 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2972 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2973 cur_fwd_config.nb_fwd_lcores = 2974 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2975 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2976 2977 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2978 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2979 fwd_streams[i]->rx_queue = 0; 2980 fwd_streams[i]->tx_port = 2981 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2982 fwd_streams[i]->tx_queue = 0; 2983 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2984 fwd_streams[i]->retry_enabled = retry_enabled; 2985 } 2986 } 2987 2988 /** 2989 * For the RSS forwarding test all streams distributed over lcores. Each stream 2990 * being composed of a RX queue to poll on a RX port for input messages, 2991 * associated with a TX queue of a TX port where to send forwarded packets. 2992 */ 2993 static void 2994 rss_fwd_config_setup(void) 2995 { 2996 portid_t rxp; 2997 portid_t txp; 2998 queueid_t rxq; 2999 queueid_t nb_q; 3000 streamid_t sm_id; 3001 int start; 3002 int end; 3003 3004 nb_q = nb_rxq; 3005 if (nb_q > nb_txq) 3006 nb_q = nb_txq; 3007 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3008 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3009 cur_fwd_config.nb_fwd_streams = 3010 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 3011 3012 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3013 cur_fwd_config.nb_fwd_lcores = 3014 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3015 3016 /* reinitialize forwarding streams */ 3017 init_fwd_streams(); 3018 3019 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3020 3021 if (proc_id > 0 && nb_q % num_procs != 0) 3022 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 3023 3024 /** 3025 * In multi-process, All queues are allocated to different 3026 * processes based on num_procs and proc_id. For example: 3027 * if supports 4 queues(nb_q), 2 processes(num_procs), 3028 * the 0~1 queue for primary process. 3029 * the 2~3 queue for secondary process. 3030 */ 3031 start = proc_id * nb_q / num_procs; 3032 end = start + nb_q / num_procs; 3033 rxp = 0; 3034 rxq = start; 3035 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 3036 struct fwd_stream *fs; 3037 3038 fs = fwd_streams[sm_id]; 3039 txp = fwd_topology_tx_port_get(rxp); 3040 fs->rx_port = fwd_ports_ids[rxp]; 3041 fs->rx_queue = rxq; 3042 fs->tx_port = fwd_ports_ids[txp]; 3043 fs->tx_queue = rxq; 3044 fs->peer_addr = fs->tx_port; 3045 fs->retry_enabled = retry_enabled; 3046 rxp++; 3047 if (rxp < nb_fwd_ports) 3048 continue; 3049 rxp = 0; 3050 rxq++; 3051 if (rxq >= end) 3052 rxq = start; 3053 } 3054 } 3055 3056 static uint16_t 3057 get_fwd_port_total_tc_num(void) 3058 { 3059 struct rte_eth_dcb_info dcb_info; 3060 uint16_t total_tc_num = 0; 3061 unsigned int i; 3062 3063 for (i = 0; i < nb_fwd_ports; i++) { 3064 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 3065 total_tc_num += dcb_info.nb_tcs; 3066 } 3067 3068 return total_tc_num; 3069 } 3070 3071 /** 3072 * For the DCB forwarding test, each core is assigned on each traffic class. 3073 * 3074 * Each core is assigned a multi-stream, each stream being composed of 3075 * a RX queue to poll on a RX port for input messages, associated with 3076 * a TX queue of a TX port where to send forwarded packets. All RX and 3077 * TX queues are mapping to the same traffic class. 3078 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 3079 * the same core 3080 */ 3081 static void 3082 dcb_fwd_config_setup(void) 3083 { 3084 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3085 portid_t txp, rxp = 0; 3086 queueid_t txq, rxq = 0; 3087 lcoreid_t lc_id; 3088 uint16_t nb_rx_queue, nb_tx_queue; 3089 uint16_t i, j, k, sm_id = 0; 3090 uint16_t total_tc_num; 3091 struct rte_port *port; 3092 uint8_t tc = 0; 3093 portid_t pid; 3094 int ret; 3095 3096 /* 3097 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 3098 * or RTE_PORT_STOPPED. 3099 * 3100 * Re-configure ports to get updated mapping between tc and queue in 3101 * case the queue number of the port is changed. Skip for started ports 3102 * since modifying queue number and calling dev_configure need to stop 3103 * ports first. 3104 */ 3105 for (pid = 0; pid < nb_fwd_ports; pid++) { 3106 if (port_is_started(pid) == 1) 3107 continue; 3108 3109 port = &ports[pid]; 3110 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 3111 &port->dev_conf); 3112 if (ret < 0) { 3113 fprintf(stderr, 3114 "Failed to re-configure port %d, ret = %d.\n", 3115 pid, ret); 3116 return; 3117 } 3118 } 3119 3120 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3121 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3122 cur_fwd_config.nb_fwd_streams = 3123 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3124 total_tc_num = get_fwd_port_total_tc_num(); 3125 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 3126 cur_fwd_config.nb_fwd_lcores = total_tc_num; 3127 3128 /* reinitialize forwarding streams */ 3129 init_fwd_streams(); 3130 sm_id = 0; 3131 txp = 1; 3132 /* get the dcb info on the first RX and TX ports */ 3133 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3134 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3135 3136 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3137 fwd_lcores[lc_id]->stream_nb = 0; 3138 fwd_lcores[lc_id]->stream_idx = sm_id; 3139 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 3140 /* if the nb_queue is zero, means this tc is 3141 * not enabled on the POOL 3142 */ 3143 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3144 break; 3145 k = fwd_lcores[lc_id]->stream_nb + 3146 fwd_lcores[lc_id]->stream_idx; 3147 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3148 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3149 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3150 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3151 for (j = 0; j < nb_rx_queue; j++) { 3152 struct fwd_stream *fs; 3153 3154 fs = fwd_streams[k + j]; 3155 fs->rx_port = fwd_ports_ids[rxp]; 3156 fs->rx_queue = rxq + j; 3157 fs->tx_port = fwd_ports_ids[txp]; 3158 fs->tx_queue = txq + j % nb_tx_queue; 3159 fs->peer_addr = fs->tx_port; 3160 fs->retry_enabled = retry_enabled; 3161 } 3162 fwd_lcores[lc_id]->stream_nb += 3163 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3164 } 3165 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3166 3167 tc++; 3168 if (tc < rxp_dcb_info.nb_tcs) 3169 continue; 3170 /* Restart from TC 0 on next RX port */ 3171 tc = 0; 3172 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3173 rxp = (portid_t) 3174 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3175 else 3176 rxp++; 3177 if (rxp >= nb_fwd_ports) 3178 return; 3179 /* get the dcb information on next RX and TX ports */ 3180 if ((rxp & 0x1) == 0) 3181 txp = (portid_t) (rxp + 1); 3182 else 3183 txp = (portid_t) (rxp - 1); 3184 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3185 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3186 } 3187 } 3188 3189 static void 3190 icmp_echo_config_setup(void) 3191 { 3192 portid_t rxp; 3193 queueid_t rxq; 3194 lcoreid_t lc_id; 3195 uint16_t sm_id; 3196 3197 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3198 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3199 (nb_txq * nb_fwd_ports); 3200 else 3201 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3202 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3203 cur_fwd_config.nb_fwd_streams = 3204 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3205 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3206 cur_fwd_config.nb_fwd_lcores = 3207 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3208 if (verbose_level > 0) { 3209 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 3210 __FUNCTION__, 3211 cur_fwd_config.nb_fwd_lcores, 3212 cur_fwd_config.nb_fwd_ports, 3213 cur_fwd_config.nb_fwd_streams); 3214 } 3215 3216 /* reinitialize forwarding streams */ 3217 init_fwd_streams(); 3218 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3219 rxp = 0; rxq = 0; 3220 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3221 if (verbose_level > 0) 3222 printf(" core=%d: \n", lc_id); 3223 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3224 struct fwd_stream *fs; 3225 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3226 fs->rx_port = fwd_ports_ids[rxp]; 3227 fs->rx_queue = rxq; 3228 fs->tx_port = fs->rx_port; 3229 fs->tx_queue = rxq; 3230 fs->peer_addr = fs->tx_port; 3231 fs->retry_enabled = retry_enabled; 3232 if (verbose_level > 0) 3233 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3234 sm_id, fs->rx_port, fs->rx_queue, 3235 fs->tx_queue); 3236 rxq = (queueid_t) (rxq + 1); 3237 if (rxq == nb_rxq) { 3238 rxq = 0; 3239 rxp = (portid_t) (rxp + 1); 3240 } 3241 } 3242 } 3243 } 3244 3245 void 3246 fwd_config_setup(void) 3247 { 3248 struct rte_port *port; 3249 portid_t pt_id; 3250 unsigned int i; 3251 3252 cur_fwd_config.fwd_eng = cur_fwd_eng; 3253 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3254 icmp_echo_config_setup(); 3255 return; 3256 } 3257 3258 if ((nb_rxq > 1) && (nb_txq > 1)){ 3259 if (dcb_config) { 3260 for (i = 0; i < nb_fwd_ports; i++) { 3261 pt_id = fwd_ports_ids[i]; 3262 port = &ports[pt_id]; 3263 if (!port->dcb_flag) { 3264 fprintf(stderr, 3265 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 3266 return; 3267 } 3268 } 3269 if (nb_fwd_lcores == 1) { 3270 fprintf(stderr, 3271 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 3272 return; 3273 } 3274 3275 dcb_fwd_config_setup(); 3276 } else 3277 rss_fwd_config_setup(); 3278 } 3279 else 3280 simple_fwd_config_setup(); 3281 } 3282 3283 static const char * 3284 mp_alloc_to_str(uint8_t mode) 3285 { 3286 switch (mode) { 3287 case MP_ALLOC_NATIVE: 3288 return "native"; 3289 case MP_ALLOC_ANON: 3290 return "anon"; 3291 case MP_ALLOC_XMEM: 3292 return "xmem"; 3293 case MP_ALLOC_XMEM_HUGE: 3294 return "xmemhuge"; 3295 case MP_ALLOC_XBUF: 3296 return "xbuf"; 3297 default: 3298 return "invalid"; 3299 } 3300 } 3301 3302 void 3303 pkt_fwd_config_display(struct fwd_config *cfg) 3304 { 3305 struct fwd_stream *fs; 3306 lcoreid_t lc_id; 3307 streamid_t sm_id; 3308 3309 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3310 "NUMA support %s, MP allocation mode: %s\n", 3311 cfg->fwd_eng->fwd_mode_name, 3312 retry_enabled == 0 ? "" : " with retry", 3313 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3314 numa_support == 1 ? "enabled" : "disabled", 3315 mp_alloc_to_str(mp_alloc_type)); 3316 3317 if (retry_enabled) 3318 printf("TX retry num: %u, delay between TX retries: %uus\n", 3319 burst_tx_retry_num, burst_tx_delay_time); 3320 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3321 printf("Logical Core %u (socket %u) forwards packets on " 3322 "%d streams:", 3323 fwd_lcores_cpuids[lc_id], 3324 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3325 fwd_lcores[lc_id]->stream_nb); 3326 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3327 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3328 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3329 "P=%d/Q=%d (socket %u) ", 3330 fs->rx_port, fs->rx_queue, 3331 ports[fs->rx_port].socket_id, 3332 fs->tx_port, fs->tx_queue, 3333 ports[fs->tx_port].socket_id); 3334 print_ethaddr("peer=", 3335 &peer_eth_addrs[fs->peer_addr]); 3336 } 3337 printf("\n"); 3338 } 3339 printf("\n"); 3340 } 3341 3342 void 3343 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3344 { 3345 struct rte_ether_addr new_peer_addr; 3346 if (!rte_eth_dev_is_valid_port(port_id)) { 3347 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 3348 return; 3349 } 3350 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3351 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 3352 peer_addr); 3353 return; 3354 } 3355 peer_eth_addrs[port_id] = new_peer_addr; 3356 } 3357 3358 int 3359 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3360 { 3361 unsigned int i; 3362 unsigned int lcore_cpuid; 3363 int record_now; 3364 3365 record_now = 0; 3366 again: 3367 for (i = 0; i < nb_lc; i++) { 3368 lcore_cpuid = lcorelist[i]; 3369 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3370 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 3371 return -1; 3372 } 3373 if (lcore_cpuid == rte_get_main_lcore()) { 3374 fprintf(stderr, 3375 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 3376 lcore_cpuid); 3377 return -1; 3378 } 3379 if (record_now) 3380 fwd_lcores_cpuids[i] = lcore_cpuid; 3381 } 3382 if (record_now == 0) { 3383 record_now = 1; 3384 goto again; 3385 } 3386 nb_cfg_lcores = (lcoreid_t) nb_lc; 3387 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3388 printf("previous number of forwarding cores %u - changed to " 3389 "number of configured cores %u\n", 3390 (unsigned int) nb_fwd_lcores, nb_lc); 3391 nb_fwd_lcores = (lcoreid_t) nb_lc; 3392 } 3393 3394 return 0; 3395 } 3396 3397 int 3398 set_fwd_lcores_mask(uint64_t lcoremask) 3399 { 3400 unsigned int lcorelist[64]; 3401 unsigned int nb_lc; 3402 unsigned int i; 3403 3404 if (lcoremask == 0) { 3405 fprintf(stderr, "Invalid NULL mask of cores\n"); 3406 return -1; 3407 } 3408 nb_lc = 0; 3409 for (i = 0; i < 64; i++) { 3410 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3411 continue; 3412 lcorelist[nb_lc++] = i; 3413 } 3414 return set_fwd_lcores_list(lcorelist, nb_lc); 3415 } 3416 3417 void 3418 set_fwd_lcores_number(uint16_t nb_lc) 3419 { 3420 if (test_done == 0) { 3421 fprintf(stderr, "Please stop forwarding first\n"); 3422 return; 3423 } 3424 if (nb_lc > nb_cfg_lcores) { 3425 fprintf(stderr, 3426 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 3427 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3428 return; 3429 } 3430 nb_fwd_lcores = (lcoreid_t) nb_lc; 3431 printf("Number of forwarding cores set to %u\n", 3432 (unsigned int) nb_fwd_lcores); 3433 } 3434 3435 void 3436 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3437 { 3438 unsigned int i; 3439 portid_t port_id; 3440 int record_now; 3441 3442 record_now = 0; 3443 again: 3444 for (i = 0; i < nb_pt; i++) { 3445 port_id = (portid_t) portlist[i]; 3446 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3447 return; 3448 if (record_now) 3449 fwd_ports_ids[i] = port_id; 3450 } 3451 if (record_now == 0) { 3452 record_now = 1; 3453 goto again; 3454 } 3455 nb_cfg_ports = (portid_t) nb_pt; 3456 if (nb_fwd_ports != (portid_t) nb_pt) { 3457 printf("previous number of forwarding ports %u - changed to " 3458 "number of configured ports %u\n", 3459 (unsigned int) nb_fwd_ports, nb_pt); 3460 nb_fwd_ports = (portid_t) nb_pt; 3461 } 3462 } 3463 3464 /** 3465 * Parse the user input and obtain the list of forwarding ports 3466 * 3467 * @param[in] list 3468 * String containing the user input. User can specify 3469 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3470 * For example, if the user wants to use all the available 3471 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3472 * If the user wants to use only the ports 1,2 then the input 3473 * is 1,2. 3474 * valid characters are '-' and ',' 3475 * @param[out] values 3476 * This array will be filled with a list of port IDs 3477 * based on the user input 3478 * Note that duplicate entries are discarded and only the first 3479 * count entries in this array are port IDs and all the rest 3480 * will contain default values 3481 * @param[in] maxsize 3482 * This parameter denotes 2 things 3483 * 1) Number of elements in the values array 3484 * 2) Maximum value of each element in the values array 3485 * @return 3486 * On success, returns total count of parsed port IDs 3487 * On failure, returns 0 3488 */ 3489 static unsigned int 3490 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3491 { 3492 unsigned int count = 0; 3493 char *end = NULL; 3494 int min, max; 3495 int value, i; 3496 unsigned int marked[maxsize]; 3497 3498 if (list == NULL || values == NULL) 3499 return 0; 3500 3501 for (i = 0; i < (int)maxsize; i++) 3502 marked[i] = 0; 3503 3504 min = INT_MAX; 3505 3506 do { 3507 /*Remove the blank spaces if any*/ 3508 while (isblank(*list)) 3509 list++; 3510 if (*list == '\0') 3511 break; 3512 errno = 0; 3513 value = strtol(list, &end, 10); 3514 if (errno || end == NULL) 3515 return 0; 3516 if (value < 0 || value >= (int)maxsize) 3517 return 0; 3518 while (isblank(*end)) 3519 end++; 3520 if (*end == '-' && min == INT_MAX) { 3521 min = value; 3522 } else if ((*end == ',') || (*end == '\0')) { 3523 max = value; 3524 if (min == INT_MAX) 3525 min = value; 3526 for (i = min; i <= max; i++) { 3527 if (count < maxsize) { 3528 if (marked[i]) 3529 continue; 3530 values[count] = i; 3531 marked[i] = 1; 3532 count++; 3533 } 3534 } 3535 min = INT_MAX; 3536 } else 3537 return 0; 3538 list = end + 1; 3539 } while (*end != '\0'); 3540 3541 return count; 3542 } 3543 3544 void 3545 parse_fwd_portlist(const char *portlist) 3546 { 3547 unsigned int portcount; 3548 unsigned int portindex[RTE_MAX_ETHPORTS]; 3549 unsigned int i, valid_port_count = 0; 3550 3551 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3552 if (!portcount) 3553 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3554 3555 /* 3556 * Here we verify the validity of the ports 3557 * and thereby calculate the total number of 3558 * valid ports 3559 */ 3560 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3561 if (rte_eth_dev_is_valid_port(portindex[i])) { 3562 portindex[valid_port_count] = portindex[i]; 3563 valid_port_count++; 3564 } 3565 } 3566 3567 set_fwd_ports_list(portindex, valid_port_count); 3568 } 3569 3570 void 3571 set_fwd_ports_mask(uint64_t portmask) 3572 { 3573 unsigned int portlist[64]; 3574 unsigned int nb_pt; 3575 unsigned int i; 3576 3577 if (portmask == 0) { 3578 fprintf(stderr, "Invalid NULL mask of ports\n"); 3579 return; 3580 } 3581 nb_pt = 0; 3582 RTE_ETH_FOREACH_DEV(i) { 3583 if (! ((uint64_t)(1ULL << i) & portmask)) 3584 continue; 3585 portlist[nb_pt++] = i; 3586 } 3587 set_fwd_ports_list(portlist, nb_pt); 3588 } 3589 3590 void 3591 set_fwd_ports_number(uint16_t nb_pt) 3592 { 3593 if (nb_pt > nb_cfg_ports) { 3594 fprintf(stderr, 3595 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 3596 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3597 return; 3598 } 3599 nb_fwd_ports = (portid_t) nb_pt; 3600 printf("Number of forwarding ports set to %u\n", 3601 (unsigned int) nb_fwd_ports); 3602 } 3603 3604 int 3605 port_is_forwarding(portid_t port_id) 3606 { 3607 unsigned int i; 3608 3609 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3610 return -1; 3611 3612 for (i = 0; i < nb_fwd_ports; i++) { 3613 if (fwd_ports_ids[i] == port_id) 3614 return 1; 3615 } 3616 3617 return 0; 3618 } 3619 3620 void 3621 set_nb_pkt_per_burst(uint16_t nb) 3622 { 3623 if (nb > MAX_PKT_BURST) { 3624 fprintf(stderr, 3625 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 3626 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3627 return; 3628 } 3629 nb_pkt_per_burst = nb; 3630 printf("Number of packets per burst set to %u\n", 3631 (unsigned int) nb_pkt_per_burst); 3632 } 3633 3634 static const char * 3635 tx_split_get_name(enum tx_pkt_split split) 3636 { 3637 uint32_t i; 3638 3639 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3640 if (tx_split_name[i].split == split) 3641 return tx_split_name[i].name; 3642 } 3643 return NULL; 3644 } 3645 3646 void 3647 set_tx_pkt_split(const char *name) 3648 { 3649 uint32_t i; 3650 3651 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3652 if (strcmp(tx_split_name[i].name, name) == 0) { 3653 tx_pkt_split = tx_split_name[i].split; 3654 return; 3655 } 3656 } 3657 fprintf(stderr, "unknown value: \"%s\"\n", name); 3658 } 3659 3660 int 3661 parse_fec_mode(const char *name, uint32_t *fec_capa) 3662 { 3663 uint8_t i; 3664 3665 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3666 if (strcmp(fec_mode_name[i].name, name) == 0) { 3667 *fec_capa = 3668 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3669 return 0; 3670 } 3671 } 3672 return -1; 3673 } 3674 3675 void 3676 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3677 { 3678 unsigned int i, j; 3679 3680 printf("FEC capabilities:\n"); 3681 3682 for (i = 0; i < num; i++) { 3683 printf("%s : ", 3684 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3685 3686 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 3687 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3688 speed_fec_capa[i].capa) 3689 printf("%s ", fec_mode_name[j].name); 3690 } 3691 printf("\n"); 3692 } 3693 } 3694 3695 void 3696 show_rx_pkt_offsets(void) 3697 { 3698 uint32_t i, n; 3699 3700 n = rx_pkt_nb_offs; 3701 printf("Number of offsets: %u\n", n); 3702 if (n) { 3703 printf("Segment offsets: "); 3704 for (i = 0; i != n - 1; i++) 3705 printf("%hu,", rx_pkt_seg_offsets[i]); 3706 printf("%hu\n", rx_pkt_seg_lengths[i]); 3707 } 3708 } 3709 3710 void 3711 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 3712 { 3713 unsigned int i; 3714 3715 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 3716 printf("nb segments per RX packets=%u >= " 3717 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 3718 return; 3719 } 3720 3721 /* 3722 * No extra check here, the segment length will be checked by PMD 3723 * in the extended queue setup. 3724 */ 3725 for (i = 0; i < nb_offs; i++) { 3726 if (seg_offsets[i] >= UINT16_MAX) { 3727 printf("offset[%u]=%u > UINT16_MAX - give up\n", 3728 i, seg_offsets[i]); 3729 return; 3730 } 3731 } 3732 3733 for (i = 0; i < nb_offs; i++) 3734 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 3735 3736 rx_pkt_nb_offs = (uint8_t) nb_offs; 3737 } 3738 3739 void 3740 show_rx_pkt_segments(void) 3741 { 3742 uint32_t i, n; 3743 3744 n = rx_pkt_nb_segs; 3745 printf("Number of segments: %u\n", n); 3746 if (n) { 3747 printf("Segment sizes: "); 3748 for (i = 0; i != n - 1; i++) 3749 printf("%hu,", rx_pkt_seg_lengths[i]); 3750 printf("%hu\n", rx_pkt_seg_lengths[i]); 3751 } 3752 } 3753 3754 void 3755 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3756 { 3757 unsigned int i; 3758 3759 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 3760 printf("nb segments per RX packets=%u >= " 3761 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 3762 return; 3763 } 3764 3765 /* 3766 * No extra check here, the segment length will be checked by PMD 3767 * in the extended queue setup. 3768 */ 3769 for (i = 0; i < nb_segs; i++) { 3770 if (seg_lengths[i] >= UINT16_MAX) { 3771 printf("length[%u]=%u > UINT16_MAX - give up\n", 3772 i, seg_lengths[i]); 3773 return; 3774 } 3775 } 3776 3777 for (i = 0; i < nb_segs; i++) 3778 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3779 3780 rx_pkt_nb_segs = (uint8_t) nb_segs; 3781 } 3782 3783 void 3784 show_tx_pkt_segments(void) 3785 { 3786 uint32_t i, n; 3787 const char *split; 3788 3789 n = tx_pkt_nb_segs; 3790 split = tx_split_get_name(tx_pkt_split); 3791 3792 printf("Number of segments: %u\n", n); 3793 printf("Segment sizes: "); 3794 for (i = 0; i != n - 1; i++) 3795 printf("%hu,", tx_pkt_seg_lengths[i]); 3796 printf("%hu\n", tx_pkt_seg_lengths[i]); 3797 printf("Split packet: %s\n", split); 3798 } 3799 3800 static bool 3801 nb_segs_is_invalid(unsigned int nb_segs) 3802 { 3803 uint16_t ring_size; 3804 uint16_t queue_id; 3805 uint16_t port_id; 3806 int ret; 3807 3808 RTE_ETH_FOREACH_DEV(port_id) { 3809 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 3810 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 3811 if (ret) { 3812 /* Port may not be initialized yet, can't say 3813 * the port is invalid in this stage. 3814 */ 3815 continue; 3816 } 3817 if (ring_size < nb_segs) { 3818 printf("nb segments per TX packets=%u >= TX " 3819 "queue(%u) ring_size=%u - txpkts ignored\n", 3820 nb_segs, queue_id, ring_size); 3821 return true; 3822 } 3823 } 3824 } 3825 3826 return false; 3827 } 3828 3829 void 3830 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3831 { 3832 uint16_t tx_pkt_len; 3833 unsigned int i; 3834 3835 /* 3836 * For single segment settings failed check is ignored. 3837 * It is a very basic capability to send the single segment 3838 * packets, suppose it is always supported. 3839 */ 3840 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 3841 fprintf(stderr, 3842 "Tx segment size(%u) is not supported - txpkts ignored\n", 3843 nb_segs); 3844 return; 3845 } 3846 3847 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 3848 fprintf(stderr, 3849 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 3850 nb_segs, RTE_MAX_SEGS_PER_PKT); 3851 return; 3852 } 3853 3854 /* 3855 * Check that each segment length is greater or equal than 3856 * the mbuf data size. 3857 * Check also that the total packet length is greater or equal than the 3858 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 3859 * 20 + 8). 3860 */ 3861 tx_pkt_len = 0; 3862 for (i = 0; i < nb_segs; i++) { 3863 if (seg_lengths[i] > mbuf_data_size[0]) { 3864 fprintf(stderr, 3865 "length[%u]=%u > mbuf_data_size=%u - give up\n", 3866 i, seg_lengths[i], mbuf_data_size[0]); 3867 return; 3868 } 3869 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 3870 } 3871 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 3872 fprintf(stderr, "total packet length=%u < %d - give up\n", 3873 (unsigned) tx_pkt_len, 3874 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 3875 return; 3876 } 3877 3878 for (i = 0; i < nb_segs; i++) 3879 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3880 3881 tx_pkt_length = tx_pkt_len; 3882 tx_pkt_nb_segs = (uint8_t) nb_segs; 3883 } 3884 3885 void 3886 show_tx_pkt_times(void) 3887 { 3888 printf("Interburst gap: %u\n", tx_pkt_times_inter); 3889 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 3890 } 3891 3892 void 3893 set_tx_pkt_times(unsigned int *tx_times) 3894 { 3895 tx_pkt_times_inter = tx_times[0]; 3896 tx_pkt_times_intra = tx_times[1]; 3897 } 3898 3899 void 3900 setup_gro(const char *onoff, portid_t port_id) 3901 { 3902 if (!rte_eth_dev_is_valid_port(port_id)) { 3903 fprintf(stderr, "invalid port id %u\n", port_id); 3904 return; 3905 } 3906 if (test_done == 0) { 3907 fprintf(stderr, 3908 "Before enable/disable GRO, please stop forwarding first\n"); 3909 return; 3910 } 3911 if (strcmp(onoff, "on") == 0) { 3912 if (gro_ports[port_id].enable != 0) { 3913 fprintf(stderr, 3914 "Port %u has enabled GRO. Please disable GRO first\n", 3915 port_id); 3916 return; 3917 } 3918 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3919 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 3920 gro_ports[port_id].param.max_flow_num = 3921 GRO_DEFAULT_FLOW_NUM; 3922 gro_ports[port_id].param.max_item_per_flow = 3923 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 3924 } 3925 gro_ports[port_id].enable = 1; 3926 } else { 3927 if (gro_ports[port_id].enable == 0) { 3928 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 3929 return; 3930 } 3931 gro_ports[port_id].enable = 0; 3932 } 3933 } 3934 3935 void 3936 setup_gro_flush_cycles(uint8_t cycles) 3937 { 3938 if (test_done == 0) { 3939 fprintf(stderr, 3940 "Before change flush interval for GRO, please stop forwarding first.\n"); 3941 return; 3942 } 3943 3944 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 3945 GRO_DEFAULT_FLUSH_CYCLES) { 3946 fprintf(stderr, 3947 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 3948 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 3949 cycles = GRO_DEFAULT_FLUSH_CYCLES; 3950 } 3951 3952 gro_flush_cycles = cycles; 3953 } 3954 3955 void 3956 show_gro(portid_t port_id) 3957 { 3958 struct rte_gro_param *param; 3959 uint32_t max_pkts_num; 3960 3961 param = &gro_ports[port_id].param; 3962 3963 if (!rte_eth_dev_is_valid_port(port_id)) { 3964 fprintf(stderr, "Invalid port id %u.\n", port_id); 3965 return; 3966 } 3967 if (gro_ports[port_id].enable) { 3968 printf("GRO type: TCP/IPv4\n"); 3969 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3970 max_pkts_num = param->max_flow_num * 3971 param->max_item_per_flow; 3972 } else 3973 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 3974 printf("Max number of packets to perform GRO: %u\n", 3975 max_pkts_num); 3976 printf("Flushing cycles: %u\n", gro_flush_cycles); 3977 } else 3978 printf("Port %u doesn't enable GRO.\n", port_id); 3979 } 3980 3981 void 3982 setup_gso(const char *mode, portid_t port_id) 3983 { 3984 if (!rte_eth_dev_is_valid_port(port_id)) { 3985 fprintf(stderr, "invalid port id %u\n", port_id); 3986 return; 3987 } 3988 if (strcmp(mode, "on") == 0) { 3989 if (test_done == 0) { 3990 fprintf(stderr, 3991 "before enabling GSO, please stop forwarding first\n"); 3992 return; 3993 } 3994 gso_ports[port_id].enable = 1; 3995 } else if (strcmp(mode, "off") == 0) { 3996 if (test_done == 0) { 3997 fprintf(stderr, 3998 "before disabling GSO, please stop forwarding first\n"); 3999 return; 4000 } 4001 gso_ports[port_id].enable = 0; 4002 } 4003 } 4004 4005 char* 4006 list_pkt_forwarding_modes(void) 4007 { 4008 static char fwd_modes[128] = ""; 4009 const char *separator = "|"; 4010 struct fwd_engine *fwd_eng; 4011 unsigned i = 0; 4012 4013 if (strlen (fwd_modes) == 0) { 4014 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4015 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4016 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4017 strncat(fwd_modes, separator, 4018 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4019 } 4020 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4021 } 4022 4023 return fwd_modes; 4024 } 4025 4026 char* 4027 list_pkt_forwarding_retry_modes(void) 4028 { 4029 static char fwd_modes[128] = ""; 4030 const char *separator = "|"; 4031 struct fwd_engine *fwd_eng; 4032 unsigned i = 0; 4033 4034 if (strlen(fwd_modes) == 0) { 4035 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4036 if (fwd_eng == &rx_only_engine) 4037 continue; 4038 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4039 sizeof(fwd_modes) - 4040 strlen(fwd_modes) - 1); 4041 strncat(fwd_modes, separator, 4042 sizeof(fwd_modes) - 4043 strlen(fwd_modes) - 1); 4044 } 4045 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4046 } 4047 4048 return fwd_modes; 4049 } 4050 4051 void 4052 set_pkt_forwarding_mode(const char *fwd_mode_name) 4053 { 4054 struct fwd_engine *fwd_eng; 4055 unsigned i; 4056 4057 i = 0; 4058 while ((fwd_eng = fwd_engines[i]) != NULL) { 4059 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 4060 printf("Set %s packet forwarding mode%s\n", 4061 fwd_mode_name, 4062 retry_enabled == 0 ? "" : " with retry"); 4063 cur_fwd_eng = fwd_eng; 4064 return; 4065 } 4066 i++; 4067 } 4068 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 4069 } 4070 4071 void 4072 add_rx_dump_callbacks(portid_t portid) 4073 { 4074 struct rte_eth_dev_info dev_info; 4075 uint16_t queue; 4076 int ret; 4077 4078 if (port_id_is_invalid(portid, ENABLED_WARN)) 4079 return; 4080 4081 ret = eth_dev_info_get_print_err(portid, &dev_info); 4082 if (ret != 0) 4083 return; 4084 4085 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4086 if (!ports[portid].rx_dump_cb[queue]) 4087 ports[portid].rx_dump_cb[queue] = 4088 rte_eth_add_rx_callback(portid, queue, 4089 dump_rx_pkts, NULL); 4090 } 4091 4092 void 4093 add_tx_dump_callbacks(portid_t portid) 4094 { 4095 struct rte_eth_dev_info dev_info; 4096 uint16_t queue; 4097 int ret; 4098 4099 if (port_id_is_invalid(portid, ENABLED_WARN)) 4100 return; 4101 4102 ret = eth_dev_info_get_print_err(portid, &dev_info); 4103 if (ret != 0) 4104 return; 4105 4106 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4107 if (!ports[portid].tx_dump_cb[queue]) 4108 ports[portid].tx_dump_cb[queue] = 4109 rte_eth_add_tx_callback(portid, queue, 4110 dump_tx_pkts, NULL); 4111 } 4112 4113 void 4114 remove_rx_dump_callbacks(portid_t portid) 4115 { 4116 struct rte_eth_dev_info dev_info; 4117 uint16_t queue; 4118 int ret; 4119 4120 if (port_id_is_invalid(portid, ENABLED_WARN)) 4121 return; 4122 4123 ret = eth_dev_info_get_print_err(portid, &dev_info); 4124 if (ret != 0) 4125 return; 4126 4127 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4128 if (ports[portid].rx_dump_cb[queue]) { 4129 rte_eth_remove_rx_callback(portid, queue, 4130 ports[portid].rx_dump_cb[queue]); 4131 ports[portid].rx_dump_cb[queue] = NULL; 4132 } 4133 } 4134 4135 void 4136 remove_tx_dump_callbacks(portid_t portid) 4137 { 4138 struct rte_eth_dev_info dev_info; 4139 uint16_t queue; 4140 int ret; 4141 4142 if (port_id_is_invalid(portid, ENABLED_WARN)) 4143 return; 4144 4145 ret = eth_dev_info_get_print_err(portid, &dev_info); 4146 if (ret != 0) 4147 return; 4148 4149 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4150 if (ports[portid].tx_dump_cb[queue]) { 4151 rte_eth_remove_tx_callback(portid, queue, 4152 ports[portid].tx_dump_cb[queue]); 4153 ports[portid].tx_dump_cb[queue] = NULL; 4154 } 4155 } 4156 4157 void 4158 configure_rxtx_dump_callbacks(uint16_t verbose) 4159 { 4160 portid_t portid; 4161 4162 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4163 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4164 return; 4165 #endif 4166 4167 RTE_ETH_FOREACH_DEV(portid) 4168 { 4169 if (verbose == 1 || verbose > 2) 4170 add_rx_dump_callbacks(portid); 4171 else 4172 remove_rx_dump_callbacks(portid); 4173 if (verbose >= 2) 4174 add_tx_dump_callbacks(portid); 4175 else 4176 remove_tx_dump_callbacks(portid); 4177 } 4178 } 4179 4180 void 4181 set_verbose_level(uint16_t vb_level) 4182 { 4183 printf("Change verbose level from %u to %u\n", 4184 (unsigned int) verbose_level, (unsigned int) vb_level); 4185 verbose_level = vb_level; 4186 configure_rxtx_dump_callbacks(verbose_level); 4187 } 4188 4189 void 4190 vlan_extend_set(portid_t port_id, int on) 4191 { 4192 int diag; 4193 int vlan_offload; 4194 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4195 4196 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4197 return; 4198 4199 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4200 4201 if (on) { 4202 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 4203 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 4204 } else { 4205 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 4206 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 4207 } 4208 4209 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4210 if (diag < 0) { 4211 fprintf(stderr, 4212 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 4213 port_id, on, diag); 4214 return; 4215 } 4216 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4217 } 4218 4219 void 4220 rx_vlan_strip_set(portid_t port_id, int on) 4221 { 4222 int diag; 4223 int vlan_offload; 4224 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4225 4226 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4227 return; 4228 4229 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4230 4231 if (on) { 4232 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 4233 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 4234 } else { 4235 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 4236 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 4237 } 4238 4239 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4240 if (diag < 0) { 4241 fprintf(stderr, 4242 "%s(port_pi=%d, on=%d) failed diag=%d\n", 4243 __func__, port_id, on, diag); 4244 return; 4245 } 4246 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4247 } 4248 4249 void 4250 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 4251 { 4252 int diag; 4253 4254 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4255 return; 4256 4257 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 4258 if (diag < 0) 4259 fprintf(stderr, 4260 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 4261 __func__, port_id, queue_id, on, diag); 4262 } 4263 4264 void 4265 rx_vlan_filter_set(portid_t port_id, int on) 4266 { 4267 int diag; 4268 int vlan_offload; 4269 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4270 4271 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4272 return; 4273 4274 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4275 4276 if (on) { 4277 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 4278 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 4279 } else { 4280 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 4281 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 4282 } 4283 4284 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4285 if (diag < 0) { 4286 fprintf(stderr, 4287 "%s(port_pi=%d, on=%d) failed diag=%d\n", 4288 __func__, port_id, on, diag); 4289 return; 4290 } 4291 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4292 } 4293 4294 void 4295 rx_vlan_qinq_strip_set(portid_t port_id, int on) 4296 { 4297 int diag; 4298 int vlan_offload; 4299 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4300 4301 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4302 return; 4303 4304 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4305 4306 if (on) { 4307 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 4308 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 4309 } else { 4310 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 4311 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 4312 } 4313 4314 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4315 if (diag < 0) { 4316 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 4317 __func__, port_id, on, diag); 4318 return; 4319 } 4320 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4321 } 4322 4323 int 4324 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 4325 { 4326 int diag; 4327 4328 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4329 return 1; 4330 if (vlan_id_is_invalid(vlan_id)) 4331 return 1; 4332 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 4333 if (diag == 0) 4334 return 0; 4335 fprintf(stderr, 4336 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 4337 port_id, vlan_id, on, diag); 4338 return -1; 4339 } 4340 4341 void 4342 rx_vlan_all_filter_set(portid_t port_id, int on) 4343 { 4344 uint16_t vlan_id; 4345 4346 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4347 return; 4348 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 4349 if (rx_vft_set(port_id, vlan_id, on)) 4350 break; 4351 } 4352 } 4353 4354 void 4355 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 4356 { 4357 int diag; 4358 4359 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4360 return; 4361 4362 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 4363 if (diag == 0) 4364 return; 4365 4366 fprintf(stderr, 4367 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 4368 port_id, vlan_type, tp_id, diag); 4369 } 4370 4371 void 4372 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 4373 { 4374 struct rte_eth_dev_info dev_info; 4375 int ret; 4376 4377 if (vlan_id_is_invalid(vlan_id)) 4378 return; 4379 4380 if (ports[port_id].dev_conf.txmode.offloads & 4381 DEV_TX_OFFLOAD_QINQ_INSERT) { 4382 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 4383 return; 4384 } 4385 4386 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4387 if (ret != 0) 4388 return; 4389 4390 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 4391 fprintf(stderr, 4392 "Error: vlan insert is not supported by port %d\n", 4393 port_id); 4394 return; 4395 } 4396 4397 tx_vlan_reset(port_id); 4398 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 4399 ports[port_id].tx_vlan_id = vlan_id; 4400 } 4401 4402 void 4403 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 4404 { 4405 struct rte_eth_dev_info dev_info; 4406 int ret; 4407 4408 if (vlan_id_is_invalid(vlan_id)) 4409 return; 4410 if (vlan_id_is_invalid(vlan_id_outer)) 4411 return; 4412 4413 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4414 if (ret != 0) 4415 return; 4416 4417 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 4418 fprintf(stderr, 4419 "Error: qinq insert not supported by port %d\n", 4420 port_id); 4421 return; 4422 } 4423 4424 tx_vlan_reset(port_id); 4425 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 4426 DEV_TX_OFFLOAD_QINQ_INSERT); 4427 ports[port_id].tx_vlan_id = vlan_id; 4428 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 4429 } 4430 4431 void 4432 tx_vlan_reset(portid_t port_id) 4433 { 4434 ports[port_id].dev_conf.txmode.offloads &= 4435 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 4436 DEV_TX_OFFLOAD_QINQ_INSERT); 4437 ports[port_id].tx_vlan_id = 0; 4438 ports[port_id].tx_vlan_id_outer = 0; 4439 } 4440 4441 void 4442 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 4443 { 4444 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4445 return; 4446 4447 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 4448 } 4449 4450 void 4451 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 4452 { 4453 int ret; 4454 4455 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4456 return; 4457 4458 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 4459 return; 4460 4461 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 4462 fprintf(stderr, "map_value not in required range 0..%d\n", 4463 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 4464 return; 4465 } 4466 4467 if (!is_rx) { /* tx */ 4468 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 4469 map_value); 4470 if (ret) { 4471 fprintf(stderr, 4472 "failed to set tx queue stats mapping.\n"); 4473 return; 4474 } 4475 } else { /* rx */ 4476 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 4477 map_value); 4478 if (ret) { 4479 fprintf(stderr, 4480 "failed to set rx queue stats mapping.\n"); 4481 return; 4482 } 4483 } 4484 } 4485 4486 void 4487 set_xstats_hide_zero(uint8_t on_off) 4488 { 4489 xstats_hide_zero = on_off; 4490 } 4491 4492 void 4493 set_record_core_cycles(uint8_t on_off) 4494 { 4495 record_core_cycles = on_off; 4496 } 4497 4498 void 4499 set_record_burst_stats(uint8_t on_off) 4500 { 4501 record_burst_stats = on_off; 4502 } 4503 4504 static char* 4505 flowtype_to_str(uint16_t flow_type) 4506 { 4507 struct flow_type_info { 4508 char str[32]; 4509 uint16_t ftype; 4510 }; 4511 4512 uint8_t i; 4513 static struct flow_type_info flowtype_str_table[] = { 4514 {"raw", RTE_ETH_FLOW_RAW}, 4515 {"ipv4", RTE_ETH_FLOW_IPV4}, 4516 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4517 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4518 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4519 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4520 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4521 {"ipv6", RTE_ETH_FLOW_IPV6}, 4522 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4523 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4524 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4525 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4526 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4527 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4528 {"port", RTE_ETH_FLOW_PORT}, 4529 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4530 {"geneve", RTE_ETH_FLOW_GENEVE}, 4531 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4532 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4533 }; 4534 4535 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4536 if (flowtype_str_table[i].ftype == flow_type) 4537 return flowtype_str_table[i].str; 4538 } 4539 4540 return NULL; 4541 } 4542 4543 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 4544 4545 static inline void 4546 print_fdir_mask(struct rte_eth_fdir_masks *mask) 4547 { 4548 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 4549 4550 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4551 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 4552 " tunnel_id: 0x%08x", 4553 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 4554 rte_be_to_cpu_32(mask->tunnel_id_mask)); 4555 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 4556 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 4557 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 4558 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 4559 4560 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 4561 rte_be_to_cpu_16(mask->src_port_mask), 4562 rte_be_to_cpu_16(mask->dst_port_mask)); 4563 4564 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4565 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 4566 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 4567 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 4568 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 4569 4570 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4571 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 4572 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 4573 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 4574 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 4575 } 4576 4577 printf("\n"); 4578 } 4579 4580 static inline void 4581 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4582 { 4583 struct rte_eth_flex_payload_cfg *cfg; 4584 uint32_t i, j; 4585 4586 for (i = 0; i < flex_conf->nb_payloads; i++) { 4587 cfg = &flex_conf->flex_set[i]; 4588 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 4589 printf("\n RAW: "); 4590 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 4591 printf("\n L2_PAYLOAD: "); 4592 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 4593 printf("\n L3_PAYLOAD: "); 4594 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 4595 printf("\n L4_PAYLOAD: "); 4596 else 4597 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 4598 for (j = 0; j < num; j++) 4599 printf(" %-5u", cfg->src_offset[j]); 4600 } 4601 printf("\n"); 4602 } 4603 4604 static inline void 4605 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4606 { 4607 struct rte_eth_fdir_flex_mask *mask; 4608 uint32_t i, j; 4609 char *p; 4610 4611 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4612 mask = &flex_conf->flex_mask[i]; 4613 p = flowtype_to_str(mask->flow_type); 4614 printf("\n %s:\t", p ? p : "unknown"); 4615 for (j = 0; j < num; j++) 4616 printf(" %02x", mask->mask[j]); 4617 } 4618 printf("\n"); 4619 } 4620 4621 static inline void 4622 print_fdir_flow_type(uint32_t flow_types_mask) 4623 { 4624 int i; 4625 char *p; 4626 4627 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4628 if (!(flow_types_mask & (1 << i))) 4629 continue; 4630 p = flowtype_to_str(i); 4631 if (p) 4632 printf(" %s", p); 4633 else 4634 printf(" unknown"); 4635 } 4636 printf("\n"); 4637 } 4638 4639 static int 4640 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4641 struct rte_eth_fdir_stats *fdir_stat) 4642 { 4643 int ret = -ENOTSUP; 4644 4645 #ifdef RTE_NET_I40E 4646 if (ret == -ENOTSUP) { 4647 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4648 if (!ret) 4649 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4650 } 4651 #endif 4652 #ifdef RTE_NET_IXGBE 4653 if (ret == -ENOTSUP) { 4654 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4655 if (!ret) 4656 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4657 } 4658 #endif 4659 switch (ret) { 4660 case 0: 4661 break; 4662 case -ENOTSUP: 4663 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 4664 port_id); 4665 break; 4666 default: 4667 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 4668 break; 4669 } 4670 return ret; 4671 } 4672 4673 void 4674 fdir_get_infos(portid_t port_id) 4675 { 4676 struct rte_eth_fdir_stats fdir_stat; 4677 struct rte_eth_fdir_info fdir_info; 4678 4679 static const char *fdir_stats_border = "########################"; 4680 4681 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4682 return; 4683 4684 memset(&fdir_info, 0, sizeof(fdir_info)); 4685 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4686 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4687 return; 4688 4689 printf("\n %s FDIR infos for port %-2d %s\n", 4690 fdir_stats_border, port_id, fdir_stats_border); 4691 printf(" MODE: "); 4692 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4693 printf(" PERFECT\n"); 4694 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4695 printf(" PERFECT-MAC-VLAN\n"); 4696 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4697 printf(" PERFECT-TUNNEL\n"); 4698 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4699 printf(" SIGNATURE\n"); 4700 else 4701 printf(" DISABLE\n"); 4702 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4703 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 4704 printf(" SUPPORTED FLOW TYPE: "); 4705 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 4706 } 4707 printf(" FLEX PAYLOAD INFO:\n"); 4708 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 4709 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 4710 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 4711 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 4712 fdir_info.flex_payload_unit, 4713 fdir_info.max_flex_payload_segment_num, 4714 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 4715 printf(" MASK: "); 4716 print_fdir_mask(&fdir_info.mask); 4717 if (fdir_info.flex_conf.nb_payloads > 0) { 4718 printf(" FLEX PAYLOAD SRC OFFSET:"); 4719 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4720 } 4721 if (fdir_info.flex_conf.nb_flexmasks > 0) { 4722 printf(" FLEX MASK CFG:"); 4723 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4724 } 4725 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 4726 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 4727 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 4728 fdir_info.guarant_spc, fdir_info.best_spc); 4729 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 4730 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 4731 " add: %-10"PRIu64" remove: %"PRIu64"\n" 4732 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 4733 fdir_stat.collision, fdir_stat.free, 4734 fdir_stat.maxhash, fdir_stat.maxlen, 4735 fdir_stat.add, fdir_stat.remove, 4736 fdir_stat.f_add, fdir_stat.f_remove); 4737 printf(" %s############################%s\n", 4738 fdir_stats_border, fdir_stats_border); 4739 } 4740 4741 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 4742 4743 void 4744 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 4745 { 4746 struct rte_port *port; 4747 struct rte_eth_fdir_flex_conf *flex_conf; 4748 int i, idx = 0; 4749 4750 port = &ports[port_id]; 4751 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4752 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 4753 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 4754 idx = i; 4755 break; 4756 } 4757 } 4758 if (i >= RTE_ETH_FLOW_MAX) { 4759 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 4760 idx = flex_conf->nb_flexmasks; 4761 flex_conf->nb_flexmasks++; 4762 } else { 4763 fprintf(stderr, 4764 "The flex mask table is full. Can not set flex mask for flow_type(%u).", 4765 cfg->flow_type); 4766 return; 4767 } 4768 } 4769 rte_memcpy(&flex_conf->flex_mask[idx], 4770 cfg, 4771 sizeof(struct rte_eth_fdir_flex_mask)); 4772 } 4773 4774 void 4775 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 4776 { 4777 struct rte_port *port; 4778 struct rte_eth_fdir_flex_conf *flex_conf; 4779 int i, idx = 0; 4780 4781 port = &ports[port_id]; 4782 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4783 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 4784 if (cfg->type == flex_conf->flex_set[i].type) { 4785 idx = i; 4786 break; 4787 } 4788 } 4789 if (i >= RTE_ETH_PAYLOAD_MAX) { 4790 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 4791 idx = flex_conf->nb_payloads; 4792 flex_conf->nb_payloads++; 4793 } else { 4794 fprintf(stderr, 4795 "The flex payload table is full. Can not set flex payload for type(%u).", 4796 cfg->type); 4797 return; 4798 } 4799 } 4800 rte_memcpy(&flex_conf->flex_set[idx], 4801 cfg, 4802 sizeof(struct rte_eth_flex_payload_cfg)); 4803 4804 } 4805 4806 void 4807 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 4808 { 4809 #ifdef RTE_NET_IXGBE 4810 int diag; 4811 4812 if (is_rx) 4813 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 4814 else 4815 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 4816 4817 if (diag == 0) 4818 return; 4819 fprintf(stderr, 4820 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 4821 is_rx ? "rx" : "tx", port_id, diag); 4822 return; 4823 #endif 4824 fprintf(stderr, "VF %s setting not supported for port %d\n", 4825 is_rx ? "Rx" : "Tx", port_id); 4826 RTE_SET_USED(vf); 4827 RTE_SET_USED(on); 4828 } 4829 4830 int 4831 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 4832 { 4833 int diag; 4834 struct rte_eth_link link; 4835 int ret; 4836 4837 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4838 return 1; 4839 ret = eth_link_get_nowait_print_err(port_id, &link); 4840 if (ret < 0) 4841 return 1; 4842 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN && 4843 rate > link.link_speed) { 4844 fprintf(stderr, 4845 "Invalid rate value:%u bigger than link speed: %u\n", 4846 rate, link.link_speed); 4847 return 1; 4848 } 4849 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 4850 if (diag == 0) 4851 return diag; 4852 fprintf(stderr, 4853 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 4854 port_id, diag); 4855 return diag; 4856 } 4857 4858 int 4859 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 4860 { 4861 int diag = -ENOTSUP; 4862 4863 RTE_SET_USED(vf); 4864 RTE_SET_USED(rate); 4865 RTE_SET_USED(q_msk); 4866 4867 #ifdef RTE_NET_IXGBE 4868 if (diag == -ENOTSUP) 4869 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 4870 q_msk); 4871 #endif 4872 #ifdef RTE_NET_BNXT 4873 if (diag == -ENOTSUP) 4874 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 4875 #endif 4876 if (diag == 0) 4877 return diag; 4878 4879 fprintf(stderr, 4880 "%s for port_id=%d failed diag=%d\n", 4881 __func__, port_id, diag); 4882 return diag; 4883 } 4884 4885 /* 4886 * Functions to manage the set of filtered Multicast MAC addresses. 4887 * 4888 * A pool of filtered multicast MAC addresses is associated with each port. 4889 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 4890 * The address of the pool and the number of valid multicast MAC addresses 4891 * recorded in the pool are stored in the fields "mc_addr_pool" and 4892 * "mc_addr_nb" of the "rte_port" data structure. 4893 * 4894 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 4895 * to be supplied a contiguous array of multicast MAC addresses. 4896 * To comply with this constraint, the set of multicast addresses recorded 4897 * into the pool are systematically compacted at the beginning of the pool. 4898 * Hence, when a multicast address is removed from the pool, all following 4899 * addresses, if any, are copied back to keep the set contiguous. 4900 */ 4901 #define MCAST_POOL_INC 32 4902 4903 static int 4904 mcast_addr_pool_extend(struct rte_port *port) 4905 { 4906 struct rte_ether_addr *mc_pool; 4907 size_t mc_pool_size; 4908 4909 /* 4910 * If a free entry is available at the end of the pool, just 4911 * increment the number of recorded multicast addresses. 4912 */ 4913 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 4914 port->mc_addr_nb++; 4915 return 0; 4916 } 4917 4918 /* 4919 * [re]allocate a pool with MCAST_POOL_INC more entries. 4920 * The previous test guarantees that port->mc_addr_nb is a multiple 4921 * of MCAST_POOL_INC. 4922 */ 4923 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 4924 MCAST_POOL_INC); 4925 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 4926 mc_pool_size); 4927 if (mc_pool == NULL) { 4928 fprintf(stderr, 4929 "allocation of pool of %u multicast addresses failed\n", 4930 port->mc_addr_nb + MCAST_POOL_INC); 4931 return -ENOMEM; 4932 } 4933 4934 port->mc_addr_pool = mc_pool; 4935 port->mc_addr_nb++; 4936 return 0; 4937 4938 } 4939 4940 static void 4941 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 4942 { 4943 if (mcast_addr_pool_extend(port) != 0) 4944 return; 4945 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 4946 } 4947 4948 static void 4949 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 4950 { 4951 port->mc_addr_nb--; 4952 if (addr_idx == port->mc_addr_nb) { 4953 /* No need to recompact the set of multicast addressses. */ 4954 if (port->mc_addr_nb == 0) { 4955 /* free the pool of multicast addresses. */ 4956 free(port->mc_addr_pool); 4957 port->mc_addr_pool = NULL; 4958 } 4959 return; 4960 } 4961 memmove(&port->mc_addr_pool[addr_idx], 4962 &port->mc_addr_pool[addr_idx + 1], 4963 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 4964 } 4965 4966 static int 4967 eth_port_multicast_addr_list_set(portid_t port_id) 4968 { 4969 struct rte_port *port; 4970 int diag; 4971 4972 port = &ports[port_id]; 4973 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 4974 port->mc_addr_nb); 4975 if (diag < 0) 4976 fprintf(stderr, 4977 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 4978 port_id, port->mc_addr_nb, diag); 4979 4980 return diag; 4981 } 4982 4983 void 4984 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 4985 { 4986 struct rte_port *port; 4987 uint32_t i; 4988 4989 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4990 return; 4991 4992 port = &ports[port_id]; 4993 4994 /* 4995 * Check that the added multicast MAC address is not already recorded 4996 * in the pool of multicast addresses. 4997 */ 4998 for (i = 0; i < port->mc_addr_nb; i++) { 4999 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 5000 fprintf(stderr, 5001 "multicast address already filtered by port\n"); 5002 return; 5003 } 5004 } 5005 5006 mcast_addr_pool_append(port, mc_addr); 5007 if (eth_port_multicast_addr_list_set(port_id) < 0) 5008 /* Rollback on failure, remove the address from the pool */ 5009 mcast_addr_pool_remove(port, i); 5010 } 5011 5012 void 5013 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 5014 { 5015 struct rte_port *port; 5016 uint32_t i; 5017 5018 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5019 return; 5020 5021 port = &ports[port_id]; 5022 5023 /* 5024 * Search the pool of multicast MAC addresses for the removed address. 5025 */ 5026 for (i = 0; i < port->mc_addr_nb; i++) { 5027 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 5028 break; 5029 } 5030 if (i == port->mc_addr_nb) { 5031 fprintf(stderr, "multicast address not filtered by port %d\n", 5032 port_id); 5033 return; 5034 } 5035 5036 mcast_addr_pool_remove(port, i); 5037 if (eth_port_multicast_addr_list_set(port_id) < 0) 5038 /* Rollback on failure, add the address back into the pool */ 5039 mcast_addr_pool_append(port, mc_addr); 5040 } 5041 5042 void 5043 port_dcb_info_display(portid_t port_id) 5044 { 5045 struct rte_eth_dcb_info dcb_info; 5046 uint16_t i; 5047 int ret; 5048 static const char *border = "================"; 5049 5050 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5051 return; 5052 5053 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 5054 if (ret) { 5055 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 5056 port_id); 5057 return; 5058 } 5059 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 5060 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 5061 printf("\n TC : "); 5062 for (i = 0; i < dcb_info.nb_tcs; i++) 5063 printf("\t%4d", i); 5064 printf("\n Priority : "); 5065 for (i = 0; i < dcb_info.nb_tcs; i++) 5066 printf("\t%4d", dcb_info.prio_tc[i]); 5067 printf("\n BW percent :"); 5068 for (i = 0; i < dcb_info.nb_tcs; i++) 5069 printf("\t%4d%%", dcb_info.tc_bws[i]); 5070 printf("\n RXQ base : "); 5071 for (i = 0; i < dcb_info.nb_tcs; i++) 5072 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 5073 printf("\n RXQ number :"); 5074 for (i = 0; i < dcb_info.nb_tcs; i++) 5075 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 5076 printf("\n TXQ base : "); 5077 for (i = 0; i < dcb_info.nb_tcs; i++) 5078 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 5079 printf("\n TXQ number :"); 5080 for (i = 0; i < dcb_info.nb_tcs; i++) 5081 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 5082 printf("\n"); 5083 } 5084 5085 uint8_t * 5086 open_file(const char *file_path, uint32_t *size) 5087 { 5088 int fd = open(file_path, O_RDONLY); 5089 off_t pkg_size; 5090 uint8_t *buf = NULL; 5091 int ret = 0; 5092 struct stat st_buf; 5093 5094 if (size) 5095 *size = 0; 5096 5097 if (fd == -1) { 5098 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5099 return buf; 5100 } 5101 5102 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 5103 close(fd); 5104 fprintf(stderr, "%s: File operations failed\n", __func__); 5105 return buf; 5106 } 5107 5108 pkg_size = st_buf.st_size; 5109 if (pkg_size < 0) { 5110 close(fd); 5111 fprintf(stderr, "%s: File operations failed\n", __func__); 5112 return buf; 5113 } 5114 5115 buf = (uint8_t *)malloc(pkg_size); 5116 if (!buf) { 5117 close(fd); 5118 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 5119 return buf; 5120 } 5121 5122 ret = read(fd, buf, pkg_size); 5123 if (ret < 0) { 5124 close(fd); 5125 fprintf(stderr, "%s: File read operation failed\n", __func__); 5126 close_file(buf); 5127 return NULL; 5128 } 5129 5130 if (size) 5131 *size = pkg_size; 5132 5133 close(fd); 5134 5135 return buf; 5136 } 5137 5138 int 5139 save_file(const char *file_path, uint8_t *buf, uint32_t size) 5140 { 5141 FILE *fh = fopen(file_path, "wb"); 5142 5143 if (fh == NULL) { 5144 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5145 return -1; 5146 } 5147 5148 if (fwrite(buf, 1, size, fh) != size) { 5149 fclose(fh); 5150 fprintf(stderr, "%s: File write operation failed\n", __func__); 5151 return -1; 5152 } 5153 5154 fclose(fh); 5155 5156 return 0; 5157 } 5158 5159 int 5160 close_file(uint8_t *buf) 5161 { 5162 if (buf) { 5163 free((void *)buf); 5164 return 0; 5165 } 5166 5167 return -1; 5168 } 5169 5170 void 5171 port_queue_region_info_display(portid_t port_id, void *buf) 5172 { 5173 #ifdef RTE_NET_I40E 5174 uint16_t i, j; 5175 struct rte_pmd_i40e_queue_regions *info = 5176 (struct rte_pmd_i40e_queue_regions *)buf; 5177 static const char *queue_region_info_stats_border = "-------"; 5178 5179 if (!info->queue_region_number) 5180 printf("there is no region has been set before"); 5181 5182 printf("\n %s All queue region info for port=%2d %s", 5183 queue_region_info_stats_border, port_id, 5184 queue_region_info_stats_border); 5185 printf("\n queue_region_number: %-14u \n", 5186 info->queue_region_number); 5187 5188 for (i = 0; i < info->queue_region_number; i++) { 5189 printf("\n region_id: %-14u queue_number: %-14u " 5190 "queue_start_index: %-14u \n", 5191 info->region[i].region_id, 5192 info->region[i].queue_num, 5193 info->region[i].queue_start_index); 5194 5195 printf(" user_priority_num is %-14u :", 5196 info->region[i].user_priority_num); 5197 for (j = 0; j < info->region[i].user_priority_num; j++) 5198 printf(" %-14u ", info->region[i].user_priority[j]); 5199 5200 printf("\n flowtype_num is %-14u :", 5201 info->region[i].flowtype_num); 5202 for (j = 0; j < info->region[i].flowtype_num; j++) 5203 printf(" %-14u ", info->region[i].hw_flowtype[j]); 5204 } 5205 #else 5206 RTE_SET_USED(port_id); 5207 RTE_SET_USED(buf); 5208 #endif 5209 5210 printf("\n\n"); 5211 } 5212 5213 void 5214 show_macs(portid_t port_id) 5215 { 5216 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5217 struct rte_eth_dev_info dev_info; 5218 struct rte_ether_addr *addr; 5219 uint32_t i, num_macs = 0; 5220 struct rte_eth_dev *dev; 5221 5222 dev = &rte_eth_devices[port_id]; 5223 5224 if (eth_dev_info_get_print_err(port_id, &dev_info)) 5225 return; 5226 5227 for (i = 0; i < dev_info.max_mac_addrs; i++) { 5228 addr = &dev->data->mac_addrs[i]; 5229 5230 /* skip zero address */ 5231 if (rte_is_zero_ether_addr(addr)) 5232 continue; 5233 5234 num_macs++; 5235 } 5236 5237 printf("Number of MAC address added: %d\n", num_macs); 5238 5239 for (i = 0; i < dev_info.max_mac_addrs; i++) { 5240 addr = &dev->data->mac_addrs[i]; 5241 5242 /* skip zero address */ 5243 if (rte_is_zero_ether_addr(addr)) 5244 continue; 5245 5246 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5247 printf(" %s\n", buf); 5248 } 5249 } 5250 5251 void 5252 show_mcast_macs(portid_t port_id) 5253 { 5254 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5255 struct rte_ether_addr *addr; 5256 struct rte_port *port; 5257 uint32_t i; 5258 5259 port = &ports[port_id]; 5260 5261 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 5262 5263 for (i = 0; i < port->mc_addr_nb; i++) { 5264 addr = &port->mc_addr_pool[i]; 5265 5266 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5267 printf(" %s\n", buf); 5268 } 5269 } 5270