/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_mtr.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#ifdef RTE_LIB_GRO
#include <rte_gro.h>
#endif
#include <rte_hexdump.h>

#include "testpmd.h"
#include "cmdline_mtr.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2},
	{ "none", 0 },
	{ "eth", RTE_ETH_RSS_ETH },
	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
	{ "vlan", RTE_ETH_RSS_VLAN },
	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
	{ "ipv4", RTE_ETH_RSS_IPV4 },
	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", RTE_ETH_RSS_IPV6 },
	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
	{ "port", RTE_ETH_RSS_PORT },
	{ "vxlan", RTE_ETH_RSS_VXLAN },
	{ "geneve", RTE_ETH_RSS_GENEVE },
	{ "nvgre", RTE_ETH_RSS_NVGRE },
	{ "ip", RTE_ETH_RSS_IP },
	{ "udp", RTE_ETH_RSS_UDP },
	{ "tcp", RTE_ETH_RSS_TCP },
	{ "sctp", RTE_ETH_RSS_SCTP },
	{ "tunnel", RTE_ETH_RSS_TUNNEL },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
	{ "esp", RTE_ETH_RSS_ESP },
	{ "ah", RTE_ETH_RSS_AH },
	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
	{ "pfcp", RTE_ETH_RSS_PFCP },
	{ "pppoe", RTE_ETH_RSS_PPPOE },
	{ "gtpu", RTE_ETH_RSS_GTPU },
	{ "ecpri", RTE_ETH_RSS_ECPRI },
	{ "mpls", RTE_ETH_RSS_MPLS },
	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
	{ "l2tpv2", RTE_ETH_RSS_L2TPV2 },
	{ NULL, 0 },
};
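/* Human-readable names for the Forward Error Correction modes. */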
static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{
		.mode = RTE_ETH_FEC_NOFEC,
		.name = "off",
	},
	{
		.mode = RTE_ETH_FEC_AUTO,
		.name = "auto",
	},
	{
		.mode = RTE_ETH_FEC_BASER,
		.name = "baser",
	},
	{
		.mode = RTE_ETH_FEC_RS,
		.name = "rs",
	},
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

static void
nic_xstats_display_periodic(portid_t port_id)
{
	struct xstat_display_info *xstats_info;
	uint64_t *prev_values, *curr_values;
	uint64_t diff_value, value_rate;
	struct timespec cur_time;
	uint64_t *ids_supp;
	size_t ids_supp_sz;
	uint64_t diff_ns;
	unsigned int i;
	int rc;

	xstats_info = &ports[port_id].xstats_info;

	ids_supp_sz = xstats_info->ids_supp_sz;
	if (ids_supp_sz == 0)
		return;

	printf("\n");

	ids_supp = xstats_info->ids_supp;
	prev_values = xstats_info->prev_values;
	curr_values = xstats_info->curr_values;

	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
				      ids_supp_sz);
	if (rc != (int)ids_supp_sz) {
		fprintf(stderr,
			"Failed to get values of %zu xstats for port %u - return code %d\n",
			ids_supp_sz, port_id, rc);
		return;
	}

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (xstats_info->prev_ns != 0)
			diff_ns = ns - xstats_info->prev_ns;
		xstats_info->prev_ns = ns;
	}

	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
	for (i = 0; i < ids_supp_sz; i++) {
		diff_value = (curr_values[i] > prev_values[i]) ?
			     (curr_values[i] - prev_values[i]) : 0;
		prev_values[i] = curr_values[i];
		value_rate = diff_ns > 0 ?
				(double)diff_value / diff_ns * NS_PER_SEC : 0;

		printf("  %-25s%12"PRIu64" %15"PRIu64"\n",
		       xstats_display[i].name, curr_values[i], value_rate);
	}
}
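/*
 * Show basic port statistics plus Rx/Tx packet and byte rates computed
 * from the deltas since the previous call, using a monotonic clock.
 */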
void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
								diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf("  RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
	printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n  Throughput (since last show)\n");
	printf("  Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n  Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	if (xstats_display_num > 0)
		nic_xstats_display_periodic(port_id);

	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset stats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr,
			"%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		fprintf(stderr, "Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		fprintf(stderr, "Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		fprintf(stderr, "Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset xstats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr, "%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
}
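/* Map an RTE_ETH_QUEUE_STATE_* value to a short human-readable name. */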
static const char *
get_queue_state_name(uint8_t queue_state)
{
	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
		return "stopped";
	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
		return "started";
	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return "hairpin";
	else
		return "unknown";
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
	if (qinfo.rx_buf_size != 0)
		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}
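/*
 * Comparison callback for rte_bus_find(): a return value of 0 means
 * "match", so this makes the scan below visit every registered bus.
 */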
527 " (per queue)" : ""); 528 529 printf("\n"); 530 } 531 532 static int bus_match_all(const struct rte_bus *bus, const void *data) 533 { 534 RTE_SET_USED(bus); 535 RTE_SET_USED(data); 536 return 0; 537 } 538 539 static void 540 device_infos_display_speeds(uint32_t speed_capa) 541 { 542 printf("\n\tDevice speed capability:"); 543 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 544 printf(" Autonegotiate (all speeds)"); 545 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 546 printf(" Disable autonegotiate (fixed speed) "); 547 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 548 printf(" 10 Mbps half-duplex "); 549 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 550 printf(" 10 Mbps full-duplex "); 551 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 552 printf(" 100 Mbps half-duplex "); 553 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 554 printf(" 100 Mbps full-duplex "); 555 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 556 printf(" 1 Gbps "); 557 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 558 printf(" 2.5 Gbps "); 559 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 560 printf(" 5 Gbps "); 561 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 562 printf(" 10 Gbps "); 563 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 564 printf(" 20 Gbps "); 565 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 566 printf(" 25 Gbps "); 567 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 568 printf(" 40 Gbps "); 569 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 570 printf(" 50 Gbps "); 571 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 572 printf(" 56 Gbps "); 573 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 574 printf(" 100 Gbps "); 575 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 576 printf(" 200 Gbps "); 577 } 578 579 void 580 device_infos_display(const char *identifier) 581 { 582 static const char *info_border = "*********************"; 583 struct rte_bus *start = NULL, *next; 584 struct rte_dev_iterator dev_iter; 585 char name[RTE_ETH_NAME_MAX_LEN]; 586 struct rte_ether_addr mac_addr; 587 struct rte_device *dev; 588 struct rte_devargs da; 589 portid_t port_id; 590 struct rte_eth_dev_info dev_info; 591 char devstr[128]; 592 593 memset(&da, 0, sizeof(da)); 594 if (!identifier) 595 goto skip_parse; 596 597 if (rte_devargs_parsef(&da, "%s", identifier)) { 598 fprintf(stderr, "cannot parse identifier\n"); 599 return; 600 } 601 602 skip_parse: 603 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 604 605 start = next; 606 if (identifier && da.bus != next) 607 continue; 608 609 /* Skip buses that don't have iterate method */ 610 if (!next->dev_iterate) 611 continue; 612 613 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 614 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 615 616 if (!dev->driver) 617 continue; 618 /* Check for matching device if identifier is present */ 619 if (identifier && 620 strncmp(da.name, dev->name, strlen(dev->name))) 621 continue; 622 printf("\n%s Infos for device %s %s\n", 623 info_border, dev->name, info_border); 624 printf("Bus name: %s", dev->bus->name); 625 printf("\nDriver name: %s", dev->driver->name); 626 printf("\nDevargs: %s", 627 dev->devargs ? 
dev->devargs->args : ""); 628 printf("\nConnect to socket: %d", dev->numa_node); 629 printf("\n"); 630 631 /* List ports with matching device name */ 632 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 633 printf("\n\tPort id: %-2d", port_id); 634 if (eth_macaddr_get_print_err(port_id, 635 &mac_addr) == 0) 636 print_ethaddr("\n\tMAC address: ", 637 &mac_addr); 638 rte_eth_dev_get_name_by_port(port_id, name); 639 printf("\n\tDevice name: %s", name); 640 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 641 device_infos_display_speeds(dev_info.speed_capa); 642 printf("\n"); 643 } 644 } 645 }; 646 rte_devargs_reset(&da); 647 } 648 649 static void 650 print_dev_capabilities(uint64_t capabilities) 651 { 652 uint64_t single_capa; 653 int begin; 654 int end; 655 int bit; 656 657 if (capabilities == 0) 658 return; 659 660 begin = __builtin_ctzll(capabilities); 661 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 662 663 single_capa = 1ULL << begin; 664 for (bit = begin; bit < end; bit++) { 665 if (capabilities & single_capa) 666 printf(" %s", 667 rte_eth_dev_capability_name(single_capa)); 668 single_capa <<= 1; 669 } 670 } 671 672 void 673 port_infos_display(portid_t port_id) 674 { 675 struct rte_port *port; 676 struct rte_ether_addr mac_addr; 677 struct rte_eth_link link; 678 struct rte_eth_dev_info dev_info; 679 int vlan_offload; 680 struct rte_mempool * mp; 681 static const char *info_border = "*********************"; 682 uint16_t mtu; 683 char name[RTE_ETH_NAME_MAX_LEN]; 684 int ret; 685 char fw_version[ETHDEV_FWVERS_LEN]; 686 687 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 688 print_valid_ports(); 689 return; 690 } 691 port = &ports[port_id]; 692 ret = eth_link_get_nowait_print_err(port_id, &link); 693 if (ret < 0) 694 return; 695 696 ret = eth_dev_info_get_print_err(port_id, &dev_info); 697 if (ret != 0) 698 return; 699 700 printf("\n%s Infos for port %-2d %s\n", 701 info_border, port_id, info_border); 702 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 703 print_ethaddr("MAC address: ", &mac_addr); 704 rte_eth_dev_get_name_by_port(port_id, name); 705 printf("\nDevice name: %s", name); 706 printf("\nDriver name: %s", dev_info.driver_name); 707 708 if (rte_eth_dev_fw_version_get(port_id, fw_version, 709 ETHDEV_FWVERS_LEN) == 0) 710 printf("\nFirmware-version: %s", fw_version); 711 else 712 printf("\nFirmware-version: %s", "not available"); 713 714 if (dev_info.device->devargs && dev_info.device->devargs->args) 715 printf("\nDevargs: %s", dev_info.device->devargs->args); 716 printf("\nConnect to socket: %u", port->socket_id); 717 718 if (port_numa[port_id] != NUMA_NO_CONFIG) { 719 mp = mbuf_pool_find(port_numa[port_id], 0); 720 if (mp) 721 printf("\nmemory allocation on the socket: %d", 722 port_numa[port_id]); 723 } else 724 printf("\nmemory allocation on the socket: %u",port->socket_id); 725 726 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 727 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 728 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 729 ("full-duplex") : ("half-duplex")); 730 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 731 ("On") : ("Off")); 732 733 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 734 printf("MTU: %u\n", mtu); 735 736 printf("Promiscuous mode: %s\n", 737 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 738 printf("Allmulticast mode: %s\n", 739 rte_eth_allmulticast_get(port_id) ? 
"enabled" : "disabled"); 740 printf("Maximum number of MAC addresses: %u\n", 741 (unsigned int)(port->dev_info.max_mac_addrs)); 742 printf("Maximum number of MAC addresses of hash filtering: %u\n", 743 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 744 745 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 746 if (vlan_offload >= 0){ 747 printf("VLAN offload: \n"); 748 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 749 printf(" strip on, "); 750 else 751 printf(" strip off, "); 752 753 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 754 printf("filter on, "); 755 else 756 printf("filter off, "); 757 758 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 759 printf("extend on, "); 760 else 761 printf("extend off, "); 762 763 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 764 printf("qinq strip on\n"); 765 else 766 printf("qinq strip off\n"); 767 } 768 769 if (dev_info.hash_key_size > 0) 770 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 771 if (dev_info.reta_size > 0) 772 printf("Redirection table size: %u\n", dev_info.reta_size); 773 if (!dev_info.flow_type_rss_offloads) 774 printf("No RSS offload flow type is supported.\n"); 775 else { 776 uint16_t i; 777 char *p; 778 779 printf("Supported RSS offload flow types:\n"); 780 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 781 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 782 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 783 continue; 784 p = flowtype_to_str(i); 785 if (p) 786 printf(" %s\n", p); 787 else 788 printf(" user defined %d\n", i); 789 } 790 } 791 792 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 793 printf("Maximum configurable length of RX packet: %u\n", 794 dev_info.max_rx_pktlen); 795 printf("Maximum configurable size of LRO aggregated packet: %u\n", 796 dev_info.max_lro_pkt_size); 797 if (dev_info.max_vfs) 798 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 799 if (dev_info.max_vmdq_pools) 800 printf("Maximum number of VMDq pools: %u\n", 801 dev_info.max_vmdq_pools); 802 803 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 804 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 805 printf("Max possible number of RXDs per queue: %hu\n", 806 dev_info.rx_desc_lim.nb_max); 807 printf("Min possible number of RXDs per queue: %hu\n", 808 dev_info.rx_desc_lim.nb_min); 809 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 810 811 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 812 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 813 printf("Max possible number of TXDs per queue: %hu\n", 814 dev_info.tx_desc_lim.nb_max); 815 printf("Min possible number of TXDs per queue: %hu\n", 816 dev_info.tx_desc_lim.nb_min); 817 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 818 printf("Max segment number per packet: %hu\n", 819 dev_info.tx_desc_lim.nb_seg_max); 820 printf("Max segment number per MTU/TSO: %hu\n", 821 dev_info.tx_desc_lim.nb_mtu_seg_max); 822 823 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 824 print_dev_capabilities(dev_info.dev_capa); 825 printf(" )\n"); 826 /* Show switch info only if valid switch domain and port id is set */ 827 if (dev_info.switch_info.domain_id != 828 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 829 if (dev_info.switch_info.name) 830 printf("Switch name: %s\n", dev_info.switch_info.name); 831 832 printf("Switch domain Id: %u\n", 833 dev_info.switch_info.domain_id); 834 printf("Switch Port Id: %u\n", 835 
void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
	       "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n",
	       port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name,
	       dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
	       rte_eth_link_speed_to_str(link.link_speed));
}

void
port_eeprom_display(portid_t port_id)
{
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
	if (len_eeprom < 0) {
		switch (len_eeprom) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n",
				len_eeprom);
			break;
		}
		return;
	}

	einfo.offset = 0;
	einfo.length = len_eeprom;
	einfo.data = calloc(1, len_eeprom);
	if (!einfo.data) {
		fprintf(stderr,
			"Allocation of port %u eeprom data failed\n",
			port_id);
		return;
	}

	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
			break;
		}
		free(einfo.data);
		return;
	}
	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
	free(einfo.data);
}

void
port_module_eeprom_display(portid_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
fprintf(stderr, "Unable to get module EEPROM: %d\n", 977 ret); 978 break; 979 } 980 return; 981 } 982 983 einfo.offset = 0; 984 einfo.length = minfo.eeprom_len; 985 einfo.data = calloc(1, minfo.eeprom_len); 986 if (!einfo.data) { 987 fprintf(stderr, 988 "Allocation of port %u eeprom data failed\n", 989 port_id); 990 return; 991 } 992 993 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 994 if (ret != 0) { 995 switch (ret) { 996 case -ENODEV: 997 fprintf(stderr, "port index %d invalid\n", port_id); 998 break; 999 case -ENOTSUP: 1000 fprintf(stderr, "operation not supported by device\n"); 1001 break; 1002 case -EIO: 1003 fprintf(stderr, "device is removed\n"); 1004 break; 1005 default: 1006 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1007 ret); 1008 break; 1009 } 1010 free(einfo.data); 1011 return; 1012 } 1013 1014 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1015 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1016 free(einfo.data); 1017 } 1018 1019 int 1020 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1021 { 1022 uint16_t pid; 1023 1024 if (port_id == (portid_t)RTE_PORT_ALL) 1025 return 0; 1026 1027 RTE_ETH_FOREACH_DEV(pid) 1028 if (port_id == pid) 1029 return 0; 1030 1031 if (warning == ENABLED_WARN) 1032 fprintf(stderr, "Invalid port %d\n", port_id); 1033 1034 return 1; 1035 } 1036 1037 void print_valid_ports(void) 1038 { 1039 portid_t pid; 1040 1041 printf("The valid ports array is ["); 1042 RTE_ETH_FOREACH_DEV(pid) { 1043 printf(" %d", pid); 1044 } 1045 printf(" ]\n"); 1046 } 1047 1048 static int 1049 vlan_id_is_invalid(uint16_t vlan_id) 1050 { 1051 if (vlan_id < 4096) 1052 return 0; 1053 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1054 return 1; 1055 } 1056 1057 static int 1058 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1059 { 1060 const struct rte_pci_device *pci_dev; 1061 const struct rte_bus *bus; 1062 uint64_t pci_len; 1063 1064 if (reg_off & 0x3) { 1065 fprintf(stderr, 1066 "Port register offset 0x%X not aligned on a 4-byte boundary\n", 1067 (unsigned int)reg_off); 1068 return 1; 1069 } 1070 1071 if (!ports[port_id].dev_info.device) { 1072 fprintf(stderr, "Invalid device\n"); 1073 return 0; 1074 } 1075 1076 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1077 if (bus && !strcmp(bus->name, "pci")) { 1078 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1079 } else { 1080 fprintf(stderr, "Not a PCI device\n"); 1081 return 1; 1082 } 1083 1084 pci_len = pci_dev->mem_resource[0].len; 1085 if (reg_off >= pci_len) { 1086 fprintf(stderr, 1087 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", 1088 port_id, (unsigned int)reg_off, (unsigned int)reg_off, 1089 pci_len); 1090 return 1; 1091 } 1092 return 0; 1093 } 1094 1095 static int 1096 reg_bit_pos_is_invalid(uint8_t bit_pos) 1097 { 1098 if (bit_pos <= 31) 1099 return 0; 1100 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); 1101 return 1; 1102 } 1103 1104 #define display_port_and_reg_off(port_id, reg_off) \ 1105 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1106 1107 static inline void 1108 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1109 { 1110 display_port_and_reg_off(port_id, (unsigned)reg_off); 1111 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1112 } 1113 1114 void 1115 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1116 { 1117 uint32_t reg_v; 
void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int)((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n",
			(int)bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n",
			(unsigned)value, (unsigned)value,
			(unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	struct rte_port *port = &ports[port_id];
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (port->need_reconfig == 0) {
		diag = rte_eth_dev_set_mtu(port_id, mtu);
		if (diag != 0) {
			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
			return;
		}
	}

	port->dev_conf.rxmode.mtu = mtu;
}

/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type;
	switch (tunnel->type) {
	default:
		type = "unknown";
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		type = "gre";
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		type = "nvgre";
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		type = "geneve";
		break;
	}

	return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
		       port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
		       port_id, tunnel_id);
	}
}

void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else if (!strcmp(ops->type, "gre"))
		type = RTE_FLOW_ITEM_TYPE_GRE;
	else if (!strcmp(ops->type, "nvgre"))
		type = RTE_FLOW_ITEM_TYPE_NVGRE;
	else if (!strcmp(ops->type, "geneve"))
		type = RTE_FLOW_ITEM_TYPE_GENEVE;
	else {
		fprintf(stderr,
"cannot offload \"%s\" tunnel type\n", 1376 ops->type); 1377 return; 1378 } 1379 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1380 if (flt->tunnel.type == type) 1381 break; 1382 } 1383 if (!flt) { 1384 flt = calloc(1, sizeof(*flt)); 1385 if (!flt) { 1386 fprintf(stderr, "failed to allocate port flt object\n"); 1387 return; 1388 } 1389 flt->tunnel.type = type; 1390 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1391 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1392 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1393 } 1394 printf("port %d: flow tunnel #%u type %s\n", 1395 port_id, flt->id, ops->type); 1396 } 1397 1398 /** Generate a port_flow entry from attributes/pattern/actions. */ 1399 static struct port_flow * 1400 port_flow_new(const struct rte_flow_attr *attr, 1401 const struct rte_flow_item *pattern, 1402 const struct rte_flow_action *actions, 1403 struct rte_flow_error *error) 1404 { 1405 const struct rte_flow_conv_rule rule = { 1406 .attr_ro = attr, 1407 .pattern_ro = pattern, 1408 .actions_ro = actions, 1409 }; 1410 struct port_flow *pf; 1411 int ret; 1412 1413 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1414 if (ret < 0) 1415 return NULL; 1416 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1417 if (!pf) { 1418 rte_flow_error_set 1419 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1420 "calloc() failed"); 1421 return NULL; 1422 } 1423 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1424 error) >= 0) 1425 return pf; 1426 free(pf); 1427 return NULL; 1428 } 1429 1430 /** Print a message out of a flow error. */ 1431 static int 1432 port_flow_complain(struct rte_flow_error *error) 1433 { 1434 static const char *const errstrlist[] = { 1435 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1436 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1437 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1438 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1439 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1440 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1441 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1442 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1443 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1444 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1445 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1446 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1447 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1448 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1449 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1450 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1451 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1452 }; 1453 const char *errstr; 1454 char buf[32]; 1455 int err = rte_errno; 1456 1457 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1458 !errstrlist[error->type]) 1459 errstr = "unknown type"; 1460 else 1461 errstr = errstrlist[error->type]; 1462 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1463 __func__, error->type, errstr, 1464 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1465 error->cause), buf) : "", 1466 error->message ? error->message : "(no stated reason)", 1467 rte_strerror(err)); 1468 1469 switch (error->type) { 1470 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1471 fprintf(stderr, "The status suggests the use of \"transfer\" " 1472 "as the possible cause of the failure. 
Make " 1473 "sure that the flow in question and its " 1474 "indirect components (if any) are managed " 1475 "via \"transfer\" proxy port. Use command " 1476 "\"show port (port_id) flow transfer proxy\" " 1477 "to figure out the proxy port ID\n"); 1478 break; 1479 default: 1480 break; 1481 } 1482 1483 return -err; 1484 } 1485 1486 static void 1487 rss_config_display(struct rte_flow_action_rss *rss_conf) 1488 { 1489 uint8_t i; 1490 1491 if (rss_conf == NULL) { 1492 fprintf(stderr, "Invalid rule\n"); 1493 return; 1494 } 1495 1496 printf("RSS:\n" 1497 " queues:"); 1498 if (rss_conf->queue_num == 0) 1499 printf(" none"); 1500 for (i = 0; i < rss_conf->queue_num; i++) 1501 printf(" %d", rss_conf->queue[i]); 1502 printf("\n"); 1503 1504 printf(" function: "); 1505 switch (rss_conf->func) { 1506 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1507 printf("default\n"); 1508 break; 1509 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1510 printf("toeplitz\n"); 1511 break; 1512 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1513 printf("simple_xor\n"); 1514 break; 1515 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1516 printf("symmetric_toeplitz\n"); 1517 break; 1518 default: 1519 printf("Unknown function\n"); 1520 return; 1521 } 1522 1523 printf(" types:\n"); 1524 if (rss_conf->types == 0) { 1525 printf(" none\n"); 1526 return; 1527 } 1528 for (i = 0; rss_type_table[i].str; i++) { 1529 if ((rss_conf->types & 1530 rss_type_table[i].rss_type) == 1531 rss_type_table[i].rss_type && 1532 rss_type_table[i].rss_type != 0) 1533 printf(" %s\n", rss_type_table[i].str); 1534 } 1535 } 1536 1537 static struct port_indirect_action * 1538 action_get_by_id(portid_t port_id, uint32_t id) 1539 { 1540 struct rte_port *port; 1541 struct port_indirect_action **ppia; 1542 struct port_indirect_action *pia = NULL; 1543 1544 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1545 port_id == (portid_t)RTE_PORT_ALL) 1546 return NULL; 1547 port = &ports[port_id]; 1548 ppia = &port->actions_list; 1549 while (*ppia) { 1550 if ((*ppia)->id == id) { 1551 pia = *ppia; 1552 break; 1553 } 1554 ppia = &(*ppia)->next; 1555 } 1556 if (!pia) 1557 fprintf(stderr, 1558 "Failed to find indirect action #%u on port %u\n", 1559 id, port_id); 1560 return pia; 1561 } 1562 1563 static int 1564 action_alloc(portid_t port_id, uint32_t id, 1565 struct port_indirect_action **action) 1566 { 1567 struct rte_port *port; 1568 struct port_indirect_action **ppia; 1569 struct port_indirect_action *pia = NULL; 1570 1571 *action = NULL; 1572 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1573 port_id == (portid_t)RTE_PORT_ALL) 1574 return -EINVAL; 1575 port = &ports[port_id]; 1576 if (id == UINT32_MAX) { 1577 /* taking first available ID */ 1578 if (port->actions_list) { 1579 if (port->actions_list->id == UINT32_MAX - 1) { 1580 fprintf(stderr, 1581 "Highest indirect action ID is already assigned, delete it first\n"); 1582 return -ENOMEM; 1583 } 1584 id = port->actions_list->id + 1; 1585 } else { 1586 id = 0; 1587 } 1588 } 1589 pia = calloc(1, sizeof(*pia)); 1590 if (!pia) { 1591 fprintf(stderr, 1592 "Allocation of port %u indirect action failed\n", 1593 port_id); 1594 return -ENOMEM; 1595 } 1596 ppia = &port->actions_list; 1597 while (*ppia && (*ppia)->id > id) 1598 ppia = &(*ppia)->next; 1599 if (*ppia && (*ppia)->id == id) { 1600 fprintf(stderr, 1601 "Indirect action #%u is already assigned, delete it first\n", 1602 id); 1603 free(pia); 1604 return -EINVAL; 1605 } 1606 pia->next = *ppia; 1607 pia->id = id; 1608 *ppia = pia; 1609 *action = pia; 1610 return 0; 1611 } 1612 1613 /** Create 
/** Create indirect action */
int
port_action_handle_create(portid_t port_id, uint32_t id,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action)
{
	struct port_indirect_action *pia;
	int ret;
	struct rte_flow_error error;

	ret = action_alloc(port_id, id, &pia);
	if (ret)
		return ret;
	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
		struct rte_flow_action_age *age =
			(struct rte_flow_action_age *)(uintptr_t)(action->conf);

		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
		age->context = &pia->age_type;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
		struct rte_flow_action_conntrack *ct =
			(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);

		memcpy(ct, &conntrack_context, sizeof(*ct));
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
						    &error);
	if (!pia->handle) {
		uint32_t destroy_id = pia->id;
		port_action_handle_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	pia->type = action->type;
	printf("Indirect action #%u created\n", pia->id);
	return 0;
}

/** Destroy indirect action */
int
port_action_handle_destroy(portid_t port_id,
			   uint32_t n,
			   const uint32_t *actions)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_indirect_action *pia = *tmp;

			if (actions[i] != pia->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));

			if (pia->handle && rte_flow_action_handle_destroy(
					port_id, pia->handle, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			*tmp = pia->next;
			printf("Indirect action #%u destroyed\n", pia->id);
			free(pia);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Get indirect action by port + id */
struct rte_flow_action_handle *
port_action_handle_get_by_id(portid_t port_id, uint32_t id)
{
	struct port_indirect_action *pia = action_get_by_id(port_id, id);

	return (pia) ? pia->handle : NULL;
}
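/*
 * For conntrack the PMD expects the raw rte_flow_action_conntrack
 * configuration as the update argument, while other action types take
 * the rte_flow_action itself.
 */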
/** Update indirect action */
int
port_action_handle_update(portid_t port_id, uint32_t id,
			  const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_flow_action_handle *action_handle;
	struct port_indirect_action *pia;
	const void *update;

	action_handle = port_action_handle_get_by_id(port_id, id);
	if (!action_handle)
		return -EINVAL;
	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		update = action->conf;
		break;
	default:
		update = action;
		break;
	}
	if (rte_flow_action_handle_update(port_id, action_handle, update,
					  &error)) {
		return port_flow_complain(&error);
	}
	printf("Indirect action #%u updated\n", id);
	return 0;
}

int
port_action_handle_query(portid_t port_id, uint32_t id)
{
	struct rte_flow_error error;
	struct port_indirect_action *pia;
	union {
		struct rte_flow_query_count count;
		struct rte_flow_query_age age;
		struct rte_flow_action_conntrack ct;
	} query;

	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		fprintf(stderr,
			"Indirect action %u (type: %d) on port %u doesn't support query\n",
			id, pia->type, port_id);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error))
		return port_flow_complain(&error);
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
		printf("Indirect AGE action:\n"
		       " aged: %u\n"
		       " sec_since_last_hit_valid: %u\n"
		       " sec_since_last_hit: %" PRIu32 "\n",
		       query.age.aged,
		       query.age.sec_since_last_hit_valid,
		       query.age.sec_since_last_hit);
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("Indirect COUNT action:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		printf("Conntrack Context:\n"
		       "  Peer: %u, Flow dir: %s, Enable: %u\n"
		       "  Live: %u, SACK: %u, CACK: %u\n"
		       "  Packet dir: %s, Liberal: %u, State: %u\n"
		       "  Factor: %u, Retrans: %u, TCP flags: %u\n"
		       "  Last Seq: %u, Last ACK: %u\n"
		       "  Last Win: %u, Last End: %u\n",
		       query.ct.peer_port,
		       query.ct.is_original_dir ? "Original" : "Reply",
		       query.ct.enable, query.ct.live_connection,
		       query.ct.selective_ack, query.ct.challenge_ack_passed,
		       query.ct.last_direction ?
"Original" : "Reply", 1806 query.ct.liberal_mode, query.ct.state, 1807 query.ct.max_ack_window, query.ct.retransmission_limit, 1808 query.ct.last_index, query.ct.last_seq, 1809 query.ct.last_ack, query.ct.last_window, 1810 query.ct.last_end); 1811 printf(" Original Dir:\n" 1812 " scale: %u, fin: %u, ack seen: %u\n" 1813 " unacked data: %u\n Sent end: %u," 1814 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1815 query.ct.original_dir.scale, 1816 query.ct.original_dir.close_initiated, 1817 query.ct.original_dir.last_ack_seen, 1818 query.ct.original_dir.data_unacked, 1819 query.ct.original_dir.sent_end, 1820 query.ct.original_dir.reply_end, 1821 query.ct.original_dir.max_win, 1822 query.ct.original_dir.max_ack); 1823 printf(" Reply Dir:\n" 1824 " scale: %u, fin: %u, ack seen: %u\n" 1825 " unacked data: %u\n Sent end: %u," 1826 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1827 query.ct.reply_dir.scale, 1828 query.ct.reply_dir.close_initiated, 1829 query.ct.reply_dir.last_ack_seen, 1830 query.ct.reply_dir.data_unacked, 1831 query.ct.reply_dir.sent_end, 1832 query.ct.reply_dir.reply_end, 1833 query.ct.reply_dir.max_win, 1834 query.ct.reply_dir.max_ack); 1835 break; 1836 default: 1837 fprintf(stderr, 1838 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1839 id, pia->type, port_id); 1840 break; 1841 } 1842 return 0; 1843 } 1844 1845 static struct port_flow_tunnel * 1846 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1847 const struct rte_flow_item *pattern, 1848 const struct rte_flow_action *actions, 1849 const struct tunnel_ops *tunnel_ops) 1850 { 1851 int ret; 1852 struct rte_port *port; 1853 struct port_flow_tunnel *pft; 1854 struct rte_flow_error error; 1855 1856 port = &ports[port_id]; 1857 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 1858 if (!pft) { 1859 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 1860 tunnel_ops->id); 1861 return NULL; 1862 } 1863 if (tunnel_ops->actions) { 1864 uint32_t num_actions; 1865 const struct rte_flow_action *aptr; 1866 1867 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 1868 &pft->pmd_actions, 1869 &pft->num_pmd_actions, 1870 &error); 1871 if (ret) { 1872 port_flow_complain(&error); 1873 return NULL; 1874 } 1875 for (aptr = actions, num_actions = 1; 1876 aptr->type != RTE_FLOW_ACTION_TYPE_END; 1877 aptr++, num_actions++); 1878 pft->actions = malloc( 1879 (num_actions + pft->num_pmd_actions) * 1880 sizeof(actions[0])); 1881 if (!pft->actions) { 1882 rte_flow_tunnel_action_decap_release( 1883 port_id, pft->actions, 1884 pft->num_pmd_actions, &error); 1885 return NULL; 1886 } 1887 rte_memcpy(pft->actions, pft->pmd_actions, 1888 pft->num_pmd_actions * sizeof(actions[0])); 1889 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 1890 num_actions * sizeof(actions[0])); 1891 } 1892 if (tunnel_ops->items) { 1893 uint32_t num_items; 1894 const struct rte_flow_item *iptr; 1895 1896 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 1897 &pft->pmd_items, 1898 &pft->num_pmd_items, 1899 &error); 1900 if (ret) { 1901 port_flow_complain(&error); 1902 return NULL; 1903 } 1904 for (iptr = pattern, num_items = 1; 1905 iptr->type != RTE_FLOW_ITEM_TYPE_END; 1906 iptr++, num_items++); 1907 pft->items = malloc((num_items + pft->num_pmd_items) * 1908 sizeof(pattern[0])); 1909 if (!pft->items) { 1910 rte_flow_tunnel_item_release( 1911 port_id, pft->pmd_items, 1912 pft->num_pmd_items, &error); 1913 return NULL; 1914 } 1915 rte_memcpy(pft->items, pft->pmd_items, 1916 pft->num_pmd_items * sizeof(pattern[0])); 1917 
static void
port_flow_tunnel_offload_cmd_release(portid_t port_id,
				     const struct tunnel_ops *tunnel_ops,
				     struct port_flow_tunnel *pft)
{
	struct rte_flow_error error;

	if (tunnel_ops->actions) {
		free(pft->actions);
		rte_flow_tunnel_action_decap_release(
			port_id, pft->pmd_actions,
			pft->num_pmd_actions, &error);
		pft->actions = NULL;
		pft->pmd_actions = NULL;
	}
	if (tunnel_ops->items) {
		free(pft->items);
		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
					     pft->num_pmd_items,
					     &error);
		pft->items = NULL;
		pft->pmd_items = NULL;
	}
}

/** Add port meter policy */
int
port_meter_policy_add(portid_t port_id, uint32_t policy_id,
		      const struct rte_flow_action *actions)
{
	struct rte_mtr_error error;
	const struct rte_flow_action *act = actions;
	const struct rte_flow_action *start;
	struct rte_mtr_meter_policy_params policy;
	uint32_t i = 0, act_n;
	int ret;

	for (i = 0; i < RTE_COLORS; i++) {
		for (act_n = 0, start = act;
		     act->type != RTE_FLOW_ACTION_TYPE_END; act++)
			act_n++;
		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
			policy.actions[i] = start;
		else
			policy.actions[i] = NULL;
		act++;
	}
	ret = rte_mtr_meter_policy_add(port_id,
				       policy_id,
				       &policy, &error);
	if (ret)
		print_mtr_err_msg(&error);
	return ret;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions,
		   const struct tunnel_ops *tunnel_ops)
{
	struct rte_flow_error error;
	struct port_flow_tunnel *pft = NULL;
	int ret;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (tunnel_ops->enabled) {
		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
							actions, tunnel_ops);
		if (!pft)
			return -ENOENT;
		if (pft->items)
			pattern = pft->items;
		if (pft->actions)
			actions = pft->actions;
	}
	ret = rte_flow_validate(port_id, attr, pattern, actions, &error);
	if (tunnel_ops->enabled)
		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
	if (ret)
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}

/** Return age action structure if exists, otherwise NULL. */
static struct rte_flow_action_age *
age_action_get(const struct rte_flow_action *actions)
{
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_AGE:
			return (struct rte_flow_action_age *)
				(uintptr_t)actions->conf;
		default:
			break;
		}
	}
	return NULL;
}
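/*
 * Flow rules are kept in a per-port LIFO list, so the head always holds
 * the highest ID and a new rule gets head->id + 1.
 */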
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions,
		 const struct tunnel_ops *tunnel_ops)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id = 0;
	struct rte_flow_error error;
	struct port_flow_tunnel *pft = NULL;
	struct rte_flow_action_age *age = age_action_get(actions);

	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			fprintf(stderr,
				"Highest rule ID is already assigned, delete it first\n");
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	}
	if (tunnel_ops->enabled) {
		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
							actions, tunnel_ops);
		if (!pft)
			return -ENOENT;
		if (pft->items)
			pattern = pft->items;
		if (pft->actions)
			actions = pft->actions;
	}
	pf = port_flow_new(attr, pattern, actions, &error);
	if (!pf)
		return port_flow_complain(&error);
	if (age) {
		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
		age->context = &pf->age_type;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow) {
		if (tunnel_ops->enabled)
			port_flow_tunnel_offload_cmd_release(port_id,
							     tunnel_ops, pft);
		free(pf);
		return port_flow_complain(&error);
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	if (tunnel_ops->enabled)
		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	port = &ports[port_id];

	if (port->flow_list == NULL)
		return ret;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		port_flow_complain(&error);
	}

	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

/** Dump flow rules. */
int
port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id,
	       const char *file_name)
{
	int ret = 0;
	FILE *file = stdout;
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pflow;
	struct rte_flow *tmp_flow = NULL;
	bool found = false;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	if (!dump_all) {
		port = &ports[port_id];
		pflow = port->flow_list;
		while (pflow) {
			if (rule_id != pflow->id) {
				pflow = pflow->next;
			} else {
				tmp_flow = pflow->flow;
				if (tmp_flow)
					found = true;
				break;
			}
		}
		if (!found) {
			fprintf(stderr, "Failed to dump flow %d\n", rule_id);
			return -EINVAL;
		}
	}

	if (file_name && strlen(file_name)) {
		file = fopen(file_name, "w");
		if (!file) {
			fprintf(stderr, "Failed to create file %s: %s\n",
				file_name, strerror(errno));
			return -errno;
		}
	}

	if (!dump_all)
		ret = rte_flow_dev_dump(port_id, tmp_flow, file, &error);
	else
		ret = rte_flow_dev_dump(port_id, NULL, file, &error);
	if (ret) {
		port_flow_complain(&error);
		fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret));
	} else
		printf("Flow dump finished\n");
	if (file_name && strlen(file_name))
		fclose(file);
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
		struct rte_flow_action_rss rss_conf;
		struct rte_flow_query_age age;
	} query;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		fprintf(stderr, "Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
			    &name, sizeof(name),
			    (void *)(uintptr_t)action->type, &error);
	if (ret < 0)
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
	case RTE_FLOW_ACTION_TYPE_RSS:
	case RTE_FLOW_ACTION_TYPE_AGE:
		break;
	default:
		fprintf(stderr, "Cannot query action type %d (%s)\n",
			action->type, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss_config_display(&query.rss_conf);
		break;
	case RTE_FLOW_ACTION_TYPE_AGE:
		printf("%s:\n"
		       " aged: %u\n"
		       " sec_since_last_hit_valid: %u\n"
		       " sec_since_last_hit: %" PRIu32 "\n",
		       name,
		       query.age.aged,
		       query.age.sec_since_last_hit_valid,
		       query.age.sec_since_last_hit);
		break;
	default:
		fprintf(stderr,
			"Cannot display result for action type %d (%s)\n",
			action->type, name);
		break;
	}
	return 0;
}

/** List and optionally destroy all aged flows. */
void
port_flow_aged(portid_t port_id, uint8_t destroy)
{
	void **contexts;
	int nb_context, total = 0, idx;
	struct rte_flow_error error;
	enum age_action_context_type *type;
	union {
		struct port_flow *pf;
		struct port_indirect_action *pia;
	} ctx;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	printf("Port %u total aged flows: %d\n", port_id, total);
	if (total < 0) {
		port_flow_complain(&error);
		return;
	}
	if (total == 0)
		return;
	contexts = malloc(sizeof(void *) * total);
	if (contexts == NULL) {
		fprintf(stderr, "Cannot allocate contexts for aged flow\n");
		return;
	}
	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
	nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
	if (nb_context != total) {
		fprintf(stderr,
			"Port:%d get aged flows count(%d) != total(%d)\n",
			port_id, nb_context, total);
		free(contexts);
		return;
	}
	total = 0;
	for (idx = 0; idx < nb_context; idx++) {
		if (!contexts[idx]) {
			fprintf(stderr, "Error: got NULL context on port %u\n",
				port_id);
			continue;
		}
		type = (enum age_action_context_type *)contexts[idx];
		switch (*type) {
		case ACTION_AGE_CONTEXT_TYPE_FLOW:
			ctx.pf = container_of(type, struct port_flow, age_type);
			printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32
			       "\t%c%c%c\t\n",
			       "Flow",
			       ctx.pf->id,
			       ctx.pf->rule.attr->group,
			       ctx.pf->rule.attr->priority,
			       ctx.pf->rule.attr->ingress ? 'i' : '-',
			       ctx.pf->rule.attr->egress ? 'e' : '-',
			       ctx.pf->rule.attr->transfer ? 't' : '-');
			if (destroy && !port_flow_destroy(port_id, 1,
							  &ctx.pf->id))
				total++;
			break;
		case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
			ctx.pia = container_of(type,
					struct port_indirect_action, age_type);
			printf("%-20s\t%" PRIu32 "\n", "Indirect action",
			       ctx.pia->id);
			break;
		default:
			fprintf(stderr, "Error: invalid context type %u\n",
				(unsigned int)*type);
			break;
		}
	}
	printf("\n%d flows destroyed\n", total);
	free(contexts);
}

/** List flow rules. */
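/*
 * One rule per line: ID, group, priority, attribute flags (i = ingress,
 * e = egress, t = transfer) and the names of the rule's items and actions.
 */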
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;
		const struct rte_flow_attr *curr = pf->rule.attr;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (curr->group == group[i])
					break;
			if (i == n)
				continue;
		}
		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
			const struct rte_flow_attr *comp = (*tmp)->rule.attr;

			if (curr->group > comp->group ||
			    (curr->group == comp->group &&
			     curr->priority > comp->priority) ||
			    (curr->group == comp->group &&
			     curr->priority == comp->priority &&
			     pf->id > (*tmp)->id))
				continue;
			break;
		}
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->rule.pattern;
		const struct rte_flow_action *action = pf->rule.actions;
		const char *name;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
		       pf->id,
		       pf->rule.attr->group,
		       pf->rule.attr->priority,
		       pf->rule.attr->ingress ? 'i' : '-',
		       pf->rule.attr->egress ? 'e' : '-',
		       pf->rule.attr->transfer ? 't' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if ((uint32_t)item->type > INT_MAX)
				name = "PMD_INTERNAL";
			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
					       &name, sizeof(name),
					       (void *)(uintptr_t)item->type,
					       NULL) <= 0)
				name = "[UNKNOWN]";
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if ((uint32_t)action->type > INT_MAX)
				name = "PMD_INTERNAL";
			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
					       &name, sizeof(name),
					       (void *)(uintptr_t)action->type,
					       NULL) <= 0)
				name = "[UNKNOWN]";
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", name);
			++action;
		}
		printf("\n");
	}
}

/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
	       port_id,
	       set ? "now restricted" : "not restricted anymore");
	return 0;
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n",
		rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n",
		txq_id, nb_txq);
	return 1;
}

static int
get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
{
	struct rte_port *port = &ports[port_id];
	struct rte_eth_rxq_info rx_qinfo;
	int ret;

	ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
	if (ret == 0) {
		*ring_size = rx_qinfo.nb_desc;
		return ret;
	}

	if (ret != -ENOTSUP)
		return ret;
	/*
	 * If rte_eth_rx_queue_info_get() is not supported by this PMD,
	 * the ring size stored in testpmd is used for validity checks.
	 * When the Rx queue was set up by rte_eth_rx_queue_setup() with
	 * nb_rx_desc being 0, a default value provided by the PMD was
	 * used, and if that default is also 0, the queue was set up with
	 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE.
	 */
	if (port->nb_rx_desc[rxq_id])
		*ring_size = port->nb_rx_desc[rxq_id];
	else if (port->dev_info.default_rxportconf.ring_size)
		*ring_size = port->dev_info.default_rxportconf.ring_size;
	else
		*ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	return 0;
}

static int
get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
{
	struct rte_port *port = &ports[port_id];
	struct rte_eth_txq_info tx_qinfo;
	int ret;

	ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
	if (ret == 0) {
		*ring_size = tx_qinfo.nb_desc;
		return ret;
	}

	if (ret != -ENOTSUP)
		return ret;
	/*
	 * If rte_eth_tx_queue_info_get() is not supported by this PMD,
	 * the ring size stored in testpmd is used for validity checks.
	 * When the Tx queue was set up by rte_eth_tx_queue_setup() with
	 * nb_tx_desc being 0, a default value provided by the PMD was
	 * used, and if that default is also 0, the queue was set up with
	 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE.
	 */
	if (port->nb_tx_desc[txq_id])
		*ring_size = port->nb_tx_desc[txq_id];
	else if (port->dev_info.default_txportconf.ring_size)
		*ring_size = port->dev_info.default_txportconf.ring_size;
	else
		*ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	return 0;
}

static int
rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
{
	uint16_t ring_size;
	int ret;

	ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
	if (ret)
		return 1;

	if (rxdesc_id < ring_size)
		return 0;

	fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n",
		rxdesc_id, ring_size);
	return 1;
}

static int
tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
{
	uint16_t ring_size;
	int ret;

	ret = get_tx_ring_size(port_id, txq_id, &ring_size);
	if (ret)
		return 1;

	if (txdesc_id < ring_size)
		return 0;

	fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n",
		txdesc_id, ring_size);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
		 port_id, q_id, ring_name);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		fprintf(stderr,
			"%s ring memory zone of (port %d, queue %d) not found (zone name = %s)\n",
			ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}

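/*
 * Dump one Rx descriptor. i40e devices use 32-byte descriptors unless the
 * 16-byte descriptor mode is compiled in; every other device is displayed
 * using the generic 16-byte layout.
 */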
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	int ret;
	struct rte_eth_dev_info dev_info;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}

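/*
 * Show the current forwarding and RX/TX configuration. Per-queue details
 * are printed for the first queue only, to keep the output short (see the
 * per-queue loops below).
 */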
"" : " with retry", 2766 nb_pkt_per_burst); 2767 2768 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2769 printf(" packet len=%u - nb packet segments=%d\n", 2770 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2771 2772 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2773 nb_fwd_lcores, nb_fwd_ports); 2774 2775 RTE_ETH_FOREACH_DEV(pid) { 2776 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2777 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2778 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2779 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2780 struct rte_eth_rxq_info rx_qinfo; 2781 struct rte_eth_txq_info tx_qinfo; 2782 uint16_t rx_free_thresh_tmp; 2783 uint16_t tx_free_thresh_tmp; 2784 uint16_t tx_rs_thresh_tmp; 2785 uint16_t nb_rx_desc_tmp; 2786 uint16_t nb_tx_desc_tmp; 2787 uint64_t offloads_tmp; 2788 uint8_t pthresh_tmp; 2789 uint8_t hthresh_tmp; 2790 uint8_t wthresh_tmp; 2791 int32_t rc; 2792 2793 /* per port config */ 2794 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2795 (unsigned int)pid, nb_rxq, nb_txq); 2796 2797 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2798 ports[pid].dev_conf.rxmode.offloads, 2799 ports[pid].dev_conf.txmode.offloads); 2800 2801 /* per rx queue config only for first queue to be less verbose */ 2802 for (qid = 0; qid < 1; qid++) { 2803 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2804 if (rc) { 2805 nb_rx_desc_tmp = nb_rx_desc[qid]; 2806 rx_free_thresh_tmp = 2807 rx_conf[qid].rx_free_thresh; 2808 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2809 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2810 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2811 offloads_tmp = rx_conf[qid].offloads; 2812 } else { 2813 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2814 rx_free_thresh_tmp = 2815 rx_qinfo.conf.rx_free_thresh; 2816 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2817 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2818 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2819 offloads_tmp = rx_qinfo.conf.offloads; 2820 } 2821 2822 printf(" RX queue: %d\n", qid); 2823 printf(" RX desc=%d - RX free threshold=%d\n", 2824 nb_rx_desc_tmp, rx_free_thresh_tmp); 2825 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2826 " wthresh=%d\n", 2827 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2828 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 2829 if (rx_conf->share_group > 0) 2830 printf(" share_group=%u share_qid=%u", 2831 rx_conf->share_group, 2832 rx_conf->share_qid); 2833 printf("\n"); 2834 } 2835 2836 /* per tx queue config only for first queue to be less verbose */ 2837 for (qid = 0; qid < 1; qid++) { 2838 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2839 if (rc) { 2840 nb_tx_desc_tmp = nb_tx_desc[qid]; 2841 tx_free_thresh_tmp = 2842 tx_conf[qid].tx_free_thresh; 2843 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2844 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2845 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2846 offloads_tmp = tx_conf[qid].offloads; 2847 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2848 } else { 2849 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2850 tx_free_thresh_tmp = 2851 tx_qinfo.conf.tx_free_thresh; 2852 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2853 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2854 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2855 offloads_tmp = tx_qinfo.conf.offloads; 2856 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2857 } 2858 2859 printf(" TX queue: %d\n", qid); 2860 printf(" TX desc=%d - TX free threshold=%d\n", 2861 
			printf(" TX threshold registers: pthresh=%d hthresh=%d "
			       " wthresh=%d\n",
			       pthresh_tmp, hthresh_tmp, wthresh_tmp);
			printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
			       offloads_tmp, tx_rs_thresh_tmp);
		}
	}
}

void
port_rss_reta_info(portid_t port_id,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t nb_entries)
{
	uint16_t i, idx, shift;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
	if (ret != 0) {
		fprintf(stderr,
			"Failed to get RSS RETA info, return code = %d\n",
			ret);
		return;
	}

	for (i = 0; i < nb_entries; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
		       i, reta_conf[idx].reta[shift]);
	}
}

/*
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
 * key of the port.
 */
void
port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
{
	struct rte_eth_rss_conf rss_conf = {0};
	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
	uint64_t rss_hf;
	uint8_t i;
	int diag;
	struct rte_eth_dev_info dev_info;
	uint8_t hash_key_size;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (dev_info.hash_key_size > 0 &&
	    dev_info.hash_key_size <= sizeof(rss_key))
		hash_key_size = dev_info.hash_key_size;
	else {
		fprintf(stderr,
			"dev_info did not provide a valid hash key size\n");
		return;
	}

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	rss_conf.rss_key_len = hash_key_size;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		switch (diag) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		default:
			fprintf(stderr, "operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	printf("RSS functions:\n ");
	for (i = 0; rss_type_table[i].str; i++) {
		if (rss_type_table[i].rss_type == 0)
			continue;
		if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < hash_key_size; i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}

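/*
 * Update the RSS hash key for the given hash type. The current
 * configuration is fetched first, so the update reprograms the key while
 * keeping the hash functions already configured on the device.
 */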
void
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
			 uint8_t hash_key_len)
{
	struct rte_eth_rss_conf rss_conf;
	int diag;
	unsigned int i;

	rss_conf.rss_key = NULL;
	rss_conf.rss_key_len = 0;
	rss_conf.rss_hf = 0;
	for (i = 0; rss_type_table[i].str; i++) {
		if (!strcmp(rss_type_table[i].str, rss_type))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag == 0) {
		rss_conf.rss_key = hash_key;
		rss_conf.rss_key_len = hash_key_len;
		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
	if (diag == 0)
		return;

	switch (diag) {
	case -ENODEV:
		fprintf(stderr, "port index %d invalid\n", port_id);
		break;
	case -ENOTSUP:
		fprintf(stderr, "operation not supported by device\n");
		break;
	default:
		fprintf(stderr, "operation failed - diag=%d\n", diag);
		break;
	}
}

/*
 * Check whether a shared Rx queue is scheduled on other lcores.
 */
static bool
fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
			   portid_t src_port, queueid_t src_rxq,
			   uint32_t share_group, queueid_t share_rxq)
{
	streamid_t sm_id;
	streamid_t nb_fs_per_lcore;
	lcoreid_t nb_fc;
	lcoreid_t lc_id;
	struct fwd_stream *fs;
	struct rte_port *port;
	struct rte_eth_dev_info *dev_info;
	struct rte_eth_rxconf *rxq_conf;

	nb_fc = cur_fwd_config.nb_fwd_lcores;
	/* Check remaining cores. */
	for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
		sm_id = fwd_lcores[lc_id]->stream_idx;
		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
		     sm_id++) {
			fs = fwd_streams[sm_id];
			port = &ports[fs->rx_port];
			dev_info = &port->dev_info;
			rxq_conf = &port->rx_conf[fs->rx_queue];
			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
			    == 0 || rxq_conf->share_group == 0)
				/* Not shared rxq. */
				continue;
			if (domain_id != port->dev_info.switch_info.domain_id)
				continue;
			if (rxq_conf->share_group != share_group)
				continue;
			if (rxq_conf->share_qid != share_rxq)
				continue;
			printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
			       share_group, share_rxq);
			printf(" lcore %hhu Port %hu queue %hu\n",
			       src_lc, src_port, src_rxq);
			printf(" lcore %hhu Port %hu queue %hu\n",
			       lc_id, fs->rx_port, fs->rx_queue);
			printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
			       nb_rxq);
			return true;
		}
	}
	return false;
}

/*
 * Check shared rxq configuration.
 *
 * A shared group must not be scheduled on different cores.
 */
bool
pkt_fwd_shared_rxq_check(void)
{
	streamid_t sm_id;
	streamid_t nb_fs_per_lcore;
	lcoreid_t nb_fc;
	lcoreid_t lc_id;
	struct fwd_stream *fs;
	uint16_t domain_id;
	struct rte_port *port;
	struct rte_eth_dev_info *dev_info;
	struct rte_eth_rxconf *rxq_conf;

	if (rxq_share == 0)
		return true;
	nb_fc = cur_fwd_config.nb_fwd_lcores;
	/*
	 * Check streams on each core, make sure the same switch domain +
	 * group + queue doesn't get scheduled on other cores.
	 */
	for (lc_id = 0; lc_id < nb_fc; lc_id++) {
		sm_id = fwd_lcores[lc_id]->stream_idx;
		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
		     sm_id++) {
			fs = fwd_streams[sm_id];
			/* Update lcore info of the stream being scheduled. */
			fs->lcore = fwd_lcores[lc_id];
			port = &ports[fs->rx_port];
			dev_info = &port->dev_info;
			rxq_conf = &port->rx_conf[fs->rx_queue];
			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
			    == 0 || rxq_conf->share_group == 0)
				/* Not shared rxq. */
				continue;
			/* Check shared rxq not scheduled on remaining cores. */
			domain_id = port->dev_info.switch_info.domain_id;
			if (fwd_stream_on_other_lcores(domain_id, lc_id,
						       fs->rx_port,
						       fs->rx_queue,
						       rxq_conf->share_group,
						       rxq_conf->share_qid))
				return false;
		}
	}
	return true;
}

/*
 * Setup forwarding configuration for each logical core.
 */
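/*
 * Streams are spread as evenly as possible. For example, nb_fs = 10
 * streams on nb_fc = 4 lcores gives nb_fs_per_lcore = 2 and nb_extra = 2:
 * the first two lcores get 2 streams each, the last two get 3 each.
 */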
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t nb_extra;
	lcoreid_t nb_fc;
	lcoreid_t nb_lc;
	lcoreid_t lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}

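/*
 * Map an Rx port index to its peer Tx port according to the configured
 * topology: paired pairs ports 0<->1, 2<->3, ... (an odd last port pairs
 * with itself), chained maps port i to (i + 1) % nb_fwd_ports, and loop
 * sends packets back on the Rx port.
 */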
static portid_t
fwd_topology_tx_port_get(portid_t rxp)
{
	static int warning_once = 1;

	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);

	switch (port_topology) {
	default:
	case PORT_TOPOLOGY_PAIRED:
		if ((rxp & 0x1) == 0) {
			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
				return rxp + 1;
			if (warning_once) {
				fprintf(stderr,
					"\nWarning! port-topology=paired with an odd number of forwarding ports, the last port will pair with itself.\n\n");
				warning_once = 0;
			}
			return rxp;
		}
		return rxp - 1;
	case PORT_TOPOLOGY_CHAINED:
		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
	case PORT_TOPOLOGY_LOOP:
		return rxp;
	}
}

static void
simple_fwd_config_setup(void)
{
	portid_t i;

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower than or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		fwd_streams[i]->rx_port = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue = 0;
		fwd_streams[i]->tx_port =
			fwd_ports_ids[fwd_topology_tx_port_get(i)];
		fwd_streams[i]->tx_queue = 0;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;
	}
}

/**
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of an Rx queue to poll on an Rx port for input
 * messages, associated with a Tx queue of a Tx port where forwarded packets
 * are sent.
 */
static void
rss_fwd_config_setup(void)
{
	portid_t rxp;
	portid_t txp;
	queueid_t rxq;
	queueid_t nb_q;
	streamid_t sm_id;
	int start;
	int end;

	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	if (proc_id > 0 && nb_q % num_procs != 0)
		printf("Warning! The number of queues should be a multiple of the number of processes, or packet loss will happen.\n");

	/*
	 * In multi-process mode, all queues are allocated to different
	 * processes based on num_procs and proc_id. For example, with
	 * 4 queues (nb_q) and 2 processes (num_procs), queues 0~1 go to
	 * the primary process and queues 2~3 to the secondary process.
	 */
	start = proc_id * nb_q / num_procs;
	end = start + nb_q / num_procs;
	rxp = 0;
	rxq = start;
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[sm_id];
		txp = fwd_topology_tx_port_get(rxp);
		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		fs->retry_enabled = retry_enabled;
		rxp++;
		if (rxp < nb_fwd_ports)
			continue;
		rxp = 0;
		rxq++;
		if (rxq >= end)
			rxq = start;
	}
}

static uint16_t
get_fwd_port_total_tc_num(void)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t total_tc_num = 0;
	unsigned int i;

	for (i = 0; i < nb_fwd_ports; i++) {
		(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info);
		total_tc_num += dcb_info.nb_tcs;
	}

	return total_tc_num;
}

/**
 * For the DCB forwarding test, each core is assigned to one traffic class.
 *
 * Each core is assigned multiple streams; each stream is composed of an Rx
 * queue to poll on an Rx port for input messages, associated with a Tx
 * queue of a Tx port where forwarded packets are sent. All Rx and Tx queues
 * map to the same traffic class. If VMDQ and DCB co-exist, traffic classes
 * on different pools share the same core.
 */
static void
dcb_fwd_config_setup(void)
{
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;
	lcoreid_t lc_id;
	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;
	uint16_t total_tc_num;
	struct rte_port *port;
	uint8_t tc = 0;
	portid_t pid;
	int ret;

	/*
	 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED
	 * or RTE_PORT_STOPPED.
	 *
	 * Re-configure ports to get an updated mapping between TCs and queues
	 * in case the queue number of the port has changed. Skip started
	 * ports, since changing the queue number and calling dev_configure
	 * require stopping the ports first.
	 */
	for (pid = 0; pid < nb_fwd_ports; pid++) {
		if (port_is_started(pid) == 1)
			continue;

		port = &ports[pid];
		ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq,
					    &port->dev_conf);
		if (ret < 0) {
			fprintf(stderr,
				"Failed to re-configure port %d, ret = %d.\n",
				pid, ret);
			return;
		}
	}

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	total_tc_num = get_fwd_port_total_tc_num();
	if (cur_fwd_config.nb_fwd_lcores > total_tc_num)
		cur_fwd_config.nb_fwd_lcores = total_tc_num;

	/* reinitialize forwarding streams */
	init_fwd_streams();
	sm_id = 0;
	txp = 1;
	/* get the dcb info on the first RX and TX ports */
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		fwd_lcores[lc_id]->stream_nb = 0;
		fwd_lcores[lc_id]->stream_idx = sm_id;
		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
			/*
			 * If nb_queue is zero, this traffic class is not
			 * enabled on the pool.
			 */
			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
				break;
			k = fwd_lcores[lc_id]->stream_nb +
				fwd_lcores[lc_id]->stream_idx;
			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
			for (j = 0; j < nb_rx_queue; j++) {
				struct fwd_stream *fs;

				fs = fwd_streams[k + j];
				fs->rx_port = fwd_ports_ids[rxp];
				fs->rx_queue = rxq + j;
				fs->tx_port = fwd_ports_ids[txp];
				fs->tx_queue = txq + j % nb_tx_queue;
				fs->peer_addr = fs->tx_port;
				fs->retry_enabled = retry_enabled;
			}
			fwd_lcores[lc_id]->stream_nb +=
				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
		}
		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

		tc++;
		if (tc < rxp_dcb_info.nb_tcs)
			continue;
		/* Restart from TC 0 on next RX port */
		tc = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp++;
		if (rxp >= nb_fwd_ports)
			return;
		/* get the dcb information on next RX and TX ports */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
	}
}

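/*
 * ICMP echo replies go back out on the receiving port, so each stream
 * uses the same port and queue for both Rx and Tx.
 */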
fwd_streams=%d\n", 3439 __FUNCTION__, 3440 cur_fwd_config.nb_fwd_lcores, 3441 cur_fwd_config.nb_fwd_ports, 3442 cur_fwd_config.nb_fwd_streams); 3443 } 3444 3445 /* reinitialize forwarding streams */ 3446 init_fwd_streams(); 3447 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3448 rxp = 0; rxq = 0; 3449 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3450 if (verbose_level > 0) 3451 printf(" core=%d: \n", lc_id); 3452 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3453 struct fwd_stream *fs; 3454 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3455 fs->rx_port = fwd_ports_ids[rxp]; 3456 fs->rx_queue = rxq; 3457 fs->tx_port = fs->rx_port; 3458 fs->tx_queue = rxq; 3459 fs->peer_addr = fs->tx_port; 3460 fs->retry_enabled = retry_enabled; 3461 if (verbose_level > 0) 3462 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3463 sm_id, fs->rx_port, fs->rx_queue, 3464 fs->tx_queue); 3465 rxq = (queueid_t) (rxq + 1); 3466 if (rxq == nb_rxq) { 3467 rxq = 0; 3468 rxp = (portid_t) (rxp + 1); 3469 } 3470 } 3471 } 3472 } 3473 3474 void 3475 fwd_config_setup(void) 3476 { 3477 struct rte_port *port; 3478 portid_t pt_id; 3479 unsigned int i; 3480 3481 cur_fwd_config.fwd_eng = cur_fwd_eng; 3482 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3483 icmp_echo_config_setup(); 3484 return; 3485 } 3486 3487 if ((nb_rxq > 1) && (nb_txq > 1)){ 3488 if (dcb_config) { 3489 for (i = 0; i < nb_fwd_ports; i++) { 3490 pt_id = fwd_ports_ids[i]; 3491 port = &ports[pt_id]; 3492 if (!port->dcb_flag) { 3493 fprintf(stderr, 3494 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 3495 return; 3496 } 3497 } 3498 if (nb_fwd_lcores == 1) { 3499 fprintf(stderr, 3500 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 3501 return; 3502 } 3503 3504 dcb_fwd_config_setup(); 3505 } else 3506 rss_fwd_config_setup(); 3507 } 3508 else 3509 simple_fwd_config_setup(); 3510 } 3511 3512 static const char * 3513 mp_alloc_to_str(uint8_t mode) 3514 { 3515 switch (mode) { 3516 case MP_ALLOC_NATIVE: 3517 return "native"; 3518 case MP_ALLOC_ANON: 3519 return "anon"; 3520 case MP_ALLOC_XMEM: 3521 return "xmem"; 3522 case MP_ALLOC_XMEM_HUGE: 3523 return "xmemhuge"; 3524 case MP_ALLOC_XBUF: 3525 return "xbuf"; 3526 default: 3527 return "invalid"; 3528 } 3529 } 3530 3531 void 3532 pkt_fwd_config_display(struct fwd_config *cfg) 3533 { 3534 struct fwd_stream *fs; 3535 lcoreid_t lc_id; 3536 streamid_t sm_id; 3537 3538 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3539 "NUMA support %s, MP allocation mode: %s\n", 3540 cfg->fwd_eng->fwd_mode_name, 3541 retry_enabled == 0 ? "" : " with retry", 3542 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3543 numa_support == 1 ? 
"enabled" : "disabled", 3544 mp_alloc_to_str(mp_alloc_type)); 3545 3546 if (retry_enabled) 3547 printf("TX retry num: %u, delay between TX retries: %uus\n", 3548 burst_tx_retry_num, burst_tx_delay_time); 3549 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3550 printf("Logical Core %u (socket %u) forwards packets on " 3551 "%d streams:", 3552 fwd_lcores_cpuids[lc_id], 3553 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3554 fwd_lcores[lc_id]->stream_nb); 3555 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3556 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3557 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3558 "P=%d/Q=%d (socket %u) ", 3559 fs->rx_port, fs->rx_queue, 3560 ports[fs->rx_port].socket_id, 3561 fs->tx_port, fs->tx_queue, 3562 ports[fs->tx_port].socket_id); 3563 print_ethaddr("peer=", 3564 &peer_eth_addrs[fs->peer_addr]); 3565 } 3566 printf("\n"); 3567 } 3568 printf("\n"); 3569 } 3570 3571 void 3572 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3573 { 3574 struct rte_ether_addr new_peer_addr; 3575 if (!rte_eth_dev_is_valid_port(port_id)) { 3576 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 3577 return; 3578 } 3579 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3580 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 3581 peer_addr); 3582 return; 3583 } 3584 peer_eth_addrs[port_id] = new_peer_addr; 3585 } 3586 3587 int 3588 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3589 { 3590 unsigned int i; 3591 unsigned int lcore_cpuid; 3592 int record_now; 3593 3594 record_now = 0; 3595 again: 3596 for (i = 0; i < nb_lc; i++) { 3597 lcore_cpuid = lcorelist[i]; 3598 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3599 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 3600 return -1; 3601 } 3602 if (lcore_cpuid == rte_get_main_lcore()) { 3603 fprintf(stderr, 3604 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 3605 lcore_cpuid); 3606 return -1; 3607 } 3608 if (record_now) 3609 fwd_lcores_cpuids[i] = lcore_cpuid; 3610 } 3611 if (record_now == 0) { 3612 record_now = 1; 3613 goto again; 3614 } 3615 nb_cfg_lcores = (lcoreid_t) nb_lc; 3616 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3617 printf("previous number of forwarding cores %u - changed to " 3618 "number of configured cores %u\n", 3619 (unsigned int) nb_fwd_lcores, nb_lc); 3620 nb_fwd_lcores = (lcoreid_t) nb_lc; 3621 } 3622 3623 return 0; 3624 } 3625 3626 int 3627 set_fwd_lcores_mask(uint64_t lcoremask) 3628 { 3629 unsigned int lcorelist[64]; 3630 unsigned int nb_lc; 3631 unsigned int i; 3632 3633 if (lcoremask == 0) { 3634 fprintf(stderr, "Invalid NULL mask of cores\n"); 3635 return -1; 3636 } 3637 nb_lc = 0; 3638 for (i = 0; i < 64; i++) { 3639 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3640 continue; 3641 lcorelist[nb_lc++] = i; 3642 } 3643 return set_fwd_lcores_list(lcorelist, nb_lc); 3644 } 3645 3646 void 3647 set_fwd_lcores_number(uint16_t nb_lc) 3648 { 3649 if (test_done == 0) { 3650 fprintf(stderr, "Please stop forwarding first\n"); 3651 return; 3652 } 3653 if (nb_lc > nb_cfg_lcores) { 3654 fprintf(stderr, 3655 "nb fwd cores %u > %u (max. 
number of configured lcores) - ignored\n", 3656 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3657 return; 3658 } 3659 nb_fwd_lcores = (lcoreid_t) nb_lc; 3660 printf("Number of forwarding cores set to %u\n", 3661 (unsigned int) nb_fwd_lcores); 3662 } 3663 3664 void 3665 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3666 { 3667 unsigned int i; 3668 portid_t port_id; 3669 int record_now; 3670 3671 record_now = 0; 3672 again: 3673 for (i = 0; i < nb_pt; i++) { 3674 port_id = (portid_t) portlist[i]; 3675 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3676 return; 3677 if (record_now) 3678 fwd_ports_ids[i] = port_id; 3679 } 3680 if (record_now == 0) { 3681 record_now = 1; 3682 goto again; 3683 } 3684 nb_cfg_ports = (portid_t) nb_pt; 3685 if (nb_fwd_ports != (portid_t) nb_pt) { 3686 printf("previous number of forwarding ports %u - changed to " 3687 "number of configured ports %u\n", 3688 (unsigned int) nb_fwd_ports, nb_pt); 3689 nb_fwd_ports = (portid_t) nb_pt; 3690 } 3691 } 3692 3693 /** 3694 * Parse the user input and obtain the list of forwarding ports 3695 * 3696 * @param[in] list 3697 * String containing the user input. User can specify 3698 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3699 * For example, if the user wants to use all the available 3700 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3701 * If the user wants to use only the ports 1,2 then the input 3702 * is 1,2. 3703 * valid characters are '-' and ',' 3704 * @param[out] values 3705 * This array will be filled with a list of port IDs 3706 * based on the user input 3707 * Note that duplicate entries are discarded and only the first 3708 * count entries in this array are port IDs and all the rest 3709 * will contain default values 3710 * @param[in] maxsize 3711 * This parameter denotes 2 things 3712 * 1) Number of elements in the values array 3713 * 2) Maximum value of each element in the values array 3714 * @return 3715 * On success, returns total count of parsed port IDs 3716 * On failure, returns 0 3717 */ 3718 static unsigned int 3719 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3720 { 3721 unsigned int count = 0; 3722 char *end = NULL; 3723 int min, max; 3724 int value, i; 3725 unsigned int marked[maxsize]; 3726 3727 if (list == NULL || values == NULL) 3728 return 0; 3729 3730 for (i = 0; i < (int)maxsize; i++) 3731 marked[i] = 0; 3732 3733 min = INT_MAX; 3734 3735 do { 3736 /*Remove the blank spaces if any*/ 3737 while (isblank(*list)) 3738 list++; 3739 if (*list == '\0') 3740 break; 3741 errno = 0; 3742 value = strtol(list, &end, 10); 3743 if (errno || end == NULL) 3744 return 0; 3745 if (value < 0 || value >= (int)maxsize) 3746 return 0; 3747 while (isblank(*end)) 3748 end++; 3749 if (*end == '-' && min == INT_MAX) { 3750 min = value; 3751 } else if ((*end == ',') || (*end == '\0')) { 3752 max = value; 3753 if (min == INT_MAX) 3754 min = value; 3755 for (i = min; i <= max; i++) { 3756 if (count < maxsize) { 3757 if (marked[i]) 3758 continue; 3759 values[count] = i; 3760 marked[i] = 1; 3761 count++; 3762 } 3763 } 3764 min = INT_MAX; 3765 } else 3766 return 0; 3767 list = end + 1; 3768 } while (*end != '\0'); 3769 3770 return count; 3771 } 3772 3773 void 3774 parse_fwd_portlist(const char *portlist) 3775 { 3776 unsigned int portcount; 3777 unsigned int portindex[RTE_MAX_ETHPORTS]; 3778 unsigned int i, valid_port_count = 0; 3779 3780 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3781 if (!portcount) 3782 
void
parse_fwd_portlist(const char *portlist)
{
	unsigned int portcount;
	unsigned int portindex[RTE_MAX_ETHPORTS];
	unsigned int i, valid_port_count = 0;

	portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
	if (!portcount)
		rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");

	/*
	 * Here we verify the validity of the ports
	 * and thereby calculate the total number of
	 * valid ports
	 */
	for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
		if (rte_eth_dev_is_valid_port(portindex[i])) {
			portindex[valid_port_count] = portindex[i];
			valid_port_count++;
		}
	}

	set_fwd_ports_list(portindex, valid_port_count);
}

void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int portlist[64];
	unsigned int nb_pt;
	unsigned int i;

	if (portmask == 0) {
		fprintf(stderr, "Invalid empty mask of ports\n");
		return;
	}
	nb_pt = 0;
	RTE_ETH_FOREACH_DEV(i) {
		if (!((uint64_t)(1ULL << i) & portmask))
			continue;
		portlist[nb_pt++] = i;
	}
	set_fwd_ports_list(portlist, nb_pt);
}

void
set_fwd_ports_number(uint16_t nb_pt)
{
	if (nb_pt > nb_cfg_ports) {
		fprintf(stderr,
			"nb fwd ports %u > %u (number of configured ports) - ignored\n",
			(unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
		return;
	}
	nb_fwd_ports = (portid_t) nb_pt;
	printf("Number of forwarding ports set to %u\n",
	       (unsigned int) nb_fwd_ports);
}

int
port_is_forwarding(portid_t port_id)
{
	unsigned int i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return -1;

	for (i = 0; i < nb_fwd_ports; i++) {
		if (fwd_ports_ids[i] == port_id)
			return 1;
	}

	return 0;
}

void
set_nb_pkt_per_burst(uint16_t nb)
{
	if (nb > MAX_PKT_BURST) {
		fprintf(stderr,
			"nb pkt per burst: %u > %u (maximum packets per burst) - ignored\n",
			(unsigned int) nb, (unsigned int) MAX_PKT_BURST);
		return;
	}
	nb_pkt_per_burst = nb;
	printf("Number of packets per burst set to %u\n",
	       (unsigned int) nb_pkt_per_burst);
}

static const char *
tx_split_get_name(enum tx_pkt_split split)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (tx_split_name[i].split == split)
			return tx_split_name[i].name;
	}
	return NULL;
}

void
set_tx_pkt_split(const char *name)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (strcmp(tx_split_name[i].name, name) == 0) {
			tx_pkt_split = tx_split_name[i].split;
			return;
		}
	}
	fprintf(stderr, "unknown value: \"%s\"\n", name);
}

int
parse_fec_mode(const char *name, uint32_t *fec_capa)
{
	uint8_t i;

	for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
		if (strcmp(fec_mode_name[i].name, name) == 0) {
			*fec_capa =
				RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
			return 0;
		}
	}
	return -1;
}

void
show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
{
	unsigned int i, j;

	printf("FEC capabilities:\n");

	for (i = 0; i < num; i++) {
		printf("%s : ",
		       rte_eth_link_speed_to_str(speed_fec_capa[i].speed));

		for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
			if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
			    speed_fec_capa[i].capa)
				printf("%s ", fec_mode_name[j].name);
		}
		printf("\n");
	}
}

3932 printf("Segment offsets: "); 3933 for (i = 0; i != n - 1; i++) 3934 printf("%hu,", rx_pkt_seg_offsets[i]); 3935 printf("%hu\n", rx_pkt_seg_lengths[i]); 3936 } 3937 } 3938 3939 void 3940 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 3941 { 3942 unsigned int i; 3943 3944 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 3945 printf("nb segments per RX packets=%u >= " 3946 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 3947 return; 3948 } 3949 3950 /* 3951 * No extra check here, the segment length will be checked by PMD 3952 * in the extended queue setup. 3953 */ 3954 for (i = 0; i < nb_offs; i++) { 3955 if (seg_offsets[i] >= UINT16_MAX) { 3956 printf("offset[%u]=%u > UINT16_MAX - give up\n", 3957 i, seg_offsets[i]); 3958 return; 3959 } 3960 } 3961 3962 for (i = 0; i < nb_offs; i++) 3963 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 3964 3965 rx_pkt_nb_offs = (uint8_t) nb_offs; 3966 } 3967 3968 void 3969 show_rx_pkt_segments(void) 3970 { 3971 uint32_t i, n; 3972 3973 n = rx_pkt_nb_segs; 3974 printf("Number of segments: %u\n", n); 3975 if (n) { 3976 printf("Segment sizes: "); 3977 for (i = 0; i != n - 1; i++) 3978 printf("%hu,", rx_pkt_seg_lengths[i]); 3979 printf("%hu\n", rx_pkt_seg_lengths[i]); 3980 } 3981 } 3982 3983 void 3984 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3985 { 3986 unsigned int i; 3987 3988 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 3989 printf("nb segments per RX packets=%u >= " 3990 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 3991 return; 3992 } 3993 3994 /* 3995 * No extra check here, the segment length will be checked by PMD 3996 * in the extended queue setup. 3997 */ 3998 for (i = 0; i < nb_segs; i++) { 3999 if (seg_lengths[i] >= UINT16_MAX) { 4000 printf("length[%u]=%u > UINT16_MAX - give up\n", 4001 i, seg_lengths[i]); 4002 return; 4003 } 4004 } 4005 4006 for (i = 0; i < nb_segs; i++) 4007 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4008 4009 rx_pkt_nb_segs = (uint8_t) nb_segs; 4010 } 4011 4012 void 4013 show_tx_pkt_segments(void) 4014 { 4015 uint32_t i, n; 4016 const char *split; 4017 4018 n = tx_pkt_nb_segs; 4019 split = tx_split_get_name(tx_pkt_split); 4020 4021 printf("Number of segments: %u\n", n); 4022 printf("Segment sizes: "); 4023 for (i = 0; i != n - 1; i++) 4024 printf("%hu,", tx_pkt_seg_lengths[i]); 4025 printf("%hu\n", tx_pkt_seg_lengths[i]); 4026 printf("Split packet: %s\n", split); 4027 } 4028 4029 static bool 4030 nb_segs_is_invalid(unsigned int nb_segs) 4031 { 4032 uint16_t ring_size; 4033 uint16_t queue_id; 4034 uint16_t port_id; 4035 int ret; 4036 4037 RTE_ETH_FOREACH_DEV(port_id) { 4038 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 4039 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 4040 if (ret) { 4041 /* Port may not be initialized yet, can't say 4042 * the port is invalid in this stage. 4043 */ 4044 continue; 4045 } 4046 if (ring_size < nb_segs) { 4047 printf("nb segments per TX packets=%u >= TX " 4048 "queue(%u) ring_size=%u - txpkts ignored\n", 4049 nb_segs, queue_id, ring_size); 4050 return true; 4051 } 4052 } 4053 } 4054 4055 return false; 4056 } 4057 4058 void 4059 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4060 { 4061 uint16_t tx_pkt_len; 4062 unsigned int i; 4063 4064 /* 4065 * For single segment settings failed check is ignored. 4066 * It is a very basic capability to send the single segment 4067 * packets, suppose it is always supported. 
4069 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 4070 fprintf(stderr, 4071 "Tx segment count (%u) is not supported - txpkts ignored\n", 4072 nb_segs); 4073 return; 4074 } 4075 4076 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 4077 fprintf(stderr, 4078 "Tx segment count (%u) is bigger than the max number of segments (%u)\n", 4079 nb_segs, RTE_MAX_SEGS_PER_PKT); 4080 return; 4081 } 4082 4083 /* 4084 * Check that each segment length does not exceed 4085 * the mbuf data size. 4086 * Check also that the total packet length is greater than or equal to 4087 * the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 4088 * 20 + 8). 4089 */ 4090 tx_pkt_len = 0; 4091 for (i = 0; i < nb_segs; i++) { 4092 if (seg_lengths[i] > mbuf_data_size[0]) { 4093 fprintf(stderr, 4094 "length[%u]=%u > mbuf_data_size=%u - give up\n", 4095 i, seg_lengths[i], mbuf_data_size[0]); 4096 return; 4097 } 4098 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 4099 } 4100 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 4101 fprintf(stderr, "total packet length=%u < %d - give up\n", 4102 (unsigned) tx_pkt_len, 4103 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 4104 return; 4105 } 4106 4107 for (i = 0; i < nb_segs; i++) 4108 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4109 4110 tx_pkt_length = tx_pkt_len; 4111 tx_pkt_nb_segs = (uint8_t) nb_segs; 4112 } 4113 4114 void 4115 show_tx_pkt_times(void) 4116 { 4117 printf("Interburst gap: %u\n", tx_pkt_times_inter); 4118 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 4119 } 4120 4121 void 4122 set_tx_pkt_times(unsigned int *tx_times) 4123 { 4124 tx_pkt_times_inter = tx_times[0]; 4125 tx_pkt_times_intra = tx_times[1]; 4126 } 4127 4128 #ifdef RTE_LIB_GRO 4129 void 4130 setup_gro(const char *onoff, portid_t port_id) 4131 { 4132 if (!rte_eth_dev_is_valid_port(port_id)) { 4133 fprintf(stderr, "Invalid port id %u\n", port_id); 4134 return; 4135 } 4136 if (test_done == 0) { 4137 fprintf(stderr, 4138 "Please stop forwarding before enabling/disabling GRO\n"); 4139 return; 4140 } 4141 if (strcmp(onoff, "on") == 0) { 4142 if (gro_ports[port_id].enable != 0) { 4143 fprintf(stderr, 4144 "GRO is already enabled on port %u. Please disable it first\n", 4145 port_id); 4146 return; 4147 } 4148 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4149 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 4150 gro_ports[port_id].param.max_flow_num = 4151 GRO_DEFAULT_FLOW_NUM; 4152 gro_ports[port_id].param.max_item_per_flow = 4153 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 4154 } 4155 gro_ports[port_id].enable = 1; 4156 } else { 4157 if (gro_ports[port_id].enable == 0) { 4158 fprintf(stderr, "GRO is already disabled on port %u\n", port_id); 4159 return; 4160 } 4161 gro_ports[port_id].enable = 0; 4162 } 4163 } 4164 4165 void 4166 setup_gro_flush_cycles(uint8_t cycles) 4167 { 4168 if (test_done == 0) { 4169 fprintf(stderr, 4170 "Please stop forwarding before changing the GRO flush interval.\n"); 4171 return; 4172 } 4173 4174 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 4175 GRO_DEFAULT_FLUSH_CYCLES) { 4176 fprintf(stderr, 4177 "The flushing cycle must be in the range of 1 to %u. Reverting to the default value %u.\n", 4178 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 4179 cycles = GRO_DEFAULT_FLUSH_CYCLES; 4180 }
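/* Illustrative note: the flush cycle bounds how long the forwarding engine lets the GRO library hold packets for reassembly; e.g. cycles == 2 means reassembled packets are flushed to the Tx path roughly every second Rx burst. */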
4181 4182 gro_flush_cycles = cycles; 4183 } 4184 4185 void 4186 show_gro(portid_t port_id) 4187 { 4188 struct rte_gro_param *param; 4189 uint32_t max_pkts_num; 4190 4191 if (!rte_eth_dev_is_valid_port(port_id)) { 4192 fprintf(stderr, "Invalid port id %u.\n", port_id); 4193 return; 4194 } 4195 4196 param = &gro_ports[port_id].param; 4197 if (gro_ports[port_id].enable) { 4198 printf("GRO type: TCP/IPv4\n"); 4199 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4200 max_pkts_num = param->max_flow_num * 4201 param->max_item_per_flow; 4202 } else 4203 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 4204 printf("Max number of packets to perform GRO: %u\n", 4205 max_pkts_num); 4206 printf("Flushing cycles: %u\n", gro_flush_cycles); 4207 } else 4208 printf("GRO is not enabled on port %u.\n", port_id); 4209 } 4210 #endif /* RTE_LIB_GRO */ 4211 4212 #ifdef RTE_LIB_GSO 4213 void 4214 setup_gso(const char *mode, portid_t port_id) 4215 { 4216 if (!rte_eth_dev_is_valid_port(port_id)) { 4217 fprintf(stderr, "Invalid port id %u\n", port_id); 4218 return; 4219 } 4220 if (strcmp(mode, "on") == 0) { 4221 if (test_done == 0) { 4222 fprintf(stderr, 4223 "Please stop forwarding before enabling GSO\n"); 4224 return; 4225 } 4226 gso_ports[port_id].enable = 1; 4227 } else if (strcmp(mode, "off") == 0) { 4228 if (test_done == 0) { 4229 fprintf(stderr, 4230 "Please stop forwarding before disabling GSO\n"); 4231 return; 4232 } 4233 gso_ports[port_id].enable = 0; 4234 } 4235 } 4236 #endif /* RTE_LIB_GSO */ 4237 4238 char* 4239 list_pkt_forwarding_modes(void) 4240 { 4241 static char fwd_modes[128] = ""; 4242 const char *separator = "|"; 4243 struct fwd_engine *fwd_eng; 4244 unsigned i = 0; 4245 4246 if (strlen(fwd_modes) == 0) { 4247 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4248 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4249 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4250 strncat(fwd_modes, separator, 4251 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4252 } 4253 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4254 } 4255 4256 return fwd_modes; 4257 } 4258 4259 char* 4260 list_pkt_forwarding_retry_modes(void) 4261 { 4262 static char fwd_modes[128] = ""; 4263 const char *separator = "|"; 4264 struct fwd_engine *fwd_eng; 4265 unsigned i = 0; 4266 4267 if (strlen(fwd_modes) == 0) { 4268 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4269 if (fwd_eng == &rx_only_engine) 4270 continue; 4271 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4272 sizeof(fwd_modes) - 4273 strlen(fwd_modes) - 1); 4274 strncat(fwd_modes, separator, 4275 sizeof(fwd_modes) - 4276 strlen(fwd_modes) - 1); 4277 } 4278 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4279 } 4280 4281 return fwd_modes; 4282 } 4283 4284 void 4285 set_pkt_forwarding_mode(const char *fwd_mode_name) 4286 { 4287 struct fwd_engine *fwd_eng; 4288 unsigned i; 4289 4290 i = 0; 4291 while ((fwd_eng = fwd_engines[i]) != NULL) { 4292 if (strcmp(fwd_eng->fwd_mode_name, fwd_mode_name) == 0) { 4293 printf("Set %s packet forwarding mode%s\n", 4294 fwd_mode_name, 4295 retry_enabled == 0 ? "" : " with retry"); 4296 cur_fwd_eng = fwd_eng; 4297 return; 4298 } 4299 i++; 4300 } 4301 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 4302 } 4303 4304 void 4305 add_rx_dump_callbacks(portid_t portid) 4306 { 4307 struct rte_eth_dev_info dev_info; 4308 uint16_t queue; 4309 int ret; 4310 4311 if (port_id_is_invalid(portid, ENABLED_WARN)) 4312 return; 4313 4314 ret = eth_dev_info_get_print_err(portid, &dev_info); 4315 if (ret != 0) 4316 return; 4317 4318 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4319 if (!ports[portid].rx_dump_cb[queue]) 4320 ports[portid].rx_dump_cb[queue] = 4321 rte_eth_add_rx_callback(portid, queue, 4322 dump_rx_pkts, NULL); 4323 } 4324 4325 void 4326 add_tx_dump_callbacks(portid_t portid) 4327 { 4328 struct rte_eth_dev_info dev_info; 4329 uint16_t queue; 4330 int ret; 4331 4332 if (port_id_is_invalid(portid, ENABLED_WARN)) 4333 return; 4334 4335 ret = eth_dev_info_get_print_err(portid, &dev_info); 4336 if (ret != 0) 4337 return; 4338 4339 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4340 if (!ports[portid].tx_dump_cb[queue]) 4341 ports[portid].tx_dump_cb[queue] = 4342 rte_eth_add_tx_callback(portid, queue, 4343 dump_tx_pkts, NULL); 4344 } 4345 4346 void 4347 remove_rx_dump_callbacks(portid_t portid) 4348 { 4349 struct rte_eth_dev_info dev_info; 4350 uint16_t queue; 4351 int ret; 4352 4353 if (port_id_is_invalid(portid, ENABLED_WARN)) 4354 return; 4355 4356 ret = eth_dev_info_get_print_err(portid, &dev_info); 4357 if (ret != 0) 4358 return; 4359 4360 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4361 if (ports[portid].rx_dump_cb[queue]) { 4362 rte_eth_remove_rx_callback(portid, queue, 4363 ports[portid].rx_dump_cb[queue]); 4364 ports[portid].rx_dump_cb[queue] = NULL; 4365 } 4366 } 4367 4368 void 4369 remove_tx_dump_callbacks(portid_t portid) 4370 { 4371 struct rte_eth_dev_info dev_info; 4372 uint16_t queue; 4373 int ret; 4374 4375 if (port_id_is_invalid(portid, ENABLED_WARN)) 4376 return; 4377 4378 ret = eth_dev_info_get_print_err(portid, &dev_info); 4379 if (ret != 0) 4380 return; 4381 4382 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4383 if (ports[portid].tx_dump_cb[queue]) { 4384 rte_eth_remove_tx_callback(portid, queue, 4385 ports[portid].tx_dump_cb[queue]); 4386 ports[portid].tx_dump_cb[queue] = NULL; 4387 } 4388 } 4389 4390 void 4391 configure_rxtx_dump_callbacks(uint16_t verbose) 4392 { 4393 portid_t portid; 4394 4395 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4396 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4397 return; 4398 #endif 4399 4400 RTE_ETH_FOREACH_DEV(portid) 4401 { 4402 if (verbose == 1 || verbose > 2) 4403 add_rx_dump_callbacks(portid); 4404 else 4405 remove_rx_dump_callbacks(portid); 4406 if (verbose >= 2) 4407 add_tx_dump_callbacks(portid); 4408 else 4409 remove_tx_dump_callbacks(portid); 4410 } 4411 } 4412 4413 void 4414 set_verbose_level(uint16_t vb_level) 4415 { 4416 printf("Change verbose level from %u to %u\n", 4417 (unsigned int) verbose_level, (unsigned int) vb_level); 4418 verbose_level = vb_level; 4419 configure_rxtx_dump_callbacks(verbose_level); 4420 } 4421
4422 void 4423 vlan_extend_set(portid_t port_id, int on) 4424 { 4425 int diag; 4426 int vlan_offload; 4427 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4428 4429 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4430 return; 4431 4432 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4433 4434 if (on) { 4435 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4436 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4437 } else { 4438 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 4439 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4440 } 4441 4442 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4443 if (diag < 0) { 4444 fprintf(stderr, 4445 "%s(port_id=%d, on=%d) failed diag=%d\n", 4446 __func__, port_id, on, diag); 4447 return; 4448 } 4449 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4450 } 4451 4452 void 4453 rx_vlan_strip_set(portid_t port_id, int on) 4454 { 4455 int diag; 4456 int vlan_offload; 4457 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4458 4459 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4460 return; 4461 4462 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4463 4464 if (on) { 4465 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4466 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4467 } else { 4468 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 4469 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4470 } 4471 4472 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4473 if (diag < 0) { 4474 fprintf(stderr, 4475 "%s(port_id=%d, on=%d) failed diag=%d\n", 4476 __func__, port_id, on, diag); 4477 return; 4478 } 4479 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4480 } 4481 4482 void 4483 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 4484 { 4485 int diag; 4486 4487 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4488 return; 4489 4490 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 4491 if (diag < 0) 4492 fprintf(stderr, 4493 "%s(port_id=%d, queue_id=%d, on=%d) failed diag=%d\n", 4494 __func__, port_id, queue_id, on, diag); 4495 } 4496 4497 void 4498 rx_vlan_filter_set(portid_t port_id, int on) 4499 { 4500 int diag; 4501 int vlan_offload; 4502 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4503 4504 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4505 return; 4506 4507 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4508 4509 if (on) { 4510 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4511 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4512 } else { 4513 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 4514 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4515 } 4516 4517 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4518 if (diag < 0) { 4519 fprintf(stderr, 4520 "%s(port_id=%d, on=%d) failed diag=%d\n", 4521 __func__, port_id, on, diag); 4522 return; 4523 } 4524 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4525 } 4526 4527 void 4528 rx_vlan_qinq_strip_set(portid_t port_id, int on) 4529 { 4530 int diag; 4531 int vlan_offload; 4532 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4533 4534 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4535 return; 4536 4537 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4538 4539 if (on) { 4540 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4541 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4542 } else { 4543 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 4544 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4545 } 4546 4547 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4548 if (diag < 0) { 4549 fprintf(stderr, "%s(port_id=%d, on=%d) failed diag=%d\n", 4550 __func__, port_id, on, diag); 4551 return; 4552 } 4553 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4554 } 4555
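/* Add (on != 0) or remove (on == 0) one VLAN ID in the Rx VLAN filter table of a port. Returns 0 on success, 1 on an invalid port or VLAN ID, -1 on driver failure; e.g. rx_vft_set(0, 100, 1) (illustrative call) asks port 0 to accept VLAN ID 100. */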
4556 int 4557 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 4558 { 4559 int diag; 4560 4561 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4562 return 1; 4563 if (vlan_id_is_invalid(vlan_id)) 4564 return 1; 4565 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 4566 if (diag == 0) 4567 return 0; 4568 fprintf(stderr, 4569 "rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed diag=%d\n", 4570 port_id, vlan_id, on, diag); 4571 return -1; 4572 } 4573 4574 void 4575 rx_vlan_all_filter_set(portid_t port_id, int on) 4576 { 4577 uint16_t vlan_id; 4578 4579 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4580 return; 4581 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 4582 if (rx_vft_set(port_id, vlan_id, on)) 4583 break; 4584 } 4585 } 4586 4587 void 4588 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 4589 { 4590 int diag; 4591 4592 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4593 return; 4594 4595 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 4596 if (diag == 0) 4597 return; 4598 4599 fprintf(stderr, 4600 "%s(port_id=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 4601 __func__, port_id, vlan_type, tp_id, diag); 4602 } 4603 4604 void 4605 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 4606 { 4607 struct rte_eth_dev_info dev_info; 4608 int ret; 4609 4610 if (vlan_id_is_invalid(vlan_id)) 4611 return; 4612 4613 if (ports[port_id].dev_conf.txmode.offloads & 4614 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 4615 fprintf(stderr, "Error: cannot set a single VLAN while QinQ insert is enabled\n"); 4616 return; 4617 } 4618 4619 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4620 if (ret != 0) 4621 return; 4622 4623 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 4624 fprintf(stderr, 4625 "Error: VLAN insert is not supported by port %d\n", 4626 port_id); 4627 return; 4628 } 4629 4630 tx_vlan_reset(port_id); 4631 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 4632 ports[port_id].tx_vlan_id = vlan_id; 4633 } 4634 4635 void 4636 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 4637 { 4638 struct rte_eth_dev_info dev_info; 4639 int ret; 4640 4641 if (vlan_id_is_invalid(vlan_id)) 4642 return; 4643 if (vlan_id_is_invalid(vlan_id_outer)) 4644 return; 4645 4646 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4647 if (ret != 0) 4648 return; 4649 4650 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 4651 fprintf(stderr, 4652 "Error: QinQ insert is not supported by port %d\n", 4653 port_id); 4654 return; 4655 } 4656 4657 tx_vlan_reset(port_id); 4658 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 4659 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 4660 ports[port_id].tx_vlan_id = vlan_id; 4661 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 4662 } 4663 4664 void 4665 tx_vlan_reset(portid_t port_id) 4666 { 4667 ports[port_id].dev_conf.txmode.offloads &= 4668 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 4669 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 4670 ports[port_id].tx_vlan_id = 0; 4671 ports[port_id].tx_vlan_id_outer = 0; 4672 } 4673 4674 void 4675 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 4676 { 4677 int diag; 4678 if (port_id_is_invalid(port_id, ENABLED_WARN)) return; 4679 diag = rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 4680 if (diag < 0) fprintf(stderr, "%s(port_id=%d, vlan_id=%d, on=%d) failed diag=%d\n", __func__, port_id, vlan_id, on, diag); 4681 } 4682 4683 void 4684 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 4685 { 4686 int ret; 4687 4688 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4689 return; 4690 4691 if (is_rx ?
(rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 4692 return; 4693 4694 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 4695 fprintf(stderr, "map_value not in required range 0..%d\n", 4696 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 4697 return; 4698 } 4699 4700 if (!is_rx) { /* tx */ 4701 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 4702 map_value); 4703 if (ret) { 4704 fprintf(stderr, 4705 "failed to set tx queue stats mapping.\n"); 4706 return; 4707 } 4708 } else { /* rx */ 4709 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 4710 map_value); 4711 if (ret) { 4712 fprintf(stderr, 4713 "failed to set rx queue stats mapping.\n"); 4714 return; 4715 } 4716 } 4717 } 4718 4719 void 4720 set_xstats_hide_zero(uint8_t on_off) 4721 { 4722 xstats_hide_zero = on_off; 4723 } 4724 4725 void 4726 set_record_core_cycles(uint8_t on_off) 4727 { 4728 record_core_cycles = on_off; 4729 } 4730 4731 void 4732 set_record_burst_stats(uint8_t on_off) 4733 { 4734 record_burst_stats = on_off; 4735 } 4736 4737 static char* 4738 flowtype_to_str(uint16_t flow_type) 4739 { 4740 struct flow_type_info { 4741 char str[32]; 4742 uint16_t ftype; 4743 }; 4744 4745 uint8_t i; 4746 static struct flow_type_info flowtype_str_table[] = { 4747 {"raw", RTE_ETH_FLOW_RAW}, 4748 {"ipv4", RTE_ETH_FLOW_IPV4}, 4749 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4750 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4751 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4752 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4753 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4754 {"ipv6", RTE_ETH_FLOW_IPV6}, 4755 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4756 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4757 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4758 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4759 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4760 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4761 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 4762 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 4763 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 4764 {"port", RTE_ETH_FLOW_PORT}, 4765 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4766 {"geneve", RTE_ETH_FLOW_GENEVE}, 4767 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4768 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4769 {"gtpu", RTE_ETH_FLOW_GTPU}, 4770 }; 4771 4772 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4773 if (flowtype_str_table[i].ftype == flow_type) 4774 return flowtype_str_table[i].str; 4775 } 4776 4777 return NULL; 4778 } 4779 4780 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 4781 4782 static inline void 4783 print_fdir_mask(struct rte_eth_fdir_masks *mask) 4784 { 4785 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 4786 4787 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4788 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 4789 " tunnel_id: 0x%08x", 4790 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 4791 rte_be_to_cpu_32(mask->tunnel_id_mask)); 4792 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 4793 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 4794 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 4795 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 4796 4797 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 4798 rte_be_to_cpu_16(mask->src_port_mask), 4799 rte_be_to_cpu_16(mask->dst_port_mask)); 4800 4801 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4802 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 4803 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 4804 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 4805 
rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 4806 4807 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4808 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 4809 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 4810 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 4811 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 4812 } 4813 4814 printf("\n"); 4815 } 4816 4817 static inline void 4818 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4819 { 4820 struct rte_eth_flex_payload_cfg *cfg; 4821 uint32_t i, j; 4822 4823 for (i = 0; i < flex_conf->nb_payloads; i++) { 4824 cfg = &flex_conf->flex_set[i]; 4825 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 4826 printf("\n RAW: "); 4827 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 4828 printf("\n L2_PAYLOAD: "); 4829 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 4830 printf("\n L3_PAYLOAD: "); 4831 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 4832 printf("\n L4_PAYLOAD: "); 4833 else 4834 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 4835 for (j = 0; j < num; j++) 4836 printf(" %-5u", cfg->src_offset[j]); 4837 } 4838 printf("\n"); 4839 } 4840 4841 static inline void 4842 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4843 { 4844 struct rte_eth_fdir_flex_mask *mask; 4845 uint32_t i, j; 4846 char *p; 4847 4848 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4849 mask = &flex_conf->flex_mask[i]; 4850 p = flowtype_to_str(mask->flow_type); 4851 printf("\n %s:\t", p ? p : "unknown"); 4852 for (j = 0; j < num; j++) 4853 printf(" %02x", mask->mask[j]); 4854 } 4855 printf("\n"); 4856 } 4857 4858 static inline void 4859 print_fdir_flow_type(uint32_t flow_types_mask) 4860 { 4861 int i; 4862 char *p; 4863 4864 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4865 if (!(flow_types_mask & (1 << i))) 4866 continue; 4867 p = flowtype_to_str(i); 4868 if (p) 4869 printf(" %s", p); 4870 else 4871 printf(" unknown"); 4872 } 4873 printf("\n"); 4874 } 4875 4876 static int 4877 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4878 struct rte_eth_fdir_stats *fdir_stat) 4879 { 4880 int ret = -ENOTSUP; 4881 4882 #ifdef RTE_NET_I40E 4883 if (ret == -ENOTSUP) { 4884 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4885 if (!ret) 4886 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4887 } 4888 #endif 4889 #ifdef RTE_NET_IXGBE 4890 if (ret == -ENOTSUP) { 4891 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4892 if (!ret) 4893 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4894 } 4895 #endif 4896 switch (ret) { 4897 case 0: 4898 break; 4899 case -ENOTSUP: 4900 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 4901 port_id); 4902 break; 4903 default: 4904 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 4905 break; 4906 } 4907 return ret; 4908 } 4909 4910 void 4911 fdir_get_infos(portid_t port_id) 4912 { 4913 struct rte_eth_fdir_stats fdir_stat; 4914 struct rte_eth_fdir_info fdir_info; 4915 4916 static const char *fdir_stats_border = "########################"; 4917 4918 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4919 return; 4920 4921 memset(&fdir_info, 0, sizeof(fdir_info)); 4922 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4923 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4924 return; 4925 4926 printf("\n %s FDIR infos for port %-2d %s\n", 4927 fdir_stats_border, port_id, fdir_stats_border); 4928 printf(" MODE: "); 4929 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4930 printf(" PERFECT\n"); 4931 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4932 
printf(" PERFECT-MAC-VLAN\n"); 4933 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4934 printf(" PERFECT-TUNNEL\n"); 4935 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4936 printf(" SIGNATURE\n"); 4937 else 4938 printf(" DISABLE\n"); 4939 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4940 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 4941 printf(" SUPPORTED FLOW TYPE: "); 4942 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 4943 } 4944 printf(" FLEX PAYLOAD INFO:\n"); 4945 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 4946 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 4947 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 4948 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 4949 fdir_info.flex_payload_unit, 4950 fdir_info.max_flex_payload_segment_num, 4951 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 4952 printf(" MASK: "); 4953 print_fdir_mask(&fdir_info.mask); 4954 if (fdir_info.flex_conf.nb_payloads > 0) { 4955 printf(" FLEX PAYLOAD SRC OFFSET:"); 4956 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4957 } 4958 if (fdir_info.flex_conf.nb_flexmasks > 0) { 4959 printf(" FLEX MASK CFG:"); 4960 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4961 } 4962 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 4963 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 4964 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 4965 fdir_info.guarant_spc, fdir_info.best_spc); 4966 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 4967 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 4968 " add: %-10"PRIu64" remove: %"PRIu64"\n" 4969 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 4970 fdir_stat.collision, fdir_stat.free, 4971 fdir_stat.maxhash, fdir_stat.maxlen, 4972 fdir_stat.add, fdir_stat.remove, 4973 fdir_stat.f_add, fdir_stat.f_remove); 4974 printf(" %s############################%s\n", 4975 fdir_stats_border, fdir_stats_border); 4976 } 4977 4978 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 4979 4980 void 4981 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 4982 { 4983 struct rte_port *port; 4984 struct rte_eth_fdir_flex_conf *flex_conf; 4985 int i, idx = 0; 4986 4987 port = &ports[port_id]; 4988 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4989 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 4990 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 4991 idx = i; 4992 break; 4993 } 4994 } 4995 if (i >= RTE_ETH_FLOW_MAX) { 4996 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 4997 idx = flex_conf->nb_flexmasks; 4998 flex_conf->nb_flexmasks++; 4999 } else { 5000 fprintf(stderr, 5001 "The flex mask table is full. 
Cannot set flex mask for flow_type(%u).\n", 5002 cfg->flow_type); 5003 return; 5004 } 5005 } 5006 rte_memcpy(&flex_conf->flex_mask[idx], 5007 cfg, 5008 sizeof(struct rte_eth_fdir_flex_mask)); 5009 } 5010 5011 void 5012 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 5013 { 5014 struct rte_port *port; 5015 struct rte_eth_fdir_flex_conf *flex_conf; 5016 int i, idx = 0; 5017 5018 port = &ports[port_id]; 5019 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5020 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 5021 if (cfg->type == flex_conf->flex_set[i].type) { 5022 idx = i; 5023 break; 5024 } 5025 } 5026 if (i >= RTE_ETH_PAYLOAD_MAX) { 5027 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 5028 idx = flex_conf->nb_payloads; 5029 flex_conf->nb_payloads++; 5030 } else { 5031 fprintf(stderr, 5032 "The flex payload table is full. Cannot set flex payload for type(%u).\n", 5033 cfg->type); 5034 return; 5035 } 5036 } 5037 rte_memcpy(&flex_conf->flex_set[idx], 5038 cfg, 5039 sizeof(struct rte_eth_flex_payload_cfg)); 5040 } 5041 5042 5043 void 5044 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 5045 { 5046 #ifdef RTE_NET_IXGBE 5047 int diag; 5048 5049 if (is_rx) 5050 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 5051 else 5052 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 5053 5054 if (diag == 0) 5055 return; 5056 fprintf(stderr, 5057 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 5058 is_rx ? "rx" : "tx", port_id, diag); 5059 return; 5060 #endif 5061 fprintf(stderr, "VF %s setting not supported for port %d\n", 5062 is_rx ? "Rx" : "Tx", port_id); 5063 RTE_SET_USED(vf); 5064 RTE_SET_USED(on); 5065 } 5066 5067 int 5068 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 5069 { 5070 int diag; 5071 struct rte_eth_link link; 5072 int ret; 5073 5074 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5075 return 1; 5076 ret = eth_link_get_nowait_print_err(port_id, &link); 5077 if (ret < 0) 5078 return 1; 5079 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 5080 rate > link.link_speed) { 5081 fprintf(stderr, 5082 "Invalid rate value %u: bigger than link speed %u\n", 5083 rate, link.link_speed); 5084 return 1; 5085 } 5086 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 5087 if (diag == 0) 5088 return diag; 5089 fprintf(stderr, 5090 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 5091 port_id, diag); 5092 return diag; 5093 } 5094 5095 int 5096 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 5097 { 5098 int diag = -ENOTSUP; 5099 5100 RTE_SET_USED(vf); 5101 RTE_SET_USED(rate); 5102 RTE_SET_USED(q_msk); 5103 5104 #ifdef RTE_NET_IXGBE 5105 if (diag == -ENOTSUP) 5106 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 5107 q_msk); 5108 #endif 5109 #ifdef RTE_NET_BNXT 5110 if (diag == -ENOTSUP) 5111 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 5112 #endif 5113 if (diag == 0) 5114 return diag; 5115 5116 fprintf(stderr, 5117 "%s for port_id=%d failed diag=%d\n", 5118 __func__, port_id, diag); 5119 return diag; 5120 } 5121 5122 /* 5123 * Functions to manage the set of filtered Multicast MAC addresses. 5124 * 5125 * A pool of filtered multicast MAC addresses is associated with each port. 5126 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
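* For example, with MCAST_POOL_INC equal to 32, adding the 33rd address triggers a realloc() from 32 to 64 entries, and removing the entry at index 5 of a 10-entry pool shifts entries 6..9 down by one slot.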
5127 * The address of the pool and the number of valid multicast MAC addresses 5128 * recorded in the pool are stored in the fields "mc_addr_pool" and 5129 * "mc_addr_nb" of the "rte_port" data structure. 5130 * 5131 * The PMD API function "rte_eth_dev_set_mc_addr_list" requires that it be 5132 * supplied a contiguous array of multicast MAC addresses. 5133 * To comply with this constraint, the set of multicast addresses recorded 5134 * in the pool is kept compacted at the beginning of the pool. 5135 * Hence, when a multicast address is removed from the pool, all following 5136 * addresses, if any, are copied back to keep the set contiguous. 5137 */ 5138 #define MCAST_POOL_INC 32 5139 5140 static int 5141 mcast_addr_pool_extend(struct rte_port *port) 5142 { 5143 struct rte_ether_addr *mc_pool; 5144 size_t mc_pool_size; 5145 5146 /* 5147 * If a free entry is available at the end of the pool, just 5148 * increment the number of recorded multicast addresses. 5149 */ 5150 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5151 port->mc_addr_nb++; 5152 return 0; 5153 } 5154 5155 /* 5156 * [re]allocate a pool with MCAST_POOL_INC more entries. 5157 * The previous test guarantees that port->mc_addr_nb is a multiple 5158 * of MCAST_POOL_INC. 5159 */ 5160 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 5161 MCAST_POOL_INC); 5162 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 5163 mc_pool_size); 5164 if (mc_pool == NULL) { 5165 fprintf(stderr, 5166 "allocation of pool of %u multicast addresses failed\n", 5167 port->mc_addr_nb + MCAST_POOL_INC); 5168 return -ENOMEM; 5169 } 5170 5171 port->mc_addr_pool = mc_pool; 5172 port->mc_addr_nb++; 5173 return 0; 5174 } 5175 5176 5177 static void 5178 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 5179 { 5180 if (mcast_addr_pool_extend(port) != 0) 5181 return; 5182 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 5183 } 5184 5185 static void 5186 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 5187 { 5188 port->mc_addr_nb--; 5189 if (addr_idx == port->mc_addr_nb) { 5190 /* No need to recompact the set of multicast addresses. */ 5191 if (port->mc_addr_nb == 0) { 5192 /* free the pool of multicast addresses. */ 5193 free(port->mc_addr_pool); 5194 port->mc_addr_pool = NULL; 5195 } 5196 return; 5197 } 5198 memmove(&port->mc_addr_pool[addr_idx], 5199 &port->mc_addr_pool[addr_idx + 1], 5200 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 5201 } 5202 5203 static int 5204 eth_port_multicast_addr_list_set(portid_t port_id) 5205 { 5206 struct rte_port *port; 5207 int diag; 5208 5209 port = &ports[port_id]; 5210 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 5211 port->mc_addr_nb); 5212 if (diag < 0) 5213 fprintf(stderr, 5214 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 5215 port_id, port->mc_addr_nb, diag); 5216 5217 return diag; 5218 } 5219 5220 void 5221 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 5222 { 5223 struct rte_port *port; 5224 uint32_t i; 5225 5226 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5227 return; 5228 5229 port = &ports[port_id]; 5230 5231 /* 5232 * Check that the added multicast MAC address is not already recorded 5233 * in the pool of multicast addresses.
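* Rejecting duplicates here also keeps the array handed to rte_eth_dev_set_mc_addr_list() free of repeated entries.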
5234 */ 5235 for (i = 0; i < port->mc_addr_nb; i++) { 5236 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 5237 fprintf(stderr, 5238 "multicast address already filtered by port\n"); 5239 return; 5240 } 5241 } 5242 5243 mcast_addr_pool_append(port, mc_addr); 5244 if (eth_port_multicast_addr_list_set(port_id) < 0) 5245 /* Rollback on failure, remove the address from the pool */ 5246 mcast_addr_pool_remove(port, i); 5247 } 5248 5249 void 5250 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 5251 { 5252 struct rte_port *port; 5253 uint32_t i; 5254 5255 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5256 return; 5257 5258 port = &ports[port_id]; 5259 5260 /* 5261 * Search the pool of multicast MAC addresses for the removed address. 5262 */ 5263 for (i = 0; i < port->mc_addr_nb; i++) { 5264 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 5265 break; 5266 } 5267 if (i == port->mc_addr_nb) { 5268 fprintf(stderr, "multicast address not filtered by port %d\n", 5269 port_id); 5270 return; 5271 } 5272 5273 mcast_addr_pool_remove(port, i); 5274 if (eth_port_multicast_addr_list_set(port_id) < 0) 5275 /* Rollback on failure, add the address back into the pool */ 5276 mcast_addr_pool_append(port, mc_addr); 5277 } 5278 5279 void 5280 port_dcb_info_display(portid_t port_id) 5281 { 5282 struct rte_eth_dcb_info dcb_info; 5283 uint16_t i; 5284 int ret; 5285 static const char *border = "================"; 5286 5287 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5288 return; 5289 5290 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 5291 if (ret) { 5292 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 5293 port_id); 5294 return; 5295 } 5296 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 5297 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 5298 printf("\n TC : "); 5299 for (i = 0; i < dcb_info.nb_tcs; i++) 5300 printf("\t%4d", i); 5301 printf("\n Priority : "); 5302 for (i = 0; i < dcb_info.nb_tcs; i++) 5303 printf("\t%4d", dcb_info.prio_tc[i]); 5304 printf("\n BW percent :"); 5305 for (i = 0; i < dcb_info.nb_tcs; i++) 5306 printf("\t%4d%%", dcb_info.tc_bws[i]); 5307 printf("\n RXQ base : "); 5308 for (i = 0; i < dcb_info.nb_tcs; i++) 5309 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 5310 printf("\n RXQ number :"); 5311 for (i = 0; i < dcb_info.nb_tcs; i++) 5312 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 5313 printf("\n TXQ base : "); 5314 for (i = 0; i < dcb_info.nb_tcs; i++) 5315 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 5316 printf("\n TXQ number :"); 5317 for (i = 0; i < dcb_info.nb_tcs; i++) 5318 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 5319 printf("\n"); 5320 } 5321 5322 uint8_t * 5323 open_file(const char *file_path, uint32_t *size) 5324 { 5325 int fd = open(file_path, O_RDONLY); 5326 off_t pkg_size; 5327 uint8_t *buf = NULL; 5328 int ret = 0; 5329 struct stat st_buf; 5330 5331 if (size) 5332 *size = 0; 5333 5334 if (fd == -1) { 5335 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5336 return buf; 5337 } 5338 5339 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 5340 close(fd); 5341 fprintf(stderr, "%s: File operations failed\n", __func__); 5342 return buf; 5343 } 5344 5345 pkg_size = st_buf.st_size; 5346 if (pkg_size < 0) { 5347 close(fd); 5348 fprintf(stderr, "%s: File operations failed\n", __func__); 5349 return buf; 5350 } 5351 5352 buf = (uint8_t *)malloc(pkg_size); 5353 if (!buf) { 5354 close(fd); 5355 fprintf(stderr, "%s: Failed to 
malloc memory\n", __func__); 5356 return buf; 5357 } 5358 5359 ret = read(fd, buf, pkg_size); 5360 if (ret != pkg_size) { 5361 close(fd); 5362 fprintf(stderr, "%s: File read operation failed\n", __func__); 5363 close_file(buf); 5364 return NULL; 5365 } 5366 5367 if (size) 5368 *size = pkg_size; 5369 5370 close(fd); 5371 5372 return buf; 5373 } 5374 5375 int 5376 save_file(const char *file_path, uint8_t *buf, uint32_t size) 5377 { 5378 FILE *fh = fopen(file_path, "wb"); 5379 5380 if (fh == NULL) { 5381 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5382 return -1; 5383 } 5384 5385 if (fwrite(buf, 1, size, fh) != size) { 5386 fclose(fh); 5387 fprintf(stderr, "%s: File write operation failed\n", __func__); 5388 return -1; 5389 } 5390 5391 fclose(fh); 5392 5393 return 0; 5394 } 5395 5396 int 5397 close_file(uint8_t *buf) 5398 { 5399 if (buf) { 5400 free((void *)buf); 5401 return 0; 5402 } 5403 5404 return -1; 5405 } 5406 5407 void 5408 port_queue_region_info_display(portid_t port_id, void *buf) 5409 { 5410 #ifdef RTE_NET_I40E 5411 uint16_t i, j; 5412 struct rte_pmd_i40e_queue_regions *info = 5413 (struct rte_pmd_i40e_queue_regions *)buf; 5414 static const char *queue_region_info_stats_border = "-------"; 5415 5416 if (!info->queue_region_number) 5417 printf("No queue region has been set on this port\n"); 5418 5419 printf("\n %s All queue region info for port=%2d %s", 5420 queue_region_info_stats_border, port_id, 5421 queue_region_info_stats_border); 5422 printf("\n queue_region_number: %-14u \n", 5423 info->queue_region_number); 5424 5425 for (i = 0; i < info->queue_region_number; i++) { 5426 printf("\n region_id: %-14u queue_number: %-14u " 5427 "queue_start_index: %-14u \n", 5428 info->region[i].region_id, 5429 info->region[i].queue_num, 5430 info->region[i].queue_start_index); 5431 5432 printf(" user_priority_num is %-14u :", 5433 info->region[i].user_priority_num); 5434 for (j = 0; j < info->region[i].user_priority_num; j++) 5435 printf(" %-14u ", info->region[i].user_priority[j]); 5436 5437 printf("\n flowtype_num is %-14u :", 5438 info->region[i].flowtype_num); 5439 for (j = 0; j < info->region[i].flowtype_num; j++) 5440 printf(" %-14u ", info->region[i].hw_flowtype[j]); 5441 } 5442 #else 5443 RTE_SET_USED(port_id); 5444 RTE_SET_USED(buf); 5445 #endif 5446 5447 printf("\n\n"); 5448 } 5449 5450 void 5451 show_macs(portid_t port_id) 5452 { 5453 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5454 struct rte_eth_dev_info dev_info; 5455 int32_t i, rc, num_macs = 0; 5456 5457 if (eth_dev_info_get_print_err(port_id, &dev_info)) 5458 return; 5459 5460 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 5461 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 5462 if (rc < 0) 5463 return; 5464 5465 for (i = 0; i < rc; i++) { 5466 5467 /* skip zero address */ 5468 if (rte_is_zero_ether_addr(&addr[i])) 5469 continue; 5470 5471 num_macs++; 5472 } 5473 5474 printf("Number of MAC addresses added: %d\n", num_macs); 5475 5476 for (i = 0; i < rc; i++) { 5477 5478 /* skip zero address */ 5479 if (rte_is_zero_ether_addr(&addr[i])) 5480 continue; 5481 5482 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 5483 printf(" %s\n", buf); 5484 } 5485 } 5486 5487 void 5488 show_mcast_macs(portid_t port_id) 5489 { 5490 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5491 struct rte_ether_addr *addr; 5492 struct rte_port *port; 5493 uint32_t i; 5494 5495 port = &ports[port_id]; 5496 5497 printf("Number of multicast MAC addresses added: %u\n", port->mc_addr_nb); 5498 5499 for (i = 0; i < port->mc_addr_nb; i++) {
5500 addr = &port->mc_addr_pool[i]; 5501 5502 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5503 printf(" %s\n", buf); 5504 } 5505 } 5506
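/*
 * Illustrative usage sketch (not part of the testpmd sources; the paths
 * are hypothetical): the file helpers above compose into a simple
 * read/write round trip of a binary blob.
 *
 *	uint32_t size;
 *	uint8_t *buf = open_file("/tmp/pkg.bin", &size);
 *
 *	if (buf != NULL) {
 *		save_file("/tmp/pkg.copy", buf, size);
 *		close_file(buf);
 *	}
 */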