1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_branch_prediction.h> 31 #include <rte_mempool.h> 32 #include <rte_mbuf.h> 33 #include <rte_interrupts.h> 34 #include <rte_pci.h> 35 #include <rte_ether.h> 36 #include <rte_ethdev.h> 37 #include <rte_string_fns.h> 38 #include <rte_cycles.h> 39 #include <rte_flow.h> 40 #include <rte_mtr.h> 41 #include <rte_errno.h> 42 #ifdef RTE_NET_IXGBE 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_NET_I40E 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_NET_BNXT 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #ifdef RTE_LIB_GRO 52 #include <rte_gro.h> 53 #endif 54 #include <rte_hexdump.h> 55 56 #include "testpmd.h" 57 #include "cmdline_mtr.h" 58 59 #define ETHDEV_FWVERS_LEN 32 60 61 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 62 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 63 #else 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 65 #endif 66 67 #define NS_PER_SEC 1E9 68 69 static char *flowtype_to_str(uint16_t flow_type); 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 91 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 92 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 93 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS}, 94 { "none", 0 }, 95 { "eth", RTE_ETH_RSS_ETH }, 96 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 97 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 98 { "vlan", RTE_ETH_RSS_VLAN }, 99 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 100 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 101 { "ipv4", RTE_ETH_RSS_IPV4 }, 102 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 103 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 104 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 105 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 106 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 107 { "ipv6", RTE_ETH_RSS_IPV6 }, 108 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 109 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 110 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 111 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 112 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 113 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 114 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 115 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 116 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 117 { "port", RTE_ETH_RSS_PORT }, 118 { "vxlan", RTE_ETH_RSS_VXLAN }, 119 { "geneve", RTE_ETH_RSS_GENEVE }, 120 { "nvgre", RTE_ETH_RSS_NVGRE }, 121 { "ip", RTE_ETH_RSS_IP }, 122 { "udp", RTE_ETH_RSS_UDP }, 123 { "tcp", RTE_ETH_RSS_TCP }, 124 { "sctp", RTE_ETH_RSS_SCTP }, 125 { "tunnel", RTE_ETH_RSS_TUNNEL }, 126 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 127 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 128 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 129 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 130 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 131 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 132 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 133 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 134 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 135 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 136 { "esp", RTE_ETH_RSS_ESP }, 137 { "ah", RTE_ETH_RSS_AH }, 138 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 139 { "pfcp", RTE_ETH_RSS_PFCP }, 140 { "pppoe", RTE_ETH_RSS_PPPOE }, 141 { "gtpu", RTE_ETH_RSS_GTPU }, 142 { "ecpri", RTE_ETH_RSS_ECPRI }, 143 { "mpls", RTE_ETH_RSS_MPLS }, 144 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 145 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 146 { NULL, 0 }, 147 }; 148 149 static const struct { 150 enum rte_eth_fec_mode mode; 151 const char *name; 152 } fec_mode_name[] = { 153 { 154 .mode = RTE_ETH_FEC_NOFEC, 155 .name = "off", 156 }, 157 { 158 .mode = RTE_ETH_FEC_AUTO, 159 .name = "auto", 160 }, 161 { 162 .mode = RTE_ETH_FEC_BASER, 163 .name = "baser", 164 }, 165 { 166 .mode = RTE_ETH_FEC_RS, 167 .name = "rs", 168 }, 169 }; 170 171 static void 172 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 173 { 174 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 175 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 176 printf("%s%s", name, buf); 177 } 178 179 static void 180 nic_xstats_display_periodic(portid_t port_id) 181 { 182 struct xstat_display_info *xstats_info; 183 uint64_t *prev_values, *curr_values; 184 uint64_t diff_value, value_rate; 185 struct timespec cur_time; 186 uint64_t *ids_supp; 187 size_t ids_supp_sz; 188 uint64_t diff_ns; 189 unsigned int i; 190 int rc; 191 192 xstats_info = &ports[port_id].xstats_info; 193 194 ids_supp_sz = xstats_info->ids_supp_sz; 195 if (ids_supp_sz == 0) 196 return; 197 198 printf("\n"); 199 200 ids_supp = xstats_info->ids_supp; 201 prev_values = xstats_info->prev_values; 202 curr_values = xstats_info->curr_values; 203 204 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 205 ids_supp_sz); 206 if (rc != (int)ids_supp_sz) { 207 fprintf(stderr, 208 "Failed to get values of %zu xstats for port %u - return code %d\n", 209 ids_supp_sz, port_id, rc); 210 return; 211 } 212 213 diff_ns = 0; 214 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 215 uint64_t ns; 216 217 ns = cur_time.tv_sec * NS_PER_SEC; 218 ns += cur_time.tv_nsec; 219 220 if (xstats_info->prev_ns != 0) 221 diff_ns = ns - xstats_info->prev_ns; 222 xstats_info->prev_ns = ns; 223 } 224 225 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 226 for (i = 0; i < ids_supp_sz; i++) { 227 diff_value = (curr_values[i] > prev_values[i]) ? 228 (curr_values[i] - prev_values[i]) : 0; 229 prev_values[i] = curr_values[i]; 230 value_rate = diff_ns > 0 ? 231 (double)diff_value / diff_ns * NS_PER_SEC : 0; 232 233 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 234 xstats_display[i].name, curr_values[i], value_rate); 235 } 236 } 237 238 void 239 nic_stats_display(portid_t port_id) 240 { 241 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 242 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 243 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 244 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 245 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 246 struct timespec cur_time; 247 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 248 diff_ns; 249 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 250 struct rte_eth_stats stats; 251 252 static const char *nic_stats_border = "########################"; 253 254 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 255 print_valid_ports(); 256 return; 257 } 258 rte_eth_stats_get(port_id, &stats); 259 printf("\n %s NIC statistics for port %-2d %s\n", 260 nic_stats_border, port_id, nic_stats_border); 261 262 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 263 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 264 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 265 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 266 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 267 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 268 269 diff_ns = 0; 270 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 271 uint64_t ns; 272 273 ns = cur_time.tv_sec * NS_PER_SEC; 274 ns += cur_time.tv_nsec; 275 276 if (prev_ns[port_id] != 0) 277 diff_ns = ns - prev_ns[port_id]; 278 prev_ns[port_id] = ns; 279 } 280 281 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 282 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 283 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 284 (stats.opackets - prev_pkts_tx[port_id]) : 0; 285 prev_pkts_rx[port_id] = stats.ipackets; 286 prev_pkts_tx[port_id] = stats.opackets; 287 mpps_rx = diff_ns > 0 ? 288 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 289 mpps_tx = diff_ns > 0 ? 290 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 291 292 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 293 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 294 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 295 (stats.obytes - prev_bytes_tx[port_id]) : 0; 296 prev_bytes_rx[port_id] = stats.ibytes; 297 prev_bytes_tx[port_id] = stats.obytes; 298 mbps_rx = diff_ns > 0 ? 299 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 300 mbps_tx = diff_ns > 0 ? 301 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 302 303 printf("\n Throughput (since last show)\n"); 304 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 305 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 306 mpps_tx, mbps_tx * 8); 307 308 if (xstats_display_num > 0) 309 nic_xstats_display_periodic(port_id); 310 311 printf(" %s############################%s\n", 312 nic_stats_border, nic_stats_border); 313 } 314 315 void 316 nic_stats_clear(portid_t port_id) 317 { 318 int ret; 319 320 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 321 print_valid_ports(); 322 return; 323 } 324 325 ret = rte_eth_stats_reset(port_id); 326 if (ret != 0) { 327 fprintf(stderr, 328 "%s: Error: failed to reset stats (port %u): %s", 329 __func__, port_id, strerror(-ret)); 330 return; 331 } 332 333 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 334 if (ret != 0) { 335 if (ret < 0) 336 ret = -ret; 337 fprintf(stderr, 338 "%s: Error: failed to get stats (port %u): %s", 339 __func__, port_id, strerror(ret)); 340 return; 341 } 342 printf("\n NIC statistics for port %d cleared\n", port_id); 343 } 344 345 void 346 nic_xstats_display(portid_t port_id) 347 { 348 struct rte_eth_xstat *xstats; 349 int cnt_xstats, idx_xstat; 350 struct rte_eth_xstat_name *xstats_names; 351 352 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 353 print_valid_ports(); 354 return; 355 } 356 printf("###### NIC extended statistics for port %-2d\n", port_id); 357 if (!rte_eth_dev_is_valid_port(port_id)) { 358 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 359 return; 360 } 361 362 /* Get count */ 363 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 364 if (cnt_xstats < 0) { 365 fprintf(stderr, "Error: Cannot get count of xstats\n"); 366 return; 367 } 368 369 /* Get id-name lookup table */ 370 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 371 if (xstats_names == NULL) { 372 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 373 return; 374 } 375 if (cnt_xstats != rte_eth_xstats_get_names( 376 port_id, xstats_names, cnt_xstats)) { 377 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 378 free(xstats_names); 379 return; 380 } 381 382 /* Get stats themselves */ 383 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 384 if (xstats == NULL) { 385 fprintf(stderr, "Cannot allocate memory for xstats\n"); 386 free(xstats_names); 387 return; 388 } 389 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 390 fprintf(stderr, "Error: Unable to get xstats\n"); 391 free(xstats_names); 392 free(xstats); 393 return; 394 } 395 396 /* Display xstats */ 397 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 398 if (xstats_hide_zero && !xstats[idx_xstat].value) 399 continue; 400 printf("%s: %"PRIu64"\n", 401 xstats_names[idx_xstat].name, 402 xstats[idx_xstat].value); 403 } 404 free(xstats_names); 405 free(xstats); 406 } 407 408 void 409 nic_xstats_clear(portid_t port_id) 410 { 411 int ret; 412 413 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 414 print_valid_ports(); 415 return; 416 } 417 418 ret = rte_eth_xstats_reset(port_id); 419 if (ret != 0) { 420 fprintf(stderr, 421 "%s: Error: failed to reset xstats (port %u): %s\n", 422 __func__, port_id, strerror(-ret)); 423 return; 424 } 425 426 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 427 if (ret != 0) { 428 if (ret < 0) 429 ret = -ret; 430 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 431 __func__, port_id, strerror(ret)); 432 return; 433 } 434 } 435 436 static const char * 437 get_queue_state_name(uint8_t queue_state) 438 { 439 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 440 return "stopped"; 441 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 442 return "started"; 443 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 444 return "hairpin"; 445 else 446 return "unknown"; 447 } 448 449 void 450 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 451 { 452 struct rte_eth_burst_mode mode; 453 struct rte_eth_rxq_info qinfo; 454 int32_t rc; 455 static const char *info_border = "*********************"; 456 457 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 458 if (rc != 0) { 459 fprintf(stderr, 460 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 461 port_id, queue_id, strerror(-rc), rc); 462 return; 463 } 464 465 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 466 info_border, port_id, queue_id, info_border); 467 468 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 469 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 470 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 471 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 472 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 473 printf("\nRX drop packets: %s", 474 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 475 printf("\nRX deferred start: %s", 476 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 477 printf("\nRX scattered packets: %s", 478 (qinfo.scattered_rx != 0) ? "on" : "off"); 479 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 480 if (qinfo.rx_buf_size != 0) 481 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 482 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 483 484 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 485 printf("\nBurst mode: %s%s", 486 mode.info, 487 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 488 " (per queue)" : ""); 489 490 printf("\n"); 491 } 492 493 void 494 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 495 { 496 struct rte_eth_burst_mode mode; 497 struct rte_eth_txq_info qinfo; 498 int32_t rc; 499 static const char *info_border = "*********************"; 500 501 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 502 if (rc != 0) { 503 fprintf(stderr, 504 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 505 port_id, queue_id, strerror(-rc), rc); 506 return; 507 } 508 509 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 510 info_border, port_id, queue_id, info_border); 511 512 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 513 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 514 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 515 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 516 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 517 printf("\nTX deferred start: %s", 518 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 519 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 520 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 521 522 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 523 printf("\nBurst mode: %s%s", 524 mode.info, 525 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 526 " (per queue)" : ""); 527 528 printf("\n"); 529 } 530 531 static int bus_match_all(const struct rte_bus *bus, const void *data) 532 { 533 RTE_SET_USED(bus); 534 RTE_SET_USED(data); 535 return 0; 536 } 537 538 static void 539 device_infos_display_speeds(uint32_t speed_capa) 540 { 541 printf("\n\tDevice speed capability:"); 542 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 543 printf(" Autonegotiate (all speeds)"); 544 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 545 printf(" Disable autonegotiate (fixed speed) "); 546 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 547 printf(" 10 Mbps half-duplex "); 548 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 549 printf(" 10 Mbps full-duplex "); 550 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 551 printf(" 100 Mbps half-duplex "); 552 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 553 printf(" 100 Mbps full-duplex "); 554 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 555 printf(" 1 Gbps "); 556 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 557 printf(" 2.5 Gbps "); 558 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 559 printf(" 5 Gbps "); 560 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 561 printf(" 10 Gbps "); 562 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 563 printf(" 20 Gbps "); 564 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 565 printf(" 25 Gbps "); 566 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 567 printf(" 40 Gbps "); 568 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 569 printf(" 50 Gbps "); 570 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 571 printf(" 56 Gbps "); 572 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 573 printf(" 100 Gbps "); 574 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 575 printf(" 200 Gbps "); 576 } 577 578 void 579 device_infos_display(const char *identifier) 580 { 581 static const char *info_border = "*********************"; 582 struct rte_bus *start = NULL, *next; 583 struct rte_dev_iterator dev_iter; 584 char name[RTE_ETH_NAME_MAX_LEN]; 585 struct rte_ether_addr mac_addr; 586 struct rte_device *dev; 587 struct rte_devargs da; 588 portid_t port_id; 589 struct rte_eth_dev_info dev_info; 590 char devstr[128]; 591 592 memset(&da, 0, sizeof(da)); 593 if (!identifier) 594 goto skip_parse; 595 596 if (rte_devargs_parsef(&da, "%s", identifier)) { 597 fprintf(stderr, "cannot parse identifier\n"); 598 return; 599 } 600 601 skip_parse: 602 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 603 604 start = next; 605 if (identifier && da.bus != next) 606 continue; 607 608 /* Skip buses that don't have iterate method */ 609 if (!next->dev_iterate) 610 continue; 611 612 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 613 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 614 615 if (!dev->driver) 616 continue; 617 /* Check for matching device if identifier is present */ 618 if (identifier && 619 strncmp(da.name, dev->name, strlen(dev->name))) 620 continue; 621 printf("\n%s Infos for device %s %s\n", 622 info_border, dev->name, info_border); 623 printf("Bus name: %s", dev->bus->name); 624 printf("\nDriver name: %s", dev->driver->name); 625 printf("\nDevargs: %s", 626 dev->devargs ? dev->devargs->args : ""); 627 printf("\nConnect to socket: %d", dev->numa_node); 628 printf("\n"); 629 630 /* List ports with matching device name */ 631 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 632 printf("\n\tPort id: %-2d", port_id); 633 if (eth_macaddr_get_print_err(port_id, 634 &mac_addr) == 0) 635 print_ethaddr("\n\tMAC address: ", 636 &mac_addr); 637 rte_eth_dev_get_name_by_port(port_id, name); 638 printf("\n\tDevice name: %s", name); 639 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 640 device_infos_display_speeds(dev_info.speed_capa); 641 printf("\n"); 642 } 643 } 644 }; 645 rte_devargs_reset(&da); 646 } 647 648 static void 649 print_dev_capabilities(uint64_t capabilities) 650 { 651 uint64_t single_capa; 652 int begin; 653 int end; 654 int bit; 655 656 if (capabilities == 0) 657 return; 658 659 begin = __builtin_ctzll(capabilities); 660 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 661 662 single_capa = 1ULL << begin; 663 for (bit = begin; bit < end; bit++) { 664 if (capabilities & single_capa) 665 printf(" %s", 666 rte_eth_dev_capability_name(single_capa)); 667 single_capa <<= 1; 668 } 669 } 670 671 void 672 port_infos_display(portid_t port_id) 673 { 674 struct rte_port *port; 675 struct rte_ether_addr mac_addr; 676 struct rte_eth_link link; 677 struct rte_eth_dev_info dev_info; 678 int vlan_offload; 679 struct rte_mempool * mp; 680 static const char *info_border = "*********************"; 681 uint16_t mtu; 682 char name[RTE_ETH_NAME_MAX_LEN]; 683 int ret; 684 char fw_version[ETHDEV_FWVERS_LEN]; 685 686 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 687 print_valid_ports(); 688 return; 689 } 690 port = &ports[port_id]; 691 ret = eth_link_get_nowait_print_err(port_id, &link); 692 if (ret < 0) 693 return; 694 695 ret = eth_dev_info_get_print_err(port_id, &dev_info); 696 if (ret != 0) 697 return; 698 699 printf("\n%s Infos for port %-2d %s\n", 700 info_border, port_id, info_border); 701 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 702 print_ethaddr("MAC address: ", &mac_addr); 703 rte_eth_dev_get_name_by_port(port_id, name); 704 printf("\nDevice name: %s", name); 705 printf("\nDriver name: %s", dev_info.driver_name); 706 707 if (rte_eth_dev_fw_version_get(port_id, fw_version, 708 ETHDEV_FWVERS_LEN) == 0) 709 printf("\nFirmware-version: %s", fw_version); 710 else 711 printf("\nFirmware-version: %s", "not available"); 712 713 if (dev_info.device->devargs && dev_info.device->devargs->args) 714 printf("\nDevargs: %s", dev_info.device->devargs->args); 715 printf("\nConnect to socket: %u", port->socket_id); 716 717 if (port_numa[port_id] != NUMA_NO_CONFIG) { 718 mp = mbuf_pool_find(port_numa[port_id], 0); 719 if (mp) 720 printf("\nmemory allocation on the socket: %d", 721 port_numa[port_id]); 722 } else 723 printf("\nmemory allocation on the socket: %u",port->socket_id); 724 725 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 726 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 727 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 728 ("full-duplex") : ("half-duplex")); 729 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 730 ("On") : ("Off")); 731 732 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 733 printf("MTU: %u\n", mtu); 734 735 printf("Promiscuous mode: %s\n", 736 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 737 printf("Allmulticast mode: %s\n", 738 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 739 printf("Maximum number of MAC addresses: %u\n", 740 (unsigned int)(port->dev_info.max_mac_addrs)); 741 printf("Maximum number of MAC addresses of hash filtering: %u\n", 742 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 743 744 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 745 if (vlan_offload >= 0){ 746 printf("VLAN offload: \n"); 747 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 748 printf(" strip on, "); 749 else 750 printf(" strip off, "); 751 752 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 753 printf("filter on, "); 754 else 755 printf("filter off, "); 756 757 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 758 printf("extend on, "); 759 else 760 printf("extend off, "); 761 762 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 763 printf("qinq strip on\n"); 764 else 765 printf("qinq strip off\n"); 766 } 767 768 if (dev_info.hash_key_size > 0) 769 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 770 if (dev_info.reta_size > 0) 771 printf("Redirection table size: %u\n", dev_info.reta_size); 772 if (!dev_info.flow_type_rss_offloads) 773 printf("No RSS offload flow type is supported.\n"); 774 else { 775 uint16_t i; 776 char *p; 777 778 printf("Supported RSS offload flow types:\n"); 779 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 780 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 781 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 782 continue; 783 p = flowtype_to_str(i); 784 if (p) 785 printf(" %s\n", p); 786 else 787 printf(" user defined %d\n", i); 788 } 789 } 790 791 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 792 printf("Maximum configurable length of RX packet: %u\n", 793 dev_info.max_rx_pktlen); 794 printf("Maximum configurable size of LRO aggregated packet: %u\n", 795 dev_info.max_lro_pkt_size); 796 if (dev_info.max_vfs) 797 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 798 if (dev_info.max_vmdq_pools) 799 printf("Maximum number of VMDq pools: %u\n", 800 dev_info.max_vmdq_pools); 801 802 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 803 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 804 printf("Max possible number of RXDs per queue: %hu\n", 805 dev_info.rx_desc_lim.nb_max); 806 printf("Min possible number of RXDs per queue: %hu\n", 807 dev_info.rx_desc_lim.nb_min); 808 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 809 810 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 811 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 812 printf("Max possible number of TXDs per queue: %hu\n", 813 dev_info.tx_desc_lim.nb_max); 814 printf("Min possible number of TXDs per queue: %hu\n", 815 dev_info.tx_desc_lim.nb_min); 816 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 817 printf("Max segment number per packet: %hu\n", 818 dev_info.tx_desc_lim.nb_seg_max); 819 printf("Max segment number per MTU/TSO: %hu\n", 820 dev_info.tx_desc_lim.nb_mtu_seg_max); 821 822 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 823 print_dev_capabilities(dev_info.dev_capa); 824 printf(" )\n"); 825 /* Show switch info only if valid switch domain and port id is set */ 826 if (dev_info.switch_info.domain_id != 827 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 828 if (dev_info.switch_info.name) 829 printf("Switch name: %s\n", dev_info.switch_info.name); 830 831 printf("Switch domain Id: %u\n", 832 dev_info.switch_info.domain_id); 833 printf("Switch Port Id: %u\n", 834 dev_info.switch_info.port_id); 835 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 836 printf("Switch Rx domain: %u\n", 837 dev_info.switch_info.rx_domain); 838 } 839 } 840 841 void 842 port_summary_header_display(void) 843 { 844 uint16_t port_number; 845 846 port_number = rte_eth_dev_count_avail(); 847 printf("Number of available ports: %i\n", port_number); 848 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 849 "Driver", "Status", "Link"); 850 } 851 852 void 853 port_summary_display(portid_t port_id) 854 { 855 struct rte_ether_addr mac_addr; 856 struct rte_eth_link link; 857 struct rte_eth_dev_info dev_info; 858 char name[RTE_ETH_NAME_MAX_LEN]; 859 int ret; 860 861 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 862 print_valid_ports(); 863 return; 864 } 865 866 ret = eth_link_get_nowait_print_err(port_id, &link); 867 if (ret < 0) 868 return; 869 870 ret = eth_dev_info_get_print_err(port_id, &dev_info); 871 if (ret != 0) 872 return; 873 874 rte_eth_dev_get_name_by_port(port_id, name); 875 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 876 if (ret != 0) 877 return; 878 879 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 880 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 881 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 882 rte_eth_link_speed_to_str(link.link_speed)); 883 } 884 885 void 886 port_eeprom_display(portid_t port_id) 887 { 888 struct rte_dev_eeprom_info einfo; 889 int ret; 890 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 891 print_valid_ports(); 892 return; 893 } 894 895 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 896 if (len_eeprom < 0) { 897 switch (len_eeprom) { 898 case -ENODEV: 899 fprintf(stderr, "port index %d invalid\n", port_id); 900 break; 901 case -ENOTSUP: 902 fprintf(stderr, "operation not supported by device\n"); 903 break; 904 case -EIO: 905 fprintf(stderr, "device is removed\n"); 906 break; 907 default: 908 fprintf(stderr, "Unable to get EEPROM: %d\n", 909 len_eeprom); 910 break; 911 } 912 return; 913 } 914 915 char buf[len_eeprom]; 916 einfo.offset = 0; 917 einfo.length = len_eeprom; 918 einfo.data = buf; 919 920 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 921 if (ret != 0) { 922 switch (ret) { 923 case -ENODEV: 924 fprintf(stderr, "port index %d invalid\n", port_id); 925 break; 926 case -ENOTSUP: 927 fprintf(stderr, "operation not supported by device\n"); 928 break; 929 case -EIO: 930 fprintf(stderr, "device is removed\n"); 931 break; 932 default: 933 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 934 break; 935 } 936 return; 937 } 938 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 939 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 940 } 941 942 void 943 port_module_eeprom_display(portid_t port_id) 944 { 945 struct rte_eth_dev_module_info minfo; 946 struct rte_dev_eeprom_info einfo; 947 int ret; 948 949 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 950 print_valid_ports(); 951 return; 952 } 953 954 955 ret = rte_eth_dev_get_module_info(port_id, &minfo); 956 if (ret != 0) { 957 switch (ret) { 958 case -ENODEV: 959 fprintf(stderr, "port index %d invalid\n", port_id); 960 break; 961 case -ENOTSUP: 962 fprintf(stderr, "operation not supported by device\n"); 963 break; 964 case -EIO: 965 fprintf(stderr, "device is removed\n"); 966 break; 967 default: 968 fprintf(stderr, "Unable to get module EEPROM: %d\n", 969 ret); 970 break; 971 } 972 return; 973 } 974 975 char buf[minfo.eeprom_len]; 976 einfo.offset = 0; 977 einfo.length = minfo.eeprom_len; 978 einfo.data = buf; 979 980 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 981 if (ret != 0) { 982 switch (ret) { 983 case -ENODEV: 984 fprintf(stderr, "port index %d invalid\n", port_id); 985 break; 986 case -ENOTSUP: 987 fprintf(stderr, "operation not supported by device\n"); 988 break; 989 case -EIO: 990 fprintf(stderr, "device is removed\n"); 991 break; 992 default: 993 fprintf(stderr, "Unable to get module EEPROM: %d\n", 994 ret); 995 break; 996 } 997 return; 998 } 999 1000 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1001 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1002 } 1003 1004 int 1005 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1006 { 1007 uint16_t pid; 1008 1009 if (port_id == (portid_t)RTE_PORT_ALL) 1010 return 0; 1011 1012 RTE_ETH_FOREACH_DEV(pid) 1013 if (port_id == pid) 1014 return 0; 1015 1016 if (warning == ENABLED_WARN) 1017 fprintf(stderr, "Invalid port %d\n", port_id); 1018 1019 return 1; 1020 } 1021 1022 void print_valid_ports(void) 1023 { 1024 portid_t pid; 1025 1026 printf("The valid ports array is ["); 1027 RTE_ETH_FOREACH_DEV(pid) { 1028 printf(" %d", pid); 1029 } 1030 printf(" ]\n"); 1031 } 1032 1033 static int 1034 vlan_id_is_invalid(uint16_t vlan_id) 1035 { 1036 if (vlan_id < 4096) 1037 return 0; 1038 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1039 return 1; 1040 } 1041 1042 static int 1043 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1044 { 1045 const struct rte_pci_device *pci_dev; 1046 const struct rte_bus *bus; 1047 uint64_t pci_len; 1048 1049 if (reg_off & 0x3) { 1050 fprintf(stderr, 1051 "Port register offset 0x%X not aligned on a 4-byte boundary\n", 1052 (unsigned int)reg_off); 1053 return 1; 1054 } 1055 1056 if (!ports[port_id].dev_info.device) { 1057 fprintf(stderr, "Invalid device\n"); 1058 return 0; 1059 } 1060 1061 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1062 if (bus && !strcmp(bus->name, "pci")) { 1063 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1064 } else { 1065 fprintf(stderr, "Not a PCI device\n"); 1066 return 1; 1067 } 1068 1069 pci_len = pci_dev->mem_resource[0].len; 1070 if (reg_off >= pci_len) { 1071 fprintf(stderr, 1072 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", 1073 port_id, (unsigned int)reg_off, (unsigned int)reg_off, 1074 pci_len); 1075 return 1; 1076 } 1077 return 0; 1078 } 1079 1080 static int 1081 reg_bit_pos_is_invalid(uint8_t bit_pos) 1082 { 1083 if (bit_pos <= 31) 1084 return 0; 1085 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); 1086 return 1; 1087 } 1088 1089 #define display_port_and_reg_off(port_id, reg_off) \ 1090 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1091 1092 static inline void 1093 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1094 { 1095 display_port_and_reg_off(port_id, (unsigned)reg_off); 1096 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1097 } 1098 1099 void 1100 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1101 { 1102 uint32_t reg_v; 1103 1104 1105 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1106 return; 1107 if (port_reg_off_is_invalid(port_id, reg_off)) 1108 return; 1109 if (reg_bit_pos_is_invalid(bit_x)) 1110 return; 1111 reg_v = port_id_pci_reg_read(port_id, reg_off); 1112 display_port_and_reg_off(port_id, (unsigned)reg_off); 1113 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1114 } 1115 1116 void 1117 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1118 uint8_t bit1_pos, uint8_t bit2_pos) 1119 { 1120 uint32_t reg_v; 1121 uint8_t l_bit; 1122 uint8_t h_bit; 1123 1124 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1125 return; 1126 if (port_reg_off_is_invalid(port_id, reg_off)) 1127 return; 1128 if (reg_bit_pos_is_invalid(bit1_pos)) 1129 return; 1130 if (reg_bit_pos_is_invalid(bit2_pos)) 1131 return; 1132 if (bit1_pos > bit2_pos) 1133 l_bit = bit2_pos, h_bit = bit1_pos; 1134 else 1135 l_bit = bit1_pos, h_bit = bit2_pos; 1136 1137 reg_v = port_id_pci_reg_read(port_id, reg_off); 1138 reg_v >>= l_bit; 1139 if (h_bit < 31) 1140 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1141 display_port_and_reg_off(port_id, (unsigned)reg_off); 1142 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1143 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1144 } 1145 1146 void 1147 port_reg_display(portid_t port_id, uint32_t reg_off) 1148 { 1149 uint32_t reg_v; 1150 1151 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1152 return; 1153 if (port_reg_off_is_invalid(port_id, reg_off)) 1154 return; 1155 reg_v = port_id_pci_reg_read(port_id, reg_off); 1156 display_port_reg_value(port_id, reg_off, reg_v); 1157 } 1158 1159 void 1160 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1161 uint8_t bit_v) 1162 { 1163 uint32_t reg_v; 1164 1165 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1166 return; 1167 if (port_reg_off_is_invalid(port_id, reg_off)) 1168 return; 1169 if (reg_bit_pos_is_invalid(bit_pos)) 1170 return; 1171 if (bit_v > 1) { 1172 fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n", 1173 (int) bit_v); 1174 return; 1175 } 1176 reg_v = port_id_pci_reg_read(port_id, reg_off); 1177 if (bit_v == 0) 1178 reg_v &= ~(1 << bit_pos); 1179 else 1180 reg_v |= (1 << bit_pos); 1181 port_id_pci_reg_write(port_id, reg_off, reg_v); 1182 display_port_reg_value(port_id, reg_off, reg_v); 1183 } 1184 1185 void 1186 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1187 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1188 { 1189 uint32_t max_v; 1190 uint32_t reg_v; 1191 uint8_t l_bit; 1192 uint8_t h_bit; 1193 1194 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1195 return; 1196 if (port_reg_off_is_invalid(port_id, reg_off)) 1197 return; 1198 if (reg_bit_pos_is_invalid(bit1_pos)) 1199 return; 1200 if (reg_bit_pos_is_invalid(bit2_pos)) 1201 return; 1202 if (bit1_pos > bit2_pos) 1203 l_bit = bit2_pos, h_bit = bit1_pos; 1204 else 1205 l_bit = bit1_pos, h_bit = bit2_pos; 1206 1207 if ((h_bit - l_bit) < 31) 1208 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1209 else 1210 max_v = 0xFFFFFFFF; 1211 1212 if (value > max_v) { 1213 fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n", 1214 (unsigned)value, (unsigned)value, 1215 (unsigned)max_v, (unsigned)max_v); 1216 return; 1217 } 1218 reg_v = port_id_pci_reg_read(port_id, reg_off); 1219 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1220 reg_v |= (value << l_bit); /* Set changed bits */ 1221 port_id_pci_reg_write(port_id, reg_off, reg_v); 1222 display_port_reg_value(port_id, reg_off, reg_v); 1223 } 1224 1225 void 1226 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1227 { 1228 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1229 return; 1230 if (port_reg_off_is_invalid(port_id, reg_off)) 1231 return; 1232 port_id_pci_reg_write(port_id, reg_off, reg_v); 1233 display_port_reg_value(port_id, reg_off, reg_v); 1234 } 1235 1236 void 1237 port_mtu_set(portid_t port_id, uint16_t mtu) 1238 { 1239 struct rte_port *port = &ports[port_id]; 1240 int diag; 1241 1242 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1243 return; 1244 1245 if (port->need_reconfig == 0) { 1246 diag = rte_eth_dev_set_mtu(port_id, mtu); 1247 if (diag != 0) { 1248 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1249 return; 1250 } 1251 } 1252 1253 port->dev_conf.rxmode.mtu = mtu; 1254 } 1255 1256 /* Generic flow management functions. */ 1257 1258 static struct port_flow_tunnel * 1259 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1260 { 1261 struct port_flow_tunnel *flow_tunnel; 1262 1263 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1264 if (flow_tunnel->id == port_tunnel_id) 1265 goto out; 1266 } 1267 flow_tunnel = NULL; 1268 1269 out: 1270 return flow_tunnel; 1271 } 1272 1273 const char * 1274 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1275 { 1276 const char *type; 1277 switch (tunnel->type) { 1278 default: 1279 type = "unknown"; 1280 break; 1281 case RTE_FLOW_ITEM_TYPE_VXLAN: 1282 type = "vxlan"; 1283 break; 1284 case RTE_FLOW_ITEM_TYPE_GRE: 1285 type = "gre"; 1286 break; 1287 case RTE_FLOW_ITEM_TYPE_NVGRE: 1288 type = "nvgre"; 1289 break; 1290 case RTE_FLOW_ITEM_TYPE_GENEVE: 1291 type = "geneve"; 1292 break; 1293 } 1294 1295 return type; 1296 } 1297 1298 struct port_flow_tunnel * 1299 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1300 { 1301 struct rte_port *port = &ports[port_id]; 1302 struct port_flow_tunnel *flow_tunnel; 1303 1304 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1305 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1306 goto out; 1307 } 1308 flow_tunnel = NULL; 1309 1310 out: 1311 return flow_tunnel; 1312 } 1313 1314 void port_flow_tunnel_list(portid_t port_id) 1315 { 1316 struct rte_port *port = &ports[port_id]; 1317 struct port_flow_tunnel *flt; 1318 1319 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1320 printf("port %u tunnel #%u type=%s", 1321 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1322 if (flt->tunnel.tun_id) 1323 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1324 printf("\n"); 1325 } 1326 } 1327 1328 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1329 { 1330 struct rte_port *port = &ports[port_id]; 1331 struct port_flow_tunnel *flt; 1332 1333 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1334 if (flt->id == tunnel_id) 1335 break; 1336 } 1337 if (flt) { 1338 LIST_REMOVE(flt, chain); 1339 free(flt); 1340 printf("port %u: flow tunnel #%u destroyed\n", 1341 port_id, tunnel_id); 1342 } 1343 } 1344 1345 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1346 { 1347 struct rte_port *port = &ports[port_id]; 1348 enum rte_flow_item_type type; 1349 struct port_flow_tunnel *flt; 1350 1351 if (!strcmp(ops->type, "vxlan")) 1352 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1353 else if (!strcmp(ops->type, "gre")) 1354 type = RTE_FLOW_ITEM_TYPE_GRE; 1355 else if (!strcmp(ops->type, "nvgre")) 1356 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1357 else if (!strcmp(ops->type, "geneve")) 1358 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1359 else { 1360 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1361 ops->type); 1362 return; 1363 } 1364 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1365 if (flt->tunnel.type == type) 1366 break; 1367 } 1368 if (!flt) { 1369 flt = calloc(1, sizeof(*flt)); 1370 if (!flt) { 1371 fprintf(stderr, "failed to allocate port flt object\n"); 1372 return; 1373 } 1374 flt->tunnel.type = type; 1375 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1376 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1377 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1378 } 1379 printf("port %d: flow tunnel #%u type %s\n", 1380 port_id, flt->id, ops->type); 1381 } 1382 1383 /** Generate a port_flow entry from attributes/pattern/actions. */ 1384 static struct port_flow * 1385 port_flow_new(const struct rte_flow_attr *attr, 1386 const struct rte_flow_item *pattern, 1387 const struct rte_flow_action *actions, 1388 struct rte_flow_error *error) 1389 { 1390 const struct rte_flow_conv_rule rule = { 1391 .attr_ro = attr, 1392 .pattern_ro = pattern, 1393 .actions_ro = actions, 1394 }; 1395 struct port_flow *pf; 1396 int ret; 1397 1398 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1399 if (ret < 0) 1400 return NULL; 1401 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1402 if (!pf) { 1403 rte_flow_error_set 1404 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1405 "calloc() failed"); 1406 return NULL; 1407 } 1408 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1409 error) >= 0) 1410 return pf; 1411 free(pf); 1412 return NULL; 1413 } 1414 1415 /** Print a message out of a flow error. */ 1416 static int 1417 port_flow_complain(struct rte_flow_error *error) 1418 { 1419 static const char *const errstrlist[] = { 1420 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1421 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1422 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1423 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1424 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1425 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1426 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1427 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1428 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1429 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1430 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1431 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1432 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1433 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1434 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1435 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1436 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1437 }; 1438 const char *errstr; 1439 char buf[32]; 1440 int err = rte_errno; 1441 1442 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1443 !errstrlist[error->type]) 1444 errstr = "unknown type"; 1445 else 1446 errstr = errstrlist[error->type]; 1447 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1448 __func__, error->type, errstr, 1449 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1450 error->cause), buf) : "", 1451 error->message ? error->message : "(no stated reason)", 1452 rte_strerror(err)); 1453 1454 switch (error->type) { 1455 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1456 fprintf(stderr, "The status suggests the use of \"transfer\" " 1457 "as the possible cause of the failure. Make " 1458 "sure that the flow in question and its " 1459 "indirect components (if any) are managed " 1460 "via \"transfer\" proxy port. Use command " 1461 "\"show port (port_id) flow transfer proxy\" " 1462 "to figure out the proxy port ID\n"); 1463 break; 1464 default: 1465 break; 1466 } 1467 1468 return -err; 1469 } 1470 1471 static void 1472 rss_config_display(struct rte_flow_action_rss *rss_conf) 1473 { 1474 uint8_t i; 1475 1476 if (rss_conf == NULL) { 1477 fprintf(stderr, "Invalid rule\n"); 1478 return; 1479 } 1480 1481 printf("RSS:\n" 1482 " queues:"); 1483 if (rss_conf->queue_num == 0) 1484 printf(" none"); 1485 for (i = 0; i < rss_conf->queue_num; i++) 1486 printf(" %d", rss_conf->queue[i]); 1487 printf("\n"); 1488 1489 printf(" function: "); 1490 switch (rss_conf->func) { 1491 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1492 printf("default\n"); 1493 break; 1494 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1495 printf("toeplitz\n"); 1496 break; 1497 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1498 printf("simple_xor\n"); 1499 break; 1500 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1501 printf("symmetric_toeplitz\n"); 1502 break; 1503 default: 1504 printf("Unknown function\n"); 1505 return; 1506 } 1507 1508 printf(" types:\n"); 1509 if (rss_conf->types == 0) { 1510 printf(" none\n"); 1511 return; 1512 } 1513 for (i = 0; rss_type_table[i].str; i++) { 1514 if ((rss_conf->types & 1515 rss_type_table[i].rss_type) == 1516 rss_type_table[i].rss_type && 1517 rss_type_table[i].rss_type != 0) 1518 printf(" %s\n", rss_type_table[i].str); 1519 } 1520 } 1521 1522 static struct port_indirect_action * 1523 action_get_by_id(portid_t port_id, uint32_t id) 1524 { 1525 struct rte_port *port; 1526 struct port_indirect_action **ppia; 1527 struct port_indirect_action *pia = NULL; 1528 1529 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1530 port_id == (portid_t)RTE_PORT_ALL) 1531 return NULL; 1532 port = &ports[port_id]; 1533 ppia = &port->actions_list; 1534 while (*ppia) { 1535 if ((*ppia)->id == id) { 1536 pia = *ppia; 1537 break; 1538 } 1539 ppia = &(*ppia)->next; 1540 } 1541 if (!pia) 1542 fprintf(stderr, 1543 "Failed to find indirect action #%u on port %u\n", 1544 id, port_id); 1545 return pia; 1546 } 1547 1548 static int 1549 action_alloc(portid_t port_id, uint32_t id, 1550 struct port_indirect_action **action) 1551 { 1552 struct rte_port *port; 1553 struct port_indirect_action **ppia; 1554 struct port_indirect_action *pia = NULL; 1555 1556 *action = NULL; 1557 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1558 port_id == (portid_t)RTE_PORT_ALL) 1559 return -EINVAL; 1560 port = &ports[port_id]; 1561 if (id == UINT32_MAX) { 1562 /* taking first available ID */ 1563 if (port->actions_list) { 1564 if (port->actions_list->id == UINT32_MAX - 1) { 1565 fprintf(stderr, 1566 "Highest indirect action ID is already assigned, delete it first\n"); 1567 return -ENOMEM; 1568 } 1569 id = port->actions_list->id + 1; 1570 } else { 1571 id = 0; 1572 } 1573 } 1574 pia = calloc(1, sizeof(*pia)); 1575 if (!pia) { 1576 fprintf(stderr, 1577 "Allocation of port %u indirect action failed\n", 1578 port_id); 1579 return -ENOMEM; 1580 } 1581 ppia = &port->actions_list; 1582 while (*ppia && (*ppia)->id > id) 1583 ppia = &(*ppia)->next; 1584 if (*ppia && (*ppia)->id == id) { 1585 fprintf(stderr, 1586 "Indirect action #%u is already assigned, delete it first\n", 1587 id); 1588 free(pia); 1589 return -EINVAL; 1590 } 1591 pia->next = *ppia; 1592 pia->id = id; 1593 *ppia = pia; 1594 *action = pia; 1595 return 0; 1596 } 1597 1598 /** Create indirect action */ 1599 int 1600 port_action_handle_create(portid_t port_id, uint32_t id, 1601 const struct rte_flow_indir_action_conf *conf, 1602 const struct rte_flow_action *action) 1603 { 1604 struct port_indirect_action *pia; 1605 int ret; 1606 struct rte_flow_error error; 1607 1608 ret = action_alloc(port_id, id, &pia); 1609 if (ret) 1610 return ret; 1611 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1612 struct rte_flow_action_age *age = 1613 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1614 1615 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1616 age->context = &pia->age_type; 1617 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1618 struct rte_flow_action_conntrack *ct = 1619 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1620 1621 memcpy(ct, &conntrack_context, sizeof(*ct)); 1622 } 1623 /* Poisoning to make sure PMDs update it in case of error. */ 1624 memset(&error, 0x22, sizeof(error)); 1625 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1626 &error); 1627 if (!pia->handle) { 1628 uint32_t destroy_id = pia->id; 1629 port_action_handle_destroy(port_id, 1, &destroy_id); 1630 return port_flow_complain(&error); 1631 } 1632 pia->type = action->type; 1633 printf("Indirect action #%u created\n", pia->id); 1634 return 0; 1635 } 1636 1637 /** Destroy indirect action */ 1638 int 1639 port_action_handle_destroy(portid_t port_id, 1640 uint32_t n, 1641 const uint32_t *actions) 1642 { 1643 struct rte_port *port; 1644 struct port_indirect_action **tmp; 1645 uint32_t c = 0; 1646 int ret = 0; 1647 1648 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1649 port_id == (portid_t)RTE_PORT_ALL) 1650 return -EINVAL; 1651 port = &ports[port_id]; 1652 tmp = &port->actions_list; 1653 while (*tmp) { 1654 uint32_t i; 1655 1656 for (i = 0; i != n; ++i) { 1657 struct rte_flow_error error; 1658 struct port_indirect_action *pia = *tmp; 1659 1660 if (actions[i] != pia->id) 1661 continue; 1662 /* 1663 * Poisoning to make sure PMDs update it in case 1664 * of error. 1665 */ 1666 memset(&error, 0x33, sizeof(error)); 1667 1668 if (pia->handle && rte_flow_action_handle_destroy( 1669 port_id, pia->handle, &error)) { 1670 ret = port_flow_complain(&error); 1671 continue; 1672 } 1673 *tmp = pia->next; 1674 printf("Indirect action #%u destroyed\n", pia->id); 1675 free(pia); 1676 break; 1677 } 1678 if (i == n) 1679 tmp = &(*tmp)->next; 1680 ++c; 1681 } 1682 return ret; 1683 } 1684 1685 1686 /** Get indirect action by port + id */ 1687 struct rte_flow_action_handle * 1688 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1689 { 1690 1691 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1692 1693 return (pia) ? pia->handle : NULL; 1694 } 1695 1696 /** Update indirect action */ 1697 int 1698 port_action_handle_update(portid_t port_id, uint32_t id, 1699 const struct rte_flow_action *action) 1700 { 1701 struct rte_flow_error error; 1702 struct rte_flow_action_handle *action_handle; 1703 struct port_indirect_action *pia; 1704 const void *update; 1705 1706 action_handle = port_action_handle_get_by_id(port_id, id); 1707 if (!action_handle) 1708 return -EINVAL; 1709 pia = action_get_by_id(port_id, id); 1710 if (!pia) 1711 return -EINVAL; 1712 switch (pia->type) { 1713 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1714 update = action->conf; 1715 break; 1716 default: 1717 update = action; 1718 break; 1719 } 1720 if (rte_flow_action_handle_update(port_id, action_handle, update, 1721 &error)) { 1722 return port_flow_complain(&error); 1723 } 1724 printf("Indirect action #%u updated\n", id); 1725 return 0; 1726 } 1727 1728 int 1729 port_action_handle_query(portid_t port_id, uint32_t id) 1730 { 1731 struct rte_flow_error error; 1732 struct port_indirect_action *pia; 1733 union { 1734 struct rte_flow_query_count count; 1735 struct rte_flow_query_age age; 1736 struct rte_flow_action_conntrack ct; 1737 } query; 1738 1739 pia = action_get_by_id(port_id, id); 1740 if (!pia) 1741 return -EINVAL; 1742 switch (pia->type) { 1743 case RTE_FLOW_ACTION_TYPE_AGE: 1744 case RTE_FLOW_ACTION_TYPE_COUNT: 1745 break; 1746 default: 1747 fprintf(stderr, 1748 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1749 id, pia->type, port_id); 1750 return -ENOTSUP; 1751 } 1752 /* Poisoning to make sure PMDs update it in case of error. */ 1753 memset(&error, 0x55, sizeof(error)); 1754 memset(&query, 0, sizeof(query)); 1755 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 1756 return port_flow_complain(&error); 1757 switch (pia->type) { 1758 case RTE_FLOW_ACTION_TYPE_AGE: 1759 printf("Indirect AGE action:\n" 1760 " aged: %u\n" 1761 " sec_since_last_hit_valid: %u\n" 1762 " sec_since_last_hit: %" PRIu32 "\n", 1763 query.age.aged, 1764 query.age.sec_since_last_hit_valid, 1765 query.age.sec_since_last_hit); 1766 break; 1767 case RTE_FLOW_ACTION_TYPE_COUNT: 1768 printf("Indirect COUNT action:\n" 1769 " hits_set: %u\n" 1770 " bytes_set: %u\n" 1771 " hits: %" PRIu64 "\n" 1772 " bytes: %" PRIu64 "\n", 1773 query.count.hits_set, 1774 query.count.bytes_set, 1775 query.count.hits, 1776 query.count.bytes); 1777 break; 1778 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1779 printf("Conntrack Context:\n" 1780 " Peer: %u, Flow dir: %s, Enable: %u\n" 1781 " Live: %u, SACK: %u, CACK: %u\n" 1782 " Packet dir: %s, Liberal: %u, State: %u\n" 1783 " Factor: %u, Retrans: %u, TCP flags: %u\n" 1784 " Last Seq: %u, Last ACK: %u\n" 1785 " Last Win: %u, Last End: %u\n", 1786 query.ct.peer_port, 1787 query.ct.is_original_dir ? "Original" : "Reply", 1788 query.ct.enable, query.ct.live_connection, 1789 query.ct.selective_ack, query.ct.challenge_ack_passed, 1790 query.ct.last_direction ? "Original" : "Reply", 1791 query.ct.liberal_mode, query.ct.state, 1792 query.ct.max_ack_window, query.ct.retransmission_limit, 1793 query.ct.last_index, query.ct.last_seq, 1794 query.ct.last_ack, query.ct.last_window, 1795 query.ct.last_end); 1796 printf(" Original Dir:\n" 1797 " scale: %u, fin: %u, ack seen: %u\n" 1798 " unacked data: %u\n Sent end: %u," 1799 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1800 query.ct.original_dir.scale, 1801 query.ct.original_dir.close_initiated, 1802 query.ct.original_dir.last_ack_seen, 1803 query.ct.original_dir.data_unacked, 1804 query.ct.original_dir.sent_end, 1805 query.ct.original_dir.reply_end, 1806 query.ct.original_dir.max_win, 1807 query.ct.original_dir.max_ack); 1808 printf(" Reply Dir:\n" 1809 " scale: %u, fin: %u, ack seen: %u\n" 1810 " unacked data: %u\n Sent end: %u," 1811 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1812 query.ct.reply_dir.scale, 1813 query.ct.reply_dir.close_initiated, 1814 query.ct.reply_dir.last_ack_seen, 1815 query.ct.reply_dir.data_unacked, 1816 query.ct.reply_dir.sent_end, 1817 query.ct.reply_dir.reply_end, 1818 query.ct.reply_dir.max_win, 1819 query.ct.reply_dir.max_ack); 1820 break; 1821 default: 1822 fprintf(stderr, 1823 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1824 id, pia->type, port_id); 1825 break; 1826 } 1827 return 0; 1828 } 1829 1830 static struct port_flow_tunnel * 1831 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1832 const struct rte_flow_item *pattern, 1833 const struct rte_flow_action *actions, 1834 const struct tunnel_ops *tunnel_ops) 1835 { 1836 int ret; 1837 struct rte_port *port; 1838 struct port_flow_tunnel *pft; 1839 struct rte_flow_error error; 1840 1841 port = &ports[port_id]; 1842 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 1843 if (!pft) { 1844 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 1845 tunnel_ops->id); 1846 return NULL; 1847 } 1848 if (tunnel_ops->actions) { 1849 uint32_t num_actions; 1850 const struct rte_flow_action *aptr; 1851 1852 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 1853 &pft->pmd_actions, 1854 &pft->num_pmd_actions, 1855 &error); 1856 if (ret) { 1857 port_flow_complain(&error); 1858 return NULL; 1859 } 1860 for (aptr = actions, num_actions = 1; 1861 aptr->type != RTE_FLOW_ACTION_TYPE_END; 1862 aptr++, num_actions++); 1863 pft->actions = malloc( 1864 (num_actions + pft->num_pmd_actions) * 1865 sizeof(actions[0])); 1866 if (!pft->actions) { 1867 rte_flow_tunnel_action_decap_release( 1868 port_id, pft->actions, 1869 pft->num_pmd_actions, &error); 1870 return NULL; 1871 } 1872 rte_memcpy(pft->actions, pft->pmd_actions, 1873 pft->num_pmd_actions * sizeof(actions[0])); 1874 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 1875 num_actions * sizeof(actions[0])); 1876 } 1877 if (tunnel_ops->items) { 1878 uint32_t num_items; 1879 const struct rte_flow_item *iptr; 1880 1881 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 1882 &pft->pmd_items, 1883 &pft->num_pmd_items, 1884 &error); 1885 if (ret) { 1886 port_flow_complain(&error); 1887 return NULL; 1888 } 1889 for (iptr = pattern, num_items = 1; 1890 iptr->type != RTE_FLOW_ITEM_TYPE_END; 1891 iptr++, num_items++); 1892 pft->items = malloc((num_items + pft->num_pmd_items) * 1893 sizeof(pattern[0])); 1894 if (!pft->items) { 1895 rte_flow_tunnel_item_release( 1896 port_id, pft->pmd_items, 1897 pft->num_pmd_items, &error); 1898 return NULL; 1899 } 1900 rte_memcpy(pft->items, pft->pmd_items, 1901 pft->num_pmd_items * sizeof(pattern[0])); 1902 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 1903 num_items * sizeof(pattern[0])); 1904 } 1905 1906 return pft; 1907 } 1908 1909 static void 1910 port_flow_tunnel_offload_cmd_release(portid_t port_id, 1911 const struct tunnel_ops *tunnel_ops, 1912 struct port_flow_tunnel *pft) 1913 { 1914 struct rte_flow_error error; 1915 1916 if (tunnel_ops->actions) { 1917 free(pft->actions); 1918 rte_flow_tunnel_action_decap_release( 1919 port_id, pft->pmd_actions, 1920 pft->num_pmd_actions, &error); 1921 pft->actions = NULL; 1922 pft->pmd_actions = NULL; 1923 } 1924 if (tunnel_ops->items) { 1925 free(pft->items); 1926 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 1927 pft->num_pmd_items, 1928 &error); 1929 pft->items = NULL; 1930 pft->pmd_items = NULL; 1931 } 1932 } 1933 1934 /** Add port meter policy */ 1935 int 1936 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 1937 const struct rte_flow_action *actions) 1938 { 1939 struct rte_mtr_error error; 1940 const struct rte_flow_action *act = actions; 1941 const struct rte_flow_action *start; 1942 struct rte_mtr_meter_policy_params policy; 1943 uint32_t i = 0, act_n; 1944 int ret; 1945 1946 for (i = 0; i < RTE_COLORS; i++) { 1947 for (act_n = 0, start = act; 1948 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 1949 act_n++; 1950 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 1951 policy.actions[i] = start; 1952 else 1953 policy.actions[i] = NULL; 1954 act++; 1955 } 1956 ret = rte_mtr_meter_policy_add(port_id, 1957 policy_id, 1958 &policy, &error); 1959 if (ret) 1960 print_mtr_err_msg(&error); 1961 return ret; 1962 } 1963 1964 /** Validate flow rule. */ 1965 int 1966 port_flow_validate(portid_t port_id, 1967 const struct rte_flow_attr *attr, 1968 const struct rte_flow_item *pattern, 1969 const struct rte_flow_action *actions, 1970 const struct tunnel_ops *tunnel_ops) 1971 { 1972 struct rte_flow_error error; 1973 struct port_flow_tunnel *pft = NULL; 1974 1975 /* Poisoning to make sure PMDs update it in case of error. */ 1976 memset(&error, 0x11, sizeof(error)); 1977 if (tunnel_ops->enabled) { 1978 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 1979 actions, tunnel_ops); 1980 if (!pft) 1981 return -ENOENT; 1982 if (pft->items) 1983 pattern = pft->items; 1984 if (pft->actions) 1985 actions = pft->actions; 1986 } 1987 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1988 return port_flow_complain(&error); 1989 if (tunnel_ops->enabled) 1990 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 1991 printf("Flow rule validated\n"); 1992 return 0; 1993 } 1994 1995 /** Return age action structure if exists, otherwise NULL. */ 1996 static struct rte_flow_action_age * 1997 age_action_get(const struct rte_flow_action *actions) 1998 { 1999 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2000 switch (actions->type) { 2001 case RTE_FLOW_ACTION_TYPE_AGE: 2002 return (struct rte_flow_action_age *) 2003 (uintptr_t)actions->conf; 2004 default: 2005 break; 2006 } 2007 } 2008 return NULL; 2009 } 2010 2011 /** Create flow rule. */ 2012 int 2013 port_flow_create(portid_t port_id, 2014 const struct rte_flow_attr *attr, 2015 const struct rte_flow_item *pattern, 2016 const struct rte_flow_action *actions, 2017 const struct tunnel_ops *tunnel_ops) 2018 { 2019 struct rte_flow *flow; 2020 struct rte_port *port; 2021 struct port_flow *pf; 2022 uint32_t id = 0; 2023 struct rte_flow_error error; 2024 struct port_flow_tunnel *pft = NULL; 2025 struct rte_flow_action_age *age = age_action_get(actions); 2026 2027 port = &ports[port_id]; 2028 if (port->flow_list) { 2029 if (port->flow_list->id == UINT32_MAX) { 2030 fprintf(stderr, 2031 "Highest rule ID is already assigned, delete it first"); 2032 return -ENOMEM; 2033 } 2034 id = port->flow_list->id + 1; 2035 } 2036 if (tunnel_ops->enabled) { 2037 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2038 actions, tunnel_ops); 2039 if (!pft) 2040 return -ENOENT; 2041 if (pft->items) 2042 pattern = pft->items; 2043 if (pft->actions) 2044 actions = pft->actions; 2045 } 2046 pf = port_flow_new(attr, pattern, actions, &error); 2047 if (!pf) 2048 return port_flow_complain(&error); 2049 if (age) { 2050 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2051 age->context = &pf->age_type; 2052 } 2053 /* Poisoning to make sure PMDs update it in case of error. */ 2054 memset(&error, 0x22, sizeof(error)); 2055 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2056 if (!flow) { 2057 if (tunnel_ops->enabled) 2058 port_flow_tunnel_offload_cmd_release(port_id, 2059 tunnel_ops, pft); 2060 free(pf); 2061 return port_flow_complain(&error); 2062 } 2063 pf->next = port->flow_list; 2064 pf->id = id; 2065 pf->flow = flow; 2066 port->flow_list = pf; 2067 if (tunnel_ops->enabled) 2068 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2069 printf("Flow rule #%u created\n", pf->id); 2070 return 0; 2071 } 2072 2073 /** Destroy a number of flow rules. */ 2074 int 2075 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2076 { 2077 struct rte_port *port; 2078 struct port_flow **tmp; 2079 uint32_t c = 0; 2080 int ret = 0; 2081 2082 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2083 port_id == (portid_t)RTE_PORT_ALL) 2084 return -EINVAL; 2085 port = &ports[port_id]; 2086 tmp = &port->flow_list; 2087 while (*tmp) { 2088 uint32_t i; 2089 2090 for (i = 0; i != n; ++i) { 2091 struct rte_flow_error error; 2092 struct port_flow *pf = *tmp; 2093 2094 if (rule[i] != pf->id) 2095 continue; 2096 /* 2097 * Poisoning to make sure PMDs update it in case 2098 * of error. 2099 */ 2100 memset(&error, 0x33, sizeof(error)); 2101 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2102 ret = port_flow_complain(&error); 2103 continue; 2104 } 2105 printf("Flow rule #%u destroyed\n", pf->id); 2106 *tmp = pf->next; 2107 free(pf); 2108 break; 2109 } 2110 if (i == n) 2111 tmp = &(*tmp)->next; 2112 ++c; 2113 } 2114 return ret; 2115 } 2116 2117 /** Remove all flow rules. */ 2118 int 2119 port_flow_flush(portid_t port_id) 2120 { 2121 struct rte_flow_error error; 2122 struct rte_port *port; 2123 int ret = 0; 2124 2125 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2126 port_id == (portid_t)RTE_PORT_ALL) 2127 return -EINVAL; 2128 2129 port = &ports[port_id]; 2130 2131 if (port->flow_list == NULL) 2132 return ret; 2133 2134 /* Poisoning to make sure PMDs update it in case of error. */ 2135 memset(&error, 0x44, sizeof(error)); 2136 if (rte_flow_flush(port_id, &error)) { 2137 port_flow_complain(&error); 2138 } 2139 2140 while (port->flow_list) { 2141 struct port_flow *pf = port->flow_list->next; 2142 2143 free(port->flow_list); 2144 port->flow_list = pf; 2145 } 2146 return ret; 2147 } 2148 2149 /** Dump flow rules. */ 2150 int 2151 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 2152 const char *file_name) 2153 { 2154 int ret = 0; 2155 FILE *file = stdout; 2156 struct rte_flow_error error; 2157 struct rte_port *port; 2158 struct port_flow *pflow; 2159 struct rte_flow *tmpFlow = NULL; 2160 bool found = false; 2161 2162 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2163 port_id == (portid_t)RTE_PORT_ALL) 2164 return -EINVAL; 2165 2166 if (!dump_all) { 2167 port = &ports[port_id]; 2168 pflow = port->flow_list; 2169 while (pflow) { 2170 if (rule_id != pflow->id) { 2171 pflow = pflow->next; 2172 } else { 2173 tmpFlow = pflow->flow; 2174 if (tmpFlow) 2175 found = true; 2176 break; 2177 } 2178 } 2179 if (found == false) { 2180 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 2181 return -EINVAL; 2182 } 2183 } 2184 2185 if (file_name && strlen(file_name)) { 2186 file = fopen(file_name, "w"); 2187 if (!file) { 2188 fprintf(stderr, "Failed to create file %s: %s\n", 2189 file_name, strerror(errno)); 2190 return -errno; 2191 } 2192 } 2193 2194 if (!dump_all) 2195 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 2196 else 2197 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 2198 if (ret) { 2199 port_flow_complain(&error); 2200 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 2201 } else 2202 printf("Flow dump finished\n"); 2203 if (file_name && strlen(file_name)) 2204 fclose(file); 2205 return ret; 2206 } 2207 2208 /** Query a flow rule. */ 2209 int 2210 port_flow_query(portid_t port_id, uint32_t rule, 2211 const struct rte_flow_action *action) 2212 { 2213 struct rte_flow_error error; 2214 struct rte_port *port; 2215 struct port_flow *pf; 2216 const char *name; 2217 union { 2218 struct rte_flow_query_count count; 2219 struct rte_flow_action_rss rss_conf; 2220 struct rte_flow_query_age age; 2221 } query; 2222 int ret; 2223 2224 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2225 port_id == (portid_t)RTE_PORT_ALL) 2226 return -EINVAL; 2227 port = &ports[port_id]; 2228 for (pf = port->flow_list; pf; pf = pf->next) 2229 if (pf->id == rule) 2230 break; 2231 if (!pf) { 2232 fprintf(stderr, "Flow rule #%u not found\n", rule); 2233 return -ENOENT; 2234 } 2235 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2236 &name, sizeof(name), 2237 (void *)(uintptr_t)action->type, &error); 2238 if (ret < 0) 2239 return port_flow_complain(&error); 2240 switch (action->type) { 2241 case RTE_FLOW_ACTION_TYPE_COUNT: 2242 case RTE_FLOW_ACTION_TYPE_RSS: 2243 case RTE_FLOW_ACTION_TYPE_AGE: 2244 break; 2245 default: 2246 fprintf(stderr, "Cannot query action type %d (%s)\n", 2247 action->type, name); 2248 return -ENOTSUP; 2249 } 2250 /* Poisoning to make sure PMDs update it in case of error. */ 2251 memset(&error, 0x55, sizeof(error)); 2252 memset(&query, 0, sizeof(query)); 2253 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2254 return port_flow_complain(&error); 2255 switch (action->type) { 2256 case RTE_FLOW_ACTION_TYPE_COUNT: 2257 printf("%s:\n" 2258 " hits_set: %u\n" 2259 " bytes_set: %u\n" 2260 " hits: %" PRIu64 "\n" 2261 " bytes: %" PRIu64 "\n", 2262 name, 2263 query.count.hits_set, 2264 query.count.bytes_set, 2265 query.count.hits, 2266 query.count.bytes); 2267 break; 2268 case RTE_FLOW_ACTION_TYPE_RSS: 2269 rss_config_display(&query.rss_conf); 2270 break; 2271 case RTE_FLOW_ACTION_TYPE_AGE: 2272 printf("%s:\n" 2273 " aged: %u\n" 2274 " sec_since_last_hit_valid: %u\n" 2275 " sec_since_last_hit: %" PRIu32 "\n", 2276 name, 2277 query.age.aged, 2278 query.age.sec_since_last_hit_valid, 2279 query.age.sec_since_last_hit); 2280 break; 2281 default: 2282 fprintf(stderr, 2283 "Cannot display result for action type %d (%s)\n", 2284 action->type, name); 2285 break; 2286 } 2287 return 0; 2288 } 2289 2290 /** List simply and destroy all aged flows. */ 2291 void 2292 port_flow_aged(portid_t port_id, uint8_t destroy) 2293 { 2294 void **contexts; 2295 int nb_context, total = 0, idx; 2296 struct rte_flow_error error; 2297 enum age_action_context_type *type; 2298 union { 2299 struct port_flow *pf; 2300 struct port_indirect_action *pia; 2301 } ctx; 2302 2303 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2304 port_id == (portid_t)RTE_PORT_ALL) 2305 return; 2306 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2307 printf("Port %u total aged flows: %d\n", port_id, total); 2308 if (total < 0) { 2309 port_flow_complain(&error); 2310 return; 2311 } 2312 if (total == 0) 2313 return; 2314 contexts = malloc(sizeof(void *) * total); 2315 if (contexts == NULL) { 2316 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 2317 return; 2318 } 2319 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 2320 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2321 if (nb_context != total) { 2322 fprintf(stderr, 2323 "Port:%d get aged flows count(%d) != total(%d)\n", 2324 port_id, nb_context, total); 2325 free(contexts); 2326 return; 2327 } 2328 total = 0; 2329 for (idx = 0; idx < nb_context; idx++) { 2330 if (!contexts[idx]) { 2331 fprintf(stderr, "Error: get Null context in port %u\n", 2332 port_id); 2333 continue; 2334 } 2335 type = (enum age_action_context_type *)contexts[idx]; 2336 switch (*type) { 2337 case ACTION_AGE_CONTEXT_TYPE_FLOW: 2338 ctx.pf = container_of(type, struct port_flow, age_type); 2339 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 2340 "\t%c%c%c\t\n", 2341 "Flow", 2342 ctx.pf->id, 2343 ctx.pf->rule.attr->group, 2344 ctx.pf->rule.attr->priority, 2345 ctx.pf->rule.attr->ingress ? 'i' : '-', 2346 ctx.pf->rule.attr->egress ? 'e' : '-', 2347 ctx.pf->rule.attr->transfer ? 't' : '-'); 2348 if (destroy && !port_flow_destroy(port_id, 1, 2349 &ctx.pf->id)) 2350 total++; 2351 break; 2352 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 2353 ctx.pia = container_of(type, 2354 struct port_indirect_action, age_type); 2355 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 2356 ctx.pia->id); 2357 break; 2358 default: 2359 fprintf(stderr, "Error: invalid context type %u\n", 2360 port_id); 2361 break; 2362 } 2363 } 2364 printf("\n%d flows destroyed\n", total); 2365 free(contexts); 2366 } 2367 2368 /** List flow rules. */ 2369 void 2370 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2371 { 2372 struct rte_port *port; 2373 struct port_flow *pf; 2374 struct port_flow *list = NULL; 2375 uint32_t i; 2376 2377 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2378 port_id == (portid_t)RTE_PORT_ALL) 2379 return; 2380 port = &ports[port_id]; 2381 if (!port->flow_list) 2382 return; 2383 /* Sort flows by group, priority and ID. */ 2384 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2385 struct port_flow **tmp; 2386 const struct rte_flow_attr *curr = pf->rule.attr; 2387 2388 if (n) { 2389 /* Filter out unwanted groups. */ 2390 for (i = 0; i != n; ++i) 2391 if (curr->group == group[i]) 2392 break; 2393 if (i == n) 2394 continue; 2395 } 2396 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2397 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2398 2399 if (curr->group > comp->group || 2400 (curr->group == comp->group && 2401 curr->priority > comp->priority) || 2402 (curr->group == comp->group && 2403 curr->priority == comp->priority && 2404 pf->id > (*tmp)->id)) 2405 continue; 2406 break; 2407 } 2408 pf->tmp = *tmp; 2409 *tmp = pf; 2410 } 2411 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2412 for (pf = list; pf != NULL; pf = pf->tmp) { 2413 const struct rte_flow_item *item = pf->rule.pattern; 2414 const struct rte_flow_action *action = pf->rule.actions; 2415 const char *name; 2416 2417 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2418 pf->id, 2419 pf->rule.attr->group, 2420 pf->rule.attr->priority, 2421 pf->rule.attr->ingress ? 'i' : '-', 2422 pf->rule.attr->egress ? 'e' : '-', 2423 pf->rule.attr->transfer ? 't' : '-'); 2424 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2425 if ((uint32_t)item->type > INT_MAX) 2426 name = "PMD_INTERNAL"; 2427 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2428 &name, sizeof(name), 2429 (void *)(uintptr_t)item->type, 2430 NULL) <= 0) 2431 name = "[UNKNOWN]"; 2432 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2433 printf("%s ", name); 2434 ++item; 2435 } 2436 printf("=>"); 2437 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2438 if ((uint32_t)action->type > INT_MAX) 2439 name = "PMD_INTERNAL"; 2440 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2441 &name, sizeof(name), 2442 (void *)(uintptr_t)action->type, 2443 NULL) <= 0) 2444 name = "[UNKNOWN]"; 2445 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2446 printf(" %s", name); 2447 ++action; 2448 } 2449 printf("\n"); 2450 } 2451 } 2452 2453 /** Restrict ingress traffic to the defined flow rules. */ 2454 int 2455 port_flow_isolate(portid_t port_id, int set) 2456 { 2457 struct rte_flow_error error; 2458 2459 /* Poisoning to make sure PMDs update it in case of error. */ 2460 memset(&error, 0x66, sizeof(error)); 2461 if (rte_flow_isolate(port_id, set, &error)) 2462 return port_flow_complain(&error); 2463 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2464 port_id, 2465 set ? "now restricted" : "not restricted anymore"); 2466 return 0; 2467 } 2468 2469 /* 2470 * RX/TX ring descriptors display functions. 2471 */ 2472 int 2473 rx_queue_id_is_invalid(queueid_t rxq_id) 2474 { 2475 if (rxq_id < nb_rxq) 2476 return 0; 2477 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 2478 rxq_id, nb_rxq); 2479 return 1; 2480 } 2481 2482 int 2483 tx_queue_id_is_invalid(queueid_t txq_id) 2484 { 2485 if (txq_id < nb_txq) 2486 return 0; 2487 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 2488 txq_id, nb_txq); 2489 return 1; 2490 } 2491 2492 static int 2493 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2494 { 2495 struct rte_port *port = &ports[port_id]; 2496 struct rte_eth_rxq_info rx_qinfo; 2497 int ret; 2498 2499 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2500 if (ret == 0) { 2501 *ring_size = rx_qinfo.nb_desc; 2502 return ret; 2503 } 2504 2505 if (ret != -ENOTSUP) 2506 return ret; 2507 /* 2508 * If the rte_eth_rx_queue_info_get is not support for this PMD, 2509 * ring_size stored in testpmd will be used for validity verification. 2510 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 2511 * being 0, it will use a default value provided by PMDs to setup this 2512 * rxq. If the default value is 0, it will use the 2513 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 2514 */ 2515 if (port->nb_rx_desc[rxq_id]) 2516 *ring_size = port->nb_rx_desc[rxq_id]; 2517 else if (port->dev_info.default_rxportconf.ring_size) 2518 *ring_size = port->dev_info.default_rxportconf.ring_size; 2519 else 2520 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2521 return 0; 2522 } 2523 2524 static int 2525 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2526 { 2527 struct rte_port *port = &ports[port_id]; 2528 struct rte_eth_txq_info tx_qinfo; 2529 int ret; 2530 2531 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2532 if (ret == 0) { 2533 *ring_size = tx_qinfo.nb_desc; 2534 return ret; 2535 } 2536 2537 if (ret != -ENOTSUP) 2538 return ret; 2539 /* 2540 * If the rte_eth_tx_queue_info_get is not support for this PMD, 2541 * ring_size stored in testpmd will be used for validity verification. 2542 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 2543 * being 0, it will use a default value provided by PMDs to setup this 2544 * txq. If the default value is 0, it will use the 2545 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 2546 */ 2547 if (port->nb_tx_desc[txq_id]) 2548 *ring_size = port->nb_tx_desc[txq_id]; 2549 else if (port->dev_info.default_txportconf.ring_size) 2550 *ring_size = port->dev_info.default_txportconf.ring_size; 2551 else 2552 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2553 return 0; 2554 } 2555 2556 static int 2557 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2558 { 2559 uint16_t ring_size; 2560 int ret; 2561 2562 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2563 if (ret) 2564 return 1; 2565 2566 if (rxdesc_id < ring_size) 2567 return 0; 2568 2569 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 2570 rxdesc_id, ring_size); 2571 return 1; 2572 } 2573 2574 static int 2575 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2576 { 2577 uint16_t ring_size; 2578 int ret; 2579 2580 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2581 if (ret) 2582 return 1; 2583 2584 if (txdesc_id < ring_size) 2585 return 0; 2586 2587 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 2588 txdesc_id, ring_size); 2589 return 1; 2590 } 2591 2592 static const struct rte_memzone * 2593 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2594 { 2595 char mz_name[RTE_MEMZONE_NAMESIZE]; 2596 const struct rte_memzone *mz; 2597 2598 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2599 port_id, q_id, ring_name); 2600 mz = rte_memzone_lookup(mz_name); 2601 if (mz == NULL) 2602 fprintf(stderr, 2603 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 2604 ring_name, port_id, q_id, mz_name); 2605 return mz; 2606 } 2607 2608 union igb_ring_dword { 2609 uint64_t dword; 2610 struct { 2611 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2612 uint32_t lo; 2613 uint32_t hi; 2614 #else 2615 uint32_t hi; 2616 uint32_t lo; 2617 #endif 2618 } words; 2619 }; 2620 2621 struct igb_ring_desc_32_bytes { 2622 union igb_ring_dword lo_dword; 2623 union igb_ring_dword hi_dword; 2624 union igb_ring_dword resv1; 2625 union igb_ring_dword resv2; 2626 }; 2627 2628 struct igb_ring_desc_16_bytes { 2629 union igb_ring_dword lo_dword; 2630 union igb_ring_dword hi_dword; 2631 }; 2632 2633 static void 2634 ring_rxd_display_dword(union igb_ring_dword dword) 2635 { 2636 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2637 (unsigned)dword.words.hi); 2638 } 2639 2640 static void 2641 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2642 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2643 portid_t port_id, 2644 #else 2645 __rte_unused portid_t port_id, 2646 #endif 2647 uint16_t desc_id) 2648 { 2649 struct igb_ring_desc_16_bytes *ring = 2650 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2651 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2652 int ret; 2653 struct rte_eth_dev_info dev_info; 2654 2655 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2656 if (ret != 0) 2657 return; 2658 2659 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2660 /* 32 bytes RX descriptor, i40e only */ 2661 struct igb_ring_desc_32_bytes *ring = 2662 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2663 ring[desc_id].lo_dword.dword = 2664 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2665 ring_rxd_display_dword(ring[desc_id].lo_dword); 2666 ring[desc_id].hi_dword.dword = 2667 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2668 ring_rxd_display_dword(ring[desc_id].hi_dword); 2669 ring[desc_id].resv1.dword = 2670 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2671 ring_rxd_display_dword(ring[desc_id].resv1); 2672 ring[desc_id].resv2.dword = 2673 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2674 ring_rxd_display_dword(ring[desc_id].resv2); 2675 2676 return; 2677 } 2678 #endif 2679 /* 16 bytes RX descriptor */ 2680 ring[desc_id].lo_dword.dword = 2681 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2682 ring_rxd_display_dword(ring[desc_id].lo_dword); 2683 ring[desc_id].hi_dword.dword = 2684 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2685 ring_rxd_display_dword(ring[desc_id].hi_dword); 2686 } 2687 2688 static void 2689 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2690 { 2691 struct igb_ring_desc_16_bytes *ring; 2692 struct igb_ring_desc_16_bytes txd; 2693 2694 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2695 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2696 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2697 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2698 (unsigned)txd.lo_dword.words.lo, 2699 (unsigned)txd.lo_dword.words.hi, 2700 (unsigned)txd.hi_dword.words.lo, 2701 (unsigned)txd.hi_dword.words.hi); 2702 } 2703 2704 void 2705 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2706 { 2707 const struct rte_memzone *rx_mz; 2708 2709 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2710 return; 2711 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2712 if (rx_mz == NULL) 2713 return; 2714 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2715 } 2716 2717 void 2718 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2719 { 2720 const struct rte_memzone *tx_mz; 2721 2722 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2723 return; 2724 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2725 if (tx_mz == NULL) 2726 return; 2727 ring_tx_descriptor_display(tx_mz, txd_id); 2728 } 2729 2730 void 2731 fwd_lcores_config_display(void) 2732 { 2733 lcoreid_t lc_id; 2734 2735 printf("List of forwarding lcores:"); 2736 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2737 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2738 printf("\n"); 2739 } 2740 void 2741 rxtx_config_display(void) 2742 { 2743 portid_t pid; 2744 queueid_t qid; 2745 2746 printf(" %s packet forwarding%s packets/burst=%d\n", 2747 cur_fwd_eng->fwd_mode_name, 2748 retry_enabled == 0 ? "" : " with retry", 2749 nb_pkt_per_burst); 2750 2751 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2752 printf(" packet len=%u - nb packet segments=%d\n", 2753 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2754 2755 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2756 nb_fwd_lcores, nb_fwd_ports); 2757 2758 RTE_ETH_FOREACH_DEV(pid) { 2759 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2760 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2761 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2762 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2763 struct rte_eth_rxq_info rx_qinfo; 2764 struct rte_eth_txq_info tx_qinfo; 2765 uint16_t rx_free_thresh_tmp; 2766 uint16_t tx_free_thresh_tmp; 2767 uint16_t tx_rs_thresh_tmp; 2768 uint16_t nb_rx_desc_tmp; 2769 uint16_t nb_tx_desc_tmp; 2770 uint64_t offloads_tmp; 2771 uint8_t pthresh_tmp; 2772 uint8_t hthresh_tmp; 2773 uint8_t wthresh_tmp; 2774 int32_t rc; 2775 2776 /* per port config */ 2777 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2778 (unsigned int)pid, nb_rxq, nb_txq); 2779 2780 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2781 ports[pid].dev_conf.rxmode.offloads, 2782 ports[pid].dev_conf.txmode.offloads); 2783 2784 /* per rx queue config only for first queue to be less verbose */ 2785 for (qid = 0; qid < 1; qid++) { 2786 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2787 if (rc) { 2788 nb_rx_desc_tmp = nb_rx_desc[qid]; 2789 rx_free_thresh_tmp = 2790 rx_conf[qid].rx_free_thresh; 2791 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2792 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2793 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2794 offloads_tmp = rx_conf[qid].offloads; 2795 } else { 2796 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2797 rx_free_thresh_tmp = 2798 rx_qinfo.conf.rx_free_thresh; 2799 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2800 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2801 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2802 offloads_tmp = rx_qinfo.conf.offloads; 2803 } 2804 2805 printf(" RX queue: %d\n", qid); 2806 printf(" RX desc=%d - RX free threshold=%d\n", 2807 nb_rx_desc_tmp, rx_free_thresh_tmp); 2808 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2809 " wthresh=%d\n", 2810 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2811 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 2812 if (rx_conf->share_group > 0) 2813 printf(" share_group=%u share_qid=%u", 2814 rx_conf->share_group, 2815 rx_conf->share_qid); 2816 printf("\n"); 2817 } 2818 2819 /* per tx queue config only for first queue to be less verbose */ 2820 for (qid = 0; qid < 1; qid++) { 2821 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2822 if (rc) { 2823 nb_tx_desc_tmp = nb_tx_desc[qid]; 2824 tx_free_thresh_tmp = 2825 tx_conf[qid].tx_free_thresh; 2826 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2827 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2828 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2829 offloads_tmp = tx_conf[qid].offloads; 2830 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2831 } else { 2832 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2833 tx_free_thresh_tmp = 2834 tx_qinfo.conf.tx_free_thresh; 2835 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2836 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2837 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2838 offloads_tmp = tx_qinfo.conf.offloads; 2839 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2840 } 2841 2842 printf(" TX queue: %d\n", qid); 2843 printf(" TX desc=%d - TX free threshold=%d\n", 2844 nb_tx_desc_tmp, tx_free_thresh_tmp); 2845 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2846 " wthresh=%d\n", 2847 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2848 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2849 offloads_tmp, tx_rs_thresh_tmp); 2850 } 2851 } 2852 } 2853 2854 void 2855 port_rss_reta_info(portid_t port_id, 2856 struct rte_eth_rss_reta_entry64 *reta_conf, 2857 uint16_t nb_entries) 2858 { 2859 uint16_t i, idx, shift; 2860 int ret; 2861 2862 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2863 return; 2864 2865 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2866 if (ret != 0) { 2867 fprintf(stderr, 2868 "Failed to get RSS RETA info, return code = %d\n", 2869 ret); 2870 return; 2871 } 2872 2873 for (i = 0; i < nb_entries; i++) { 2874 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2875 shift = i % RTE_ETH_RETA_GROUP_SIZE; 2876 if (!(reta_conf[idx].mask & (1ULL << shift))) 2877 continue; 2878 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2879 i, reta_conf[idx].reta[shift]); 2880 } 2881 } 2882 2883 /* 2884 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 2885 * key of the port. 2886 */ 2887 void 2888 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2889 { 2890 struct rte_eth_rss_conf rss_conf = {0}; 2891 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2892 uint64_t rss_hf; 2893 uint8_t i; 2894 int diag; 2895 struct rte_eth_dev_info dev_info; 2896 uint8_t hash_key_size; 2897 int ret; 2898 2899 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2900 return; 2901 2902 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2903 if (ret != 0) 2904 return; 2905 2906 if (dev_info.hash_key_size > 0 && 2907 dev_info.hash_key_size <= sizeof(rss_key)) 2908 hash_key_size = dev_info.hash_key_size; 2909 else { 2910 fprintf(stderr, 2911 "dev_info did not provide a valid hash key size\n"); 2912 return; 2913 } 2914 2915 /* Get RSS hash key if asked to display it */ 2916 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 2917 rss_conf.rss_key_len = hash_key_size; 2918 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2919 if (diag != 0) { 2920 switch (diag) { 2921 case -ENODEV: 2922 fprintf(stderr, "port index %d invalid\n", port_id); 2923 break; 2924 case -ENOTSUP: 2925 fprintf(stderr, "operation not supported by device\n"); 2926 break; 2927 default: 2928 fprintf(stderr, "operation failed - diag=%d\n", diag); 2929 break; 2930 } 2931 return; 2932 } 2933 rss_hf = rss_conf.rss_hf; 2934 if (rss_hf == 0) { 2935 printf("RSS disabled\n"); 2936 return; 2937 } 2938 printf("RSS functions:\n "); 2939 for (i = 0; rss_type_table[i].str; i++) { 2940 if (rss_type_table[i].rss_type == 0) 2941 continue; 2942 if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type) 2943 printf("%s ", rss_type_table[i].str); 2944 } 2945 printf("\n"); 2946 if (!show_rss_key) 2947 return; 2948 printf("RSS key:\n"); 2949 for (i = 0; i < hash_key_size; i++) 2950 printf("%02X", rss_key[i]); 2951 printf("\n"); 2952 } 2953 2954 void 2955 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 2956 uint8_t hash_key_len) 2957 { 2958 struct rte_eth_rss_conf rss_conf; 2959 int diag; 2960 unsigned int i; 2961 2962 rss_conf.rss_key = NULL; 2963 rss_conf.rss_key_len = 0; 2964 rss_conf.rss_hf = 0; 2965 for (i = 0; rss_type_table[i].str; i++) { 2966 if (!strcmp(rss_type_table[i].str, rss_type)) 2967 rss_conf.rss_hf = rss_type_table[i].rss_type; 2968 } 2969 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2970 if (diag == 0) { 2971 rss_conf.rss_key = hash_key; 2972 rss_conf.rss_key_len = hash_key_len; 2973 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2974 } 2975 if (diag == 0) 2976 return; 2977 2978 switch (diag) { 2979 case -ENODEV: 2980 fprintf(stderr, "port index %d invalid\n", port_id); 2981 break; 2982 case -ENOTSUP: 2983 fprintf(stderr, "operation not supported by device\n"); 2984 break; 2985 default: 2986 fprintf(stderr, "operation failed - diag=%d\n", diag); 2987 break; 2988 } 2989 } 2990 2991 /* 2992 * Check whether a shared rxq scheduled on other lcores. 2993 */ 2994 static bool 2995 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 2996 portid_t src_port, queueid_t src_rxq, 2997 uint32_t share_group, queueid_t share_rxq) 2998 { 2999 streamid_t sm_id; 3000 streamid_t nb_fs_per_lcore; 3001 lcoreid_t nb_fc; 3002 lcoreid_t lc_id; 3003 struct fwd_stream *fs; 3004 struct rte_port *port; 3005 struct rte_eth_dev_info *dev_info; 3006 struct rte_eth_rxconf *rxq_conf; 3007 3008 nb_fc = cur_fwd_config.nb_fwd_lcores; 3009 /* Check remaining cores. */ 3010 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 3011 sm_id = fwd_lcores[lc_id]->stream_idx; 3012 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3013 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3014 sm_id++) { 3015 fs = fwd_streams[sm_id]; 3016 port = &ports[fs->rx_port]; 3017 dev_info = &port->dev_info; 3018 rxq_conf = &port->rx_conf[fs->rx_queue]; 3019 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3020 == 0 || rxq_conf->share_group == 0) 3021 /* Not shared rxq. */ 3022 continue; 3023 if (domain_id != port->dev_info.switch_info.domain_id) 3024 continue; 3025 if (rxq_conf->share_group != share_group) 3026 continue; 3027 if (rxq_conf->share_qid != share_rxq) 3028 continue; 3029 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 3030 share_group, share_rxq); 3031 printf(" lcore %hhu Port %hu queue %hu\n", 3032 src_lc, src_port, src_rxq); 3033 printf(" lcore %hhu Port %hu queue %hu\n", 3034 lc_id, fs->rx_port, fs->rx_queue); 3035 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 3036 nb_rxq); 3037 return true; 3038 } 3039 } 3040 return false; 3041 } 3042 3043 /* 3044 * Check shared rxq configuration. 3045 * 3046 * Shared group must not being scheduled on different core. 3047 */ 3048 bool 3049 pkt_fwd_shared_rxq_check(void) 3050 { 3051 streamid_t sm_id; 3052 streamid_t nb_fs_per_lcore; 3053 lcoreid_t nb_fc; 3054 lcoreid_t lc_id; 3055 struct fwd_stream *fs; 3056 uint16_t domain_id; 3057 struct rte_port *port; 3058 struct rte_eth_dev_info *dev_info; 3059 struct rte_eth_rxconf *rxq_conf; 3060 3061 if (rxq_share == 0) 3062 return true; 3063 nb_fc = cur_fwd_config.nb_fwd_lcores; 3064 /* 3065 * Check streams on each core, make sure the same switch domain + 3066 * group + queue doesn't get scheduled on other cores. 3067 */ 3068 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 3069 sm_id = fwd_lcores[lc_id]->stream_idx; 3070 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3071 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3072 sm_id++) { 3073 fs = fwd_streams[sm_id]; 3074 /* Update lcore info stream being scheduled. */ 3075 fs->lcore = fwd_lcores[lc_id]; 3076 port = &ports[fs->rx_port]; 3077 dev_info = &port->dev_info; 3078 rxq_conf = &port->rx_conf[fs->rx_queue]; 3079 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3080 == 0 || rxq_conf->share_group == 0) 3081 /* Not shared rxq. */ 3082 continue; 3083 /* Check shared rxq not scheduled on remaining cores. */ 3084 domain_id = port->dev_info.switch_info.domain_id; 3085 if (fwd_stream_on_other_lcores(domain_id, lc_id, 3086 fs->rx_port, 3087 fs->rx_queue, 3088 rxq_conf->share_group, 3089 rxq_conf->share_qid)) 3090 return false; 3091 } 3092 } 3093 return true; 3094 } 3095 3096 /* 3097 * Setup forwarding configuration for each logical core. 3098 */ 3099 static void 3100 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 3101 { 3102 streamid_t nb_fs_per_lcore; 3103 streamid_t nb_fs; 3104 streamid_t sm_id; 3105 lcoreid_t nb_extra; 3106 lcoreid_t nb_fc; 3107 lcoreid_t nb_lc; 3108 lcoreid_t lc_id; 3109 3110 nb_fs = cfg->nb_fwd_streams; 3111 nb_fc = cfg->nb_fwd_lcores; 3112 if (nb_fs <= nb_fc) { 3113 nb_fs_per_lcore = 1; 3114 nb_extra = 0; 3115 } else { 3116 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3117 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3118 } 3119 3120 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3121 sm_id = 0; 3122 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3123 fwd_lcores[lc_id]->stream_idx = sm_id; 3124 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3125 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3126 } 3127 3128 /* 3129 * Assign extra remaining streams, if any. 3130 */ 3131 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3132 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3133 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3134 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3135 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3136 } 3137 } 3138 3139 static portid_t 3140 fwd_topology_tx_port_get(portid_t rxp) 3141 { 3142 static int warning_once = 1; 3143 3144 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 3145 3146 switch (port_topology) { 3147 default: 3148 case PORT_TOPOLOGY_PAIRED: 3149 if ((rxp & 0x1) == 0) { 3150 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 3151 return rxp + 1; 3152 if (warning_once) { 3153 fprintf(stderr, 3154 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 3155 warning_once = 0; 3156 } 3157 return rxp; 3158 } 3159 return rxp - 1; 3160 case PORT_TOPOLOGY_CHAINED: 3161 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 3162 case PORT_TOPOLOGY_LOOP: 3163 return rxp; 3164 } 3165 } 3166 3167 static void 3168 simple_fwd_config_setup(void) 3169 { 3170 portid_t i; 3171 3172 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 3173 cur_fwd_config.nb_fwd_streams = 3174 (streamid_t) cur_fwd_config.nb_fwd_ports; 3175 3176 /* reinitialize forwarding streams */ 3177 init_fwd_streams(); 3178 3179 /* 3180 * In the simple forwarding test, the number of forwarding cores 3181 * must be lower or equal to the number of forwarding ports. 3182 */ 3183 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3184 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 3185 cur_fwd_config.nb_fwd_lcores = 3186 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 3187 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3188 3189 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 3190 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 3191 fwd_streams[i]->rx_queue = 0; 3192 fwd_streams[i]->tx_port = 3193 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 3194 fwd_streams[i]->tx_queue = 0; 3195 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 3196 fwd_streams[i]->retry_enabled = retry_enabled; 3197 } 3198 } 3199 3200 /** 3201 * For the RSS forwarding test all streams distributed over lcores. Each stream 3202 * being composed of a RX queue to poll on a RX port for input messages, 3203 * associated with a TX queue of a TX port where to send forwarded packets. 3204 */ 3205 static void 3206 rss_fwd_config_setup(void) 3207 { 3208 portid_t rxp; 3209 portid_t txp; 3210 queueid_t rxq; 3211 queueid_t nb_q; 3212 streamid_t sm_id; 3213 int start; 3214 int end; 3215 3216 nb_q = nb_rxq; 3217 if (nb_q > nb_txq) 3218 nb_q = nb_txq; 3219 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3220 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3221 cur_fwd_config.nb_fwd_streams = 3222 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 3223 3224 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3225 cur_fwd_config.nb_fwd_lcores = 3226 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3227 3228 /* reinitialize forwarding streams */ 3229 init_fwd_streams(); 3230 3231 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3232 3233 if (proc_id > 0 && nb_q % num_procs != 0) 3234 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 3235 3236 /** 3237 * In multi-process, All queues are allocated to different 3238 * processes based on num_procs and proc_id. For example: 3239 * if supports 4 queues(nb_q), 2 processes(num_procs), 3240 * the 0~1 queue for primary process. 3241 * the 2~3 queue for secondary process. 3242 */ 3243 start = proc_id * nb_q / num_procs; 3244 end = start + nb_q / num_procs; 3245 rxp = 0; 3246 rxq = start; 3247 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 3248 struct fwd_stream *fs; 3249 3250 fs = fwd_streams[sm_id]; 3251 txp = fwd_topology_tx_port_get(rxp); 3252 fs->rx_port = fwd_ports_ids[rxp]; 3253 fs->rx_queue = rxq; 3254 fs->tx_port = fwd_ports_ids[txp]; 3255 fs->tx_queue = rxq; 3256 fs->peer_addr = fs->tx_port; 3257 fs->retry_enabled = retry_enabled; 3258 rxp++; 3259 if (rxp < nb_fwd_ports) 3260 continue; 3261 rxp = 0; 3262 rxq++; 3263 if (rxq >= end) 3264 rxq = start; 3265 } 3266 } 3267 3268 static uint16_t 3269 get_fwd_port_total_tc_num(void) 3270 { 3271 struct rte_eth_dcb_info dcb_info; 3272 uint16_t total_tc_num = 0; 3273 unsigned int i; 3274 3275 for (i = 0; i < nb_fwd_ports; i++) { 3276 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 3277 total_tc_num += dcb_info.nb_tcs; 3278 } 3279 3280 return total_tc_num; 3281 } 3282 3283 /** 3284 * For the DCB forwarding test, each core is assigned on each traffic class. 3285 * 3286 * Each core is assigned a multi-stream, each stream being composed of 3287 * a RX queue to poll on a RX port for input messages, associated with 3288 * a TX queue of a TX port where to send forwarded packets. All RX and 3289 * TX queues are mapping to the same traffic class. 3290 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 3291 * the same core 3292 */ 3293 static void 3294 dcb_fwd_config_setup(void) 3295 { 3296 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3297 portid_t txp, rxp = 0; 3298 queueid_t txq, rxq = 0; 3299 lcoreid_t lc_id; 3300 uint16_t nb_rx_queue, nb_tx_queue; 3301 uint16_t i, j, k, sm_id = 0; 3302 uint16_t total_tc_num; 3303 struct rte_port *port; 3304 uint8_t tc = 0; 3305 portid_t pid; 3306 int ret; 3307 3308 /* 3309 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 3310 * or RTE_PORT_STOPPED. 3311 * 3312 * Re-configure ports to get updated mapping between tc and queue in 3313 * case the queue number of the port is changed. Skip for started ports 3314 * since modifying queue number and calling dev_configure need to stop 3315 * ports first. 3316 */ 3317 for (pid = 0; pid < nb_fwd_ports; pid++) { 3318 if (port_is_started(pid) == 1) 3319 continue; 3320 3321 port = &ports[pid]; 3322 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 3323 &port->dev_conf); 3324 if (ret < 0) { 3325 fprintf(stderr, 3326 "Failed to re-configure port %d, ret = %d.\n", 3327 pid, ret); 3328 return; 3329 } 3330 } 3331 3332 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3333 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3334 cur_fwd_config.nb_fwd_streams = 3335 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3336 total_tc_num = get_fwd_port_total_tc_num(); 3337 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 3338 cur_fwd_config.nb_fwd_lcores = total_tc_num; 3339 3340 /* reinitialize forwarding streams */ 3341 init_fwd_streams(); 3342 sm_id = 0; 3343 txp = 1; 3344 /* get the dcb info on the first RX and TX ports */ 3345 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3346 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3347 3348 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3349 fwd_lcores[lc_id]->stream_nb = 0; 3350 fwd_lcores[lc_id]->stream_idx = sm_id; 3351 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 3352 /* if the nb_queue is zero, means this tc is 3353 * not enabled on the POOL 3354 */ 3355 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3356 break; 3357 k = fwd_lcores[lc_id]->stream_nb + 3358 fwd_lcores[lc_id]->stream_idx; 3359 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3360 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3361 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3362 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3363 for (j = 0; j < nb_rx_queue; j++) { 3364 struct fwd_stream *fs; 3365 3366 fs = fwd_streams[k + j]; 3367 fs->rx_port = fwd_ports_ids[rxp]; 3368 fs->rx_queue = rxq + j; 3369 fs->tx_port = fwd_ports_ids[txp]; 3370 fs->tx_queue = txq + j % nb_tx_queue; 3371 fs->peer_addr = fs->tx_port; 3372 fs->retry_enabled = retry_enabled; 3373 } 3374 fwd_lcores[lc_id]->stream_nb += 3375 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3376 } 3377 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3378 3379 tc++; 3380 if (tc < rxp_dcb_info.nb_tcs) 3381 continue; 3382 /* Restart from TC 0 on next RX port */ 3383 tc = 0; 3384 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3385 rxp = (portid_t) 3386 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3387 else 3388 rxp++; 3389 if (rxp >= nb_fwd_ports) 3390 return; 3391 /* get the dcb information on next RX and TX ports */ 3392 if ((rxp & 0x1) == 0) 3393 txp = (portid_t) (rxp + 1); 3394 else 3395 txp = (portid_t) (rxp - 1); 3396 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3397 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3398 } 3399 } 3400 3401 static void 3402 icmp_echo_config_setup(void) 3403 { 3404 portid_t rxp; 3405 queueid_t rxq; 3406 lcoreid_t lc_id; 3407 uint16_t sm_id; 3408 3409 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3410 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3411 (nb_txq * nb_fwd_ports); 3412 else 3413 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3414 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3415 cur_fwd_config.nb_fwd_streams = 3416 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3417 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3418 cur_fwd_config.nb_fwd_lcores = 3419 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3420 if (verbose_level > 0) { 3421 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 3422 __FUNCTION__, 3423 cur_fwd_config.nb_fwd_lcores, 3424 cur_fwd_config.nb_fwd_ports, 3425 cur_fwd_config.nb_fwd_streams); 3426 } 3427 3428 /* reinitialize forwarding streams */ 3429 init_fwd_streams(); 3430 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3431 rxp = 0; rxq = 0; 3432 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3433 if (verbose_level > 0) 3434 printf(" core=%d: \n", lc_id); 3435 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3436 struct fwd_stream *fs; 3437 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3438 fs->rx_port = fwd_ports_ids[rxp]; 3439 fs->rx_queue = rxq; 3440 fs->tx_port = fs->rx_port; 3441 fs->tx_queue = rxq; 3442 fs->peer_addr = fs->tx_port; 3443 fs->retry_enabled = retry_enabled; 3444 if (verbose_level > 0) 3445 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3446 sm_id, fs->rx_port, fs->rx_queue, 3447 fs->tx_queue); 3448 rxq = (queueid_t) (rxq + 1); 3449 if (rxq == nb_rxq) { 3450 rxq = 0; 3451 rxp = (portid_t) (rxp + 1); 3452 } 3453 } 3454 } 3455 } 3456 3457 void 3458 fwd_config_setup(void) 3459 { 3460 struct rte_port *port; 3461 portid_t pt_id; 3462 unsigned int i; 3463 3464 cur_fwd_config.fwd_eng = cur_fwd_eng; 3465 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3466 icmp_echo_config_setup(); 3467 return; 3468 } 3469 3470 if ((nb_rxq > 1) && (nb_txq > 1)){ 3471 if (dcb_config) { 3472 for (i = 0; i < nb_fwd_ports; i++) { 3473 pt_id = fwd_ports_ids[i]; 3474 port = &ports[pt_id]; 3475 if (!port->dcb_flag) { 3476 fprintf(stderr, 3477 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 3478 return; 3479 } 3480 } 3481 if (nb_fwd_lcores == 1) { 3482 fprintf(stderr, 3483 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 3484 return; 3485 } 3486 3487 dcb_fwd_config_setup(); 3488 } else 3489 rss_fwd_config_setup(); 3490 } 3491 else 3492 simple_fwd_config_setup(); 3493 } 3494 3495 static const char * 3496 mp_alloc_to_str(uint8_t mode) 3497 { 3498 switch (mode) { 3499 case MP_ALLOC_NATIVE: 3500 return "native"; 3501 case MP_ALLOC_ANON: 3502 return "anon"; 3503 case MP_ALLOC_XMEM: 3504 return "xmem"; 3505 case MP_ALLOC_XMEM_HUGE: 3506 return "xmemhuge"; 3507 case MP_ALLOC_XBUF: 3508 return "xbuf"; 3509 default: 3510 return "invalid"; 3511 } 3512 } 3513 3514 void 3515 pkt_fwd_config_display(struct fwd_config *cfg) 3516 { 3517 struct fwd_stream *fs; 3518 lcoreid_t lc_id; 3519 streamid_t sm_id; 3520 3521 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3522 "NUMA support %s, MP allocation mode: %s\n", 3523 cfg->fwd_eng->fwd_mode_name, 3524 retry_enabled == 0 ? "" : " with retry", 3525 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3526 numa_support == 1 ? "enabled" : "disabled", 3527 mp_alloc_to_str(mp_alloc_type)); 3528 3529 if (retry_enabled) 3530 printf("TX retry num: %u, delay between TX retries: %uus\n", 3531 burst_tx_retry_num, burst_tx_delay_time); 3532 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3533 printf("Logical Core %u (socket %u) forwards packets on " 3534 "%d streams:", 3535 fwd_lcores_cpuids[lc_id], 3536 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3537 fwd_lcores[lc_id]->stream_nb); 3538 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3539 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3540 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3541 "P=%d/Q=%d (socket %u) ", 3542 fs->rx_port, fs->rx_queue, 3543 ports[fs->rx_port].socket_id, 3544 fs->tx_port, fs->tx_queue, 3545 ports[fs->tx_port].socket_id); 3546 print_ethaddr("peer=", 3547 &peer_eth_addrs[fs->peer_addr]); 3548 } 3549 printf("\n"); 3550 } 3551 printf("\n"); 3552 } 3553 3554 void 3555 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3556 { 3557 struct rte_ether_addr new_peer_addr; 3558 if (!rte_eth_dev_is_valid_port(port_id)) { 3559 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 3560 return; 3561 } 3562 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3563 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 3564 peer_addr); 3565 return; 3566 } 3567 peer_eth_addrs[port_id] = new_peer_addr; 3568 } 3569 3570 int 3571 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3572 { 3573 unsigned int i; 3574 unsigned int lcore_cpuid; 3575 int record_now; 3576 3577 record_now = 0; 3578 again: 3579 for (i = 0; i < nb_lc; i++) { 3580 lcore_cpuid = lcorelist[i]; 3581 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3582 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 3583 return -1; 3584 } 3585 if (lcore_cpuid == rte_get_main_lcore()) { 3586 fprintf(stderr, 3587 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 3588 lcore_cpuid); 3589 return -1; 3590 } 3591 if (record_now) 3592 fwd_lcores_cpuids[i] = lcore_cpuid; 3593 } 3594 if (record_now == 0) { 3595 record_now = 1; 3596 goto again; 3597 } 3598 nb_cfg_lcores = (lcoreid_t) nb_lc; 3599 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3600 printf("previous number of forwarding cores %u - changed to " 3601 "number of configured cores %u\n", 3602 (unsigned int) nb_fwd_lcores, nb_lc); 3603 nb_fwd_lcores = (lcoreid_t) nb_lc; 3604 } 3605 3606 return 0; 3607 } 3608 3609 int 3610 set_fwd_lcores_mask(uint64_t lcoremask) 3611 { 3612 unsigned int lcorelist[64]; 3613 unsigned int nb_lc; 3614 unsigned int i; 3615 3616 if (lcoremask == 0) { 3617 fprintf(stderr, "Invalid NULL mask of cores\n"); 3618 return -1; 3619 } 3620 nb_lc = 0; 3621 for (i = 0; i < 64; i++) { 3622 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3623 continue; 3624 lcorelist[nb_lc++] = i; 3625 } 3626 return set_fwd_lcores_list(lcorelist, nb_lc); 3627 } 3628 3629 void 3630 set_fwd_lcores_number(uint16_t nb_lc) 3631 { 3632 if (test_done == 0) { 3633 fprintf(stderr, "Please stop forwarding first\n"); 3634 return; 3635 } 3636 if (nb_lc > nb_cfg_lcores) { 3637 fprintf(stderr, 3638 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 3639 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3640 return; 3641 } 3642 nb_fwd_lcores = (lcoreid_t) nb_lc; 3643 printf("Number of forwarding cores set to %u\n", 3644 (unsigned int) nb_fwd_lcores); 3645 } 3646 3647 void 3648 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3649 { 3650 unsigned int i; 3651 portid_t port_id; 3652 int record_now; 3653 3654 record_now = 0; 3655 again: 3656 for (i = 0; i < nb_pt; i++) { 3657 port_id = (portid_t) portlist[i]; 3658 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3659 return; 3660 if (record_now) 3661 fwd_ports_ids[i] = port_id; 3662 } 3663 if (record_now == 0) { 3664 record_now = 1; 3665 goto again; 3666 } 3667 nb_cfg_ports = (portid_t) nb_pt; 3668 if (nb_fwd_ports != (portid_t) nb_pt) { 3669 printf("previous number of forwarding ports %u - changed to " 3670 "number of configured ports %u\n", 3671 (unsigned int) nb_fwd_ports, nb_pt); 3672 nb_fwd_ports = (portid_t) nb_pt; 3673 } 3674 } 3675 3676 /** 3677 * Parse the user input and obtain the list of forwarding ports 3678 * 3679 * @param[in] list 3680 * String containing the user input. User can specify 3681 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3682 * For example, if the user wants to use all the available 3683 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3684 * If the user wants to use only the ports 1,2 then the input 3685 * is 1,2. 3686 * valid characters are '-' and ',' 3687 * @param[out] values 3688 * This array will be filled with a list of port IDs 3689 * based on the user input 3690 * Note that duplicate entries are discarded and only the first 3691 * count entries in this array are port IDs and all the rest 3692 * will contain default values 3693 * @param[in] maxsize 3694 * This parameter denotes 2 things 3695 * 1) Number of elements in the values array 3696 * 2) Maximum value of each element in the values array 3697 * @return 3698 * On success, returns total count of parsed port IDs 3699 * On failure, returns 0 3700 */ 3701 static unsigned int 3702 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3703 { 3704 unsigned int count = 0; 3705 char *end = NULL; 3706 int min, max; 3707 int value, i; 3708 unsigned int marked[maxsize]; 3709 3710 if (list == NULL || values == NULL) 3711 return 0; 3712 3713 for (i = 0; i < (int)maxsize; i++) 3714 marked[i] = 0; 3715 3716 min = INT_MAX; 3717 3718 do { 3719 /*Remove the blank spaces if any*/ 3720 while (isblank(*list)) 3721 list++; 3722 if (*list == '\0') 3723 break; 3724 errno = 0; 3725 value = strtol(list, &end, 10); 3726 if (errno || end == NULL) 3727 return 0; 3728 if (value < 0 || value >= (int)maxsize) 3729 return 0; 3730 while (isblank(*end)) 3731 end++; 3732 if (*end == '-' && min == INT_MAX) { 3733 min = value; 3734 } else if ((*end == ',') || (*end == '\0')) { 3735 max = value; 3736 if (min == INT_MAX) 3737 min = value; 3738 for (i = min; i <= max; i++) { 3739 if (count < maxsize) { 3740 if (marked[i]) 3741 continue; 3742 values[count] = i; 3743 marked[i] = 1; 3744 count++; 3745 } 3746 } 3747 min = INT_MAX; 3748 } else 3749 return 0; 3750 list = end + 1; 3751 } while (*end != '\0'); 3752 3753 return count; 3754 } 3755 3756 void 3757 parse_fwd_portlist(const char *portlist) 3758 { 3759 unsigned int portcount; 3760 unsigned int portindex[RTE_MAX_ETHPORTS]; 3761 unsigned int i, valid_port_count = 0; 3762 3763 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3764 if (!portcount) 3765 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3766 3767 /* 3768 * Here we verify the validity of the ports 3769 * and thereby calculate the total number of 3770 * valid ports 3771 */ 3772 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3773 if (rte_eth_dev_is_valid_port(portindex[i])) { 3774 portindex[valid_port_count] = portindex[i]; 3775 valid_port_count++; 3776 } 3777 } 3778 3779 set_fwd_ports_list(portindex, valid_port_count); 3780 } 3781 3782 void 3783 set_fwd_ports_mask(uint64_t portmask) 3784 { 3785 unsigned int portlist[64]; 3786 unsigned int nb_pt; 3787 unsigned int i; 3788 3789 if (portmask == 0) { 3790 fprintf(stderr, "Invalid NULL mask of ports\n"); 3791 return; 3792 } 3793 nb_pt = 0; 3794 RTE_ETH_FOREACH_DEV(i) { 3795 if (! ((uint64_t)(1ULL << i) & portmask)) 3796 continue; 3797 portlist[nb_pt++] = i; 3798 } 3799 set_fwd_ports_list(portlist, nb_pt); 3800 } 3801 3802 void 3803 set_fwd_ports_number(uint16_t nb_pt) 3804 { 3805 if (nb_pt > nb_cfg_ports) { 3806 fprintf(stderr, 3807 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 3808 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3809 return; 3810 } 3811 nb_fwd_ports = (portid_t) nb_pt; 3812 printf("Number of forwarding ports set to %u\n", 3813 (unsigned int) nb_fwd_ports); 3814 } 3815 3816 int 3817 port_is_forwarding(portid_t port_id) 3818 { 3819 unsigned int i; 3820 3821 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3822 return -1; 3823 3824 for (i = 0; i < nb_fwd_ports; i++) { 3825 if (fwd_ports_ids[i] == port_id) 3826 return 1; 3827 } 3828 3829 return 0; 3830 } 3831 3832 void 3833 set_nb_pkt_per_burst(uint16_t nb) 3834 { 3835 if (nb > MAX_PKT_BURST) { 3836 fprintf(stderr, 3837 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 3838 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3839 return; 3840 } 3841 nb_pkt_per_burst = nb; 3842 printf("Number of packets per burst set to %u\n", 3843 (unsigned int) nb_pkt_per_burst); 3844 } 3845 3846 static const char * 3847 tx_split_get_name(enum tx_pkt_split split) 3848 { 3849 uint32_t i; 3850 3851 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3852 if (tx_split_name[i].split == split) 3853 return tx_split_name[i].name; 3854 } 3855 return NULL; 3856 } 3857 3858 void 3859 set_tx_pkt_split(const char *name) 3860 { 3861 uint32_t i; 3862 3863 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3864 if (strcmp(tx_split_name[i].name, name) == 0) { 3865 tx_pkt_split = tx_split_name[i].split; 3866 return; 3867 } 3868 } 3869 fprintf(stderr, "unknown value: \"%s\"\n", name); 3870 } 3871 3872 int 3873 parse_fec_mode(const char *name, uint32_t *fec_capa) 3874 { 3875 uint8_t i; 3876 3877 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3878 if (strcmp(fec_mode_name[i].name, name) == 0) { 3879 *fec_capa = 3880 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3881 return 0; 3882 } 3883 } 3884 return -1; 3885 } 3886 3887 void 3888 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3889 { 3890 unsigned int i, j; 3891 3892 printf("FEC capabilities:\n"); 3893 3894 for (i = 0; i < num; i++) { 3895 printf("%s : ", 3896 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3897 3898 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 3899 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3900 speed_fec_capa[i].capa) 3901 printf("%s ", fec_mode_name[j].name); 3902 } 3903 printf("\n"); 3904 } 3905 } 3906 3907 void 3908 show_rx_pkt_offsets(void) 3909 { 3910 uint32_t i, n; 3911 3912 n = rx_pkt_nb_offs; 3913 printf("Number of offsets: %u\n", n); 3914 if (n) { 3915 printf("Segment offsets: "); 3916 for (i = 0; i != n - 1; i++) 3917 printf("%hu,", rx_pkt_seg_offsets[i]); 3918 printf("%hu\n", rx_pkt_seg_lengths[i]); 3919 } 3920 } 3921 3922 void 3923 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 3924 { 3925 unsigned int i; 3926 3927 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 3928 printf("nb segments per RX packets=%u >= " 3929 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 3930 return; 3931 } 3932 3933 /* 3934 * No extra check here, the segment length will be checked by PMD 3935 * in the extended queue setup. 3936 */ 3937 for (i = 0; i < nb_offs; i++) { 3938 if (seg_offsets[i] >= UINT16_MAX) { 3939 printf("offset[%u]=%u > UINT16_MAX - give up\n", 3940 i, seg_offsets[i]); 3941 return; 3942 } 3943 } 3944 3945 for (i = 0; i < nb_offs; i++) 3946 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 3947 3948 rx_pkt_nb_offs = (uint8_t) nb_offs; 3949 } 3950 3951 void 3952 show_rx_pkt_segments(void) 3953 { 3954 uint32_t i, n; 3955 3956 n = rx_pkt_nb_segs; 3957 printf("Number of segments: %u\n", n); 3958 if (n) { 3959 printf("Segment sizes: "); 3960 for (i = 0; i != n - 1; i++) 3961 printf("%hu,", rx_pkt_seg_lengths[i]); 3962 printf("%hu\n", rx_pkt_seg_lengths[i]); 3963 } 3964 } 3965 3966 void 3967 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3968 { 3969 unsigned int i; 3970 3971 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 3972 printf("nb segments per RX packets=%u >= " 3973 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 3974 return; 3975 } 3976 3977 /* 3978 * No extra check here, the segment length will be checked by PMD 3979 * in the extended queue setup. 3980 */ 3981 for (i = 0; i < nb_segs; i++) { 3982 if (seg_lengths[i] >= UINT16_MAX) { 3983 printf("length[%u]=%u > UINT16_MAX - give up\n", 3984 i, seg_lengths[i]); 3985 return; 3986 } 3987 } 3988 3989 for (i = 0; i < nb_segs; i++) 3990 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3991 3992 rx_pkt_nb_segs = (uint8_t) nb_segs; 3993 } 3994 3995 void 3996 show_tx_pkt_segments(void) 3997 { 3998 uint32_t i, n; 3999 const char *split; 4000 4001 n = tx_pkt_nb_segs; 4002 split = tx_split_get_name(tx_pkt_split); 4003 4004 printf("Number of segments: %u\n", n); 4005 printf("Segment sizes: "); 4006 for (i = 0; i != n - 1; i++) 4007 printf("%hu,", tx_pkt_seg_lengths[i]); 4008 printf("%hu\n", tx_pkt_seg_lengths[i]); 4009 printf("Split packet: %s\n", split); 4010 } 4011 4012 static bool 4013 nb_segs_is_invalid(unsigned int nb_segs) 4014 { 4015 uint16_t ring_size; 4016 uint16_t queue_id; 4017 uint16_t port_id; 4018 int ret; 4019 4020 RTE_ETH_FOREACH_DEV(port_id) { 4021 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 4022 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 4023 if (ret) { 4024 /* Port may not be initialized yet, can't say 4025 * the port is invalid in this stage. 4026 */ 4027 continue; 4028 } 4029 if (ring_size < nb_segs) { 4030 printf("nb segments per TX packets=%u >= TX " 4031 "queue(%u) ring_size=%u - txpkts ignored\n", 4032 nb_segs, queue_id, ring_size); 4033 return true; 4034 } 4035 } 4036 } 4037 4038 return false; 4039 } 4040 4041 void 4042 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4043 { 4044 uint16_t tx_pkt_len; 4045 unsigned int i; 4046 4047 /* 4048 * For single segment settings failed check is ignored. 4049 * It is a very basic capability to send the single segment 4050 * packets, suppose it is always supported. 4051 */ 4052 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 4053 fprintf(stderr, 4054 "Tx segment size(%u) is not supported - txpkts ignored\n", 4055 nb_segs); 4056 return; 4057 } 4058 4059 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 4060 fprintf(stderr, 4061 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 4062 nb_segs, RTE_MAX_SEGS_PER_PKT); 4063 return; 4064 } 4065 4066 /* 4067 * Check that each segment length is greater or equal than 4068 * the mbuf data size. 4069 * Check also that the total packet length is greater or equal than the 4070 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 4071 * 20 + 8). 4072 */ 4073 tx_pkt_len = 0; 4074 for (i = 0; i < nb_segs; i++) { 4075 if (seg_lengths[i] > mbuf_data_size[0]) { 4076 fprintf(stderr, 4077 "length[%u]=%u > mbuf_data_size=%u - give up\n", 4078 i, seg_lengths[i], mbuf_data_size[0]); 4079 return; 4080 } 4081 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 4082 } 4083 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 4084 fprintf(stderr, "total packet length=%u < %d - give up\n", 4085 (unsigned) tx_pkt_len, 4086 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 4087 return; 4088 } 4089 4090 for (i = 0; i < nb_segs; i++) 4091 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4092 4093 tx_pkt_length = tx_pkt_len; 4094 tx_pkt_nb_segs = (uint8_t) nb_segs; 4095 } 4096 4097 void 4098 show_tx_pkt_times(void) 4099 { 4100 printf("Interburst gap: %u\n", tx_pkt_times_inter); 4101 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 4102 } 4103 4104 void 4105 set_tx_pkt_times(unsigned int *tx_times) 4106 { 4107 tx_pkt_times_inter = tx_times[0]; 4108 tx_pkt_times_intra = tx_times[1]; 4109 } 4110 4111 #ifdef RTE_LIB_GRO 4112 void 4113 setup_gro(const char *onoff, portid_t port_id) 4114 { 4115 if (!rte_eth_dev_is_valid_port(port_id)) { 4116 fprintf(stderr, "invalid port id %u\n", port_id); 4117 return; 4118 } 4119 if (test_done == 0) { 4120 fprintf(stderr, 4121 "Before enable/disable GRO, please stop forwarding first\n"); 4122 return; 4123 } 4124 if (strcmp(onoff, "on") == 0) { 4125 if (gro_ports[port_id].enable != 0) { 4126 fprintf(stderr, 4127 "Port %u has enabled GRO. Please disable GRO first\n", 4128 port_id); 4129 return; 4130 } 4131 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4132 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 4133 gro_ports[port_id].param.max_flow_num = 4134 GRO_DEFAULT_FLOW_NUM; 4135 gro_ports[port_id].param.max_item_per_flow = 4136 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 4137 } 4138 gro_ports[port_id].enable = 1; 4139 } else { 4140 if (gro_ports[port_id].enable == 0) { 4141 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 4142 return; 4143 } 4144 gro_ports[port_id].enable = 0; 4145 } 4146 } 4147 4148 void 4149 setup_gro_flush_cycles(uint8_t cycles) 4150 { 4151 if (test_done == 0) { 4152 fprintf(stderr, 4153 "Before change flush interval for GRO, please stop forwarding first.\n"); 4154 return; 4155 } 4156 4157 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 4158 GRO_DEFAULT_FLUSH_CYCLES) { 4159 fprintf(stderr, 4160 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 4161 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 4162 cycles = GRO_DEFAULT_FLUSH_CYCLES; 4163 } 4164 4165 gro_flush_cycles = cycles; 4166 } 4167 4168 void 4169 show_gro(portid_t port_id) 4170 { 4171 struct rte_gro_param *param; 4172 uint32_t max_pkts_num; 4173 4174 param = &gro_ports[port_id].param; 4175 4176 if (!rte_eth_dev_is_valid_port(port_id)) { 4177 fprintf(stderr, "Invalid port id %u.\n", port_id); 4178 return; 4179 } 4180 if (gro_ports[port_id].enable) { 4181 printf("GRO type: TCP/IPv4\n"); 4182 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4183 max_pkts_num = param->max_flow_num * 4184 param->max_item_per_flow; 4185 } else 4186 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 4187 printf("Max number of packets to perform GRO: %u\n", 4188 max_pkts_num); 4189 printf("Flushing cycles: %u\n", gro_flush_cycles); 4190 } else 4191 printf("Port %u doesn't enable GRO.\n", port_id); 4192 } 4193 #endif /* RTE_LIB_GRO */ 4194 4195 #ifdef RTE_LIB_GSO 4196 void 4197 setup_gso(const char *mode, portid_t port_id) 4198 { 4199 if (!rte_eth_dev_is_valid_port(port_id)) { 4200 fprintf(stderr, "invalid port id %u\n", port_id); 4201 return; 4202 } 4203 if (strcmp(mode, "on") == 0) { 4204 if (test_done == 0) { 4205 fprintf(stderr, 4206 "before enabling GSO, please stop forwarding first\n"); 4207 return; 4208 } 4209 gso_ports[port_id].enable = 1; 4210 } else if (strcmp(mode, "off") == 0) { 4211 if (test_done == 0) { 4212 fprintf(stderr, 4213 "before disabling GSO, please stop forwarding first\n"); 4214 return; 4215 } 4216 gso_ports[port_id].enable = 0; 4217 } 4218 } 4219 #endif /* RTE_LIB_GSO */ 4220 4221 char* 4222 list_pkt_forwarding_modes(void) 4223 { 4224 static char fwd_modes[128] = ""; 4225 const char *separator = "|"; 4226 struct fwd_engine *fwd_eng; 4227 unsigned i = 0; 4228 4229 if (strlen (fwd_modes) == 0) { 4230 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4231 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4232 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4233 strncat(fwd_modes, separator, 4234 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4235 } 4236 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4237 } 4238 4239 return fwd_modes; 4240 } 4241 4242 char* 4243 list_pkt_forwarding_retry_modes(void) 4244 { 4245 static char fwd_modes[128] = ""; 4246 const char *separator = "|"; 4247 struct fwd_engine *fwd_eng; 4248 unsigned i = 0; 4249 4250 if (strlen(fwd_modes) == 0) { 4251 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4252 if (fwd_eng == &rx_only_engine) 4253 continue; 4254 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4255 sizeof(fwd_modes) - 4256 strlen(fwd_modes) - 1); 4257 strncat(fwd_modes, separator, 4258 sizeof(fwd_modes) - 4259 strlen(fwd_modes) - 1); 4260 } 4261 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4262 } 4263 4264 return fwd_modes; 4265 } 4266 4267 void 4268 set_pkt_forwarding_mode(const char *fwd_mode_name) 4269 { 4270 struct fwd_engine *fwd_eng; 4271 unsigned i; 4272 4273 i = 0; 4274 while ((fwd_eng = fwd_engines[i]) != NULL) { 4275 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 4276 printf("Set %s packet forwarding mode%s\n", 4277 fwd_mode_name, 4278 retry_enabled == 0 ? "" : " with retry"); 4279 cur_fwd_eng = fwd_eng; 4280 return; 4281 } 4282 i++; 4283 } 4284 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 4285 } 4286 4287 void 4288 add_rx_dump_callbacks(portid_t portid) 4289 { 4290 struct rte_eth_dev_info dev_info; 4291 uint16_t queue; 4292 int ret; 4293 4294 if (port_id_is_invalid(portid, ENABLED_WARN)) 4295 return; 4296 4297 ret = eth_dev_info_get_print_err(portid, &dev_info); 4298 if (ret != 0) 4299 return; 4300 4301 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4302 if (!ports[portid].rx_dump_cb[queue]) 4303 ports[portid].rx_dump_cb[queue] = 4304 rte_eth_add_rx_callback(portid, queue, 4305 dump_rx_pkts, NULL); 4306 } 4307 4308 void 4309 add_tx_dump_callbacks(portid_t portid) 4310 { 4311 struct rte_eth_dev_info dev_info; 4312 uint16_t queue; 4313 int ret; 4314 4315 if (port_id_is_invalid(portid, ENABLED_WARN)) 4316 return; 4317 4318 ret = eth_dev_info_get_print_err(portid, &dev_info); 4319 if (ret != 0) 4320 return; 4321 4322 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4323 if (!ports[portid].tx_dump_cb[queue]) 4324 ports[portid].tx_dump_cb[queue] = 4325 rte_eth_add_tx_callback(portid, queue, 4326 dump_tx_pkts, NULL); 4327 } 4328 4329 void 4330 remove_rx_dump_callbacks(portid_t portid) 4331 { 4332 struct rte_eth_dev_info dev_info; 4333 uint16_t queue; 4334 int ret; 4335 4336 if (port_id_is_invalid(portid, ENABLED_WARN)) 4337 return; 4338 4339 ret = eth_dev_info_get_print_err(portid, &dev_info); 4340 if (ret != 0) 4341 return; 4342 4343 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4344 if (ports[portid].rx_dump_cb[queue]) { 4345 rte_eth_remove_rx_callback(portid, queue, 4346 ports[portid].rx_dump_cb[queue]); 4347 ports[portid].rx_dump_cb[queue] = NULL; 4348 } 4349 } 4350 4351 void 4352 remove_tx_dump_callbacks(portid_t portid) 4353 { 4354 struct rte_eth_dev_info dev_info; 4355 uint16_t queue; 4356 int ret; 4357 4358 if (port_id_is_invalid(portid, ENABLED_WARN)) 4359 return; 4360 4361 ret = eth_dev_info_get_print_err(portid, &dev_info); 4362 if (ret != 0) 4363 return; 4364 4365 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4366 if (ports[portid].tx_dump_cb[queue]) { 4367 rte_eth_remove_tx_callback(portid, queue, 4368 ports[portid].tx_dump_cb[queue]); 4369 ports[portid].tx_dump_cb[queue] = NULL; 4370 } 4371 } 4372 4373 void 4374 configure_rxtx_dump_callbacks(uint16_t verbose) 4375 { 4376 portid_t portid; 4377 4378 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4379 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4380 return; 4381 #endif 4382 4383 RTE_ETH_FOREACH_DEV(portid) 4384 { 4385 if (verbose == 1 || verbose > 2) 4386 add_rx_dump_callbacks(portid); 4387 else 4388 remove_rx_dump_callbacks(portid); 4389 if (verbose >= 2) 4390 add_tx_dump_callbacks(portid); 4391 else 4392 remove_tx_dump_callbacks(portid); 4393 } 4394 } 4395 4396 void 4397 set_verbose_level(uint16_t vb_level) 4398 { 4399 printf("Change verbose level from %u to %u\n", 4400 (unsigned int) verbose_level, (unsigned int) vb_level); 4401 verbose_level = vb_level; 4402 configure_rxtx_dump_callbacks(verbose_level); 4403 } 4404 4405 void 4406 vlan_extend_set(portid_t port_id, int on) 4407 { 4408 int diag; 4409 int vlan_offload; 4410 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4411 4412 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4413 return; 4414 4415 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4416 4417 if (on) { 4418 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4419 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4420 } else { 4421 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 4422 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4423 } 4424 4425 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4426 if (diag < 0) { 4427 fprintf(stderr, 4428 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 4429 port_id, on, diag); 4430 return; 4431 } 4432 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4433 } 4434 4435 void 4436 rx_vlan_strip_set(portid_t port_id, int on) 4437 { 4438 int diag; 4439 int vlan_offload; 4440 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4441 4442 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4443 return; 4444 4445 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4446 4447 if (on) { 4448 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4449 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4450 } else { 4451 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 4452 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4453 } 4454 4455 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4456 if (diag < 0) { 4457 fprintf(stderr, 4458 "%s(port_pi=%d, on=%d) failed diag=%d\n", 4459 __func__, port_id, on, diag); 4460 return; 4461 } 4462 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4463 } 4464 4465 void 4466 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 4467 { 4468 int diag; 4469 4470 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4471 return; 4472 4473 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 4474 if (diag < 0) 4475 fprintf(stderr, 4476 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 4477 __func__, port_id, queue_id, on, diag); 4478 } 4479 4480 void 4481 rx_vlan_filter_set(portid_t port_id, int on) 4482 { 4483 int diag; 4484 int vlan_offload; 4485 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4486 4487 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4488 return; 4489 4490 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4491 4492 if (on) { 4493 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4494 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4495 } else { 4496 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 4497 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4498 } 4499 4500 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4501 if (diag < 0) { 4502 fprintf(stderr, 4503 "%s(port_pi=%d, on=%d) failed diag=%d\n", 4504 __func__, port_id, on, diag); 4505 return; 4506 } 4507 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4508 } 4509 4510 void 4511 rx_vlan_qinq_strip_set(portid_t port_id, int on) 4512 { 4513 int diag; 4514 int vlan_offload; 4515 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4516 4517 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4518 return; 4519 4520 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4521 4522 if (on) { 4523 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4524 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4525 } else { 4526 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 4527 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4528 } 4529 4530 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4531 if (diag < 0) { 4532 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 4533 __func__, port_id, on, diag); 4534 return; 4535 } 4536 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4537 } 4538 4539 int 4540 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 4541 { 4542 int diag; 4543 4544 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4545 return 1; 4546 if (vlan_id_is_invalid(vlan_id)) 4547 return 1; 4548 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 4549 if (diag == 0) 4550 return 0; 4551 fprintf(stderr, 4552 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 4553 port_id, vlan_id, on, diag); 4554 return -1; 4555 } 4556 4557 void 4558 rx_vlan_all_filter_set(portid_t port_id, int on) 4559 { 4560 uint16_t vlan_id; 4561 4562 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4563 return; 4564 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 4565 if (rx_vft_set(port_id, vlan_id, on)) 4566 break; 4567 } 4568 } 4569 4570 void 4571 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 4572 { 4573 int diag; 4574 4575 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4576 return; 4577 4578 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 4579 if (diag == 0) 4580 return; 4581 4582 fprintf(stderr, 4583 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 4584 port_id, vlan_type, tp_id, diag); 4585 } 4586 4587 void 4588 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 4589 { 4590 struct rte_eth_dev_info dev_info; 4591 int ret; 4592 4593 if (vlan_id_is_invalid(vlan_id)) 4594 return; 4595 4596 if (ports[port_id].dev_conf.txmode.offloads & 4597 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 4598 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 4599 return; 4600 } 4601 4602 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4603 if (ret != 0) 4604 return; 4605 4606 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 4607 fprintf(stderr, 4608 "Error: vlan insert is not supported by port %d\n", 4609 port_id); 4610 return; 4611 } 4612 4613 tx_vlan_reset(port_id); 4614 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 4615 ports[port_id].tx_vlan_id = vlan_id; 4616 } 4617 4618 void 4619 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 4620 { 4621 struct rte_eth_dev_info dev_info; 4622 int ret; 4623 4624 if (vlan_id_is_invalid(vlan_id)) 4625 return; 4626 if (vlan_id_is_invalid(vlan_id_outer)) 4627 return; 4628 4629 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4630 if (ret != 0) 4631 return; 4632 4633 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 4634 fprintf(stderr, 4635 "Error: qinq insert not supported by port %d\n", 4636 port_id); 4637 return; 4638 } 4639 4640 tx_vlan_reset(port_id); 4641 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 4642 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 4643 ports[port_id].tx_vlan_id = vlan_id; 4644 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 4645 } 4646 4647 void 4648 tx_vlan_reset(portid_t port_id) 4649 { 4650 ports[port_id].dev_conf.txmode.offloads &= 4651 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 4652 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 4653 ports[port_id].tx_vlan_id = 0; 4654 ports[port_id].tx_vlan_id_outer = 0; 4655 } 4656 4657 void 4658 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 4659 { 4660 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4661 return; 4662 4663 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 4664 } 4665 4666 void 4667 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 4668 { 4669 int ret; 4670 4671 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4672 return; 4673 4674 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 4675 return; 4676 4677 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 4678 fprintf(stderr, "map_value not in required range 0..%d\n", 4679 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 4680 return; 4681 } 4682 4683 if (!is_rx) { /* tx */ 4684 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 4685 map_value); 4686 if (ret) { 4687 fprintf(stderr, 4688 "failed to set tx queue stats mapping.\n"); 4689 return; 4690 } 4691 } else { /* rx */ 4692 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 4693 map_value); 4694 if (ret) { 4695 fprintf(stderr, 4696 "failed to set rx queue stats mapping.\n"); 4697 return; 4698 } 4699 } 4700 } 4701 4702 void 4703 set_xstats_hide_zero(uint8_t on_off) 4704 { 4705 xstats_hide_zero = on_off; 4706 } 4707 4708 void 4709 set_record_core_cycles(uint8_t on_off) 4710 { 4711 record_core_cycles = on_off; 4712 } 4713 4714 void 4715 set_record_burst_stats(uint8_t on_off) 4716 { 4717 record_burst_stats = on_off; 4718 } 4719 4720 static char* 4721 flowtype_to_str(uint16_t flow_type) 4722 { 4723 struct flow_type_info { 4724 char str[32]; 4725 uint16_t ftype; 4726 }; 4727 4728 uint8_t i; 4729 static struct flow_type_info flowtype_str_table[] = { 4730 {"raw", RTE_ETH_FLOW_RAW}, 4731 {"ipv4", RTE_ETH_FLOW_IPV4}, 4732 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4733 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4734 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4735 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4736 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4737 {"ipv6", RTE_ETH_FLOW_IPV6}, 4738 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4739 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4740 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4741 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4742 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4743 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4744 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 4745 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 4746 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 4747 {"port", RTE_ETH_FLOW_PORT}, 4748 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4749 {"geneve", RTE_ETH_FLOW_GENEVE}, 4750 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4751 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4752 {"gtpu", RTE_ETH_FLOW_GTPU}, 4753 }; 4754 4755 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4756 if (flowtype_str_table[i].ftype == flow_type) 4757 return flowtype_str_table[i].str; 4758 } 4759 4760 return NULL; 4761 } 4762 4763 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 4764 4765 static inline void 4766 print_fdir_mask(struct rte_eth_fdir_masks *mask) 4767 { 4768 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 4769 4770 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4771 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 4772 " tunnel_id: 0x%08x", 4773 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 4774 rte_be_to_cpu_32(mask->tunnel_id_mask)); 4775 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 4776 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 4777 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 4778 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 4779 4780 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 4781 rte_be_to_cpu_16(mask->src_port_mask), 4782 rte_be_to_cpu_16(mask->dst_port_mask)); 4783 4784 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4785 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 4786 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 4787 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 4788 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 4789 4790 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4791 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 4792 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 4793 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 4794 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 4795 } 4796 4797 printf("\n"); 4798 } 4799 4800 static inline void 4801 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4802 { 4803 struct rte_eth_flex_payload_cfg *cfg; 4804 uint32_t i, j; 4805 4806 for (i = 0; i < flex_conf->nb_payloads; i++) { 4807 cfg = &flex_conf->flex_set[i]; 4808 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 4809 printf("\n RAW: "); 4810 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 4811 printf("\n L2_PAYLOAD: "); 4812 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 4813 printf("\n L3_PAYLOAD: "); 4814 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 4815 printf("\n L4_PAYLOAD: "); 4816 else 4817 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 4818 for (j = 0; j < num; j++) 4819 printf(" %-5u", cfg->src_offset[j]); 4820 } 4821 printf("\n"); 4822 } 4823 4824 static inline void 4825 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4826 { 4827 struct rte_eth_fdir_flex_mask *mask; 4828 uint32_t i, j; 4829 char *p; 4830 4831 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4832 mask = &flex_conf->flex_mask[i]; 4833 p = flowtype_to_str(mask->flow_type); 4834 printf("\n %s:\t", p ? p : "unknown"); 4835 for (j = 0; j < num; j++) 4836 printf(" %02x", mask->mask[j]); 4837 } 4838 printf("\n"); 4839 } 4840 4841 static inline void 4842 print_fdir_flow_type(uint32_t flow_types_mask) 4843 { 4844 int i; 4845 char *p; 4846 4847 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4848 if (!(flow_types_mask & (1 << i))) 4849 continue; 4850 p = flowtype_to_str(i); 4851 if (p) 4852 printf(" %s", p); 4853 else 4854 printf(" unknown"); 4855 } 4856 printf("\n"); 4857 } 4858 4859 static int 4860 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4861 struct rte_eth_fdir_stats *fdir_stat) 4862 { 4863 int ret = -ENOTSUP; 4864 4865 #ifdef RTE_NET_I40E 4866 if (ret == -ENOTSUP) { 4867 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4868 if (!ret) 4869 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4870 } 4871 #endif 4872 #ifdef RTE_NET_IXGBE 4873 if (ret == -ENOTSUP) { 4874 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4875 if (!ret) 4876 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4877 } 4878 #endif 4879 switch (ret) { 4880 case 0: 4881 break; 4882 case -ENOTSUP: 4883 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 4884 port_id); 4885 break; 4886 default: 4887 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 4888 break; 4889 } 4890 return ret; 4891 } 4892 4893 void 4894 fdir_get_infos(portid_t port_id) 4895 { 4896 struct rte_eth_fdir_stats fdir_stat; 4897 struct rte_eth_fdir_info fdir_info; 4898 4899 static const char *fdir_stats_border = "########################"; 4900 4901 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4902 return; 4903 4904 memset(&fdir_info, 0, sizeof(fdir_info)); 4905 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4906 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4907 return; 4908 4909 printf("\n %s FDIR infos for port %-2d %s\n", 4910 fdir_stats_border, port_id, fdir_stats_border); 4911 printf(" MODE: "); 4912 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4913 printf(" PERFECT\n"); 4914 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4915 printf(" PERFECT-MAC-VLAN\n"); 4916 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4917 printf(" PERFECT-TUNNEL\n"); 4918 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4919 printf(" SIGNATURE\n"); 4920 else 4921 printf(" DISABLE\n"); 4922 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4923 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 4924 printf(" SUPPORTED FLOW TYPE: "); 4925 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 4926 } 4927 printf(" FLEX PAYLOAD INFO:\n"); 4928 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 4929 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 4930 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 4931 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 4932 fdir_info.flex_payload_unit, 4933 fdir_info.max_flex_payload_segment_num, 4934 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 4935 printf(" MASK: "); 4936 print_fdir_mask(&fdir_info.mask); 4937 if (fdir_info.flex_conf.nb_payloads > 0) { 4938 printf(" FLEX PAYLOAD SRC OFFSET:"); 4939 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4940 } 4941 if (fdir_info.flex_conf.nb_flexmasks > 0) { 4942 printf(" FLEX MASK CFG:"); 4943 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4944 } 4945 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 4946 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 4947 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 4948 fdir_info.guarant_spc, fdir_info.best_spc); 4949 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 4950 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 4951 " add: %-10"PRIu64" remove: %"PRIu64"\n" 4952 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 4953 fdir_stat.collision, fdir_stat.free, 4954 fdir_stat.maxhash, fdir_stat.maxlen, 4955 fdir_stat.add, fdir_stat.remove, 4956 fdir_stat.f_add, fdir_stat.f_remove); 4957 printf(" %s############################%s\n", 4958 fdir_stats_border, fdir_stats_border); 4959 } 4960 4961 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 4962 4963 void 4964 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 4965 { 4966 struct rte_port *port; 4967 struct rte_eth_fdir_flex_conf *flex_conf; 4968 int i, idx = 0; 4969 4970 port = &ports[port_id]; 4971 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4972 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 4973 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 4974 idx = i; 4975 break; 4976 } 4977 } 4978 if (i >= RTE_ETH_FLOW_MAX) { 4979 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 4980 idx = flex_conf->nb_flexmasks; 4981 flex_conf->nb_flexmasks++; 4982 } else { 4983 fprintf(stderr, 4984 "The flex mask table is full. Can not set flex mask for flow_type(%u).", 4985 cfg->flow_type); 4986 return; 4987 } 4988 } 4989 rte_memcpy(&flex_conf->flex_mask[idx], 4990 cfg, 4991 sizeof(struct rte_eth_fdir_flex_mask)); 4992 } 4993 4994 void 4995 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 4996 { 4997 struct rte_port *port; 4998 struct rte_eth_fdir_flex_conf *flex_conf; 4999 int i, idx = 0; 5000 5001 port = &ports[port_id]; 5002 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5003 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 5004 if (cfg->type == flex_conf->flex_set[i].type) { 5005 idx = i; 5006 break; 5007 } 5008 } 5009 if (i >= RTE_ETH_PAYLOAD_MAX) { 5010 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 5011 idx = flex_conf->nb_payloads; 5012 flex_conf->nb_payloads++; 5013 } else { 5014 fprintf(stderr, 5015 "The flex payload table is full. Can not set flex payload for type(%u).", 5016 cfg->type); 5017 return; 5018 } 5019 } 5020 rte_memcpy(&flex_conf->flex_set[idx], 5021 cfg, 5022 sizeof(struct rte_eth_flex_payload_cfg)); 5023 5024 } 5025 5026 void 5027 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 5028 { 5029 #ifdef RTE_NET_IXGBE 5030 int diag; 5031 5032 if (is_rx) 5033 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 5034 else 5035 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 5036 5037 if (diag == 0) 5038 return; 5039 fprintf(stderr, 5040 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 5041 is_rx ? "rx" : "tx", port_id, diag); 5042 return; 5043 #endif 5044 fprintf(stderr, "VF %s setting not supported for port %d\n", 5045 is_rx ? "Rx" : "Tx", port_id); 5046 RTE_SET_USED(vf); 5047 RTE_SET_USED(on); 5048 } 5049 5050 int 5051 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 5052 { 5053 int diag; 5054 struct rte_eth_link link; 5055 int ret; 5056 5057 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5058 return 1; 5059 ret = eth_link_get_nowait_print_err(port_id, &link); 5060 if (ret < 0) 5061 return 1; 5062 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 5063 rate > link.link_speed) { 5064 fprintf(stderr, 5065 "Invalid rate value:%u bigger than link speed: %u\n", 5066 rate, link.link_speed); 5067 return 1; 5068 } 5069 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 5070 if (diag == 0) 5071 return diag; 5072 fprintf(stderr, 5073 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 5074 port_id, diag); 5075 return diag; 5076 } 5077 5078 int 5079 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 5080 { 5081 int diag = -ENOTSUP; 5082 5083 RTE_SET_USED(vf); 5084 RTE_SET_USED(rate); 5085 RTE_SET_USED(q_msk); 5086 5087 #ifdef RTE_NET_IXGBE 5088 if (diag == -ENOTSUP) 5089 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 5090 q_msk); 5091 #endif 5092 #ifdef RTE_NET_BNXT 5093 if (diag == -ENOTSUP) 5094 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 5095 #endif 5096 if (diag == 0) 5097 return diag; 5098 5099 fprintf(stderr, 5100 "%s for port_id=%d failed diag=%d\n", 5101 __func__, port_id, diag); 5102 return diag; 5103 } 5104 5105 /* 5106 * Functions to manage the set of filtered Multicast MAC addresses. 5107 * 5108 * A pool of filtered multicast MAC addresses is associated with each port. 5109 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 5110 * The address of the pool and the number of valid multicast MAC addresses 5111 * recorded in the pool are stored in the fields "mc_addr_pool" and 5112 * "mc_addr_nb" of the "rte_port" data structure. 5113 * 5114 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 5115 * to be supplied a contiguous array of multicast MAC addresses. 5116 * To comply with this constraint, the set of multicast addresses recorded 5117 * into the pool are systematically compacted at the beginning of the pool. 5118 * Hence, when a multicast address is removed from the pool, all following 5119 * addresses, if any, are copied back to keep the set contiguous. 5120 */ 5121 #define MCAST_POOL_INC 32 5122 5123 static int 5124 mcast_addr_pool_extend(struct rte_port *port) 5125 { 5126 struct rte_ether_addr *mc_pool; 5127 size_t mc_pool_size; 5128 5129 /* 5130 * If a free entry is available at the end of the pool, just 5131 * increment the number of recorded multicast addresses. 5132 */ 5133 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5134 port->mc_addr_nb++; 5135 return 0; 5136 } 5137 5138 /* 5139 * [re]allocate a pool with MCAST_POOL_INC more entries. 5140 * The previous test guarantees that port->mc_addr_nb is a multiple 5141 * of MCAST_POOL_INC. 5142 */ 5143 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 5144 MCAST_POOL_INC); 5145 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 5146 mc_pool_size); 5147 if (mc_pool == NULL) { 5148 fprintf(stderr, 5149 "allocation of pool of %u multicast addresses failed\n", 5150 port->mc_addr_nb + MCAST_POOL_INC); 5151 return -ENOMEM; 5152 } 5153 5154 port->mc_addr_pool = mc_pool; 5155 port->mc_addr_nb++; 5156 return 0; 5157 5158 } 5159 5160 static void 5161 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 5162 { 5163 if (mcast_addr_pool_extend(port) != 0) 5164 return; 5165 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 5166 } 5167 5168 static void 5169 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 5170 { 5171 port->mc_addr_nb--; 5172 if (addr_idx == port->mc_addr_nb) { 5173 /* No need to recompact the set of multicast addresses. */ 5174 if (port->mc_addr_nb == 0) { 5175 /* free the pool of multicast addresses. */ 5176 free(port->mc_addr_pool); 5177 port->mc_addr_pool = NULL; 5178 } 5179 return; 5180 } 5181 memmove(&port->mc_addr_pool[addr_idx], 5182 &port->mc_addr_pool[addr_idx + 1], 5183 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 5184 } 5185 5186 static int 5187 eth_port_multicast_addr_list_set(portid_t port_id) 5188 { 5189 struct rte_port *port; 5190 int diag; 5191 5192 port = &ports[port_id]; 5193 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 5194 port->mc_addr_nb); 5195 if (diag < 0) 5196 fprintf(stderr, 5197 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 5198 port_id, port->mc_addr_nb, diag); 5199 5200 return diag; 5201 } 5202 5203 void 5204 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 5205 { 5206 struct rte_port *port; 5207 uint32_t i; 5208 5209 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5210 return; 5211 5212 port = &ports[port_id]; 5213 5214 /* 5215 * Check that the added multicast MAC address is not already recorded 5216 * in the pool of multicast addresses. 5217 */ 5218 for (i = 0; i < port->mc_addr_nb; i++) { 5219 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 5220 fprintf(stderr, 5221 "multicast address already filtered by port\n"); 5222 return; 5223 } 5224 } 5225 5226 mcast_addr_pool_append(port, mc_addr); 5227 if (eth_port_multicast_addr_list_set(port_id) < 0) 5228 /* Rollback on failure, remove the address from the pool */ 5229 mcast_addr_pool_remove(port, i); 5230 } 5231 5232 void 5233 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 5234 { 5235 struct rte_port *port; 5236 uint32_t i; 5237 5238 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5239 return; 5240 5241 port = &ports[port_id]; 5242 5243 /* 5244 * Search the pool of multicast MAC addresses for the removed address. 5245 */ 5246 for (i = 0; i < port->mc_addr_nb; i++) { 5247 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 5248 break; 5249 } 5250 if (i == port->mc_addr_nb) { 5251 fprintf(stderr, "multicast address not filtered by port %d\n", 5252 port_id); 5253 return; 5254 } 5255 5256 mcast_addr_pool_remove(port, i); 5257 if (eth_port_multicast_addr_list_set(port_id) < 0) 5258 /* Rollback on failure, add the address back into the pool */ 5259 mcast_addr_pool_append(port, mc_addr); 5260 } 5261 5262 void 5263 port_dcb_info_display(portid_t port_id) 5264 { 5265 struct rte_eth_dcb_info dcb_info; 5266 uint16_t i; 5267 int ret; 5268 static const char *border = "================"; 5269 5270 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5271 return; 5272 5273 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 5274 if (ret) { 5275 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 5276 port_id); 5277 return; 5278 } 5279 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 5280 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 5281 printf("\n TC : "); 5282 for (i = 0; i < dcb_info.nb_tcs; i++) 5283 printf("\t%4d", i); 5284 printf("\n Priority : "); 5285 for (i = 0; i < dcb_info.nb_tcs; i++) 5286 printf("\t%4d", dcb_info.prio_tc[i]); 5287 printf("\n BW percent :"); 5288 for (i = 0; i < dcb_info.nb_tcs; i++) 5289 printf("\t%4d%%", dcb_info.tc_bws[i]); 5290 printf("\n RXQ base : "); 5291 for (i = 0; i < dcb_info.nb_tcs; i++) 5292 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 5293 printf("\n RXQ number :"); 5294 for (i = 0; i < dcb_info.nb_tcs; i++) 5295 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 5296 printf("\n TXQ base : "); 5297 for (i = 0; i < dcb_info.nb_tcs; i++) 5298 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 5299 printf("\n TXQ number :"); 5300 for (i = 0; i < dcb_info.nb_tcs; i++) 5301 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 5302 printf("\n"); 5303 } 5304 5305 uint8_t * 5306 open_file(const char *file_path, uint32_t *size) 5307 { 5308 int fd = open(file_path, O_RDONLY); 5309 off_t pkg_size; 5310 uint8_t *buf = NULL; 5311 int ret = 0; 5312 struct stat st_buf; 5313 5314 if (size) 5315 *size = 0; 5316 5317 if (fd == -1) { 5318 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5319 return buf; 5320 } 5321 5322 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 5323 close(fd); 5324 fprintf(stderr, "%s: File operations failed\n", __func__); 5325 return buf; 5326 } 5327 5328 pkg_size = st_buf.st_size; 5329 if (pkg_size < 0) { 5330 close(fd); 5331 fprintf(stderr, "%s: File operations failed\n", __func__); 5332 return buf; 5333 } 5334 5335 buf = (uint8_t *)malloc(pkg_size); 5336 if (!buf) { 5337 close(fd); 5338 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 5339 return buf; 5340 } 5341 5342 ret = read(fd, buf, pkg_size); 5343 if (ret < 0) { 5344 close(fd); 5345 fprintf(stderr, "%s: File read operation failed\n", __func__); 5346 close_file(buf); 5347 return NULL; 5348 } 5349 5350 if (size) 5351 *size = pkg_size; 5352 5353 close(fd); 5354 5355 return buf; 5356 } 5357 5358 int 5359 save_file(const char *file_path, uint8_t *buf, uint32_t size) 5360 { 5361 FILE *fh = fopen(file_path, "wb"); 5362 5363 if (fh == NULL) { 5364 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5365 return -1; 5366 } 5367 5368 if (fwrite(buf, 1, size, fh) != size) { 5369 fclose(fh); 5370 fprintf(stderr, "%s: File write operation failed\n", __func__); 5371 return -1; 5372 } 5373 5374 fclose(fh); 5375 5376 return 0; 5377 } 5378 5379 int 5380 close_file(uint8_t *buf) 5381 { 5382 if (buf) { 5383 free((void *)buf); 5384 return 0; 5385 } 5386 5387 return -1; 5388 } 5389 5390 void 5391 port_queue_region_info_display(portid_t port_id, void *buf) 5392 { 5393 #ifdef RTE_NET_I40E 5394 uint16_t i, j; 5395 struct rte_pmd_i40e_queue_regions *info = 5396 (struct rte_pmd_i40e_queue_regions *)buf; 5397 static const char *queue_region_info_stats_border = "-------"; 5398 5399 if (!info->queue_region_number) 5400 printf("there is no region has been set before"); 5401 5402 printf("\n %s All queue region info for port=%2d %s", 5403 queue_region_info_stats_border, port_id, 5404 queue_region_info_stats_border); 5405 printf("\n queue_region_number: %-14u \n", 5406 info->queue_region_number); 5407 5408 for (i = 0; i < info->queue_region_number; i++) { 5409 printf("\n region_id: %-14u queue_number: %-14u " 5410 "queue_start_index: %-14u \n", 5411 info->region[i].region_id, 5412 info->region[i].queue_num, 5413 info->region[i].queue_start_index); 5414 5415 printf(" user_priority_num is %-14u :", 5416 info->region[i].user_priority_num); 5417 for (j = 0; j < info->region[i].user_priority_num; j++) 5418 printf(" %-14u ", info->region[i].user_priority[j]); 5419 5420 printf("\n flowtype_num is %-14u :", 5421 info->region[i].flowtype_num); 5422 for (j = 0; j < info->region[i].flowtype_num; j++) 5423 printf(" %-14u ", info->region[i].hw_flowtype[j]); 5424 } 5425 #else 5426 RTE_SET_USED(port_id); 5427 RTE_SET_USED(buf); 5428 #endif 5429 5430 printf("\n\n"); 5431 } 5432 5433 void 5434 show_macs(portid_t port_id) 5435 { 5436 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5437 struct rte_eth_dev_info dev_info; 5438 int32_t i, rc, num_macs = 0; 5439 5440 if (eth_dev_info_get_print_err(port_id, &dev_info)) 5441 return; 5442 5443 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 5444 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 5445 if (rc < 0) 5446 return; 5447 5448 for (i = 0; i < rc; i++) { 5449 5450 /* skip zero address */ 5451 if (rte_is_zero_ether_addr(&addr[i])) 5452 continue; 5453 5454 num_macs++; 5455 } 5456 5457 printf("Number of MAC address added: %d\n", num_macs); 5458 5459 for (i = 0; i < rc; i++) { 5460 5461 /* skip zero address */ 5462 if (rte_is_zero_ether_addr(&addr[i])) 5463 continue; 5464 5465 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 5466 printf(" %s\n", buf); 5467 } 5468 } 5469 5470 void 5471 show_mcast_macs(portid_t port_id) 5472 { 5473 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5474 struct rte_ether_addr *addr; 5475 struct rte_port *port; 5476 uint32_t i; 5477 5478 port = &ports[port_id]; 5479 5480 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 5481 5482 for (i = 0; i < port->mc_addr_nb; i++) { 5483 addr = &port->mc_addr_pool[i]; 5484 5485 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5486 printf(" %s\n", buf); 5487 } 5488 } 5489