/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_mtr.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#ifdef RTE_LIB_GRO
#include <rte_gro.h>
#endif
#include <rte_hexdump.h>

#include "testpmd.h"
#include "cmdline_mtr.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2},
	{ "none", 0 },
	{ "eth", RTE_ETH_RSS_ETH },
	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
	{ "vlan", RTE_ETH_RSS_VLAN },
	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
	{ "ipv4", RTE_ETH_RSS_IPV4 },
	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", RTE_ETH_RSS_IPV6 },
	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
	{ "port", RTE_ETH_RSS_PORT },
	{ "vxlan", RTE_ETH_RSS_VXLAN },
	{ "geneve", RTE_ETH_RSS_GENEVE },
	{ "nvgre", RTE_ETH_RSS_NVGRE },
	{ "ip", RTE_ETH_RSS_IP },
	{ "udp", RTE_ETH_RSS_UDP },
	{ "tcp", RTE_ETH_RSS_TCP },
	{ "sctp", RTE_ETH_RSS_SCTP },
	{ "tunnel", RTE_ETH_RSS_TUNNEL },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
	{ "esp", RTE_ETH_RSS_ESP },
	{ "ah", RTE_ETH_RSS_AH },
	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
	{ "pfcp", RTE_ETH_RSS_PFCP },
	{ "pppoe", RTE_ETH_RSS_PPPOE },
	{ "gtpu", RTE_ETH_RSS_GTPU },
	{ "ecpri", RTE_ETH_RSS_ECPRI },
	{ "mpls", RTE_ETH_RSS_MPLS },
	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
	{ "l2tpv2", RTE_ETH_RSS_L2TPV2 },
	{ NULL, 0 },
};

static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{
		.mode = RTE_ETH_FEC_NOFEC,
		.name = "off",
	},
	{
		.mode = RTE_ETH_FEC_AUTO,
		.name = "auto",
	},
	{
		.mode = RTE_ETH_FEC_BASER,
		.name = "baser",
	},
	{
		.mode = RTE_ETH_FEC_RS,
		.name = "rs",
	},
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
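
/*
 * Show the xstats chosen for periodic display together with a per-second
 * rate. The rate is derived from the snapshot arrays kept in
 * struct xstat_display_info and from CLOCK_TYPE_ID timestamps taken on
 * each call, so it covers the interval since the previous invocation.
 */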
static void
nic_xstats_display_periodic(portid_t port_id)
{
	struct xstat_display_info *xstats_info;
	uint64_t *prev_values, *curr_values;
	uint64_t diff_value, value_rate;
	struct timespec cur_time;
	uint64_t *ids_supp;
	size_t ids_supp_sz;
	uint64_t diff_ns;
	unsigned int i;
	int rc;

	xstats_info = &ports[port_id].xstats_info;

	ids_supp_sz = xstats_info->ids_supp_sz;
	if (ids_supp_sz == 0)
		return;

	printf("\n");

	ids_supp = xstats_info->ids_supp;
	prev_values = xstats_info->prev_values;
	curr_values = xstats_info->curr_values;

	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
				      ids_supp_sz);
	if (rc != (int)ids_supp_sz) {
		fprintf(stderr,
			"Failed to get values of %zu xstats for port %u - return code %d\n",
			ids_supp_sz, port_id, rc);
		return;
	}

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (xstats_info->prev_ns != 0)
			diff_ns = ns - xstats_info->prev_ns;
		xstats_info->prev_ns = ns;
	}

	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
	for (i = 0; i < ids_supp_sz; i++) {
		diff_value = (curr_values[i] > prev_values[i]) ?
			     (curr_values[i] - prev_values[i]) : 0;
		prev_values[i] = curr_values[i];
		value_rate = diff_ns > 0 ?
				(double)diff_value / diff_ns * NS_PER_SEC : 0;

		printf(" %-25s%12"PRIu64" %15"PRIu64"\n",
		       xstats_display[i].name, curr_values[i], value_rate);
	}
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
		 diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
	printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	if (xstats_display_num > 0)
		nic_xstats_display_periodic(port_id);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset stats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr,
			"%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
	printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		fprintf(stderr, "Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		fprintf(stderr, "Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		fprintf(stderr, "Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset xstats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr, "%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
}
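
/*
 * Note: the xstats helpers above use the usual two-call ethdev pattern:
 * a first rte_eth_xstats_get_names(port_id, NULL, 0) call only returns
 * the number of xstats, then a second call fills the caller-allocated
 * arrays. Both calls must report the same count, which is re-checked
 * here since a PMD may register additional xstats over time.
 */
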
static const char *
get_queue_state_name(uint8_t queue_state)
{
	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
		return "stopped";
	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
		return "started";
	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return "hairpin";
	else
		return "unknown";
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
	if (qinfo.rx_buf_size != 0)
		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
527 " (per queue)" : ""); 528 529 printf("\n"); 530 } 531 532 static int bus_match_all(const struct rte_bus *bus, const void *data) 533 { 534 RTE_SET_USED(bus); 535 RTE_SET_USED(data); 536 return 0; 537 } 538 539 static void 540 device_infos_display_speeds(uint32_t speed_capa) 541 { 542 printf("\n\tDevice speed capability:"); 543 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 544 printf(" Autonegotiate (all speeds)"); 545 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 546 printf(" Disable autonegotiate (fixed speed) "); 547 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 548 printf(" 10 Mbps half-duplex "); 549 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 550 printf(" 10 Mbps full-duplex "); 551 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 552 printf(" 100 Mbps half-duplex "); 553 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 554 printf(" 100 Mbps full-duplex "); 555 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 556 printf(" 1 Gbps "); 557 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 558 printf(" 2.5 Gbps "); 559 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 560 printf(" 5 Gbps "); 561 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 562 printf(" 10 Gbps "); 563 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 564 printf(" 20 Gbps "); 565 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 566 printf(" 25 Gbps "); 567 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 568 printf(" 40 Gbps "); 569 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 570 printf(" 50 Gbps "); 571 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 572 printf(" 56 Gbps "); 573 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 574 printf(" 100 Gbps "); 575 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 576 printf(" 200 Gbps "); 577 } 578 579 void 580 device_infos_display(const char *identifier) 581 { 582 static const char *info_border = "*********************"; 583 struct rte_bus *start = NULL, *next; 584 struct rte_dev_iterator dev_iter; 585 char name[RTE_ETH_NAME_MAX_LEN]; 586 struct rte_ether_addr mac_addr; 587 struct rte_device *dev; 588 struct rte_devargs da; 589 portid_t port_id; 590 struct rte_eth_dev_info dev_info; 591 char devstr[128]; 592 593 memset(&da, 0, sizeof(da)); 594 if (!identifier) 595 goto skip_parse; 596 597 if (rte_devargs_parsef(&da, "%s", identifier)) { 598 fprintf(stderr, "cannot parse identifier\n"); 599 return; 600 } 601 602 skip_parse: 603 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 604 605 start = next; 606 if (identifier && da.bus != next) 607 continue; 608 609 /* Skip buses that don't have iterate method */ 610 if (!next->dev_iterate) 611 continue; 612 613 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 614 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 615 616 if (!dev->driver) 617 continue; 618 /* Check for matching device if identifier is present */ 619 if (identifier && 620 strncmp(da.name, dev->name, strlen(dev->name))) 621 continue; 622 printf("\n%s Infos for device %s %s\n", 623 info_border, dev->name, info_border); 624 printf("Bus name: %s", dev->bus->name); 625 printf("\nDriver name: %s", dev->driver->name); 626 printf("\nDevargs: %s", 627 dev->devargs ? 
void
device_infos_display(const char *identifier)
{
	static const char *info_border = "*********************";
	struct rte_bus *start = NULL, *next;
	struct rte_dev_iterator dev_iter;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_ether_addr mac_addr;
	struct rte_device *dev;
	struct rte_devargs da;
	portid_t port_id;
	struct rte_eth_dev_info dev_info;
	char devstr[128];

	memset(&da, 0, sizeof(da));
	if (!identifier)
		goto skip_parse;

	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "cannot parse identifier\n");
		return;
	}

skip_parse:
	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

		start = next;
		if (identifier && da.bus != next)
			continue;

		/* Skip buses that don't have iterate method */
		if (!next->dev_iterate)
			continue;

		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

			if (!dev->driver)
				continue;
			/* Check for matching device if identifier is present */
			if (identifier &&
			    strncmp(da.name, dev->name, strlen(dev->name)))
				continue;
			printf("\n%s Infos for device %s %s\n",
			       info_border, dev->name, info_border);
			printf("Bus name: %s", dev->bus->name);
			printf("\nDriver name: %s", dev->driver->name);
			printf("\nDevargs: %s",
			       dev->devargs ? dev->devargs->args : "");
			printf("\nConnect to socket: %d", dev->numa_node);
			printf("\n");

			/* List ports with matching device name */
			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
				printf("\n\tPort id: %-2d", port_id);
				if (eth_macaddr_get_print_err(port_id,
							      &mac_addr) == 0)
					print_ethaddr("\n\tMAC address: ",
						      &mac_addr);
				rte_eth_dev_get_name_by_port(port_id, name);
				printf("\n\tDevice name: %s", name);
				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
					device_infos_display_speeds(dev_info.speed_capa);
				printf("\n");
			}
		}
	}
	rte_devargs_reset(&da);
}

static void
print_dev_capabilities(uint64_t capabilities)
{
	uint64_t single_capa;
	int begin;
	int end;
	int bit;

	if (capabilities == 0)
		return;

	begin = __builtin_ctzll(capabilities);
	end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities);

	single_capa = 1ULL << begin;
	for (bit = begin; bit < end; bit++) {
		if (capabilities & single_capa)
			printf(" %s",
			       rte_eth_dev_capability_name(single_capa));
		single_capa <<= 1;
	}
}

void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	uint16_t mtu;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;
	char fw_version[ETHDEV_FWVERS_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
		print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);

	if (rte_eth_dev_fw_version_get(port_id, fw_version,
				       ETHDEV_FWVERS_LEN) == 0)
		printf("\nFirmware-version: %s", fw_version);
	else
		printf("\nFirmware-version: %s", "not available");

	if (dev_info.device->devargs && dev_info.device->devargs->args)
		printf("\nDevargs: %s", dev_info.device->devargs->args);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id], 0);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
	       ("On") : ("Off"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ?
"enabled" : "disabled"); 740 printf("Maximum number of MAC addresses: %u\n", 741 (unsigned int)(port->dev_info.max_mac_addrs)); 742 printf("Maximum number of MAC addresses of hash filtering: %u\n", 743 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 744 745 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 746 if (vlan_offload >= 0){ 747 printf("VLAN offload: \n"); 748 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 749 printf(" strip on, "); 750 else 751 printf(" strip off, "); 752 753 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 754 printf("filter on, "); 755 else 756 printf("filter off, "); 757 758 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 759 printf("extend on, "); 760 else 761 printf("extend off, "); 762 763 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 764 printf("qinq strip on\n"); 765 else 766 printf("qinq strip off\n"); 767 } 768 769 if (dev_info.hash_key_size > 0) 770 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 771 if (dev_info.reta_size > 0) 772 printf("Redirection table size: %u\n", dev_info.reta_size); 773 if (!dev_info.flow_type_rss_offloads) 774 printf("No RSS offload flow type is supported.\n"); 775 else { 776 uint16_t i; 777 char *p; 778 779 printf("Supported RSS offload flow types:\n"); 780 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 781 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 782 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 783 continue; 784 p = flowtype_to_str(i); 785 if (p) 786 printf(" %s\n", p); 787 else 788 printf(" user defined %d\n", i); 789 } 790 } 791 792 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 793 printf("Maximum configurable length of RX packet: %u\n", 794 dev_info.max_rx_pktlen); 795 printf("Maximum configurable size of LRO aggregated packet: %u\n", 796 dev_info.max_lro_pkt_size); 797 if (dev_info.max_vfs) 798 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 799 if (dev_info.max_vmdq_pools) 800 printf("Maximum number of VMDq pools: %u\n", 801 dev_info.max_vmdq_pools); 802 803 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 804 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 805 printf("Max possible number of RXDs per queue: %hu\n", 806 dev_info.rx_desc_lim.nb_max); 807 printf("Min possible number of RXDs per queue: %hu\n", 808 dev_info.rx_desc_lim.nb_min); 809 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 810 811 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 812 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 813 printf("Max possible number of TXDs per queue: %hu\n", 814 dev_info.tx_desc_lim.nb_max); 815 printf("Min possible number of TXDs per queue: %hu\n", 816 dev_info.tx_desc_lim.nb_min); 817 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 818 printf("Max segment number per packet: %hu\n", 819 dev_info.tx_desc_lim.nb_seg_max); 820 printf("Max segment number per MTU/TSO: %hu\n", 821 dev_info.tx_desc_lim.nb_mtu_seg_max); 822 823 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 824 print_dev_capabilities(dev_info.dev_capa); 825 printf(" )\n"); 826 /* Show switch info only if valid switch domain and port id is set */ 827 if (dev_info.switch_info.domain_id != 828 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 829 if (dev_info.switch_info.name) 830 printf("Switch name: %s\n", dev_info.switch_info.name); 831 832 printf("Switch domain Id: %u\n", 833 dev_info.switch_info.domain_id); 834 printf("Switch Port Id: %u\n", 835 
		       dev_info.switch_info.port_id);
		if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0)
			printf("Switch Rx domain: %u\n",
			       dev_info.switch_info.rx_domain);
	}
}

void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
	       "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n",
	       port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name,
	       dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
	       rte_eth_link_speed_to_str(link.link_speed));
}

void
port_eeprom_display(portid_t port_id)
{
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
	if (len_eeprom < 0) {
		switch (len_eeprom) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n",
				len_eeprom);
			break;
		}
		return;
	}

	einfo.offset = 0;
	einfo.length = len_eeprom;
	einfo.data = calloc(1, len_eeprom);
	if (!einfo.data) {
		fprintf(stderr,
			"Allocation of port %u eeprom data failed\n",
			port_id);
		return;
	}

	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
			break;
		}
		free(einfo.data);
		return;
	}
	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
	free(einfo.data);
}
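
/*
 * Dump the EEPROM of the plug-in module (e.g. an SFP/QSFP transceiver):
 * rte_eth_dev_get_module_info() reports the EEPROM length to allocate,
 * then rte_eth_dev_get_module_eeprom() fills the buffer that is hexdumped.
 */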
fprintf(stderr, "Unable to get module EEPROM: %d\n", 977 ret); 978 break; 979 } 980 return; 981 } 982 983 einfo.offset = 0; 984 einfo.length = minfo.eeprom_len; 985 einfo.data = calloc(1, minfo.eeprom_len); 986 if (!einfo.data) { 987 fprintf(stderr, 988 "Allocation of port %u eeprom data failed\n", 989 port_id); 990 return; 991 } 992 993 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 994 if (ret != 0) { 995 switch (ret) { 996 case -ENODEV: 997 fprintf(stderr, "port index %d invalid\n", port_id); 998 break; 999 case -ENOTSUP: 1000 fprintf(stderr, "operation not supported by device\n"); 1001 break; 1002 case -EIO: 1003 fprintf(stderr, "device is removed\n"); 1004 break; 1005 default: 1006 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1007 ret); 1008 break; 1009 } 1010 free(einfo.data); 1011 return; 1012 } 1013 1014 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1015 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1016 free(einfo.data); 1017 } 1018 1019 int 1020 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1021 { 1022 uint16_t pid; 1023 1024 if (port_id == (portid_t)RTE_PORT_ALL) 1025 return 0; 1026 1027 RTE_ETH_FOREACH_DEV(pid) 1028 if (port_id == pid) 1029 return 0; 1030 1031 if (warning == ENABLED_WARN) 1032 fprintf(stderr, "Invalid port %d\n", port_id); 1033 1034 return 1; 1035 } 1036 1037 void print_valid_ports(void) 1038 { 1039 portid_t pid; 1040 1041 printf("The valid ports array is ["); 1042 RTE_ETH_FOREACH_DEV(pid) { 1043 printf(" %d", pid); 1044 } 1045 printf(" ]\n"); 1046 } 1047 1048 static int 1049 vlan_id_is_invalid(uint16_t vlan_id) 1050 { 1051 if (vlan_id < 4096) 1052 return 0; 1053 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1054 return 1; 1055 } 1056 1057 static int 1058 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1059 { 1060 const struct rte_pci_device *pci_dev; 1061 const struct rte_bus *bus; 1062 uint64_t pci_len; 1063 1064 if (reg_off & 0x3) { 1065 fprintf(stderr, 1066 "Port register offset 0x%X not aligned on a 4-byte boundary\n", 1067 (unsigned int)reg_off); 1068 return 1; 1069 } 1070 1071 if (!ports[port_id].dev_info.device) { 1072 fprintf(stderr, "Invalid device\n"); 1073 return 0; 1074 } 1075 1076 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1077 if (bus && !strcmp(bus->name, "pci")) { 1078 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1079 } else { 1080 fprintf(stderr, "Not a PCI device\n"); 1081 return 1; 1082 } 1083 1084 pci_len = pci_dev->mem_resource[0].len; 1085 if (reg_off >= pci_len) { 1086 fprintf(stderr, 1087 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", 1088 port_id, (unsigned int)reg_off, (unsigned int)reg_off, 1089 pci_len); 1090 return 1; 1091 } 1092 return 0; 1093 } 1094 1095 static int 1096 reg_bit_pos_is_invalid(uint8_t bit_pos) 1097 { 1098 if (bit_pos <= 31) 1099 return 0; 1100 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); 1101 return 1; 1102 } 1103 1104 #define display_port_and_reg_off(port_id, reg_off) \ 1105 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1106 1107 static inline void 1108 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1109 { 1110 display_port_and_reg_off(port_id, (unsigned)reg_off); 1111 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1112 } 1113 1114 void 1115 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1116 { 1117 uint32_t reg_v; 

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n",
			(int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n",
			(unsigned)value, (unsigned)value,
			(unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	struct rte_port *port = &ports[port_id];
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (port->need_reconfig == 0) {
		diag = rte_eth_dev_set_mtu(port_id, mtu);
		if (diag != 0) {
			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
			return;
		}
	}

	port->dev_conf.rxmode.mtu = mtu;
}

/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type;
	switch (tunnel->type) {
	default:
		type = "unknown";
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		type = "gre";
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		type = "nvgre";
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		type = "geneve";
		break;
	}

	return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
		       port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
		       port_id, tunnel_id);
	}
}

void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else if (!strcmp(ops->type, "gre"))
		type = RTE_FLOW_ITEM_TYPE_GRE;
	else if (!strcmp(ops->type, "nvgre"))
		type = RTE_FLOW_ITEM_TYPE_NVGRE;
	else if (!strcmp(ops->type, "geneve"))
		type = RTE_FLOW_ITEM_TYPE_GENEVE;
	else {
		fprintf(stderr,
"cannot offload \"%s\" tunnel type\n", 1376 ops->type); 1377 return; 1378 } 1379 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1380 if (flt->tunnel.type == type) 1381 break; 1382 } 1383 if (!flt) { 1384 flt = calloc(1, sizeof(*flt)); 1385 if (!flt) { 1386 fprintf(stderr, "failed to allocate port flt object\n"); 1387 return; 1388 } 1389 flt->tunnel.type = type; 1390 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1391 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1392 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1393 } 1394 printf("port %d: flow tunnel #%u type %s\n", 1395 port_id, flt->id, ops->type); 1396 } 1397 1398 /** Generate a port_flow entry from attributes/pattern/actions. */ 1399 static struct port_flow * 1400 port_flow_new(const struct rte_flow_attr *attr, 1401 const struct rte_flow_item *pattern, 1402 const struct rte_flow_action *actions, 1403 struct rte_flow_error *error) 1404 { 1405 const struct rte_flow_conv_rule rule = { 1406 .attr_ro = attr, 1407 .pattern_ro = pattern, 1408 .actions_ro = actions, 1409 }; 1410 struct port_flow *pf; 1411 int ret; 1412 1413 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1414 if (ret < 0) 1415 return NULL; 1416 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1417 if (!pf) { 1418 rte_flow_error_set 1419 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1420 "calloc() failed"); 1421 return NULL; 1422 } 1423 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1424 error) >= 0) 1425 return pf; 1426 free(pf); 1427 return NULL; 1428 } 1429 1430 /** Print a message out of a flow error. */ 1431 static int 1432 port_flow_complain(struct rte_flow_error *error) 1433 { 1434 static const char *const errstrlist[] = { 1435 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1436 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1437 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1438 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1439 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1440 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1441 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1442 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1443 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1444 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1445 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1446 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1447 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1448 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1449 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1450 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1451 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1452 }; 1453 const char *errstr; 1454 char buf[32]; 1455 int err = rte_errno; 1456 1457 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1458 !errstrlist[error->type]) 1459 errstr = "unknown type"; 1460 else 1461 errstr = errstrlist[error->type]; 1462 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1463 __func__, error->type, errstr, 1464 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1465 error->cause), buf) : "", 1466 error->message ? error->message : "(no stated reason)", 1467 rte_strerror(err)); 1468 1469 switch (error->type) { 1470 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1471 fprintf(stderr, "The status suggests the use of \"transfer\" " 1472 "as the possible cause of the failure. 
Make " 1473 "sure that the flow in question and its " 1474 "indirect components (if any) are managed " 1475 "via \"transfer\" proxy port. Use command " 1476 "\"show port (port_id) flow transfer proxy\" " 1477 "to figure out the proxy port ID\n"); 1478 break; 1479 default: 1480 break; 1481 } 1482 1483 return -err; 1484 } 1485 1486 static void 1487 rss_config_display(struct rte_flow_action_rss *rss_conf) 1488 { 1489 uint8_t i; 1490 1491 if (rss_conf == NULL) { 1492 fprintf(stderr, "Invalid rule\n"); 1493 return; 1494 } 1495 1496 printf("RSS:\n" 1497 " queues:"); 1498 if (rss_conf->queue_num == 0) 1499 printf(" none"); 1500 for (i = 0; i < rss_conf->queue_num; i++) 1501 printf(" %d", rss_conf->queue[i]); 1502 printf("\n"); 1503 1504 printf(" function: "); 1505 switch (rss_conf->func) { 1506 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1507 printf("default\n"); 1508 break; 1509 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1510 printf("toeplitz\n"); 1511 break; 1512 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1513 printf("simple_xor\n"); 1514 break; 1515 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1516 printf("symmetric_toeplitz\n"); 1517 break; 1518 default: 1519 printf("Unknown function\n"); 1520 return; 1521 } 1522 1523 printf(" types:\n"); 1524 if (rss_conf->types == 0) { 1525 printf(" none\n"); 1526 return; 1527 } 1528 for (i = 0; rss_type_table[i].str; i++) { 1529 if ((rss_conf->types & 1530 rss_type_table[i].rss_type) == 1531 rss_type_table[i].rss_type && 1532 rss_type_table[i].rss_type != 0) 1533 printf(" %s\n", rss_type_table[i].str); 1534 } 1535 } 1536 1537 static struct port_indirect_action * 1538 action_get_by_id(portid_t port_id, uint32_t id) 1539 { 1540 struct rte_port *port; 1541 struct port_indirect_action **ppia; 1542 struct port_indirect_action *pia = NULL; 1543 1544 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1545 port_id == (portid_t)RTE_PORT_ALL) 1546 return NULL; 1547 port = &ports[port_id]; 1548 ppia = &port->actions_list; 1549 while (*ppia) { 1550 if ((*ppia)->id == id) { 1551 pia = *ppia; 1552 break; 1553 } 1554 ppia = &(*ppia)->next; 1555 } 1556 if (!pia) 1557 fprintf(stderr, 1558 "Failed to find indirect action #%u on port %u\n", 1559 id, port_id); 1560 return pia; 1561 } 1562 1563 static int 1564 action_alloc(portid_t port_id, uint32_t id, 1565 struct port_indirect_action **action) 1566 { 1567 struct rte_port *port; 1568 struct port_indirect_action **ppia; 1569 struct port_indirect_action *pia = NULL; 1570 1571 *action = NULL; 1572 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1573 port_id == (portid_t)RTE_PORT_ALL) 1574 return -EINVAL; 1575 port = &ports[port_id]; 1576 if (id == UINT32_MAX) { 1577 /* taking first available ID */ 1578 if (port->actions_list) { 1579 if (port->actions_list->id == UINT32_MAX - 1) { 1580 fprintf(stderr, 1581 "Highest indirect action ID is already assigned, delete it first\n"); 1582 return -ENOMEM; 1583 } 1584 id = port->actions_list->id + 1; 1585 } else { 1586 id = 0; 1587 } 1588 } 1589 pia = calloc(1, sizeof(*pia)); 1590 if (!pia) { 1591 fprintf(stderr, 1592 "Allocation of port %u indirect action failed\n", 1593 port_id); 1594 return -ENOMEM; 1595 } 1596 ppia = &port->actions_list; 1597 while (*ppia && (*ppia)->id > id) 1598 ppia = &(*ppia)->next; 1599 if (*ppia && (*ppia)->id == id) { 1600 fprintf(stderr, 1601 "Indirect action #%u is already assigned, delete it first\n", 1602 id); 1603 free(pia); 1604 return -EINVAL; 1605 } 1606 pia->next = *ppia; 1607 pia->id = id; 1608 *ppia = pia; 1609 *action = pia; 1610 return 0; 1611 } 1612 1613 /** Get 

/** Get info about flow management resources. */
int
port_flow_get_info(portid_t port_id)
{
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	struct rte_flow_error error;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x99, sizeof(error));
	memset(&port_info, 0, sizeof(port_info));
	memset(&queue_info, 0, sizeof(queue_info));
	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error))
		return port_flow_complain(&error);
	printf("Flow engine resources on port %u:\n"
	       "Number of queues: %d\n"
	       "Size of queues: %d\n"
	       "Number of counters: %d\n"
	       "Number of aging objects: %d\n"
	       "Number of meter actions: %d\n",
	       port_id, port_info.max_nb_queues,
	       queue_info.max_size,
	       port_info.max_nb_counters,
	       port_info.max_nb_aging_objects,
	       port_info.max_nb_meters);
	return 0;
}

/** Configure flow management resources. */
int
port_flow_configure(portid_t port_id,
		    const struct rte_flow_port_attr *port_attr,
		    uint16_t nb_queue,
		    const struct rte_flow_queue_attr *queue_attr)
{
	struct rte_port *port;
	struct rte_flow_error error;
	const struct rte_flow_queue_attr *attr_list[nb_queue];
	int std_queue;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	port->queue_nb = nb_queue;
	port->queue_sz = queue_attr->size;
	for (std_queue = 0; std_queue < nb_queue; std_queue++)
		attr_list[std_queue] = queue_attr;
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error))
		return port_flow_complain(&error);
	printf("Configure flows on port %u: "
	       "number of queues %d with %d elements\n",
	       port_id, nb_queue, queue_attr->size);
	return 0;
}

/** Create indirect action */
int
port_action_handle_create(portid_t port_id, uint32_t id,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action)
{
	struct port_indirect_action *pia;
	int ret;
	struct rte_flow_error error;

	ret = action_alloc(port_id, id, &pia);
	if (ret)
		return ret;
	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
		struct rte_flow_action_age *age =
			(struct rte_flow_action_age *)(uintptr_t)(action->conf);

		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
		age->context = &pia->age_type;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
		struct rte_flow_action_conntrack *ct =
			(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);

		memcpy(ct, &conntrack_context, sizeof(*ct));
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
						    &error);
	if (!pia->handle) {
		uint32_t destroy_id = pia->id;
		port_action_handle_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	pia->type = action->type;
	printf("Indirect action #%u created\n", pia->id);
	return 0;
}

/** Destroy indirect action */
int
port_action_handle_destroy(portid_t port_id,
			   uint32_t n,
			   const uint32_t *actions)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_indirect_action *pia = *tmp;

			if (actions[i] != pia->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));

			if (pia->handle && rte_flow_action_handle_destroy(
					port_id, pia->handle, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			*tmp = pia->next;
			printf("Indirect action #%u destroyed\n", pia->id);
			free(pia);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Get indirect action by port + id */
struct rte_flow_action_handle *
port_action_handle_get_by_id(portid_t port_id, uint32_t id)
{

	struct port_indirect_action *pia = action_get_by_id(port_id, id);

	return (pia) ? pia->handle : NULL;
}

/** Update indirect action */
int
port_action_handle_update(portid_t port_id, uint32_t id,
			  const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_flow_action_handle *action_handle;
	struct port_indirect_action *pia;
	const void *update;

	action_handle = port_action_handle_get_by_id(port_id, id);
	if (!action_handle)
		return -EINVAL;
	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		update = action->conf;
		break;
	default:
		update = action;
		break;
	}
	if (rte_flow_action_handle_update(port_id, action_handle, update,
					  &error)) {
		return port_flow_complain(&error);
	}
	printf("Indirect action #%u updated\n", id);
	return 0;
}

int
port_action_handle_query(portid_t port_id, uint32_t id)
{
	struct rte_flow_error error;
	struct port_indirect_action *pia;
	union {
		struct rte_flow_query_count count;
		struct rte_flow_query_age age;
		struct rte_flow_action_conntrack ct;
	} query;

	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		fprintf(stderr,
			"Indirect action %u (type: %d) on port %u doesn't support query\n",
			id, pia->type, port_id);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error))
		return port_flow_complain(&error);
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
		printf("Indirect AGE action:\n"
		       " aged: %u\n"
		       " sec_since_last_hit_valid: %u\n"
		       " sec_since_last_hit: %" PRIu32 "\n",
		       query.age.aged,
		       query.age.sec_since_last_hit_valid,
		       query.age.sec_since_last_hit);
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("Indirect COUNT action:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		printf("Conntrack Context:\n"
		       " Peer: %u, Flow dir: %s, Enable: %u\n"
		       " Live: %u, SACK: %u, CACK: %u\n"
		       " Packet dir: %s, Liberal: %u, State: %u\n"
		       " Factor: %u, Retrans: %u, TCP flags: %u\n"
		       " Last Seq: %u, Last ACK: %u\n"
		       " Last Win: %u, Last End: %u\n",
		       query.ct.peer_port,
		       query.ct.is_original_dir ? "Original" : "Reply",
		       query.ct.enable, query.ct.live_connection,
		       query.ct.selective_ack, query.ct.challenge_ack_passed,
		       query.ct.last_direction ? "Original" : "Reply",
		       query.ct.liberal_mode, query.ct.state,
		       query.ct.max_ack_window, query.ct.retransmission_limit,
		       query.ct.last_index, query.ct.last_seq,
		       query.ct.last_ack, query.ct.last_window,
		       query.ct.last_end);
		printf(" Original Dir:\n"
		       "  scale: %u, fin: %u, ack seen: %u\n"
		       "  unacked data: %u\n  Sent end: %u,"
		       "  Reply end: %u, Max win: %u, Max ACK: %u\n",
		       query.ct.original_dir.scale,
		       query.ct.original_dir.close_initiated,
		       query.ct.original_dir.last_ack_seen,
		       query.ct.original_dir.data_unacked,
		       query.ct.original_dir.sent_end,
		       query.ct.original_dir.reply_end,
		       query.ct.original_dir.max_win,
		       query.ct.original_dir.max_ack);
		printf(" Reply Dir:\n"
		       "  scale: %u, fin: %u, ack seen: %u\n"
		       "  unacked data: %u\n  Sent end: %u,"
		       "  Reply end: %u, Max win: %u, Max ACK: %u\n",
		       query.ct.reply_dir.scale,
		       query.ct.reply_dir.close_initiated,
		       query.ct.reply_dir.last_ack_seen,
		       query.ct.reply_dir.data_unacked,
		       query.ct.reply_dir.sent_end,
		       query.ct.reply_dir.reply_end,
		       query.ct.reply_dir.max_win,
		       query.ct.reply_dir.max_ack);
		break;
	default:
		fprintf(stderr,
			"Indirect action %u (type: %d) on port %u doesn't support query\n",
			id, pia->type, port_id);
		break;
	}
	return 0;
}
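
/*
 * Build the item/action arrays used when tunnel offload is requested:
 * the PMD-provided entries returned by rte_flow_tunnel_decap_set() and
 * rte_flow_tunnel_match() are placed first, followed by a copy of the
 * user-supplied arrays (including their END terminators).
 */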
port_flow_complain(&error); 1934 return NULL; 1935 } 1936 for (aptr = actions, num_actions = 1; 1937 aptr->type != RTE_FLOW_ACTION_TYPE_END; 1938 aptr++, num_actions++); 1939 pft->actions = malloc( 1940 (num_actions + pft->num_pmd_actions) * 1941 sizeof(actions[0])); 1942 if (!pft->actions) { 1943 rte_flow_tunnel_action_decap_release( 1944 port_id, pft->pmd_actions, 1945 pft->num_pmd_actions, &error); 1946 return NULL; 1947 } 1948 rte_memcpy(pft->actions, pft->pmd_actions, 1949 pft->num_pmd_actions * sizeof(actions[0])); 1950 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 1951 num_actions * sizeof(actions[0])); 1952 } 1953 if (tunnel_ops->items) { 1954 uint32_t num_items; 1955 const struct rte_flow_item *iptr; 1956 1957 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 1958 &pft->pmd_items, 1959 &pft->num_pmd_items, 1960 &error); 1961 if (ret) { 1962 port_flow_complain(&error); 1963 return NULL; 1964 } 1965 for (iptr = pattern, num_items = 1; 1966 iptr->type != RTE_FLOW_ITEM_TYPE_END; 1967 iptr++, num_items++); 1968 pft->items = malloc((num_items + pft->num_pmd_items) * 1969 sizeof(pattern[0])); 1970 if (!pft->items) { 1971 rte_flow_tunnel_item_release( 1972 port_id, pft->pmd_items, 1973 pft->num_pmd_items, &error); 1974 return NULL; 1975 } 1976 rte_memcpy(pft->items, pft->pmd_items, 1977 pft->num_pmd_items * sizeof(pattern[0])); 1978 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 1979 num_items * sizeof(pattern[0])); 1980 } 1981 1982 return pft; 1983 } 1984 1985 static void 1986 port_flow_tunnel_offload_cmd_release(portid_t port_id, 1987 const struct tunnel_ops *tunnel_ops, 1988 struct port_flow_tunnel *pft) 1989 { 1990 struct rte_flow_error error; 1991 1992 if (tunnel_ops->actions) { 1993 free(pft->actions); 1994 rte_flow_tunnel_action_decap_release( 1995 port_id, pft->pmd_actions, 1996 pft->num_pmd_actions, &error); 1997 pft->actions = NULL; 1998 pft->pmd_actions = NULL; 1999 } 2000 if (tunnel_ops->items) { 2001 free(pft->items); 2002 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2003 pft->num_pmd_items, 2004 &error); 2005 pft->items = NULL; 2006 pft->pmd_items = NULL; 2007 } 2008 } 2009 2010 /** Add port meter policy */ 2011 int 2012 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2013 const struct rte_flow_action *actions) 2014 { 2015 struct rte_mtr_error error; 2016 const struct rte_flow_action *act = actions; 2017 const struct rte_flow_action *start; 2018 struct rte_mtr_meter_policy_params policy; 2019 uint32_t i = 0, act_n; 2020 int ret; 2021 2022 for (i = 0; i < RTE_COLORS; i++) { 2023 for (act_n = 0, start = act; 2024 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2025 act_n++; 2026 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2027 policy.actions[i] = start; 2028 else 2029 policy.actions[i] = NULL; 2030 act++; 2031 } 2032 ret = rte_mtr_meter_policy_add(port_id, 2033 policy_id, 2034 &policy, &error); 2035 if (ret) 2036 print_mtr_err_msg(&error); 2037 return ret; 2038 } 2039 2040 /** Validate flow rule. */ 2041 int 2042 port_flow_validate(portid_t port_id, 2043 const struct rte_flow_attr *attr, 2044 const struct rte_flow_item *pattern, 2045 const struct rte_flow_action *actions, 2046 const struct tunnel_ops *tunnel_ops) 2047 { 2048 struct rte_flow_error error; 2049 struct port_flow_tunnel *pft = NULL; 2050 int ret; 2051 2052 /* Poisoning to make sure PMDs update it in case of error.
*/ 2053 memset(&error, 0x11, sizeof(error)); 2054 if (tunnel_ops->enabled) { 2055 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2056 actions, tunnel_ops); 2057 if (!pft) 2058 return -ENOENT; 2059 if (pft->items) 2060 pattern = pft->items; 2061 if (pft->actions) 2062 actions = pft->actions; 2063 } 2064 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2065 if (tunnel_ops->enabled) 2066 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2067 if (ret) 2068 return port_flow_complain(&error); 2069 printf("Flow rule validated\n"); 2070 return 0; 2071 } 2072 2073 /** Return age action structure if exists, otherwise NULL. */ 2074 static struct rte_flow_action_age * 2075 age_action_get(const struct rte_flow_action *actions) 2076 { 2077 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2078 switch (actions->type) { 2079 case RTE_FLOW_ACTION_TYPE_AGE: 2080 return (struct rte_flow_action_age *) 2081 (uintptr_t)actions->conf; 2082 default: 2083 break; 2084 } 2085 } 2086 return NULL; 2087 } 2088 2089 /** Create flow rule. */ 2090 int 2091 port_flow_create(portid_t port_id, 2092 const struct rte_flow_attr *attr, 2093 const struct rte_flow_item *pattern, 2094 const struct rte_flow_action *actions, 2095 const struct tunnel_ops *tunnel_ops) 2096 { 2097 struct rte_flow *flow; 2098 struct rte_port *port; 2099 struct port_flow *pf; 2100 uint32_t id = 0; 2101 struct rte_flow_error error; 2102 struct port_flow_tunnel *pft = NULL; 2103 struct rte_flow_action_age *age = age_action_get(actions); 2104 2105 port = &ports[port_id]; 2106 if (port->flow_list) { 2107 if (port->flow_list->id == UINT32_MAX) { 2108 fprintf(stderr, 2109 "Highest rule ID is already assigned, delete it first"); 2110 return -ENOMEM; 2111 } 2112 id = port->flow_list->id + 1; 2113 } 2114 if (tunnel_ops->enabled) { 2115 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2116 actions, tunnel_ops); 2117 if (!pft) 2118 return -ENOENT; 2119 if (pft->items) 2120 pattern = pft->items; 2121 if (pft->actions) 2122 actions = pft->actions; 2123 } 2124 pf = port_flow_new(attr, pattern, actions, &error); 2125 if (!pf) 2126 return port_flow_complain(&error); 2127 if (age) { 2128 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2129 age->context = &pf->age_type; 2130 } 2131 /* Poisoning to make sure PMDs update it in case of error. */ 2132 memset(&error, 0x22, sizeof(error)); 2133 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2134 if (!flow) { 2135 if (tunnel_ops->enabled) 2136 port_flow_tunnel_offload_cmd_release(port_id, 2137 tunnel_ops, pft); 2138 free(pf); 2139 return port_flow_complain(&error); 2140 } 2141 pf->next = port->flow_list; 2142 pf->id = id; 2143 pf->flow = flow; 2144 port->flow_list = pf; 2145 if (tunnel_ops->enabled) 2146 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2147 printf("Flow rule #%u created\n", pf->id); 2148 return 0; 2149 } 2150 2151 /** Destroy a number of flow rules. 
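 * Rule IDs may be passed in any order; every matching rule is unlinked from the port's flow list and freed. If rte_flow_destroy() fails for a rule, iteration continues and the last PMD error code is returned.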
*/ 2152 int 2153 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2154 { 2155 struct rte_port *port; 2156 struct port_flow **tmp; 2157 uint32_t c = 0; 2158 int ret = 0; 2159 2160 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2161 port_id == (portid_t)RTE_PORT_ALL) 2162 return -EINVAL; 2163 port = &ports[port_id]; 2164 tmp = &port->flow_list; 2165 while (*tmp) { 2166 uint32_t i; 2167 2168 for (i = 0; i != n; ++i) { 2169 struct rte_flow_error error; 2170 struct port_flow *pf = *tmp; 2171 2172 if (rule[i] != pf->id) 2173 continue; 2174 /* 2175 * Poisoning to make sure PMDs update it in case 2176 * of error. 2177 */ 2178 memset(&error, 0x33, sizeof(error)); 2179 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2180 ret = port_flow_complain(&error); 2181 continue; 2182 } 2183 printf("Flow rule #%u destroyed\n", pf->id); 2184 *tmp = pf->next; 2185 free(pf); 2186 break; 2187 } 2188 if (i == n) 2189 tmp = &(*tmp)->next; 2190 ++c; 2191 } 2192 return ret; 2193 } 2194 2195 /** Remove all flow rules. */ 2196 int 2197 port_flow_flush(portid_t port_id) 2198 { 2199 struct rte_flow_error error; 2200 struct rte_port *port; 2201 int ret = 0; 2202 2203 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2204 port_id == (portid_t)RTE_PORT_ALL) 2205 return -EINVAL; 2206 2207 port = &ports[port_id]; 2208 2209 if (port->flow_list == NULL) 2210 return ret; 2211 2212 /* Poisoning to make sure PMDs update it in case of error. */ 2213 memset(&error, 0x44, sizeof(error)); 2214 if (rte_flow_flush(port_id, &error)) { 2215 port_flow_complain(&error); 2216 } 2217 2218 while (port->flow_list) { 2219 struct port_flow *pf = port->flow_list->next; 2220 2221 free(port->flow_list); 2222 port->flow_list = pf; 2223 } 2224 return ret; 2225 } 2226 2227 /** Dump flow rules. */ 2228 int 2229 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 2230 const char *file_name) 2231 { 2232 int ret = 0; 2233 FILE *file = stdout; 2234 struct rte_flow_error error; 2235 struct rte_port *port; 2236 struct port_flow *pflow; 2237 struct rte_flow *tmpFlow = NULL; 2238 bool found = false; 2239 2240 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2241 port_id == (portid_t)RTE_PORT_ALL) 2242 return -EINVAL; 2243 2244 if (!dump_all) { 2245 port = &ports[port_id]; 2246 pflow = port->flow_list; 2247 while (pflow) { 2248 if (rule_id != pflow->id) { 2249 pflow = pflow->next; 2250 } else { 2251 tmpFlow = pflow->flow; 2252 if (tmpFlow) 2253 found = true; 2254 break; 2255 } 2256 } 2257 if (found == false) { 2258 fprintf(stderr, "Failed to dump flow %u\n", rule_id); 2259 return -EINVAL; 2260 } 2261 } 2262 2263 if (file_name && strlen(file_name)) { 2264 file = fopen(file_name, "w"); 2265 if (!file) { 2266 fprintf(stderr, "Failed to create file %s: %s\n", 2267 file_name, strerror(errno)); 2268 return -errno; 2269 } 2270 } 2271 2272 if (!dump_all) 2273 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 2274 else 2275 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 2276 if (ret) { 2277 port_flow_complain(&error); 2278 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 2279 } else 2280 printf("Flow dump finished\n"); 2281 if (file_name && strlen(file_name)) 2282 fclose(file); 2283 return ret; 2284 } 2285 2286 /** Query a flow rule.
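 * Only COUNT, RSS and AGE actions can be queried; the query union below must be large enough to hold the biggest supported query result.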
*/ 2287 int 2288 port_flow_query(portid_t port_id, uint32_t rule, 2289 const struct rte_flow_action *action) 2290 { 2291 struct rte_flow_error error; 2292 struct rte_port *port; 2293 struct port_flow *pf; 2294 const char *name; 2295 union { 2296 struct rte_flow_query_count count; 2297 struct rte_flow_action_rss rss_conf; 2298 struct rte_flow_query_age age; 2299 } query; 2300 int ret; 2301 2302 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2303 port_id == (portid_t)RTE_PORT_ALL) 2304 return -EINVAL; 2305 port = &ports[port_id]; 2306 for (pf = port->flow_list; pf; pf = pf->next) 2307 if (pf->id == rule) 2308 break; 2309 if (!pf) { 2310 fprintf(stderr, "Flow rule #%u not found\n", rule); 2311 return -ENOENT; 2312 } 2313 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2314 &name, sizeof(name), 2315 (void *)(uintptr_t)action->type, &error); 2316 if (ret < 0) 2317 return port_flow_complain(&error); 2318 switch (action->type) { 2319 case RTE_FLOW_ACTION_TYPE_COUNT: 2320 case RTE_FLOW_ACTION_TYPE_RSS: 2321 case RTE_FLOW_ACTION_TYPE_AGE: 2322 break; 2323 default: 2324 fprintf(stderr, "Cannot query action type %d (%s)\n", 2325 action->type, name); 2326 return -ENOTSUP; 2327 } 2328 /* Poisoning to make sure PMDs update it in case of error. */ 2329 memset(&error, 0x55, sizeof(error)); 2330 memset(&query, 0, sizeof(query)); 2331 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2332 return port_flow_complain(&error); 2333 switch (action->type) { 2334 case RTE_FLOW_ACTION_TYPE_COUNT: 2335 printf("%s:\n" 2336 " hits_set: %u\n" 2337 " bytes_set: %u\n" 2338 " hits: %" PRIu64 "\n" 2339 " bytes: %" PRIu64 "\n", 2340 name, 2341 query.count.hits_set, 2342 query.count.bytes_set, 2343 query.count.hits, 2344 query.count.bytes); 2345 break; 2346 case RTE_FLOW_ACTION_TYPE_RSS: 2347 rss_config_display(&query.rss_conf); 2348 break; 2349 case RTE_FLOW_ACTION_TYPE_AGE: 2350 printf("%s:\n" 2351 " aged: %u\n" 2352 " sec_since_last_hit_valid: %u\n" 2353 " sec_since_last_hit: %" PRIu32 "\n", 2354 name, 2355 query.age.aged, 2356 query.age.sec_since_last_hit_valid, 2357 query.age.sec_since_last_hit); 2358 break; 2359 default: 2360 fprintf(stderr, 2361 "Cannot display result for action type %d (%s)\n", 2362 action->type, name); 2363 break; 2364 } 2365 return 0; 2366 } 2367 2368 /** List all aged flows and optionally destroy them.
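 * Each context returned by rte_flow_get_aged_flows() points at the age_type field of either a port_flow or a port_indirect_action, so container_of() below recovers the owning object.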
*/ 2369 void 2370 port_flow_aged(portid_t port_id, uint8_t destroy) 2371 { 2372 void **contexts; 2373 int nb_context, total = 0, idx; 2374 struct rte_flow_error error; 2375 enum age_action_context_type *type; 2376 union { 2377 struct port_flow *pf; 2378 struct port_indirect_action *pia; 2379 } ctx; 2380 2381 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2382 port_id == (portid_t)RTE_PORT_ALL) 2383 return; 2384 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2385 printf("Port %u total aged flows: %d\n", port_id, total); 2386 if (total < 0) { 2387 port_flow_complain(&error); 2388 return; 2389 } 2390 if (total == 0) 2391 return; 2392 contexts = malloc(sizeof(void *) * total); 2393 if (contexts == NULL) { 2394 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 2395 return; 2396 } 2397 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 2398 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2399 if (nb_context != total) { 2400 fprintf(stderr, 2401 "Port:%d get aged flows count(%d) != total(%d)\n", 2402 port_id, nb_context, total); 2403 free(contexts); 2404 return; 2405 } 2406 total = 0; 2407 for (idx = 0; idx < nb_context; idx++) { 2408 if (!contexts[idx]) { 2409 fprintf(stderr, "Error: got NULL context in port %u\n", 2410 port_id); 2411 continue; 2412 } 2413 type = (enum age_action_context_type *)contexts[idx]; 2414 switch (*type) { 2415 case ACTION_AGE_CONTEXT_TYPE_FLOW: 2416 ctx.pf = container_of(type, struct port_flow, age_type); 2417 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 2418 "\t%c%c%c\t\n", 2419 "Flow", 2420 ctx.pf->id, 2421 ctx.pf->rule.attr->group, 2422 ctx.pf->rule.attr->priority, 2423 ctx.pf->rule.attr->ingress ? 'i' : '-', 2424 ctx.pf->rule.attr->egress ? 'e' : '-', 2425 ctx.pf->rule.attr->transfer ? 't' : '-'); 2426 if (destroy && !port_flow_destroy(port_id, 1, 2427 &ctx.pf->id)) 2428 total++; 2429 break; 2430 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 2431 ctx.pia = container_of(type, 2432 struct port_indirect_action, age_type); 2433 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 2434 ctx.pia->id); 2435 break; 2436 default: 2437 fprintf(stderr, "Error: invalid context type on port %u\n", 2438 port_id); 2439 break; 2440 } 2441 } 2442 printf("\n%d flows destroyed\n", total); 2443 free(contexts); 2444 } 2445 2446 /** List flow rules. */ 2447 void 2448 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2449 { 2450 struct rte_port *port; 2451 struct port_flow *pf; 2452 struct port_flow *list = NULL; 2453 uint32_t i; 2454 2455 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2456 port_id == (portid_t)RTE_PORT_ALL) 2457 return; 2458 port = &ports[port_id]; 2459 if (!port->flow_list) 2460 return; 2461 /* Sort flows by group, priority and ID. */ 2462 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2463 struct port_flow **tmp; 2464 const struct rte_flow_attr *curr = pf->rule.attr; 2465 2466 if (n) { 2467 /* Filter out unwanted groups.
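 * (a rule is kept only when its group ID matches one of the n group IDs supplied by the caller)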
*/ 2468 for (i = 0; i != n; ++i) 2469 if (curr->group == group[i]) 2470 break; 2471 if (i == n) 2472 continue; 2473 } 2474 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2475 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2476 2477 if (curr->group > comp->group || 2478 (curr->group == comp->group && 2479 curr->priority > comp->priority) || 2480 (curr->group == comp->group && 2481 curr->priority == comp->priority && 2482 pf->id > (*tmp)->id)) 2483 continue; 2484 break; 2485 } 2486 pf->tmp = *tmp; 2487 *tmp = pf; 2488 } 2489 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2490 for (pf = list; pf != NULL; pf = pf->tmp) { 2491 const struct rte_flow_item *item = pf->rule.pattern; 2492 const struct rte_flow_action *action = pf->rule.actions; 2493 const char *name; 2494 2495 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2496 pf->id, 2497 pf->rule.attr->group, 2498 pf->rule.attr->priority, 2499 pf->rule.attr->ingress ? 'i' : '-', 2500 pf->rule.attr->egress ? 'e' : '-', 2501 pf->rule.attr->transfer ? 't' : '-'); 2502 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2503 if ((uint32_t)item->type > INT_MAX) 2504 name = "PMD_INTERNAL"; 2505 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2506 &name, sizeof(name), 2507 (void *)(uintptr_t)item->type, 2508 NULL) <= 0) 2509 name = "[UNKNOWN]"; 2510 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2511 printf("%s ", name); 2512 ++item; 2513 } 2514 printf("=>"); 2515 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2516 if ((uint32_t)action->type > INT_MAX) 2517 name = "PMD_INTERNAL"; 2518 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2519 &name, sizeof(name), 2520 (void *)(uintptr_t)action->type, 2521 NULL) <= 0) 2522 name = "[UNKNOWN]"; 2523 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2524 printf(" %s", name); 2525 ++action; 2526 } 2527 printf("\n"); 2528 } 2529 } 2530 2531 /** Restrict ingress traffic to the defined flow rules. */ 2532 int 2533 port_flow_isolate(portid_t port_id, int set) 2534 { 2535 struct rte_flow_error error; 2536 2537 /* Poisoning to make sure PMDs update it in case of error. */ 2538 memset(&error, 0x66, sizeof(error)); 2539 if (rte_flow_isolate(port_id, set, &error)) 2540 return port_flow_complain(&error); 2541 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2542 port_id, 2543 set ? "now restricted" : "not restricted anymore"); 2544 return 0; 2545 } 2546 2547 /* 2548 * RX/TX ring descriptors display functions. 2549 */ 2550 int 2551 rx_queue_id_is_invalid(queueid_t rxq_id) 2552 { 2553 if (rxq_id < nb_rxq) 2554 return 0; 2555 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 2556 rxq_id, nb_rxq); 2557 return 1; 2558 } 2559 2560 int 2561 tx_queue_id_is_invalid(queueid_t txq_id) 2562 { 2563 if (txq_id < nb_txq) 2564 return 0; 2565 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 2566 txq_id, nb_txq); 2567 return 1; 2568 } 2569 2570 static int 2571 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2572 { 2573 struct rte_port *port = &ports[port_id]; 2574 struct rte_eth_rxq_info rx_qinfo; 2575 int ret; 2576 2577 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2578 if (ret == 0) { 2579 *ring_size = rx_qinfo.nb_desc; 2580 return ret; 2581 } 2582 2583 if (ret != -ENOTSUP) 2584 return ret; 2585 /* 2586 * If rte_eth_rx_queue_info_get() is not supported by this PMD, 2587 * the ring_size stored in testpmd will be used for validity verification.
2588 * When the rxq is configured by rte_eth_rx_queue_setup() with nb_rx_desc 2589 * being 0, a default value provided by the PMD is used to set up this 2590 * rxq. If that default value is also 0, 2591 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE is used to set up this rxq. 2592 */ 2593 if (port->nb_rx_desc[rxq_id]) 2594 *ring_size = port->nb_rx_desc[rxq_id]; 2595 else if (port->dev_info.default_rxportconf.ring_size) 2596 *ring_size = port->dev_info.default_rxportconf.ring_size; 2597 else 2598 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2599 return 0; 2600 } 2601 2602 static int 2603 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2604 { 2605 struct rte_port *port = &ports[port_id]; 2606 struct rte_eth_txq_info tx_qinfo; 2607 int ret; 2608 2609 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2610 if (ret == 0) { 2611 *ring_size = tx_qinfo.nb_desc; 2612 return ret; 2613 } 2614 2615 if (ret != -ENOTSUP) 2616 return ret; 2617 /* 2618 * If rte_eth_tx_queue_info_get() is not supported by this PMD, 2619 * the ring_size stored in testpmd will be used for validity verification. 2620 * When the txq is configured by rte_eth_tx_queue_setup() with nb_tx_desc 2621 * being 0, a default value provided by the PMD is used to set up this 2622 * txq. If that default value is also 0, 2623 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE is used to set up this txq. 2624 */ 2625 if (port->nb_tx_desc[txq_id]) 2626 *ring_size = port->nb_tx_desc[txq_id]; 2627 else if (port->dev_info.default_txportconf.ring_size) 2628 *ring_size = port->dev_info.default_txportconf.ring_size; 2629 else 2630 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2631 return 0; 2632 } 2633 2634 static int 2635 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2636 { 2637 uint16_t ring_size; 2638 int ret; 2639 2640 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2641 if (ret) 2642 return 1; 2643 2644 if (rxdesc_id < ring_size) 2645 return 0; 2646 2647 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 2648 rxdesc_id, ring_size); 2649 return 1; 2650 } 2651 2652 static int 2653 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2654 { 2655 uint16_t ring_size; 2656 int ret; 2657 2658 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2659 if (ret) 2660 return 1; 2661 2662 if (txdesc_id < ring_size) 2663 return 0; 2664 2665 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 2666 txdesc_id, ring_size); 2667 return 1; 2668 } 2669 2670 static const struct rte_memzone * 2671 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2672 { 2673 char mz_name[RTE_MEMZONE_NAMESIZE]; 2674 const struct rte_memzone *mz; 2675 2676 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2677 port_id, q_id, ring_name); 2678 mz = rte_memzone_lookup(mz_name); 2679 if (mz == NULL) 2680 fprintf(stderr, 2681 "%s ring memory zone of (port %d, queue %d) not found (zone name = %s)\n", 2682 ring_name, port_id, q_id, mz_name); 2683 return mz; 2684 } 2685 2686 union igb_ring_dword { 2687 uint64_t dword; 2688 struct { 2689 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2690 uint32_t lo; 2691 uint32_t hi; 2692 #else 2693 uint32_t hi; 2694 uint32_t lo; 2695 #endif 2696 } words; 2697 }; 2698 2699 struct igb_ring_desc_32_bytes { 2700 union igb_ring_dword lo_dword; 2701 union igb_ring_dword hi_dword; 2702 union igb_ring_dword resv1; 2703 union igb_ring_dword resv2; 2704 }; 2705 2706 struct igb_ring_desc_16_bytes { 2707 union igb_ring_dword lo_dword; 2708 union
igb_ring_dword hi_dword; 2709 }; 2710 2711 static void 2712 ring_rxd_display_dword(union igb_ring_dword dword) 2713 { 2714 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2715 (unsigned)dword.words.hi); 2716 } 2717 2718 static void 2719 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2720 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2721 portid_t port_id, 2722 #else 2723 __rte_unused portid_t port_id, 2724 #endif 2725 uint16_t desc_id) 2726 { 2727 struct igb_ring_desc_16_bytes *ring = 2728 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2729 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2730 int ret; 2731 struct rte_eth_dev_info dev_info; 2732 2733 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2734 if (ret != 0) 2735 return; 2736 2737 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2738 /* 32 bytes RX descriptor, i40e only */ 2739 struct igb_ring_desc_32_bytes *ring = 2740 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2741 ring[desc_id].lo_dword.dword = 2742 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2743 ring_rxd_display_dword(ring[desc_id].lo_dword); 2744 ring[desc_id].hi_dword.dword = 2745 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2746 ring_rxd_display_dword(ring[desc_id].hi_dword); 2747 ring[desc_id].resv1.dword = 2748 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2749 ring_rxd_display_dword(ring[desc_id].resv1); 2750 ring[desc_id].resv2.dword = 2751 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2752 ring_rxd_display_dword(ring[desc_id].resv2); 2753 2754 return; 2755 } 2756 #endif 2757 /* 16 bytes RX descriptor */ 2758 ring[desc_id].lo_dword.dword = 2759 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2760 ring_rxd_display_dword(ring[desc_id].lo_dword); 2761 ring[desc_id].hi_dword.dword = 2762 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2763 ring_rxd_display_dword(ring[desc_id].hi_dword); 2764 } 2765 2766 static void 2767 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2768 { 2769 struct igb_ring_desc_16_bytes *ring; 2770 struct igb_ring_desc_16_bytes txd; 2771 2772 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2773 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2774 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2775 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2776 (unsigned)txd.lo_dword.words.lo, 2777 (unsigned)txd.lo_dword.words.hi, 2778 (unsigned)txd.hi_dword.words.lo, 2779 (unsigned)txd.hi_dword.words.hi); 2780 } 2781 2782 void 2783 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2784 { 2785 const struct rte_memzone *rx_mz; 2786 2787 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2788 return; 2789 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2790 if (rx_mz == NULL) 2791 return; 2792 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2793 } 2794 2795 void 2796 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2797 { 2798 const struct rte_memzone *tx_mz; 2799 2800 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2801 return; 2802 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2803 if (tx_mz == NULL) 2804 return; 2805 ring_tx_descriptor_display(tx_mz, txd_id); 2806 } 2807 2808 void 2809 fwd_lcores_config_display(void) 2810 { 2811 lcoreid_t lc_id; 2812 2813 printf("List of forwarding lcores:"); 2814 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2815 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2816 printf("\n"); 2817 } 2818 void 2819 rxtx_config_display(void) 2820 { 2821 portid_t 
pid; 2822 queueid_t qid; 2823 2824 printf(" %s packet forwarding%s packets/burst=%d\n", 2825 cur_fwd_eng->fwd_mode_name, 2826 retry_enabled == 0 ? "" : " with retry", 2827 nb_pkt_per_burst); 2828 2829 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2830 printf(" packet len=%u - nb packet segments=%d\n", 2831 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2832 2833 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2834 nb_fwd_lcores, nb_fwd_ports); 2835 2836 RTE_ETH_FOREACH_DEV(pid) { 2837 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2838 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2839 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2840 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2841 struct rte_eth_rxq_info rx_qinfo; 2842 struct rte_eth_txq_info tx_qinfo; 2843 uint16_t rx_free_thresh_tmp; 2844 uint16_t tx_free_thresh_tmp; 2845 uint16_t tx_rs_thresh_tmp; 2846 uint16_t nb_rx_desc_tmp; 2847 uint16_t nb_tx_desc_tmp; 2848 uint64_t offloads_tmp; 2849 uint8_t pthresh_tmp; 2850 uint8_t hthresh_tmp; 2851 uint8_t wthresh_tmp; 2852 int32_t rc; 2853 2854 /* per port config */ 2855 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2856 (unsigned int)pid, nb_rxq, nb_txq); 2857 2858 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2859 ports[pid].dev_conf.rxmode.offloads, 2860 ports[pid].dev_conf.txmode.offloads); 2861 2862 /* per rx queue config only for first queue to be less verbose */ 2863 for (qid = 0; qid < 1; qid++) { 2864 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2865 if (rc) { 2866 nb_rx_desc_tmp = nb_rx_desc[qid]; 2867 rx_free_thresh_tmp = 2868 rx_conf[qid].rx_free_thresh; 2869 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2870 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2871 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2872 offloads_tmp = rx_conf[qid].offloads; 2873 } else { 2874 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2875 rx_free_thresh_tmp = 2876 rx_qinfo.conf.rx_free_thresh; 2877 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2878 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2879 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2880 offloads_tmp = rx_qinfo.conf.offloads; 2881 } 2882 2883 printf(" RX queue: %d\n", qid); 2884 printf(" RX desc=%d - RX free threshold=%d\n", 2885 nb_rx_desc_tmp, rx_free_thresh_tmp); 2886 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2887 " wthresh=%d\n", 2888 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2889 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 2890 if (rx_conf->share_group > 0) 2891 printf(" share_group=%u share_qid=%u", 2892 rx_conf->share_group, 2893 rx_conf->share_qid); 2894 printf("\n"); 2895 } 2896 2897 /* per tx queue config only for first queue to be less verbose */ 2898 for (qid = 0; qid < 1; qid++) { 2899 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2900 if (rc) { 2901 nb_tx_desc_tmp = nb_tx_desc[qid]; 2902 tx_free_thresh_tmp = 2903 tx_conf[qid].tx_free_thresh; 2904 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2905 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2906 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2907 offloads_tmp = tx_conf[qid].offloads; 2908 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2909 } else { 2910 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2911 tx_free_thresh_tmp = 2912 tx_qinfo.conf.tx_free_thresh; 2913 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2914 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2915 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2916 offloads_tmp = tx_qinfo.conf.offloads; 2917 
tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2918 } 2919 2920 printf(" TX queue: %d\n", qid); 2921 printf(" TX desc=%d - TX free threshold=%d\n", 2922 nb_tx_desc_tmp, tx_free_thresh_tmp); 2923 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2924 " wthresh=%d\n", 2925 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2926 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2927 offloads_tmp, tx_rs_thresh_tmp); 2928 } 2929 } 2930 } 2931 2932 void 2933 port_rss_reta_info(portid_t port_id, 2934 struct rte_eth_rss_reta_entry64 *reta_conf, 2935 uint16_t nb_entries) 2936 { 2937 uint16_t i, idx, shift; 2938 int ret; 2939 2940 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2941 return; 2942 2943 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2944 if (ret != 0) { 2945 fprintf(stderr, 2946 "Failed to get RSS RETA info, return code = %d\n", 2947 ret); 2948 return; 2949 } 2950 2951 for (i = 0; i < nb_entries; i++) { 2952 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2953 shift = i % RTE_ETH_RETA_GROUP_SIZE; 2954 if (!(reta_conf[idx].mask & (1ULL << shift))) 2955 continue; 2956 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2957 i, reta_conf[idx].reta[shift]); 2958 } 2959 } 2960 2961 /* 2962 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 2963 * key of the port. 2964 */ 2965 void 2966 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2967 { 2968 struct rte_eth_rss_conf rss_conf = {0}; 2969 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2970 uint64_t rss_hf; 2971 uint8_t i; 2972 int diag; 2973 struct rte_eth_dev_info dev_info; 2974 uint8_t hash_key_size; 2975 int ret; 2976 2977 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2978 return; 2979 2980 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2981 if (ret != 0) 2982 return; 2983 2984 if (dev_info.hash_key_size > 0 && 2985 dev_info.hash_key_size <= sizeof(rss_key)) 2986 hash_key_size = dev_info.hash_key_size; 2987 else { 2988 fprintf(stderr, 2989 "dev_info did not provide a valid hash key size\n"); 2990 return; 2991 } 2992 2993 /* Get RSS hash key if asked to display it */ 2994 rss_conf.rss_key = (show_rss_key) ? 
rss_key : NULL; 2995 rss_conf.rss_key_len = hash_key_size; 2996 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2997 if (diag != 0) { 2998 switch (diag) { 2999 case -ENODEV: 3000 fprintf(stderr, "port index %d invalid\n", port_id); 3001 break; 3002 case -ENOTSUP: 3003 fprintf(stderr, "operation not supported by device\n"); 3004 break; 3005 default: 3006 fprintf(stderr, "operation failed - diag=%d\n", diag); 3007 break; 3008 } 3009 return; 3010 } 3011 rss_hf = rss_conf.rss_hf; 3012 if (rss_hf == 0) { 3013 printf("RSS disabled\n"); 3014 return; 3015 } 3016 printf("RSS functions:\n "); 3017 for (i = 0; rss_type_table[i].str; i++) { 3018 if (rss_type_table[i].rss_type == 0) 3019 continue; 3020 if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type) 3021 printf("%s ", rss_type_table[i].str); 3022 } 3023 printf("\n"); 3024 if (!show_rss_key) 3025 return; 3026 printf("RSS key:\n"); 3027 for (i = 0; i < hash_key_size; i++) 3028 printf("%02X", rss_key[i]); 3029 printf("\n"); 3030 } 3031 3032 void 3033 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3034 uint8_t hash_key_len) 3035 { 3036 struct rte_eth_rss_conf rss_conf; 3037 int diag; 3038 unsigned int i; 3039 3040 rss_conf.rss_key = NULL; 3041 rss_conf.rss_key_len = 0; 3042 rss_conf.rss_hf = 0; 3043 for (i = 0; rss_type_table[i].str; i++) { 3044 if (!strcmp(rss_type_table[i].str, rss_type)) 3045 rss_conf.rss_hf = rss_type_table[i].rss_type; 3046 } 3047 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3048 if (diag == 0) { 3049 rss_conf.rss_key = hash_key; 3050 rss_conf.rss_key_len = hash_key_len; 3051 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3052 } 3053 if (diag == 0) 3054 return; 3055 3056 switch (diag) { 3057 case -ENODEV: 3058 fprintf(stderr, "port index %d invalid\n", port_id); 3059 break; 3060 case -ENOTSUP: 3061 fprintf(stderr, "operation not supported by device\n"); 3062 break; 3063 default: 3064 fprintf(stderr, "operation failed - diag=%d\n", diag); 3065 break; 3066 } 3067 } 3068 3069 /* 3070 * Check whether a shared rxq is scheduled on other lcores. 3071 */ 3072 static bool 3073 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 3074 portid_t src_port, queueid_t src_rxq, 3075 uint32_t share_group, queueid_t share_rxq) 3076 { 3077 streamid_t sm_id; 3078 streamid_t nb_fs_per_lcore; 3079 lcoreid_t nb_fc; 3080 lcoreid_t lc_id; 3081 struct fwd_stream *fs; 3082 struct rte_port *port; 3083 struct rte_eth_dev_info *dev_info; 3084 struct rte_eth_rxconf *rxq_conf; 3085 3086 nb_fc = cur_fwd_config.nb_fwd_lcores; 3087 /* Check remaining cores. */ 3088 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 3089 sm_id = fwd_lcores[lc_id]->stream_idx; 3090 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3091 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3092 sm_id++) { 3093 fs = fwd_streams[sm_id]; 3094 port = &ports[fs->rx_port]; 3095 dev_info = &port->dev_info; 3096 rxq_conf = &port->rx_conf[fs->rx_queue]; 3097 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3098 == 0 || rxq_conf->share_group == 0) 3099 /* Not shared rxq.
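 * (this stream's queue is not part of any share group, nothing to check)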
*/ 3100 continue; 3101 if (domain_id != port->dev_info.switch_info.domain_id) 3102 continue; 3103 if (rxq_conf->share_group != share_group) 3104 continue; 3105 if (rxq_conf->share_qid != share_rxq) 3106 continue; 3107 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 3108 share_group, share_rxq); 3109 printf("  lcore %hhu Port %hu queue %hu\n", 3110 src_lc, src_port, src_rxq); 3111 printf("  lcore %hhu Port %hu queue %hu\n", 3112 lc_id, fs->rx_port, fs->rx_queue); 3113 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 3114 nb_rxq); 3115 return true; 3116 } 3117 } 3118 return false; 3119 } 3120 3121 /* 3122 * Check shared rxq configuration. 3123 * 3124 * A shared group must not be scheduled on different cores. 3125 */ 3126 bool 3127 pkt_fwd_shared_rxq_check(void) 3128 { 3129 streamid_t sm_id; 3130 streamid_t nb_fs_per_lcore; 3131 lcoreid_t nb_fc; 3132 lcoreid_t lc_id; 3133 struct fwd_stream *fs; 3134 uint16_t domain_id; 3135 struct rte_port *port; 3136 struct rte_eth_dev_info *dev_info; 3137 struct rte_eth_rxconf *rxq_conf; 3138 3139 if (rxq_share == 0) 3140 return true; 3141 nb_fc = cur_fwd_config.nb_fwd_lcores; 3142 /* 3143 * Check streams on each core, make sure the same switch domain + 3144 * group + queue doesn't get scheduled on other cores. 3145 */ 3146 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 3147 sm_id = fwd_lcores[lc_id]->stream_idx; 3148 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3149 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3150 sm_id++) { 3151 fs = fwd_streams[sm_id]; 3152 /* Update lcore info for the stream being scheduled. */ 3153 fs->lcore = fwd_lcores[lc_id]; 3154 port = &ports[fs->rx_port]; 3155 dev_info = &port->dev_info; 3156 rxq_conf = &port->rx_conf[fs->rx_queue]; 3157 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3158 == 0 || rxq_conf->share_group == 0) 3159 /* Not shared rxq. */ 3160 continue; 3161 /* Check that the shared rxq is not scheduled on remaining cores. */ 3162 domain_id = port->dev_info.switch_info.domain_id; 3163 if (fwd_stream_on_other_lcores(domain_id, lc_id, 3164 fs->rx_port, 3165 fs->rx_queue, 3166 rxq_conf->share_group, 3167 rxq_conf->share_qid)) 3168 return false; 3169 } 3170 } 3171 return true; 3172 } 3173 3174 /* 3175 * Setup forwarding configuration for each logical core. 3176 */ 3177 static void 3178 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 3179 { 3180 streamid_t nb_fs_per_lcore; 3181 streamid_t nb_fs; 3182 streamid_t sm_id; 3183 lcoreid_t nb_extra; 3184 lcoreid_t nb_fc; 3185 lcoreid_t nb_lc; 3186 lcoreid_t lc_id; 3187 3188 nb_fs = cfg->nb_fwd_streams; 3189 nb_fc = cfg->nb_fwd_lcores; 3190 if (nb_fs <= nb_fc) { 3191 nb_fs_per_lcore = 1; 3192 nb_extra = 0; 3193 } else { 3194 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3195 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3196 } 3197 3198 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3199 sm_id = 0; 3200 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3201 fwd_lcores[lc_id]->stream_idx = sm_id; 3202 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3203 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3204 } 3205 3206 /* 3207 * Assign extra remaining streams, if any.
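 * For example, 10 streams on 4 lcores give nb_fs_per_lcore = 2 and nb_extra = 2: lcores 0-1 get 2 streams each and lcores 2-3 get 3 each.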
3208 */ 3209 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3210 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3211 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3212 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3213 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3214 } 3215 } 3216 3217 static portid_t 3218 fwd_topology_tx_port_get(portid_t rxp) 3219 { 3220 static int warning_once = 1; 3221 3222 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 3223 3224 switch (port_topology) { 3225 default: 3226 case PORT_TOPOLOGY_PAIRED: 3227 if ((rxp & 0x1) == 0) { 3228 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 3229 return rxp + 1; 3230 if (warning_once) { 3231 fprintf(stderr, 3232 "\nWarning! port-topology=paired with an odd number of forwarding ports; the last port will pair with itself.\n\n"); 3233 warning_once = 0; 3234 } 3235 return rxp; 3236 } 3237 return rxp - 1; 3238 case PORT_TOPOLOGY_CHAINED: 3239 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 3240 case PORT_TOPOLOGY_LOOP: 3241 return rxp; 3242 } 3243 } 3244 3245 static void 3246 simple_fwd_config_setup(void) 3247 { 3248 portid_t i; 3249 3250 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 3251 cur_fwd_config.nb_fwd_streams = 3252 (streamid_t) cur_fwd_config.nb_fwd_ports; 3253 3254 /* reinitialize forwarding streams */ 3255 init_fwd_streams(); 3256 3257 /* 3258 * In the simple forwarding test, the number of forwarding cores 3259 * must be lower or equal to the number of forwarding ports. 3260 */ 3261 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3262 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 3263 cur_fwd_config.nb_fwd_lcores = 3264 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 3265 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3266 3267 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 3268 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 3269 fwd_streams[i]->rx_queue = 0; 3270 fwd_streams[i]->tx_port = 3271 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 3272 fwd_streams[i]->tx_queue = 0; 3273 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 3274 fwd_streams[i]->retry_enabled = retry_enabled; 3275 } 3276 } 3277 3278 /** 3279 * For the RSS forwarding test, all streams are distributed over the lcores. Each 3280 * stream is composed of an RX queue to poll on an RX port for input packets, 3281 * associated with a TX queue of a TX port to which forwarded packets are sent. 3282 */ 3283 static void 3284 rss_fwd_config_setup(void) 3285 { 3286 portid_t rxp; 3287 portid_t txp; 3288 queueid_t rxq; 3289 queueid_t nb_q; 3290 streamid_t sm_id; 3291 int start; 3292 int end; 3293 3294 nb_q = nb_rxq; 3295 if (nb_q > nb_txq) 3296 nb_q = nb_txq; 3297 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3298 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3299 cur_fwd_config.nb_fwd_streams = 3300 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 3301 3302 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3303 cur_fwd_config.nb_fwd_lcores = 3304 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3305 3306 /* reinitialize forwarding streams */ 3307 init_fwd_streams(); 3308 3309 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3310 3311 if (proc_id > 0 && nb_q % num_procs != 0) 3312 printf("Warning! the number of queues should be a multiple of the number of processes, or packet loss will happen.\n"); 3313 3314 /** 3315 * In multi-process mode, all queues are allocated to different 3316 * processes based on num_procs and proc_id. For example, 3317 * with 4 queues (nb_q) and 2 processes (num_procs), 3318 * queues 0~1 go to the primary process,
3319 * and queues 2~3 go to the secondary process. 3320 */ 3321 start = proc_id * nb_q / num_procs; 3322 end = start + nb_q / num_procs; 3323 rxp = 0; 3324 rxq = start; 3325 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 3326 struct fwd_stream *fs; 3327 3328 fs = fwd_streams[sm_id]; 3329 txp = fwd_topology_tx_port_get(rxp); 3330 fs->rx_port = fwd_ports_ids[rxp]; 3331 fs->rx_queue = rxq; 3332 fs->tx_port = fwd_ports_ids[txp]; 3333 fs->tx_queue = rxq; 3334 fs->peer_addr = fs->tx_port; 3335 fs->retry_enabled = retry_enabled; 3336 rxp++; 3337 if (rxp < nb_fwd_ports) 3338 continue; 3339 rxp = 0; 3340 rxq++; 3341 if (rxq >= end) 3342 rxq = start; 3343 } 3344 } 3345 3346 static uint16_t 3347 get_fwd_port_total_tc_num(void) 3348 { 3349 struct rte_eth_dcb_info dcb_info; 3350 uint16_t total_tc_num = 0; 3351 unsigned int i; 3352 3353 for (i = 0; i < nb_fwd_ports; i++) { 3354 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 3355 total_tc_num += dcb_info.nb_tcs; 3356 } 3357 3358 return total_tc_num; 3359 } 3360 3361 /** 3362 * For the DCB forwarding test, each core is assigned to one traffic class. 3363 * 3364 * Each core is assigned multiple streams; each stream is composed of 3365 * an RX queue to poll on an RX port for input packets, associated with 3366 * a TX queue of a TX port to which forwarded packets are sent. All RX and 3367 * TX queues of a stream map to the same traffic class. 3368 * If VMDQ and DCB co-exist, traffic classes on different pools share 3369 * the same core. 3370 */ 3371 static void 3372 dcb_fwd_config_setup(void) 3373 { 3374 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3375 portid_t txp, rxp = 0; 3376 queueid_t txq, rxq = 0; 3377 lcoreid_t lc_id; 3378 uint16_t nb_rx_queue, nb_tx_queue; 3379 uint16_t i, j, k, sm_id = 0; 3380 uint16_t total_tc_num; 3381 struct rte_port *port; 3382 uint8_t tc = 0; 3383 portid_t pid; 3384 int ret; 3385 3386 /* 3387 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 3388 * or RTE_PORT_STOPPED. 3389 * 3390 * Re-configure ports to get an updated mapping between TC and queue in 3391 * case the queue number of the port is changed. Skip started ports, 3392 * since modifying the queue number and calling dev_configure require the 3393 * ports to be stopped first.
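 * (rte_eth_dev_configure() below refreshes the DCB info, so the TC-to-queue mapping read later via rte_eth_dev_get_dcb_info() is up to date)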
*/ 3395 for (pid = 0; pid < nb_fwd_ports; pid++) { 3396 if (port_is_started(pid) == 1) 3397 continue; 3398 3399 port = &ports[pid]; 3400 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 3401 &port->dev_conf); 3402 if (ret < 0) { 3403 fprintf(stderr, 3404 "Failed to re-configure port %d, ret = %d.\n", 3405 pid, ret); 3406 return; 3407 } 3408 } 3409 3410 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3411 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3412 cur_fwd_config.nb_fwd_streams = 3413 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3414 total_tc_num = get_fwd_port_total_tc_num(); 3415 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 3416 cur_fwd_config.nb_fwd_lcores = total_tc_num; 3417 3418 /* reinitialize forwarding streams */ 3419 init_fwd_streams(); 3420 sm_id = 0; 3421 txp = 1; 3422 /* get the dcb info on the first RX and TX ports */ 3423 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3424 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3425 3426 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3427 fwd_lcores[lc_id]->stream_nb = 0; 3428 fwd_lcores[lc_id]->stream_idx = sm_id; 3429 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 3430 /* if nb_queue is zero, this TC is 3431 * not enabled on the pool 3432 */ 3433 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3434 break; 3435 k = fwd_lcores[lc_id]->stream_nb + 3436 fwd_lcores[lc_id]->stream_idx; 3437 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3438 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3439 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3440 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3441 for (j = 0; j < nb_rx_queue; j++) { 3442 struct fwd_stream *fs; 3443 3444 fs = fwd_streams[k + j]; 3445 fs->rx_port = fwd_ports_ids[rxp]; 3446 fs->rx_queue = rxq + j; 3447 fs->tx_port = fwd_ports_ids[txp]; 3448 fs->tx_queue = txq + j % nb_tx_queue; 3449 fs->peer_addr = fs->tx_port; 3450 fs->retry_enabled = retry_enabled; 3451 } 3452 fwd_lcores[lc_id]->stream_nb += 3453 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3454 } 3455 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3456 3457 tc++; 3458 if (tc < rxp_dcb_info.nb_tcs) 3459 continue; 3460 /* Restart from TC 0 on next RX port */ 3461 tc = 0; 3462 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3463 rxp = (portid_t) 3464 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3465 else 3466 rxp++; 3467 if (rxp >= nb_fwd_ports) 3468 return; 3469 /* get the dcb information on next RX and TX ports */ 3470 if ((rxp & 0x1) == 0) 3471 txp = (portid_t) (rxp + 1); 3472 else 3473 txp = (portid_t) (rxp - 1); 3474 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3475 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3476 } 3477 } 3478 3479 static void 3480 icmp_echo_config_setup(void) 3481 { 3482 portid_t rxp; 3483 queueid_t rxq; 3484 lcoreid_t lc_id; 3485 uint16_t sm_id; 3486 3487 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3488 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3489 (nb_txq * nb_fwd_ports); 3490 else 3491 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3492 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3493 cur_fwd_config.nb_fwd_streams = 3494 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3495 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3496 cur_fwd_config.nb_fwd_lcores = 3497 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3498 if (verbose_level > 0) { 3499 printf("%s fwd_cores=%d fwd_ports=%d
fwd_streams=%d\n", 3500 __FUNCTION__, 3501 cur_fwd_config.nb_fwd_lcores, 3502 cur_fwd_config.nb_fwd_ports, 3503 cur_fwd_config.nb_fwd_streams); 3504 } 3505 3506 /* reinitialize forwarding streams */ 3507 init_fwd_streams(); 3508 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3509 rxp = 0; rxq = 0; 3510 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3511 if (verbose_level > 0) 3512 printf("  core=%d: \n", lc_id); 3513 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3514 struct fwd_stream *fs; 3515 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3516 fs->rx_port = fwd_ports_ids[rxp]; 3517 fs->rx_queue = rxq; 3518 fs->tx_port = fs->rx_port; 3519 fs->tx_queue = rxq; 3520 fs->peer_addr = fs->tx_port; 3521 fs->retry_enabled = retry_enabled; 3522 if (verbose_level > 0) 3523 printf("  stream=%d port=%d rxq=%d txq=%d\n", 3524 sm_id, fs->rx_port, fs->rx_queue, 3525 fs->tx_queue); 3526 rxq = (queueid_t) (rxq + 1); 3527 if (rxq == nb_rxq) { 3528 rxq = 0; 3529 rxp = (portid_t) (rxp + 1); 3530 } 3531 } 3532 } 3533 } 3534 3535 void 3536 fwd_config_setup(void) 3537 { 3538 struct rte_port *port; 3539 portid_t pt_id; 3540 unsigned int i; 3541 3542 cur_fwd_config.fwd_eng = cur_fwd_eng; 3543 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3544 icmp_echo_config_setup(); 3545 return; 3546 } 3547 3548 if ((nb_rxq > 1) && (nb_txq > 1)) { 3549 if (dcb_config) { 3550 for (i = 0; i < nb_fwd_ports; i++) { 3551 pt_id = fwd_ports_ids[i]; 3552 port = &ports[pt_id]; 3553 if (!port->dcb_flag) { 3554 fprintf(stderr, 3555 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 3556 return; 3557 } 3558 } 3559 if (nb_fwd_lcores == 1) { 3560 fprintf(stderr, 3561 "In DCB mode, the number of forwarding cores should be larger than 1.\n"); 3562 return; 3563 } 3564 3565 dcb_fwd_config_setup(); 3566 } else 3567 rss_fwd_config_setup(); 3568 } 3569 else 3570 simple_fwd_config_setup(); 3571 } 3572 3573 static const char * 3574 mp_alloc_to_str(uint8_t mode) 3575 { 3576 switch (mode) { 3577 case MP_ALLOC_NATIVE: 3578 return "native"; 3579 case MP_ALLOC_ANON: 3580 return "anon"; 3581 case MP_ALLOC_XMEM: 3582 return "xmem"; 3583 case MP_ALLOC_XMEM_HUGE: 3584 return "xmemhuge"; 3585 case MP_ALLOC_XBUF: 3586 return "xbuf"; 3587 default: 3588 return "invalid"; 3589 } 3590 } 3591 3592 void 3593 pkt_fwd_config_display(struct fwd_config *cfg) 3594 { 3595 struct fwd_stream *fs; 3596 lcoreid_t lc_id; 3597 streamid_t sm_id; 3598 3599 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3600 "NUMA support %s, MP allocation mode: %s\n", 3601 cfg->fwd_eng->fwd_mode_name, 3602 retry_enabled == 0 ? "" : " with retry", 3603 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3604 numa_support == 1 ?
"enabled" : "disabled", 3605 mp_alloc_to_str(mp_alloc_type)); 3606 3607 if (retry_enabled) 3608 printf("TX retry num: %u, delay between TX retries: %uus\n", 3609 burst_tx_retry_num, burst_tx_delay_time); 3610 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3611 printf("Logical Core %u (socket %u) forwards packets on " 3612 "%d streams:", 3613 fwd_lcores_cpuids[lc_id], 3614 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3615 fwd_lcores[lc_id]->stream_nb); 3616 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3617 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3618 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3619 "P=%d/Q=%d (socket %u) ", 3620 fs->rx_port, fs->rx_queue, 3621 ports[fs->rx_port].socket_id, 3622 fs->tx_port, fs->tx_queue, 3623 ports[fs->tx_port].socket_id); 3624 print_ethaddr("peer=", 3625 &peer_eth_addrs[fs->peer_addr]); 3626 } 3627 printf("\n"); 3628 } 3629 printf("\n"); 3630 } 3631 3632 void 3633 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3634 { 3635 struct rte_ether_addr new_peer_addr; 3636 if (!rte_eth_dev_is_valid_port(port_id)) { 3637 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 3638 return; 3639 } 3640 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3641 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 3642 peer_addr); 3643 return; 3644 } 3645 peer_eth_addrs[port_id] = new_peer_addr; 3646 } 3647 3648 int 3649 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3650 { 3651 unsigned int i; 3652 unsigned int lcore_cpuid; 3653 int record_now; 3654 3655 record_now = 0; 3656 again: 3657 for (i = 0; i < nb_lc; i++) { 3658 lcore_cpuid = lcorelist[i]; 3659 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3660 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 3661 return -1; 3662 } 3663 if (lcore_cpuid == rte_get_main_lcore()) { 3664 fprintf(stderr, 3665 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 3666 lcore_cpuid); 3667 return -1; 3668 } 3669 if (record_now) 3670 fwd_lcores_cpuids[i] = lcore_cpuid; 3671 } 3672 if (record_now == 0) { 3673 record_now = 1; 3674 goto again; 3675 } 3676 nb_cfg_lcores = (lcoreid_t) nb_lc; 3677 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3678 printf("previous number of forwarding cores %u - changed to " 3679 "number of configured cores %u\n", 3680 (unsigned int) nb_fwd_lcores, nb_lc); 3681 nb_fwd_lcores = (lcoreid_t) nb_lc; 3682 } 3683 3684 return 0; 3685 } 3686 3687 int 3688 set_fwd_lcores_mask(uint64_t lcoremask) 3689 { 3690 unsigned int lcorelist[64]; 3691 unsigned int nb_lc; 3692 unsigned int i; 3693 3694 if (lcoremask == 0) { 3695 fprintf(stderr, "Invalid NULL mask of cores\n"); 3696 return -1; 3697 } 3698 nb_lc = 0; 3699 for (i = 0; i < 64; i++) { 3700 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3701 continue; 3702 lcorelist[nb_lc++] = i; 3703 } 3704 return set_fwd_lcores_list(lcorelist, nb_lc); 3705 } 3706 3707 void 3708 set_fwd_lcores_number(uint16_t nb_lc) 3709 { 3710 if (test_done == 0) { 3711 fprintf(stderr, "Please stop forwarding first\n"); 3712 return; 3713 } 3714 if (nb_lc > nb_cfg_lcores) { 3715 fprintf(stderr, 3716 "nb fwd cores %u > %u (max. 
number of configured lcores) - ignored\n", 3717 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3718 return; 3719 } 3720 nb_fwd_lcores = (lcoreid_t) nb_lc; 3721 printf("Number of forwarding cores set to %u\n", 3722 (unsigned int) nb_fwd_lcores); 3723 } 3724 3725 void 3726 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3727 { 3728 unsigned int i; 3729 portid_t port_id; 3730 int record_now; 3731 3732 record_now = 0; 3733 again: 3734 for (i = 0; i < nb_pt; i++) { 3735 port_id = (portid_t) portlist[i]; 3736 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3737 return; 3738 if (record_now) 3739 fwd_ports_ids[i] = port_id; 3740 } 3741 if (record_now == 0) { 3742 record_now = 1; 3743 goto again; 3744 } 3745 nb_cfg_ports = (portid_t) nb_pt; 3746 if (nb_fwd_ports != (portid_t) nb_pt) { 3747 printf("previous number of forwarding ports %u - changed to " 3748 "number of configured ports %u\n", 3749 (unsigned int) nb_fwd_ports, nb_pt); 3750 nb_fwd_ports = (portid_t) nb_pt; 3751 } 3752 } 3753 3754 /** 3755 * Parse the user input and obtain the list of forwarding ports 3756 * 3757 * @param[in] list 3758 * String containing the user input. User can specify 3759 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3760 * For example, if the user wants to use all the available 3761 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3762 * If the user wants to use only the ports 1,2 then the input 3763 * is 1,2. 3764 * valid characters are '-' and ',' 3765 * @param[out] values 3766 * This array will be filled with a list of port IDs 3767 * based on the user input 3768 * Note that duplicate entries are discarded and only the first 3769 * count entries in this array are port IDs and all the rest 3770 * will contain default values 3771 * @param[in] maxsize 3772 * This parameter denotes 2 things 3773 * 1) Number of elements in the values array 3774 * 2) Maximum value of each element in the values array 3775 * @return 3776 * On success, returns total count of parsed port IDs 3777 * On failure, returns 0 3778 */ 3779 static unsigned int 3780 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3781 { 3782 unsigned int count = 0; 3783 char *end = NULL; 3784 int min, max; 3785 int value, i; 3786 unsigned int marked[maxsize]; 3787 3788 if (list == NULL || values == NULL) 3789 return 0; 3790 3791 for (i = 0; i < (int)maxsize; i++) 3792 marked[i] = 0; 3793 3794 min = INT_MAX; 3795 3796 do { 3797 /*Remove the blank spaces if any*/ 3798 while (isblank(*list)) 3799 list++; 3800 if (*list == '\0') 3801 break; 3802 errno = 0; 3803 value = strtol(list, &end, 10); 3804 if (errno || end == NULL) 3805 return 0; 3806 if (value < 0 || value >= (int)maxsize) 3807 return 0; 3808 while (isblank(*end)) 3809 end++; 3810 if (*end == '-' && min == INT_MAX) { 3811 min = value; 3812 } else if ((*end == ',') || (*end == '\0')) { 3813 max = value; 3814 if (min == INT_MAX) 3815 min = value; 3816 for (i = min; i <= max; i++) { 3817 if (count < maxsize) { 3818 if (marked[i]) 3819 continue; 3820 values[count] = i; 3821 marked[i] = 1; 3822 count++; 3823 } 3824 } 3825 min = INT_MAX; 3826 } else 3827 return 0; 3828 list = end + 1; 3829 } while (*end != '\0'); 3830 3831 return count; 3832 } 3833 3834 void 3835 parse_fwd_portlist(const char *portlist) 3836 { 3837 unsigned int portcount; 3838 unsigned int portindex[RTE_MAX_ETHPORTS]; 3839 unsigned int i, valid_port_count = 0; 3840 3841 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3842 if (!portcount) 3843 
rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3844 3845 /* 3846 * Here we verify the validity of the ports 3847 * and thereby calculate the total number of 3848 * valid ports 3849 */ 3850 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3851 if (rte_eth_dev_is_valid_port(portindex[i])) { 3852 portindex[valid_port_count] = portindex[i]; 3853 valid_port_count++; 3854 } 3855 } 3856 3857 set_fwd_ports_list(portindex, valid_port_count); 3858 } 3859 3860 void 3861 set_fwd_ports_mask(uint64_t portmask) 3862 { 3863 unsigned int portlist[64]; 3864 unsigned int nb_pt; 3865 unsigned int i; 3866 3867 if (portmask == 0) { 3868 fprintf(stderr, "Invalid NULL mask of ports\n"); 3869 return; 3870 } 3871 nb_pt = 0; 3872 RTE_ETH_FOREACH_DEV(i) { 3873 if (! ((uint64_t)(1ULL << i) & portmask)) 3874 continue; 3875 portlist[nb_pt++] = i; 3876 } 3877 set_fwd_ports_list(portlist, nb_pt); 3878 } 3879 3880 void 3881 set_fwd_ports_number(uint16_t nb_pt) 3882 { 3883 if (nb_pt > nb_cfg_ports) { 3884 fprintf(stderr, 3885 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 3886 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3887 return; 3888 } 3889 nb_fwd_ports = (portid_t) nb_pt; 3890 printf("Number of forwarding ports set to %u\n", 3891 (unsigned int) nb_fwd_ports); 3892 } 3893 3894 int 3895 port_is_forwarding(portid_t port_id) 3896 { 3897 unsigned int i; 3898 3899 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3900 return -1; 3901 3902 for (i = 0; i < nb_fwd_ports; i++) { 3903 if (fwd_ports_ids[i] == port_id) 3904 return 1; 3905 } 3906 3907 return 0; 3908 } 3909 3910 void 3911 set_nb_pkt_per_burst(uint16_t nb) 3912 { 3913 if (nb > MAX_PKT_BURST) { 3914 fprintf(stderr, 3915 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 3916 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3917 return; 3918 } 3919 nb_pkt_per_burst = nb; 3920 printf("Number of packets per burst set to %u\n", 3921 (unsigned int) nb_pkt_per_burst); 3922 } 3923 3924 static const char * 3925 tx_split_get_name(enum tx_pkt_split split) 3926 { 3927 uint32_t i; 3928 3929 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3930 if (tx_split_name[i].split == split) 3931 return tx_split_name[i].name; 3932 } 3933 return NULL; 3934 } 3935 3936 void 3937 set_tx_pkt_split(const char *name) 3938 { 3939 uint32_t i; 3940 3941 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3942 if (strcmp(tx_split_name[i].name, name) == 0) { 3943 tx_pkt_split = tx_split_name[i].split; 3944 return; 3945 } 3946 } 3947 fprintf(stderr, "unknown value: \"%s\"\n", name); 3948 } 3949 3950 int 3951 parse_fec_mode(const char *name, uint32_t *fec_capa) 3952 { 3953 uint8_t i; 3954 3955 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3956 if (strcmp(fec_mode_name[i].name, name) == 0) { 3957 *fec_capa = 3958 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3959 return 0; 3960 } 3961 } 3962 return -1; 3963 } 3964 3965 void 3966 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3967 { 3968 unsigned int i, j; 3969 3970 printf("FEC capabilities:\n"); 3971 3972 for (i = 0; i < num; i++) { 3973 printf("%s : ", 3974 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3975 3976 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 3977 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3978 speed_fec_capa[i].capa) 3979 printf("%s ", fec_mode_name[j].name); 3980 } 3981 printf("\n"); 3982 } 3983 } 3984 3985 void 3986 show_rx_pkt_offsets(void) 3987 { 3988 uint32_t i, n; 3989 3990 n = rx_pkt_nb_offs; 3991 printf("Number of offsets: %u\n", n); 3992 if (n) { 
		printf("Segment offsets: ");
		for (i = 0; i != n - 1; i++)
			printf("%hu,", rx_pkt_seg_offsets[i]);
		printf("%hu\n", rx_pkt_seg_offsets[i]);
	}
}

void
set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
{
	unsigned int i;

	if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
		printf("nb offsets per RX packet=%u >= "
		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
		return;
	}

	/*
	 * No extra check here, the segment offsets will be checked by
	 * the PMD in the extended queue setup.
	 */
	for (i = 0; i < nb_offs; i++) {
		if (seg_offsets[i] >= UINT16_MAX) {
			printf("offset[%u]=%u >= UINT16_MAX - give up\n",
			       i, seg_offsets[i]);
			return;
		}
	}

	for (i = 0; i < nb_offs; i++)
		rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];

	rx_pkt_nb_offs = (uint8_t) nb_offs;
}

void
show_rx_pkt_segments(void)
{
	uint32_t i, n;

	n = rx_pkt_nb_segs;
	printf("Number of segments: %u\n", n);
	if (n) {
		printf("Segment sizes: ");
		for (i = 0; i != n - 1; i++)
			printf("%hu,", rx_pkt_seg_lengths[i]);
		printf("%hu\n", rx_pkt_seg_lengths[i]);
	}
}

void
set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
{
	unsigned int i;

	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
		printf("nb segments per RX packet=%u >= "
		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
		return;
	}

	/*
	 * No extra check here, the segment lengths will be checked by
	 * the PMD in the extended queue setup.
	 */
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] >= UINT16_MAX) {
			printf("length[%u]=%u >= UINT16_MAX - give up\n",
			       i, seg_lengths[i]);
			return;
		}
	}

	for (i = 0; i < nb_segs; i++)
		rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	rx_pkt_nb_segs = (uint8_t) nb_segs;
}

void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}

static bool
nb_segs_is_invalid(unsigned int nb_segs)
{
	uint16_t ring_size;
	uint16_t queue_id;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
			if (ret) {
				/* Port may not be initialized yet, can't say
				 * the port is invalid in this stage.
				 */
				continue;
			}
			if (ring_size < nb_segs) {
				printf("nb segments per TX packet=%u > TX "
				       "queue(%u) ring_size=%u - txpkts ignored\n",
				       nb_segs, queue_id, ring_size);
				return true;
			}
		}
	}

	return false;
}
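/*
 * Example: the ring-size check above and the length checks in
 * set_tx_pkt_segments() below back the "set txpkts" command, e.g.
 *   testpmd> set txpkts 64,128
 * requests two segments per transmitted packet (64 + 128 bytes). The
 * request is rejected if a TX ring cannot hold that many descriptors
 * per packet, if a segment exceeds the mbuf data size, or if the total
 * is shorter than an empty UDP/IPv4 packet (Ethernet header + 20 + 8).
 */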
void
set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned int i;

	/*
	 * For a single segment no capability check is performed:
	 * sending single-segment packets is such a basic capability
	 * that it is assumed to be always supported.
	 */
	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) {
		fprintf(stderr,
			"Tx segment count (%u) is not supported - txpkts ignored\n",
			nb_segs);
		return;
	}

	if (nb_segs > RTE_MAX_SEGS_PER_PKT) {
		fprintf(stderr,
			"Tx segment count (%u) is bigger than the maximum number of segments (%u)\n",
			nb_segs, RTE_MAX_SEGS_PER_PKT);
		return;
	}

	/*
	 * Check that each segment length does not exceed the mbuf data size.
	 * Check also that the total packet length is not smaller than the
	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
	 * 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > mbuf_data_size[0]) {
			fprintf(stderr,
				"length[%u]=%u > mbuf_data_size=%u - give up\n",
				i, seg_lengths[i], mbuf_data_size[0]);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
		fprintf(stderr, "total packet length=%u < %d - give up\n",
			(unsigned) tx_pkt_len,
			(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}

void
show_tx_pkt_times(void)
{
	printf("Interburst gap: %u\n", tx_pkt_times_inter);
	printf("Intraburst gap: %u\n", tx_pkt_times_intra);
}

void
set_tx_pkt_times(unsigned int *tx_times)
{
	tx_pkt_times_inter = tx_times[0];
	tx_pkt_times_intra = tx_times[1];
}

#ifdef RTE_LIB_GRO
void
setup_gro(const char *onoff, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "invalid port id %u\n", port_id);
		return;
	}
	if (test_done == 0) {
		fprintf(stderr,
			"Before enabling/disabling GRO, please stop forwarding first\n");
		return;
	}
	if (strcmp(onoff, "on") == 0) {
		if (gro_ports[port_id].enable != 0) {
			fprintf(stderr,
				"Port %u already has GRO enabled. Please disable GRO first\n",
				port_id);
			return;
		}
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
			gro_ports[port_id].param.max_flow_num =
				GRO_DEFAULT_FLOW_NUM;
			gro_ports[port_id].param.max_item_per_flow =
				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
		}
		gro_ports[port_id].enable = 1;
	} else {
		if (gro_ports[port_id].enable == 0) {
			fprintf(stderr,
				"GRO is already disabled on port %u\n",
				port_id);
			return;
		}
		gro_ports[port_id].enable = 0;
	}
}
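/*
 * Example: GRO is controlled from the testpmd command line, e.g.
 *   testpmd> set port 0 gro on
 *   testpmd> set gro flush 2
 * Both commands require forwarding to be stopped first, as enforced by
 * the test_done checks in setup_gro() above and setup_gro_flush_cycles()
 * below.
 */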
void
setup_gro_flush_cycles(uint8_t cycles)
{
	if (test_done == 0) {
		fprintf(stderr,
			"Before changing the flush interval for GRO, please stop forwarding first.\n");
		return;
	}

	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
			GRO_DEFAULT_FLUSH_CYCLES) {
		fprintf(stderr,
			"The flushing cycle must be in the range of 1 to %u. Reverting to the default value %u.\n",
			GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES);
		cycles = GRO_DEFAULT_FLUSH_CYCLES;
	}

	gro_flush_cycles = cycles;
}

void
show_gro(portid_t port_id)
{
	struct rte_gro_param *param;
	uint32_t max_pkts_num;

	param = &gro_ports[port_id].param;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Invalid port id %u.\n", port_id);
		return;
	}
	if (gro_ports[port_id].enable) {
		printf("GRO type: TCP/IPv4\n");
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			max_pkts_num = param->max_flow_num *
				param->max_item_per_flow;
		} else
			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
		printf("Max number of packets to perform GRO: %u\n",
		       max_pkts_num);
		printf("Flushing cycles: %u\n", gro_flush_cycles);
	} else
		printf("GRO is not enabled on port %u.\n", port_id);
}
#endif /* RTE_LIB_GRO */

#ifdef RTE_LIB_GSO
void
setup_gso(const char *mode, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "invalid port id %u\n", port_id);
		return;
	}
	if (strcmp(mode, "on") == 0) {
		if (test_done == 0) {
			fprintf(stderr,
				"Before enabling GSO, please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 1;
	} else if (strcmp(mode, "off") == 0) {
		if (test_done == 0) {
			fprintf(stderr,
				"Before disabling GSO, please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 0;
	}
}
#endif /* RTE_LIB_GSO */

char*
list_pkt_forwarding_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

char*
list_pkt_forwarding_retry_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			if (fwd_eng == &rx_only_engine)
				continue;
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) -
				strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) -
				strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
	struct fwd_engine *fwd_eng;
	unsigned i;

	i = 0;
	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
			printf("Set %s packet forwarding mode%s\n",
			       fwd_mode_name,
			       retry_enabled == 0 ?
"" : " with retry"); 4357 cur_fwd_eng = fwd_eng; 4358 return; 4359 } 4360 i++; 4361 } 4362 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 4363 } 4364 4365 void 4366 add_rx_dump_callbacks(portid_t portid) 4367 { 4368 struct rte_eth_dev_info dev_info; 4369 uint16_t queue; 4370 int ret; 4371 4372 if (port_id_is_invalid(portid, ENABLED_WARN)) 4373 return; 4374 4375 ret = eth_dev_info_get_print_err(portid, &dev_info); 4376 if (ret != 0) 4377 return; 4378 4379 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4380 if (!ports[portid].rx_dump_cb[queue]) 4381 ports[portid].rx_dump_cb[queue] = 4382 rte_eth_add_rx_callback(portid, queue, 4383 dump_rx_pkts, NULL); 4384 } 4385 4386 void 4387 add_tx_dump_callbacks(portid_t portid) 4388 { 4389 struct rte_eth_dev_info dev_info; 4390 uint16_t queue; 4391 int ret; 4392 4393 if (port_id_is_invalid(portid, ENABLED_WARN)) 4394 return; 4395 4396 ret = eth_dev_info_get_print_err(portid, &dev_info); 4397 if (ret != 0) 4398 return; 4399 4400 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4401 if (!ports[portid].tx_dump_cb[queue]) 4402 ports[portid].tx_dump_cb[queue] = 4403 rte_eth_add_tx_callback(portid, queue, 4404 dump_tx_pkts, NULL); 4405 } 4406 4407 void 4408 remove_rx_dump_callbacks(portid_t portid) 4409 { 4410 struct rte_eth_dev_info dev_info; 4411 uint16_t queue; 4412 int ret; 4413 4414 if (port_id_is_invalid(portid, ENABLED_WARN)) 4415 return; 4416 4417 ret = eth_dev_info_get_print_err(portid, &dev_info); 4418 if (ret != 0) 4419 return; 4420 4421 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4422 if (ports[portid].rx_dump_cb[queue]) { 4423 rte_eth_remove_rx_callback(portid, queue, 4424 ports[portid].rx_dump_cb[queue]); 4425 ports[portid].rx_dump_cb[queue] = NULL; 4426 } 4427 } 4428 4429 void 4430 remove_tx_dump_callbacks(portid_t portid) 4431 { 4432 struct rte_eth_dev_info dev_info; 4433 uint16_t queue; 4434 int ret; 4435 4436 if (port_id_is_invalid(portid, ENABLED_WARN)) 4437 return; 4438 4439 ret = eth_dev_info_get_print_err(portid, &dev_info); 4440 if (ret != 0) 4441 return; 4442 4443 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4444 if (ports[portid].tx_dump_cb[queue]) { 4445 rte_eth_remove_tx_callback(portid, queue, 4446 ports[portid].tx_dump_cb[queue]); 4447 ports[portid].tx_dump_cb[queue] = NULL; 4448 } 4449 } 4450 4451 void 4452 configure_rxtx_dump_callbacks(uint16_t verbose) 4453 { 4454 portid_t portid; 4455 4456 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4457 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4458 return; 4459 #endif 4460 4461 RTE_ETH_FOREACH_DEV(portid) 4462 { 4463 if (verbose == 1 || verbose > 2) 4464 add_rx_dump_callbacks(portid); 4465 else 4466 remove_rx_dump_callbacks(portid); 4467 if (verbose >= 2) 4468 add_tx_dump_callbacks(portid); 4469 else 4470 remove_tx_dump_callbacks(portid); 4471 } 4472 } 4473 4474 void 4475 set_verbose_level(uint16_t vb_level) 4476 { 4477 printf("Change verbose level from %u to %u\n", 4478 (unsigned int) verbose_level, (unsigned int) vb_level); 4479 verbose_level = vb_level; 4480 configure_rxtx_dump_callbacks(verbose_level); 4481 } 4482 4483 void 4484 vlan_extend_set(portid_t port_id, int on) 4485 { 4486 int diag; 4487 int vlan_offload; 4488 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4489 4490 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4491 return; 4492 4493 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4494 4495 if (on) { 4496 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4497 port_rx_offloads |= 
RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
	} else {
		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr,
			"rx_vlan_extend_set(port_id=%d, on=%d) failed diag=%d\n",
			port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr,
			"%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		fprintf(stderr,
			"%s(port_id=%d, queue_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	} else {
		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr,
			"%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_qinq_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
	} else {
		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr, "%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
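/*
 * Example: the VLAN offload setters above are reached through commands
 * such as
 *   testpmd> vlan set strip on 0
 *   testpmd> vlan set filter on 0
 *   testpmd> vlan set qinq_strip off 0
 * Each setter mirrors the result into ports[].dev_conf.rxmode.offloads
 * so that a later port reconfiguration keeps the selected offloads.
 */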
int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	fprintf(stderr,
		"rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed diag=%d\n",
		port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
	if (diag == 0)
		return;

	fprintf(stderr,
		"tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
		port_id, vlan_type, tp_id, diag);
}

void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;

	if (ports[port_id].dev_conf.txmode.offloads &
	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
		fprintf(stderr,
			"Error: QinQ insertion is enabled; disable it first.\n");
		return;
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
		fprintf(stderr,
			"Error: vlan insert is not supported by port %d\n",
			port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
	ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;
	if (vlan_id_is_invalid(vlan_id_outer))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
		fprintf(stderr,
			"Error: qinq insert not supported by port %d\n",
			port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = vlan_id;
	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}

void
tx_vlan_reset(portid_t port_id)
{
	ports[port_id].dev_conf.txmode.offloads &=
				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = 0;
	ports[port_id].tx_vlan_id_outer = 0;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}

void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ?
(rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 4753 return; 4754 4755 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 4756 fprintf(stderr, "map_value not in required range 0..%d\n", 4757 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 4758 return; 4759 } 4760 4761 if (!is_rx) { /* tx */ 4762 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 4763 map_value); 4764 if (ret) { 4765 fprintf(stderr, 4766 "failed to set tx queue stats mapping.\n"); 4767 return; 4768 } 4769 } else { /* rx */ 4770 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 4771 map_value); 4772 if (ret) { 4773 fprintf(stderr, 4774 "failed to set rx queue stats mapping.\n"); 4775 return; 4776 } 4777 } 4778 } 4779 4780 void 4781 set_xstats_hide_zero(uint8_t on_off) 4782 { 4783 xstats_hide_zero = on_off; 4784 } 4785 4786 void 4787 set_record_core_cycles(uint8_t on_off) 4788 { 4789 record_core_cycles = on_off; 4790 } 4791 4792 void 4793 set_record_burst_stats(uint8_t on_off) 4794 { 4795 record_burst_stats = on_off; 4796 } 4797 4798 static char* 4799 flowtype_to_str(uint16_t flow_type) 4800 { 4801 struct flow_type_info { 4802 char str[32]; 4803 uint16_t ftype; 4804 }; 4805 4806 uint8_t i; 4807 static struct flow_type_info flowtype_str_table[] = { 4808 {"raw", RTE_ETH_FLOW_RAW}, 4809 {"ipv4", RTE_ETH_FLOW_IPV4}, 4810 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4811 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4812 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4813 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4814 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4815 {"ipv6", RTE_ETH_FLOW_IPV6}, 4816 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4817 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4818 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4819 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4820 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4821 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4822 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 4823 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 4824 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 4825 {"port", RTE_ETH_FLOW_PORT}, 4826 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4827 {"geneve", RTE_ETH_FLOW_GENEVE}, 4828 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4829 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4830 {"gtpu", RTE_ETH_FLOW_GTPU}, 4831 }; 4832 4833 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4834 if (flowtype_str_table[i].ftype == flow_type) 4835 return flowtype_str_table[i].str; 4836 } 4837 4838 return NULL; 4839 } 4840 4841 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 4842 4843 static inline void 4844 print_fdir_mask(struct rte_eth_fdir_masks *mask) 4845 { 4846 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 4847 4848 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4849 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 4850 " tunnel_id: 0x%08x", 4851 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 4852 rte_be_to_cpu_32(mask->tunnel_id_mask)); 4853 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 4854 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 4855 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 4856 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 4857 4858 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 4859 rte_be_to_cpu_16(mask->src_port_mask), 4860 rte_be_to_cpu_16(mask->dst_port_mask)); 4861 4862 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4863 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 4864 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 4865 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 4866 
rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 4867 4868 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4869 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 4870 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 4871 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 4872 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 4873 } 4874 4875 printf("\n"); 4876 } 4877 4878 static inline void 4879 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4880 { 4881 struct rte_eth_flex_payload_cfg *cfg; 4882 uint32_t i, j; 4883 4884 for (i = 0; i < flex_conf->nb_payloads; i++) { 4885 cfg = &flex_conf->flex_set[i]; 4886 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 4887 printf("\n RAW: "); 4888 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 4889 printf("\n L2_PAYLOAD: "); 4890 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 4891 printf("\n L3_PAYLOAD: "); 4892 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 4893 printf("\n L4_PAYLOAD: "); 4894 else 4895 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 4896 for (j = 0; j < num; j++) 4897 printf(" %-5u", cfg->src_offset[j]); 4898 } 4899 printf("\n"); 4900 } 4901 4902 static inline void 4903 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4904 { 4905 struct rte_eth_fdir_flex_mask *mask; 4906 uint32_t i, j; 4907 char *p; 4908 4909 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4910 mask = &flex_conf->flex_mask[i]; 4911 p = flowtype_to_str(mask->flow_type); 4912 printf("\n %s:\t", p ? p : "unknown"); 4913 for (j = 0; j < num; j++) 4914 printf(" %02x", mask->mask[j]); 4915 } 4916 printf("\n"); 4917 } 4918 4919 static inline void 4920 print_fdir_flow_type(uint32_t flow_types_mask) 4921 { 4922 int i; 4923 char *p; 4924 4925 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4926 if (!(flow_types_mask & (1 << i))) 4927 continue; 4928 p = flowtype_to_str(i); 4929 if (p) 4930 printf(" %s", p); 4931 else 4932 printf(" unknown"); 4933 } 4934 printf("\n"); 4935 } 4936 4937 static int 4938 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4939 struct rte_eth_fdir_stats *fdir_stat) 4940 { 4941 int ret = -ENOTSUP; 4942 4943 #ifdef RTE_NET_I40E 4944 if (ret == -ENOTSUP) { 4945 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4946 if (!ret) 4947 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4948 } 4949 #endif 4950 #ifdef RTE_NET_IXGBE 4951 if (ret == -ENOTSUP) { 4952 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4953 if (!ret) 4954 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4955 } 4956 #endif 4957 switch (ret) { 4958 case 0: 4959 break; 4960 case -ENOTSUP: 4961 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 4962 port_id); 4963 break; 4964 default: 4965 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 4966 break; 4967 } 4968 return ret; 4969 } 4970 4971 void 4972 fdir_get_infos(portid_t port_id) 4973 { 4974 struct rte_eth_fdir_stats fdir_stat; 4975 struct rte_eth_fdir_info fdir_info; 4976 4977 static const char *fdir_stats_border = "########################"; 4978 4979 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4980 return; 4981 4982 memset(&fdir_info, 0, sizeof(fdir_info)); 4983 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4984 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4985 return; 4986 4987 printf("\n %s FDIR infos for port %-2d %s\n", 4988 fdir_stats_border, port_id, fdir_stats_border); 4989 printf(" MODE: "); 4990 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4991 printf(" PERFECT\n"); 4992 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4993 
printf(" PERFECT-MAC-VLAN\n"); 4994 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4995 printf(" PERFECT-TUNNEL\n"); 4996 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4997 printf(" SIGNATURE\n"); 4998 else 4999 printf(" DISABLE\n"); 5000 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 5001 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 5002 printf(" SUPPORTED FLOW TYPE: "); 5003 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 5004 } 5005 printf(" FLEX PAYLOAD INFO:\n"); 5006 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 5007 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 5008 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 5009 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 5010 fdir_info.flex_payload_unit, 5011 fdir_info.max_flex_payload_segment_num, 5012 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 5013 printf(" MASK: "); 5014 print_fdir_mask(&fdir_info.mask); 5015 if (fdir_info.flex_conf.nb_payloads > 0) { 5016 printf(" FLEX PAYLOAD SRC OFFSET:"); 5017 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5018 } 5019 if (fdir_info.flex_conf.nb_flexmasks > 0) { 5020 printf(" FLEX MASK CFG:"); 5021 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5022 } 5023 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 5024 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 5025 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 5026 fdir_info.guarant_spc, fdir_info.best_spc); 5027 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 5028 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 5029 " add: %-10"PRIu64" remove: %"PRIu64"\n" 5030 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 5031 fdir_stat.collision, fdir_stat.free, 5032 fdir_stat.maxhash, fdir_stat.maxlen, 5033 fdir_stat.add, fdir_stat.remove, 5034 fdir_stat.f_add, fdir_stat.f_remove); 5035 printf(" %s############################%s\n", 5036 fdir_stats_border, fdir_stats_border); 5037 } 5038 5039 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 5040 5041 void 5042 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 5043 { 5044 struct rte_port *port; 5045 struct rte_eth_fdir_flex_conf *flex_conf; 5046 int i, idx = 0; 5047 5048 port = &ports[port_id]; 5049 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5050 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 5051 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 5052 idx = i; 5053 break; 5054 } 5055 } 5056 if (i >= RTE_ETH_FLOW_MAX) { 5057 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 5058 idx = flex_conf->nb_flexmasks; 5059 flex_conf->nb_flexmasks++; 5060 } else { 5061 fprintf(stderr, 5062 "The flex mask table is full. 
Cannot set flex mask for flow_type(%u).\n",
				cfg->flow_type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_mask[idx],
			cfg,
			sizeof(struct rte_eth_fdir_flex_mask));
}

void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			fprintf(stderr,
				"The flex payload table is full. Cannot set flex payload for type(%u).\n",
				cfg->type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_set[idx],
			cfg,
			sizeof(struct rte_eth_flex_payload_cfg));
}

void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
#ifdef RTE_NET_IXGBE
	int diag;

	if (is_rx)
		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
	else
		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);

	if (diag == 0)
		return;
	fprintf(stderr,
		"rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
		is_rx ? "rx" : "tx", port_id, diag);
	return;
#endif
	fprintf(stderr, "VF %s setting not supported for port %d\n",
		is_rx ? "Rx" : "Tx", port_id);
	RTE_SET_USED(vf);
	RTE_SET_USED(on);
}

int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return 1;
	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
	    rate > link.link_speed) {
		fprintf(stderr,
			"Invalid rate value: %u, which is bigger than the link speed: %u\n",
			rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	fprintf(stderr,
		"rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
		port_id, diag);
	return diag;
}

int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag = -ENOTSUP;

	RTE_SET_USED(vf);
	RTE_SET_USED(rate);
	RTE_SET_USED(q_msk);

#ifdef RTE_NET_IXGBE
	if (diag == -ENOTSUP)
		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
						       q_msk);
#endif
#ifdef RTE_NET_BNXT
	if (diag == -ENOTSUP)
		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
#endif
	if (diag == 0)
		return diag;

	fprintf(stderr,
		"%s for port_id=%d failed diag=%d\n",
		__func__, port_id, diag);
	return diag;
}

/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
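 * For example, with MCAST_POOL_INC equal to 32, the first added address
 * grows the pool to 32 entries and the 33rd address grows it to 64.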
5188 * The address of the pool and the number of valid multicast MAC addresses 5189 * recorded in the pool are stored in the fields "mc_addr_pool" and 5190 * "mc_addr_nb" of the "rte_port" data structure. 5191 * 5192 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 5193 * to be supplied a contiguous array of multicast MAC addresses. 5194 * To comply with this constraint, the set of multicast addresses recorded 5195 * into the pool are systematically compacted at the beginning of the pool. 5196 * Hence, when a multicast address is removed from the pool, all following 5197 * addresses, if any, are copied back to keep the set contiguous. 5198 */ 5199 #define MCAST_POOL_INC 32 5200 5201 static int 5202 mcast_addr_pool_extend(struct rte_port *port) 5203 { 5204 struct rte_ether_addr *mc_pool; 5205 size_t mc_pool_size; 5206 5207 /* 5208 * If a free entry is available at the end of the pool, just 5209 * increment the number of recorded multicast addresses. 5210 */ 5211 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5212 port->mc_addr_nb++; 5213 return 0; 5214 } 5215 5216 /* 5217 * [re]allocate a pool with MCAST_POOL_INC more entries. 5218 * The previous test guarantees that port->mc_addr_nb is a multiple 5219 * of MCAST_POOL_INC. 5220 */ 5221 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 5222 MCAST_POOL_INC); 5223 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 5224 mc_pool_size); 5225 if (mc_pool == NULL) { 5226 fprintf(stderr, 5227 "allocation of pool of %u multicast addresses failed\n", 5228 port->mc_addr_nb + MCAST_POOL_INC); 5229 return -ENOMEM; 5230 } 5231 5232 port->mc_addr_pool = mc_pool; 5233 port->mc_addr_nb++; 5234 return 0; 5235 5236 } 5237 5238 static void 5239 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 5240 { 5241 if (mcast_addr_pool_extend(port) != 0) 5242 return; 5243 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 5244 } 5245 5246 static void 5247 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 5248 { 5249 port->mc_addr_nb--; 5250 if (addr_idx == port->mc_addr_nb) { 5251 /* No need to recompact the set of multicast addresses. */ 5252 if (port->mc_addr_nb == 0) { 5253 /* free the pool of multicast addresses. */ 5254 free(port->mc_addr_pool); 5255 port->mc_addr_pool = NULL; 5256 } 5257 return; 5258 } 5259 memmove(&port->mc_addr_pool[addr_idx], 5260 &port->mc_addr_pool[addr_idx + 1], 5261 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 5262 } 5263 5264 static int 5265 eth_port_multicast_addr_list_set(portid_t port_id) 5266 { 5267 struct rte_port *port; 5268 int diag; 5269 5270 port = &ports[port_id]; 5271 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 5272 port->mc_addr_nb); 5273 if (diag < 0) 5274 fprintf(stderr, 5275 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 5276 port_id, port->mc_addr_nb, diag); 5277 5278 return diag; 5279 } 5280 5281 void 5282 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 5283 { 5284 struct rte_port *port; 5285 uint32_t i; 5286 5287 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5288 return; 5289 5290 port = &ports[port_id]; 5291 5292 /* 5293 * Check that the added multicast MAC address is not already recorded 5294 * in the pool of multicast addresses. 
5295 */ 5296 for (i = 0; i < port->mc_addr_nb; i++) { 5297 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 5298 fprintf(stderr, 5299 "multicast address already filtered by port\n"); 5300 return; 5301 } 5302 } 5303 5304 mcast_addr_pool_append(port, mc_addr); 5305 if (eth_port_multicast_addr_list_set(port_id) < 0) 5306 /* Rollback on failure, remove the address from the pool */ 5307 mcast_addr_pool_remove(port, i); 5308 } 5309 5310 void 5311 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 5312 { 5313 struct rte_port *port; 5314 uint32_t i; 5315 5316 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5317 return; 5318 5319 port = &ports[port_id]; 5320 5321 /* 5322 * Search the pool of multicast MAC addresses for the removed address. 5323 */ 5324 for (i = 0; i < port->mc_addr_nb; i++) { 5325 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 5326 break; 5327 } 5328 if (i == port->mc_addr_nb) { 5329 fprintf(stderr, "multicast address not filtered by port %d\n", 5330 port_id); 5331 return; 5332 } 5333 5334 mcast_addr_pool_remove(port, i); 5335 if (eth_port_multicast_addr_list_set(port_id) < 0) 5336 /* Rollback on failure, add the address back into the pool */ 5337 mcast_addr_pool_append(port, mc_addr); 5338 } 5339 5340 void 5341 port_dcb_info_display(portid_t port_id) 5342 { 5343 struct rte_eth_dcb_info dcb_info; 5344 uint16_t i; 5345 int ret; 5346 static const char *border = "================"; 5347 5348 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5349 return; 5350 5351 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 5352 if (ret) { 5353 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 5354 port_id); 5355 return; 5356 } 5357 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 5358 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 5359 printf("\n TC : "); 5360 for (i = 0; i < dcb_info.nb_tcs; i++) 5361 printf("\t%4d", i); 5362 printf("\n Priority : "); 5363 for (i = 0; i < dcb_info.nb_tcs; i++) 5364 printf("\t%4d", dcb_info.prio_tc[i]); 5365 printf("\n BW percent :"); 5366 for (i = 0; i < dcb_info.nb_tcs; i++) 5367 printf("\t%4d%%", dcb_info.tc_bws[i]); 5368 printf("\n RXQ base : "); 5369 for (i = 0; i < dcb_info.nb_tcs; i++) 5370 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 5371 printf("\n RXQ number :"); 5372 for (i = 0; i < dcb_info.nb_tcs; i++) 5373 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 5374 printf("\n TXQ base : "); 5375 for (i = 0; i < dcb_info.nb_tcs; i++) 5376 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 5377 printf("\n TXQ number :"); 5378 for (i = 0; i < dcb_info.nb_tcs; i++) 5379 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 5380 printf("\n"); 5381 } 5382 5383 uint8_t * 5384 open_file(const char *file_path, uint32_t *size) 5385 { 5386 int fd = open(file_path, O_RDONLY); 5387 off_t pkg_size; 5388 uint8_t *buf = NULL; 5389 int ret = 0; 5390 struct stat st_buf; 5391 5392 if (size) 5393 *size = 0; 5394 5395 if (fd == -1) { 5396 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5397 return buf; 5398 } 5399 5400 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 5401 close(fd); 5402 fprintf(stderr, "%s: File operations failed\n", __func__); 5403 return buf; 5404 } 5405 5406 pkg_size = st_buf.st_size; 5407 if (pkg_size < 0) { 5408 close(fd); 5409 fprintf(stderr, "%s: File operations failed\n", __func__); 5410 return buf; 5411 } 5412 5413 buf = (uint8_t *)malloc(pkg_size); 5414 if (!buf) { 5415 close(fd); 5416 fprintf(stderr, "%s: Failed to 
malloc memory\n", __func__);
		return buf;
	}

	ret = read(fd, buf, pkg_size);
	if (ret != pkg_size) {
		close(fd);
		fprintf(stderr, "%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		fprintf(stderr, "%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}

int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}

void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_NET_I40E
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("No queue region has been set before\n");

	printf("\n %s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u\n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u\n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}

void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	int32_t i, rc, num_macs = 0;

	if (eth_dev_info_get_print_err(port_id, &dev_info))
		return;

	struct rte_ether_addr addr[dev_info.max_mac_addrs];
	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
	if (rc < 0)
		return;

	for (i = 0; i < rc; i++) {
		/* skip zero address */
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %d\n", num_macs);

	for (i = 0; i < rc; i++) {
		/* skip zero address */
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
		printf(" %s\n", buf);
	}
}

void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of multicast MAC addresses added: %d\n", port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
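		/* Format and print each recorded multicast address. */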
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf(" %s\n", buf);
	}
}
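/*
 * Usage sketch (illustrative only, compiled out): shows how the
 * port-list parsing and forwarding-port helpers above compose. The
 * function name is hypothetical and not part of testpmd.
 */
#if 0
static void
example_select_fwd_ports(void)
{
	unsigned int ids[RTE_MAX_ETHPORTS];
	unsigned int n;

	/* Accepts "0-3", "0,1,2,3", "1-2,5" and similar forms. */
	n = parse_port_list("0-1,3", ids, RTE_MAX_ETHPORTS);
	if (n == 0) {
		fprintf(stderr, "invalid port list\n");
		return;
	}

	/* Records ids[0..n-1] as the set of forwarding ports. */
	set_fwd_ports_list(ids, n);
}
#endif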