/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_mtr.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <rte_hexdump.h>

#include "testpmd.h"
#include "cmdline_mtr.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
	{ "none", 0 },
	{ "eth", RTE_ETH_RSS_ETH },
	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
	{ "vlan", RTE_ETH_RSS_VLAN },
	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
	{ "ipv4", RTE_ETH_RSS_IPV4 },
	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", RTE_ETH_RSS_IPV6 },
	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
	{ "port", RTE_ETH_RSS_PORT },
	{ "vxlan", RTE_ETH_RSS_VXLAN },
	{ "geneve", RTE_ETH_RSS_GENEVE },
	{ "nvgre", RTE_ETH_RSS_NVGRE },
	{ "ip", RTE_ETH_RSS_IP },
	{ "udp", RTE_ETH_RSS_UDP },
	{ "tcp", RTE_ETH_RSS_TCP },
	{ "sctp", RTE_ETH_RSS_SCTP },
	{ "tunnel", RTE_ETH_RSS_TUNNEL },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
	{ "esp", RTE_ETH_RSS_ESP },
	{ "ah", RTE_ETH_RSS_AH },
	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
	{ "pfcp", RTE_ETH_RSS_PFCP },
	{ "pppoe", RTE_ETH_RSS_PPPOE },
	{ "gtpu", RTE_ETH_RSS_GTPU },
	{ "ecpri", RTE_ETH_RSS_ECPRI },
	{ "mpls", RTE_ETH_RSS_MPLS },
	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
	{ NULL, 0 },
};

static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{
		.mode = RTE_ETH_FEC_NOFEC,
		.name = "off",
	},
	{
		.mode = RTE_ETH_FEC_AUTO,
		.name = "auto",
	},
	{
		.mode = RTE_ETH_FEC_BASER,
		.name = "baser",
	},
	{
		.mode = RTE_ETH_FEC_RS,
		.name = "rs",
	},
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

static void
nic_xstats_display_periodic(portid_t port_id)
{
	struct xstat_display_info *xstats_info;
	uint64_t *prev_values, *curr_values;
	uint64_t diff_value, value_rate;
	struct timespec cur_time;
	uint64_t *ids_supp;
	size_t ids_supp_sz;
	uint64_t diff_ns;
	unsigned int i;
	int rc;

	xstats_info = &ports[port_id].xstats_info;

	ids_supp_sz = xstats_info->ids_supp_sz;
	if (ids_supp_sz == 0)
		return;

	printf("\n");

	ids_supp = xstats_info->ids_supp;
	prev_values = xstats_info->prev_values;
	curr_values = xstats_info->curr_values;

	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
				      ids_supp_sz);
	if (rc != (int)ids_supp_sz) {
		fprintf(stderr,
			"Failed to get values of %zu xstats for port %u - return code %d\n",
			ids_supp_sz, port_id, rc);
		return;
	}

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (xstats_info->prev_ns != 0)
			diff_ns = ns - xstats_info->prev_ns;
		xstats_info->prev_ns = ns;
	}

	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
	for (i = 0; i < ids_supp_sz; i++) {
		diff_value = (curr_values[i] > prev_values[i]) ?
			     (curr_values[i] - prev_values[i]) : 0;
		prev_values[i] = curr_values[i];
		value_rate = diff_ns > 0 ?
				(double)diff_value / diff_ns * NS_PER_SEC : 0;

		printf(" %-25s%12"PRIu64" %15"PRIu64"\n",
		       xstats_display[i].name, curr_values[i], value_rate);
	}
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
		 diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
	printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		       (stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		       (stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		  (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		  (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
			(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
			(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		  (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		  (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	if (xstats_display_num > 0)
		nic_xstats_display_periodic(port_id);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset stats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr,
			"%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		fprintf(stderr, "Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		fprintf(stderr, "Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		fprintf(stderr, "Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset xstats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr, "%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
}

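/*
 * Usage sketch (illustrative, not part of the original file): the stats
 * helpers above back testpmd's interactive commands, e.g.
 *
 *   testpmd> show port stats 0      - nic_stats_display(0)
 *   testpmd> show port xstats 0     - nic_xstats_display(0)
 *   testpmd> clear port stats 0     - nic_stats_clear(0)
 *
 * The exact command-to-handler wiring lives in the cmdline modules.
 */
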
static const char *
get_queue_state_name(uint8_t queue_state)
{
	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
		return "stopped";
	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
		return "started";
	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return "hairpin";
	else
		return "unknown";
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
	if (qinfo.rx_buf_size != 0)
		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
525 " (per queue)" : ""); 526 527 printf("\n"); 528 } 529 530 static int bus_match_all(const struct rte_bus *bus, const void *data) 531 { 532 RTE_SET_USED(bus); 533 RTE_SET_USED(data); 534 return 0; 535 } 536 537 static void 538 device_infos_display_speeds(uint32_t speed_capa) 539 { 540 printf("\n\tDevice speed capability:"); 541 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 542 printf(" Autonegotiate (all speeds)"); 543 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 544 printf(" Disable autonegotiate (fixed speed) "); 545 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 546 printf(" 10 Mbps half-duplex "); 547 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 548 printf(" 10 Mbps full-duplex "); 549 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 550 printf(" 100 Mbps half-duplex "); 551 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 552 printf(" 100 Mbps full-duplex "); 553 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 554 printf(" 1 Gbps "); 555 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 556 printf(" 2.5 Gbps "); 557 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 558 printf(" 5 Gbps "); 559 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 560 printf(" 10 Gbps "); 561 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 562 printf(" 20 Gbps "); 563 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 564 printf(" 25 Gbps "); 565 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 566 printf(" 40 Gbps "); 567 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 568 printf(" 50 Gbps "); 569 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 570 printf(" 56 Gbps "); 571 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 572 printf(" 100 Gbps "); 573 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 574 printf(" 200 Gbps "); 575 } 576 577 void 578 device_infos_display(const char *identifier) 579 { 580 static const char *info_border = "*********************"; 581 struct rte_bus *start = NULL, *next; 582 struct rte_dev_iterator dev_iter; 583 char name[RTE_ETH_NAME_MAX_LEN]; 584 struct rte_ether_addr mac_addr; 585 struct rte_device *dev; 586 struct rte_devargs da; 587 portid_t port_id; 588 struct rte_eth_dev_info dev_info; 589 char devstr[128]; 590 591 memset(&da, 0, sizeof(da)); 592 if (!identifier) 593 goto skip_parse; 594 595 if (rte_devargs_parsef(&da, "%s", identifier)) { 596 fprintf(stderr, "cannot parse identifier\n"); 597 return; 598 } 599 600 skip_parse: 601 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 602 603 start = next; 604 if (identifier && da.bus != next) 605 continue; 606 607 /* Skip buses that don't have iterate method */ 608 if (!next->dev_iterate) 609 continue; 610 611 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 612 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 613 614 if (!dev->driver) 615 continue; 616 /* Check for matching device if identifier is present */ 617 if (identifier && 618 strncmp(da.name, dev->name, strlen(dev->name))) 619 continue; 620 printf("\n%s Infos for device %s %s\n", 621 info_border, dev->name, info_border); 622 printf("Bus name: %s", dev->bus->name); 623 printf("\nDriver name: %s", dev->driver->name); 624 printf("\nDevargs: %s", 625 dev->devargs ? 
			       dev->devargs->args : "");
			printf("\nConnect to socket: %d", dev->numa_node);
			printf("\n");

			/* List ports with matching device name */
			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
				printf("\n\tPort id: %-2d", port_id);
				if (eth_macaddr_get_print_err(port_id,
							      &mac_addr) == 0)
					print_ethaddr("\n\tMAC address: ",
						      &mac_addr);
				rte_eth_dev_get_name_by_port(port_id, name);
				printf("\n\tDevice name: %s", name);
				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
					device_infos_display_speeds(dev_info.speed_capa);
				printf("\n");
			}
		}
	}
	rte_devargs_reset(&da);
}

/* Print the name of each capability bit set in the 64-bit mask. */
static void
print_dev_capabilities(uint64_t capabilities)
{
	uint64_t single_capa;
	int begin;
	int end;
	int bit;

	if (capabilities == 0)
		return;

	begin = __builtin_ctzll(capabilities);
	end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities);

	single_capa = 1ULL << begin;
	for (bit = begin; bit < end; bit++) {
		if (capabilities & single_capa)
			printf(" %s",
			       rte_eth_dev_capability_name(single_capa));
		single_capa <<= 1;
	}
}

void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	uint16_t mtu;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;
	char fw_version[ETHDEV_FWVERS_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
		print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);

	if (rte_eth_dev_fw_version_get(port_id, fw_version,
				       ETHDEV_FWVERS_LEN) == 0)
		printf("\nFirmware-version: %s", fw_version);
	else
		printf("\nFirmware-version: %s", "not available");

	if (dev_info.device->devargs && dev_info.device->devargs->args)
		printf("\nDevargs: %s", dev_info.device->devargs->args);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id], 0);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
	       ("On") : ("Off"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ?
"enabled" : "disabled"); 738 printf("Maximum number of MAC addresses: %u\n", 739 (unsigned int)(port->dev_info.max_mac_addrs)); 740 printf("Maximum number of MAC addresses of hash filtering: %u\n", 741 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 742 743 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 744 if (vlan_offload >= 0){ 745 printf("VLAN offload: \n"); 746 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 747 printf(" strip on, "); 748 else 749 printf(" strip off, "); 750 751 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 752 printf("filter on, "); 753 else 754 printf("filter off, "); 755 756 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 757 printf("extend on, "); 758 else 759 printf("extend off, "); 760 761 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 762 printf("qinq strip on\n"); 763 else 764 printf("qinq strip off\n"); 765 } 766 767 if (dev_info.hash_key_size > 0) 768 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 769 if (dev_info.reta_size > 0) 770 printf("Redirection table size: %u\n", dev_info.reta_size); 771 if (!dev_info.flow_type_rss_offloads) 772 printf("No RSS offload flow type is supported.\n"); 773 else { 774 uint16_t i; 775 char *p; 776 777 printf("Supported RSS offload flow types:\n"); 778 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 779 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 780 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 781 continue; 782 p = flowtype_to_str(i); 783 if (p) 784 printf(" %s\n", p); 785 else 786 printf(" user defined %d\n", i); 787 } 788 } 789 790 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 791 printf("Maximum configurable length of RX packet: %u\n", 792 dev_info.max_rx_pktlen); 793 printf("Maximum configurable size of LRO aggregated packet: %u\n", 794 dev_info.max_lro_pkt_size); 795 if (dev_info.max_vfs) 796 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 797 if (dev_info.max_vmdq_pools) 798 printf("Maximum number of VMDq pools: %u\n", 799 dev_info.max_vmdq_pools); 800 801 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 802 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 803 printf("Max possible number of RXDs per queue: %hu\n", 804 dev_info.rx_desc_lim.nb_max); 805 printf("Min possible number of RXDs per queue: %hu\n", 806 dev_info.rx_desc_lim.nb_min); 807 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 808 809 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 810 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 811 printf("Max possible number of TXDs per queue: %hu\n", 812 dev_info.tx_desc_lim.nb_max); 813 printf("Min possible number of TXDs per queue: %hu\n", 814 dev_info.tx_desc_lim.nb_min); 815 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 816 printf("Max segment number per packet: %hu\n", 817 dev_info.tx_desc_lim.nb_seg_max); 818 printf("Max segment number per MTU/TSO: %hu\n", 819 dev_info.tx_desc_lim.nb_mtu_seg_max); 820 821 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 822 print_dev_capabilities(dev_info.dev_capa); 823 printf(" )\n"); 824 /* Show switch info only if valid switch domain and port id is set */ 825 if (dev_info.switch_info.domain_id != 826 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 827 if (dev_info.switch_info.name) 828 printf("Switch name: %s\n", dev_info.switch_info.name); 829 830 printf("Switch domain Id: %u\n", 831 dev_info.switch_info.domain_id); 832 printf("Switch Port Id: %u\n", 833 
		       dev_info.switch_info.port_id);
		if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0)
			printf("Switch Rx domain: %u\n",
			       dev_info.switch_info.rx_domain);
	}
}

void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
	       "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n",
	       port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name,
	       dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
	       rte_eth_link_speed_to_str(link.link_speed));
}

void
port_eeprom_display(portid_t port_id)
{
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
	if (len_eeprom < 0) {
		switch (len_eeprom) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n",
				len_eeprom);
			break;
		}
		return;
	}

	char buf[len_eeprom];
	einfo.offset = 0;
	einfo.length = len_eeprom;
	einfo.data = buf;

	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
			break;
		}
		return;
	}
	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
}

void
port_module_eeprom_display(portid_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get module EEPROM: %d\n",
				ret);
			break;
		}
		return;
	}

	char buf[minfo.eeprom_len];
	einfo.offset = 0;
	einfo.length = minfo.eeprom_len;
	einfo.data = buf;

	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get module EEPROM: %d\n",
				ret);
			break;
		}
		return;
	}

	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		fprintf(stderr, "Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;
	uint64_t pci_len;

	if (reg_off & 0x3) {
		fprintf(stderr,
			"Port register offset 0x%X not aligned on a 4-byte boundary\n",
			(unsigned int)reg_off);
		return 1;
	}

	if (!ports[port_id].dev_info.device) {
		fprintf(stderr, "Invalid device\n");
		return 0;
	}

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
	} else {
		fprintf(stderr, "Not a PCI device\n");
		return 1;
	}

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		fprintf(stderr,
			"Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n",
			port_id, (unsigned int)reg_off, (unsigned int)reg_off,
			pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n",
	       bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n",
			(int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n",
			(unsigned)value, (unsigned)value,
			(unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

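/*
 * Example (illustrative, not part of the original file): the register
 * helpers above implement a read-modify-write on 4-byte aligned PCI
 * BAR offsets. For instance, writing value 0x5 into bits [2..4] of the
 * register at offset 0x10 of port 0:
 *
 *   port_reg_bit_field_set(0, 0x10, 2, 4, 0x5);
 *
 * validates the offset and bit positions, checks 0x5 against the 3-bit
 * field maximum (0x7), clears bits 2..4, ORs in (0x5 << 2), writes the
 * result back and prints the new register value.
 */
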
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	struct rte_port *port = &ports[port_id];
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (port->need_reconfig == 0) {
		diag = rte_eth_dev_set_mtu(port_id, mtu);
		if (diag != 0) {
			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
			return;
		}
	}

	port->dev_conf.rxmode.mtu = mtu;
}

/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type;

	switch (tunnel->type) {
	default:
		type = "unknown";
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		type = "gre";
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		type = "nvgre";
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		type = "geneve";
		break;
	}

	return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
		       port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
		       port_id, tunnel_id);
	}
}

void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else if (!strcmp(ops->type, "gre"))
		type = RTE_FLOW_ITEM_TYPE_GRE;
	else if (!strcmp(ops->type, "nvgre"))
		type = RTE_FLOW_ITEM_TYPE_NVGRE;
	else if (!strcmp(ops->type, "geneve"))
		type = RTE_FLOW_ITEM_TYPE_GENEVE;
	else {
		fprintf(stderr, "cannot offload \"%s\" tunnel type\n",
			ops->type);
		return;
	}
	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->tunnel.type == type)
			break;
	}
	if (!flt) {
		flt = calloc(1, sizeof(*flt));
		if (!flt) {
			fprintf(stderr, "failed to allocate port flt object\n");
			return;
		}
		flt->tunnel.type = type;
		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
			  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
	}
	printf("port %d: flow tunnel #%u type %s\n",
	       port_id, flt->id, ops->type);
}

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n",
		__func__, error->type, errstr,
		error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					 error->cause), buf) : "",
		error->message ?
error->message : "(no stated reason)", 1451 rte_strerror(err)); 1452 return -err; 1453 } 1454 1455 static void 1456 rss_config_display(struct rte_flow_action_rss *rss_conf) 1457 { 1458 uint8_t i; 1459 1460 if (rss_conf == NULL) { 1461 fprintf(stderr, "Invalid rule\n"); 1462 return; 1463 } 1464 1465 printf("RSS:\n" 1466 " queues:"); 1467 if (rss_conf->queue_num == 0) 1468 printf(" none"); 1469 for (i = 0; i < rss_conf->queue_num; i++) 1470 printf(" %d", rss_conf->queue[i]); 1471 printf("\n"); 1472 1473 printf(" function: "); 1474 switch (rss_conf->func) { 1475 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1476 printf("default\n"); 1477 break; 1478 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1479 printf("toeplitz\n"); 1480 break; 1481 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1482 printf("simple_xor\n"); 1483 break; 1484 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1485 printf("symmetric_toeplitz\n"); 1486 break; 1487 default: 1488 printf("Unknown function\n"); 1489 return; 1490 } 1491 1492 printf(" types:\n"); 1493 if (rss_conf->types == 0) { 1494 printf(" none\n"); 1495 return; 1496 } 1497 for (i = 0; rss_type_table[i].str; i++) { 1498 if ((rss_conf->types & 1499 rss_type_table[i].rss_type) == 1500 rss_type_table[i].rss_type && 1501 rss_type_table[i].rss_type != 0) 1502 printf(" %s\n", rss_type_table[i].str); 1503 } 1504 } 1505 1506 static struct port_indirect_action * 1507 action_get_by_id(portid_t port_id, uint32_t id) 1508 { 1509 struct rte_port *port; 1510 struct port_indirect_action **ppia; 1511 struct port_indirect_action *pia = NULL; 1512 1513 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1514 port_id == (portid_t)RTE_PORT_ALL) 1515 return NULL; 1516 port = &ports[port_id]; 1517 ppia = &port->actions_list; 1518 while (*ppia) { 1519 if ((*ppia)->id == id) { 1520 pia = *ppia; 1521 break; 1522 } 1523 ppia = &(*ppia)->next; 1524 } 1525 if (!pia) 1526 fprintf(stderr, 1527 "Failed to find indirect action #%u on port %u\n", 1528 id, port_id); 1529 return pia; 1530 } 1531 1532 static int 1533 action_alloc(portid_t port_id, uint32_t id, 1534 struct port_indirect_action **action) 1535 { 1536 struct rte_port *port; 1537 struct port_indirect_action **ppia; 1538 struct port_indirect_action *pia = NULL; 1539 1540 *action = NULL; 1541 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1542 port_id == (portid_t)RTE_PORT_ALL) 1543 return -EINVAL; 1544 port = &ports[port_id]; 1545 if (id == UINT32_MAX) { 1546 /* taking first available ID */ 1547 if (port->actions_list) { 1548 if (port->actions_list->id == UINT32_MAX - 1) { 1549 fprintf(stderr, 1550 "Highest indirect action ID is already assigned, delete it first\n"); 1551 return -ENOMEM; 1552 } 1553 id = port->actions_list->id + 1; 1554 } else { 1555 id = 0; 1556 } 1557 } 1558 pia = calloc(1, sizeof(*pia)); 1559 if (!pia) { 1560 fprintf(stderr, 1561 "Allocation of port %u indirect action failed\n", 1562 port_id); 1563 return -ENOMEM; 1564 } 1565 ppia = &port->actions_list; 1566 while (*ppia && (*ppia)->id > id) 1567 ppia = &(*ppia)->next; 1568 if (*ppia && (*ppia)->id == id) { 1569 fprintf(stderr, 1570 "Indirect action #%u is already assigned, delete it first\n", 1571 id); 1572 free(pia); 1573 return -EINVAL; 1574 } 1575 pia->next = *ppia; 1576 pia->id = id; 1577 *ppia = pia; 1578 *action = pia; 1579 return 0; 1580 } 1581 1582 /** Create indirect action */ 1583 int 1584 port_action_handle_create(portid_t port_id, uint32_t id, 1585 const struct rte_flow_indir_action_conf *conf, 1586 const struct rte_flow_action *action) 1587 { 1588 struct port_indirect_action *pia; 
	int ret;
	struct rte_flow_error error;
	struct rte_port *port;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	ret = action_alloc(port_id, id, &pia);
	if (ret)
		return ret;

	port = &ports[port_id];

	if (conf->transfer)
		port_id = port->flow_transfer_proxy;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
		struct rte_flow_action_age *age =
			(struct rte_flow_action_age *)(uintptr_t)(action->conf);

		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
		age->context = &pia->age_type;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
		struct rte_flow_action_conntrack *ct =
			(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);

		memcpy(ct, &conntrack_context, sizeof(*ct));
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
						    &error);
	if (!pia->handle) {
		uint32_t destroy_id = pia->id;
		port_action_handle_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	pia->type = action->type;
	pia->transfer = conf->transfer;
	printf("Indirect action #%u created\n", pia->id);
	return 0;
}

/** Destroy indirect action */
int
port_action_handle_destroy(portid_t port_id,
			   uint32_t n,
			   const uint32_t *actions)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_indirect_action *pia = *tmp;
			portid_t port_id_eff = port_id;

			if (actions[i] != pia->id)
				continue;

			if (pia->transfer)
				port_id_eff = port->flow_transfer_proxy;

			if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
			    port_id_eff == (portid_t)RTE_PORT_ALL)
				return -EINVAL;

			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));

			if (pia->handle && rte_flow_action_handle_destroy(
					port_id_eff, pia->handle, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			*tmp = pia->next;
			printf("Indirect action #%u destroyed\n", pia->id);
			free(pia);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Get indirect action by port + id */
struct rte_flow_action_handle *
port_action_handle_get_by_id(portid_t port_id, uint32_t id)
{
	struct port_indirect_action *pia = action_get_by_id(port_id, id);

	return (pia) ?
		pia->handle : NULL;
}

/** Update indirect action */
int
port_action_handle_update(portid_t port_id, uint32_t id,
			  const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_flow_action_handle *action_handle;
	struct port_indirect_action *pia;
	struct rte_port *port;
	const void *update;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	port = &ports[port_id];

	action_handle = port_action_handle_get_by_id(port_id, id);
	if (!action_handle)
		return -EINVAL;
	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		update = action->conf;
		break;
	default:
		update = action;
		break;
	}

	if (pia->transfer)
		port_id = port->flow_transfer_proxy;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	if (rte_flow_action_handle_update(port_id, action_handle, update,
					  &error)) {
		return port_flow_complain(&error);
	}
	printf("Indirect action #%u updated\n", id);
	return 0;
}

int
port_action_handle_query(portid_t port_id, uint32_t id)
{
	struct rte_flow_error error;
	struct port_indirect_action *pia;
	union {
		struct rte_flow_query_count count;
		struct rte_flow_query_age age;
		struct rte_flow_action_conntrack ct;
	} query;
	portid_t port_id_eff = port_id;
	struct rte_port *port;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	port = &ports[port_id];

	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		fprintf(stderr,
			"Indirect action %u (type: %d) on port %u doesn't support query\n",
			id, pia->type, port_id);
		return -ENOTSUP;
	}

	if (pia->transfer)
		port_id_eff = port->flow_transfer_proxy;

	if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
	    port_id_eff == (portid_t)RTE_PORT_ALL)
		return -EINVAL;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_action_handle_query(port_id_eff, pia->handle, &query,
					 &error))
		return port_flow_complain(&error);
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
		printf("Indirect AGE action:\n"
		       " aged: %u\n"
		       " sec_since_last_hit_valid: %u\n"
		       " sec_since_last_hit: %" PRIu32 "\n",
		       query.age.aged,
		       query.age.sec_since_last_hit_valid,
		       query.age.sec_since_last_hit);
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("Indirect COUNT action:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		printf("Conntrack Context:\n"
		       " Peer: %u, Flow dir: %s, Enable: %u\n"
		       " Live: %u, SACK: %u, CACK: %u\n"
		       " Packet dir: %s, Liberal: %u, State: %u\n"
		       " Factor: %u, Retrans: %u, TCP flags: %u\n"
		       " Last Seq: %u, Last ACK: %u\n"
		       " Last Win: %u, Last End: %u\n",
		       query.ct.peer_port,
		       query.ct.is_original_dir ? "Original" : "Reply",
		       query.ct.enable, query.ct.live_connection,
		       query.ct.selective_ack, query.ct.challenge_ack_passed,
		       query.ct.last_direction ? "Original" : "Reply",
		       query.ct.liberal_mode, query.ct.state,
		       query.ct.max_ack_window, query.ct.retransmission_limit,
		       query.ct.last_index, query.ct.last_seq,
		       query.ct.last_ack, query.ct.last_window,
		       query.ct.last_end);
		printf(" Original Dir:\n"
		       " scale: %u, fin: %u, ack seen: %u\n"
		       " unacked data: %u\n Sent end: %u,"
		       " Reply end: %u, Max win: %u, Max ACK: %u\n",
		       query.ct.original_dir.scale,
		       query.ct.original_dir.close_initiated,
		       query.ct.original_dir.last_ack_seen,
		       query.ct.original_dir.data_unacked,
		       query.ct.original_dir.sent_end,
		       query.ct.original_dir.reply_end,
		       query.ct.original_dir.max_win,
		       query.ct.original_dir.max_ack);
		printf(" Reply Dir:\n"
		       " scale: %u, fin: %u, ack seen: %u\n"
		       " unacked data: %u\n Sent end: %u,"
		       " Reply end: %u, Max win: %u, Max ACK: %u\n",
		       query.ct.reply_dir.scale,
		       query.ct.reply_dir.close_initiated,
		       query.ct.reply_dir.last_ack_seen,
		       query.ct.reply_dir.data_unacked,
		       query.ct.reply_dir.sent_end,
		       query.ct.reply_dir.reply_end,
		       query.ct.reply_dir.max_win,
		       query.ct.reply_dir.max_ack);
		break;
	default:
		fprintf(stderr,
			"Indirect action %u (type: %d) on port %u doesn't support query\n",
			id, pia->type, port_id);
		break;
	}
	return 0;
}

static struct port_flow_tunnel *
port_flow_tunnel_offload_cmd_prep(portid_t port_id,
				  const struct rte_flow_item *pattern,
				  const struct rte_flow_action *actions,
				  const struct tunnel_ops *tunnel_ops)
{
	int ret;
	struct rte_port *port;
	struct port_flow_tunnel *pft;
	struct rte_flow_error error;

	port = &ports[port_id];
	pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
	if (!pft) {
		fprintf(stderr, "failed to locate port flow tunnel #%u\n",
			tunnel_ops->id);
		return NULL;
	}
	if (tunnel_ops->actions) {
		uint32_t num_actions;
		const struct rte_flow_action *aptr;

		ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
						&pft->pmd_actions,
						&pft->num_pmd_actions,
						&error);
		if (ret) {
			port_flow_complain(&error);
			return NULL;
		}
		for (aptr = actions, num_actions = 1;
		     aptr->type != RTE_FLOW_ACTION_TYPE_END;
		     aptr++, num_actions++);
		pft->actions = malloc(
				(num_actions + pft->num_pmd_actions) *
				sizeof(actions[0]));
		if (!pft->actions) {
			rte_flow_tunnel_action_decap_release(
					port_id, pft->actions,
					pft->num_pmd_actions, &error);
			return NULL;
		}
		rte_memcpy(pft->actions, pft->pmd_actions,
			   pft->num_pmd_actions * sizeof(actions[0]));
		rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
			   num_actions * sizeof(actions[0]));
	}
	if (tunnel_ops->items) {
		uint32_t num_items;
		const struct rte_flow_item *iptr;

		ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
					    &pft->pmd_items,
					    &pft->num_pmd_items,
					    &error);
		if (ret) {
			port_flow_complain(&error);
			return NULL;
		}
		for (iptr = pattern, num_items = 1;
		     iptr->type != RTE_FLOW_ITEM_TYPE_END;
		     iptr++, num_items++);
		pft->items = malloc((num_items + pft->num_pmd_items) *
				    sizeof(pattern[0]));
		if (!pft->items) {
			rte_flow_tunnel_item_release(
					port_id, pft->pmd_items,
					pft->num_pmd_items, &error);
			return NULL;
		}
		rte_memcpy(pft->items, pft->pmd_items,
			   pft->num_pmd_items * sizeof(pattern[0]));
		rte_memcpy(pft->items + pft->num_pmd_items, pattern,
			   num_items * sizeof(pattern[0]));
	}

	return pft;
}

static void
port_flow_tunnel_offload_cmd_release(portid_t port_id,
				     const struct tunnel_ops *tunnel_ops,
				     struct port_flow_tunnel *pft)
{
	struct rte_flow_error error;

	if (tunnel_ops->actions) {
		free(pft->actions);
		rte_flow_tunnel_action_decap_release(
			port_id, pft->pmd_actions,
			pft->num_pmd_actions, &error);
		pft->actions = NULL;
		pft->pmd_actions = NULL;
	}
	if (tunnel_ops->items) {
		free(pft->items);
		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
					     pft->num_pmd_items,
					     &error);
		pft->items = NULL;
		pft->pmd_items = NULL;
	}
}

/** Add port meter policy */
int
port_meter_policy_add(portid_t port_id, uint32_t policy_id,
		      const struct rte_flow_action *actions)
{
	struct rte_mtr_error error;
	const struct rte_flow_action *act = actions;
	const struct rte_flow_action *start;
	struct rte_mtr_meter_policy_params policy;
	uint32_t i = 0, act_n;
	int ret;

	for (i = 0; i < RTE_COLORS; i++) {
		for (act_n = 0, start = act;
		     act->type != RTE_FLOW_ACTION_TYPE_END; act++)
			act_n++;
		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
			policy.actions[i] = start;
		else
			policy.actions[i] = NULL;
		act++;
	}
	ret = rte_mtr_meter_policy_add(port_id,
				       policy_id,
				       &policy, &error);
	if (ret)
		print_mtr_err_msg(&error);
	return ret;
}

/** Validate flow rule. */
*/ 2006 int 2007 port_flow_validate(portid_t port_id, 2008 const struct rte_flow_attr *attr, 2009 const struct rte_flow_item *pattern, 2010 const struct rte_flow_action *actions, 2011 const struct tunnel_ops *tunnel_ops) 2012 { 2013 struct rte_flow_error error; 2014 struct port_flow_tunnel *pft = NULL; 2015 struct rte_port *port; 2016 2017 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2018 port_id == (portid_t)RTE_PORT_ALL) 2019 return -EINVAL; 2020 2021 port = &ports[port_id]; 2022 2023 if (attr->transfer) 2024 port_id = port->flow_transfer_proxy; 2025 2026 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2027 port_id == (portid_t)RTE_PORT_ALL) 2028 return -EINVAL; 2029 2030 /* Poisoning to make sure PMDs update it in case of error. */ 2031 memset(&error, 0x11, sizeof(error)); 2032 if (tunnel_ops->enabled) { 2033 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2034 actions, tunnel_ops); 2035 if (!pft) 2036 return -ENOENT; 2037 if (pft->items) 2038 pattern = pft->items; 2039 if (pft->actions) 2040 actions = pft->actions; 2041 } 2042 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 2043 return port_flow_complain(&error); 2044 if (tunnel_ops->enabled) 2045 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2046 printf("Flow rule validated\n"); 2047 return 0; 2048 } 2049 2050 /** Return the age action structure if it exists, otherwise NULL. */ 2051 static struct rte_flow_action_age * 2052 age_action_get(const struct rte_flow_action *actions) 2053 { 2054 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2055 switch (actions->type) { 2056 case RTE_FLOW_ACTION_TYPE_AGE: 2057 return (struct rte_flow_action_age *) 2058 (uintptr_t)actions->conf; 2059 default: 2060 break; 2061 } 2062 } 2063 return NULL; 2064 } 2065 2066 /** Create flow rule. */ 2067 int 2068 port_flow_create(portid_t port_id, 2069 const struct rte_flow_attr *attr, 2070 const struct rte_flow_item *pattern, 2071 const struct rte_flow_action *actions, 2072 const struct tunnel_ops *tunnel_ops) 2073 { 2074 struct rte_flow *flow; 2075 struct rte_port *port; 2076 struct port_flow *pf; 2077 uint32_t id = 0; 2078 struct rte_flow_error error; 2079 struct port_flow_tunnel *pft = NULL; 2080 struct rte_flow_action_age *age = age_action_get(actions); 2081 2082 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2083 port_id == (portid_t)RTE_PORT_ALL) 2084 return -EINVAL; 2085 2086 port = &ports[port_id]; 2087 2088 if (attr->transfer) 2089 port_id = port->flow_transfer_proxy; 2090 2091 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2092 port_id == (portid_t)RTE_PORT_ALL) 2093 return -EINVAL; 2094 2095 if (port->flow_list) { 2096 if (port->flow_list->id == UINT32_MAX) { 2097 fprintf(stderr, 2098 "Highest rule ID is already assigned, delete it first\n"); 2099 return -ENOMEM; 2100 } 2101 id = port->flow_list->id + 1; 2102 } 2103 if (tunnel_ops->enabled) { 2104 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2105 actions, tunnel_ops); 2106 if (!pft) 2107 return -ENOENT; 2108 if (pft->items) 2109 pattern = pft->items; 2110 if (pft->actions) 2111 actions = pft->actions; 2112 } 2113 pf = port_flow_new(attr, pattern, actions, &error); 2114 if (!pf) 2115 return port_flow_complain(&error); 2116 if (age) { 2117 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2118 age->context = &pf->age_type; 2119 } 2120 /* Poisoning to make sure PMDs update it in case of error.
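 * The 0x22 byte pattern makes it obvious when a PMD reports failure without filling in the error structure.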
*/ 2121 memset(&error, 0x22, sizeof(error)); 2122 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2123 if (!flow) { 2124 if (tunnel_ops->enabled) 2125 port_flow_tunnel_offload_cmd_release(port_id, 2126 tunnel_ops, pft); 2127 free(pf); 2128 return port_flow_complain(&error); 2129 } 2130 pf->next = port->flow_list; 2131 pf->id = id; 2132 pf->flow = flow; 2133 port->flow_list = pf; 2134 if (tunnel_ops->enabled) 2135 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2136 printf("Flow rule #%u created\n", pf->id); 2137 return 0; 2138 } 2139 2140 /** Destroy a number of flow rules. */ 2141 int 2142 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2143 { 2144 struct rte_port *port; 2145 struct port_flow **tmp; 2146 uint32_t c = 0; 2147 int ret = 0; 2148 2149 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2150 port_id == (portid_t)RTE_PORT_ALL) 2151 return -EINVAL; 2152 port = &ports[port_id]; 2153 tmp = &port->flow_list; 2154 while (*tmp) { 2155 uint32_t i; 2156 2157 for (i = 0; i != n; ++i) { 2158 portid_t port_id_eff = port_id; 2159 struct rte_flow_error error; 2160 struct port_flow *pf = *tmp; 2161 2162 if (rule[i] != pf->id) 2163 continue; 2164 /* 2165 * Poisoning to make sure PMDs update it in case 2166 * of error. 2167 */ 2168 memset(&error, 0x33, sizeof(error)); 2169 2170 if (pf->rule.attr->transfer) 2171 port_id_eff = port->flow_transfer_proxy; 2172 2173 if (port_id_is_invalid(port_id_eff, ENABLED_WARN) || 2174 port_id_eff == (portid_t)RTE_PORT_ALL) 2175 return -EINVAL; 2176 2177 if (rte_flow_destroy(port_id_eff, pf->flow, &error)) { 2178 ret = port_flow_complain(&error); 2179 continue; 2180 } 2181 printf("Flow rule #%u destroyed\n", pf->id); 2182 *tmp = pf->next; 2183 free(pf); 2184 break; 2185 } 2186 if (i == n) 2187 tmp = &(*tmp)->next; 2188 ++c; 2189 } 2190 return ret; 2191 } 2192 2193 /** Remove all flow rules. */ 2194 int 2195 port_flow_flush(portid_t port_id) 2196 { 2197 struct rte_flow_error error; 2198 struct rte_port *port; 2199 int ret = 0; 2200 2201 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2202 port_id == (portid_t)RTE_PORT_ALL) 2203 return -EINVAL; 2204 2205 port = &ports[port_id]; 2206 2207 if (port->flow_list == NULL) 2208 return ret; 2209 2210 /* Poisoning to make sure PMDs update it in case of error. */ 2211 memset(&error, 0x44, sizeof(error)); 2212 if (rte_flow_flush(port_id, &error)) { 2213 port_flow_complain(&error); 2214 } 2215 2216 while (port->flow_list) { 2217 struct port_flow *pf = port->flow_list->next; 2218 2219 free(port->flow_list); 2220 port->flow_list = pf; 2221 } 2222 return ret; 2223 } 2224 2225 /** Dump flow rules. 
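 * Dumps a single rule, selected by ID, or all rules of the port, either to stdout or to the given file.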
*/ 2226 int 2227 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 2228 const char *file_name) 2229 { 2230 int ret = 0; 2231 FILE *file = stdout; 2232 struct rte_flow_error error; 2233 struct rte_port *port; 2234 struct port_flow *pflow; 2235 struct rte_flow *tmpFlow = NULL; 2236 bool found = false; 2237 2238 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2239 port_id == (portid_t)RTE_PORT_ALL) 2240 return -EINVAL; 2241 2242 if (!dump_all) { 2243 port = &ports[port_id]; 2244 pflow = port->flow_list; 2245 while (pflow) { 2246 if (rule_id != pflow->id) { 2247 pflow = pflow->next; 2248 } else { 2249 tmpFlow = pflow->flow; 2250 if (tmpFlow) 2251 found = true; 2252 break; 2253 } 2254 } 2255 if (found == false) { 2256 fprintf(stderr, "Failed to dump flow %d\n", rule_id); 2257 return -EINVAL; 2258 } 2259 } 2260 2261 if (file_name && strlen(file_name)) { 2262 file = fopen(file_name, "w"); 2263 if (!file) { 2264 fprintf(stderr, "Failed to create file %s: %s\n", 2265 file_name, strerror(errno)); 2266 return -errno; 2267 } 2268 } 2269 2270 if (!dump_all) 2271 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 2272 else 2273 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 2274 if (ret) { 2275 port_flow_complain(&error); 2276 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 2277 } else 2278 printf("Flow dump finished\n"); 2279 if (file_name && strlen(file_name)) 2280 fclose(file); 2281 return ret; 2282 } 2283 2284 /** Query a flow rule. */ 2285 int 2286 port_flow_query(portid_t port_id, uint32_t rule, 2287 const struct rte_flow_action *action) 2288 { 2289 struct rte_flow_error error; 2290 struct rte_port *port; 2291 struct port_flow *pf; 2292 const char *name; 2293 union { 2294 struct rte_flow_query_count count; 2295 struct rte_flow_action_rss rss_conf; 2296 struct rte_flow_query_age age; 2297 } query; 2298 int ret; 2299 2300 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2301 port_id == (portid_t)RTE_PORT_ALL) 2302 return -EINVAL; 2303 port = &ports[port_id]; 2304 for (pf = port->flow_list; pf; pf = pf->next) 2305 if (pf->id == rule) 2306 break; 2307 if (!pf) { 2308 fprintf(stderr, "Flow rule #%u not found\n", rule); 2309 return -ENOENT; 2310 } 2311 2312 if (pf->rule.attr->transfer) 2313 port_id = port->flow_transfer_proxy; 2314 2315 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2316 port_id == (portid_t)RTE_PORT_ALL) 2317 return -EINVAL; 2318 2319 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2320 &name, sizeof(name), 2321 (void *)(uintptr_t)action->type, &error); 2322 if (ret < 0) 2323 return port_flow_complain(&error); 2324 switch (action->type) { 2325 case RTE_FLOW_ACTION_TYPE_COUNT: 2326 case RTE_FLOW_ACTION_TYPE_RSS: 2327 case RTE_FLOW_ACTION_TYPE_AGE: 2328 break; 2329 default: 2330 fprintf(stderr, "Cannot query action type %d (%s)\n", 2331 action->type, name); 2332 return -ENOTSUP; 2333 } 2334 /* Poisoning to make sure PMDs update it in case of error.
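 * Both the error structure and the query buffer are cleared below, so stale data from a previous query cannot be mistaken for fresh results.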
*/ 2335 memset(&error, 0x55, sizeof(error)); 2336 memset(&query, 0, sizeof(query)); 2337 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2338 return port_flow_complain(&error); 2339 switch (action->type) { 2340 case RTE_FLOW_ACTION_TYPE_COUNT: 2341 printf("%s:\n" 2342 " hits_set: %u\n" 2343 " bytes_set: %u\n" 2344 " hits: %" PRIu64 "\n" 2345 " bytes: %" PRIu64 "\n", 2346 name, 2347 query.count.hits_set, 2348 query.count.bytes_set, 2349 query.count.hits, 2350 query.count.bytes); 2351 break; 2352 case RTE_FLOW_ACTION_TYPE_RSS: 2353 rss_config_display(&query.rss_conf); 2354 break; 2355 case RTE_FLOW_ACTION_TYPE_AGE: 2356 printf("%s:\n" 2357 " aged: %u\n" 2358 " sec_since_last_hit_valid: %u\n" 2359 " sec_since_last_hit: %" PRIu32 "\n", 2360 name, 2361 query.age.aged, 2362 query.age.sec_since_last_hit_valid, 2363 query.age.sec_since_last_hit); 2364 break; 2365 default: 2366 fprintf(stderr, 2367 "Cannot display result for action type %d (%s)\n", 2368 action->type, name); 2369 break; 2370 } 2371 return 0; 2372 } 2373 2374 /** List and optionally destroy all aged flows. */ 2375 void 2376 port_flow_aged(portid_t port_id, uint8_t destroy) 2377 { 2378 void **contexts; 2379 int nb_context, total = 0, idx; 2380 struct rte_flow_error error; 2381 enum age_action_context_type *type; 2382 union { 2383 struct port_flow *pf; 2384 struct port_indirect_action *pia; 2385 } ctx; 2386 2387 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2388 port_id == (portid_t)RTE_PORT_ALL) 2389 return; 2390 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2391 printf("Port %u total aged flows: %d\n", port_id, total); 2392 if (total < 0) { 2393 port_flow_complain(&error); 2394 return; 2395 } 2396 if (total == 0) 2397 return; 2398 contexts = malloc(sizeof(void *) * total); 2399 if (contexts == NULL) { 2400 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 2401 return; 2402 } 2403 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 2404 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2405 if (nb_context != total) { 2406 fprintf(stderr, 2407 "Port:%d get aged flows count(%d) != total(%d)\n", 2408 port_id, nb_context, total); 2409 free(contexts); 2410 return; 2411 } 2412 total = 0; 2413 for (idx = 0; idx < nb_context; idx++) { 2414 if (!contexts[idx]) { 2415 fprintf(stderr, "Error: got NULL context on port %u\n", 2416 port_id); 2417 continue; 2418 } 2419 type = (enum age_action_context_type *)contexts[idx]; 2420 switch (*type) { 2421 case ACTION_AGE_CONTEXT_TYPE_FLOW: 2422 ctx.pf = container_of(type, struct port_flow, age_type); 2423 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 2424 "\t%c%c%c\t\n", 2425 "Flow", 2426 ctx.pf->id, 2427 ctx.pf->rule.attr->group, 2428 ctx.pf->rule.attr->priority, 2429 ctx.pf->rule.attr->ingress ? 'i' : '-', 2430 ctx.pf->rule.attr->egress ? 'e' : '-', 2431 ctx.pf->rule.attr->transfer ? 't' : '-'); 2432 if (destroy && !port_flow_destroy(port_id, 1, 2433 &ctx.pf->id)) 2434 total++; 2435 break; 2436 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 2437 ctx.pia = container_of(type, 2438 struct port_indirect_action, age_type); 2439 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 2440 ctx.pia->id); 2441 break; 2442 default: 2443 fprintf(stderr, "Error: invalid context type on port %u\n", 2444 port_id); 2445 break; 2446 } 2447 } 2448 printf("\n%d flows destroyed\n", total); 2449 free(contexts); 2450 } 2451 2452 /** List flow rules.
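 * Rules are sorted by group, priority and ID before printing; a non-empty group list restricts the output to the listed groups.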
*/ 2453 void 2454 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2455 { 2456 struct rte_port *port; 2457 struct port_flow *pf; 2458 struct port_flow *list = NULL; 2459 uint32_t i; 2460 2461 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2462 port_id == (portid_t)RTE_PORT_ALL) 2463 return; 2464 port = &ports[port_id]; 2465 if (!port->flow_list) 2466 return; 2467 /* Sort flows by group, priority and ID. */ 2468 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2469 struct port_flow **tmp; 2470 const struct rte_flow_attr *curr = pf->rule.attr; 2471 2472 if (n) { 2473 /* Filter out unwanted groups. */ 2474 for (i = 0; i != n; ++i) 2475 if (curr->group == group[i]) 2476 break; 2477 if (i == n) 2478 continue; 2479 } 2480 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2481 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2482 2483 if (curr->group > comp->group || 2484 (curr->group == comp->group && 2485 curr->priority > comp->priority) || 2486 (curr->group == comp->group && 2487 curr->priority == comp->priority && 2488 pf->id > (*tmp)->id)) 2489 continue; 2490 break; 2491 } 2492 pf->tmp = *tmp; 2493 *tmp = pf; 2494 } 2495 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2496 for (pf = list; pf != NULL; pf = pf->tmp) { 2497 const struct rte_flow_item *item = pf->rule.pattern; 2498 const struct rte_flow_action *action = pf->rule.actions; 2499 const char *name; 2500 2501 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2502 pf->id, 2503 pf->rule.attr->group, 2504 pf->rule.attr->priority, 2505 pf->rule.attr->ingress ? 'i' : '-', 2506 pf->rule.attr->egress ? 'e' : '-', 2507 pf->rule.attr->transfer ? 't' : '-'); 2508 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2509 if ((uint32_t)item->type > INT_MAX) 2510 name = "PMD_INTERNAL"; 2511 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2512 &name, sizeof(name), 2513 (void *)(uintptr_t)item->type, 2514 NULL) <= 0) 2515 name = "[UNKNOWN]"; 2516 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2517 printf("%s ", name); 2518 ++item; 2519 } 2520 printf("=>"); 2521 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2522 if ((uint32_t)action->type > INT_MAX) 2523 name = "PMD_INTERNAL"; 2524 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2525 &name, sizeof(name), 2526 (void *)(uintptr_t)action->type, 2527 NULL) <= 0) 2528 name = "[UNKNOWN]"; 2529 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2530 printf(" %s", name); 2531 ++action; 2532 } 2533 printf("\n"); 2534 } 2535 } 2536 2537 /** Restrict ingress traffic to the defined flow rules. */ 2538 int 2539 port_flow_isolate(portid_t port_id, int set) 2540 { 2541 struct rte_flow_error error; 2542 2543 /* Poisoning to make sure PMDs update it in case of error. */ 2544 memset(&error, 0x66, sizeof(error)); 2545 if (rte_flow_isolate(port_id, set, &error)) 2546 return port_flow_complain(&error); 2547 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2548 port_id, 2549 set ? "now restricted" : "not restricted anymore"); 2550 return 0; 2551 } 2552 2553 /* 2554 * RX/TX ring descriptors display functions. 
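 * The helpers below validate queue and descriptor IDs, look up the descriptor ring memzone of a queue and pretty-print single descriptors.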
2555 */ 2556 int 2557 rx_queue_id_is_invalid(queueid_t rxq_id) 2558 { 2559 if (rxq_id < nb_rxq) 2560 return 0; 2561 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 2562 rxq_id, nb_rxq); 2563 return 1; 2564 } 2565 2566 int 2567 tx_queue_id_is_invalid(queueid_t txq_id) 2568 { 2569 if (txq_id < nb_txq) 2570 return 0; 2571 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 2572 txq_id, nb_txq); 2573 return 1; 2574 } 2575 2576 static int 2577 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2578 { 2579 struct rte_port *port = &ports[port_id]; 2580 struct rte_eth_rxq_info rx_qinfo; 2581 int ret; 2582 2583 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2584 if (ret == 0) { 2585 *ring_size = rx_qinfo.nb_desc; 2586 return ret; 2587 } 2588 2589 if (ret != -ENOTSUP) 2590 return ret; 2591 /* 2592 * If rte_eth_rx_queue_info_get() is not supported by this PMD, the 2593 * ring_size stored in testpmd will be used for validity verification. 2594 * When the rxq is configured by rte_eth_rx_queue_setup() with nb_rx_desc 2595 * being 0, a default value provided by the PMD is used to set up this 2596 * rxq. If that default value is also 0, the 2597 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE fallback is used to set up this rxq. 2598 */ 2599 if (port->nb_rx_desc[rxq_id]) 2600 *ring_size = port->nb_rx_desc[rxq_id]; 2601 else if (port->dev_info.default_rxportconf.ring_size) 2602 *ring_size = port->dev_info.default_rxportconf.ring_size; 2603 else 2604 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2605 return 0; 2606 } 2607 2608 static int 2609 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2610 { 2611 struct rte_port *port = &ports[port_id]; 2612 struct rte_eth_txq_info tx_qinfo; 2613 int ret; 2614 2615 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2616 if (ret == 0) { 2617 *ring_size = tx_qinfo.nb_desc; 2618 return ret; 2619 } 2620 2621 if (ret != -ENOTSUP) 2622 return ret; 2623 /* 2624 * If rte_eth_tx_queue_info_get() is not supported by this PMD, the 2625 * ring_size stored in testpmd will be used for validity verification. 2626 * When the txq is configured by rte_eth_tx_queue_setup() with nb_tx_desc 2627 * being 0, a default value provided by the PMD is used to set up this 2628 * txq. If that default value is also 0, the 2629 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE fallback is used to set up this txq.
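 * The same fallback order as in get_rx_ring_size() above applies here, just for the TX ring.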
2630 */ 2631 if (port->nb_tx_desc[txq_id]) 2632 *ring_size = port->nb_tx_desc[txq_id]; 2633 else if (port->dev_info.default_txportconf.ring_size) 2634 *ring_size = port->dev_info.default_txportconf.ring_size; 2635 else 2636 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2637 return 0; 2638 } 2639 2640 static int 2641 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2642 { 2643 uint16_t ring_size; 2644 int ret; 2645 2646 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2647 if (ret) 2648 return 1; 2649 2650 if (rxdesc_id < ring_size) 2651 return 0; 2652 2653 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 2654 rxdesc_id, ring_size); 2655 return 1; 2656 } 2657 2658 static int 2659 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2660 { 2661 uint16_t ring_size; 2662 int ret; 2663 2664 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2665 if (ret) 2666 return 1; 2667 2668 if (txdesc_id < ring_size) 2669 return 0; 2670 2671 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 2672 txdesc_id, ring_size); 2673 return 1; 2674 } 2675 2676 static const struct rte_memzone * 2677 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2678 { 2679 char mz_name[RTE_MEMZONE_NAMESIZE]; 2680 const struct rte_memzone *mz; 2681 2682 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2683 port_id, q_id, ring_name); 2684 mz = rte_memzone_lookup(mz_name); 2685 if (mz == NULL) 2686 fprintf(stderr, 2687 "%s ring memory zone of (port %d, queue %d) not found (zone name = %s)\n", 2688 ring_name, port_id, q_id, mz_name); 2689 return mz; 2690 } 2691 2692 union igb_ring_dword { 2693 uint64_t dword; 2694 struct { 2695 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2696 uint32_t lo; 2697 uint32_t hi; 2698 #else 2699 uint32_t hi; 2700 uint32_t lo; 2701 #endif 2702 } words; 2703 }; 2704 2705 struct igb_ring_desc_32_bytes { 2706 union igb_ring_dword lo_dword; 2707 union igb_ring_dword hi_dword; 2708 union igb_ring_dword resv1; 2709 union igb_ring_dword resv2; 2710 }; 2711 2712 struct igb_ring_desc_16_bytes { 2713 union igb_ring_dword lo_dword; 2714 union igb_ring_dword hi_dword; 2715 }; 2716 2717 static void 2718 ring_rxd_display_dword(union igb_ring_dword dword) 2719 { 2720 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2721 (unsigned)dword.words.hi); 2722 } 2723 2724 static void 2725 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2726 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2727 portid_t port_id, 2728 #else 2729 __rte_unused portid_t port_id, 2730 #endif 2731 uint16_t desc_id) 2732 { 2733 struct igb_ring_desc_16_bytes *ring = 2734 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2735 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2736 int ret; 2737 struct rte_eth_dev_info dev_info; 2738 2739 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2740 if (ret != 0) 2741 return; 2742 2743 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2744 /* 32 bytes RX descriptor, i40e only */ 2745 struct igb_ring_desc_32_bytes *ring = 2746 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2747 ring[desc_id].lo_dword.dword = 2748 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2749 ring_rxd_display_dword(ring[desc_id].lo_dword); 2750 ring[desc_id].hi_dword.dword = 2751 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2752 ring_rxd_display_dword(ring[desc_id].hi_dword); 2753 ring[desc_id].resv1.dword = 2754 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2755 ring_rxd_display_dword(ring[desc_id].resv1); 2756
ring[desc_id].resv2.dword = 2757 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2758 ring_rxd_display_dword(ring[desc_id].resv2); 2759 2760 return; 2761 } 2762 #endif 2763 /* 16 bytes RX descriptor */ 2764 ring[desc_id].lo_dword.dword = 2765 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2766 ring_rxd_display_dword(ring[desc_id].lo_dword); 2767 ring[desc_id].hi_dword.dword = 2768 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2769 ring_rxd_display_dword(ring[desc_id].hi_dword); 2770 } 2771 2772 static void 2773 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2774 { 2775 struct igb_ring_desc_16_bytes *ring; 2776 struct igb_ring_desc_16_bytes txd; 2777 2778 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2779 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2780 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2781 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2782 (unsigned)txd.lo_dword.words.lo, 2783 (unsigned)txd.lo_dword.words.hi, 2784 (unsigned)txd.hi_dword.words.lo, 2785 (unsigned)txd.hi_dword.words.hi); 2786 } 2787 2788 void 2789 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2790 { 2791 const struct rte_memzone *rx_mz; 2792 2793 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2794 return; 2795 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2796 if (rx_mz == NULL) 2797 return; 2798 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2799 } 2800 2801 void 2802 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2803 { 2804 const struct rte_memzone *tx_mz; 2805 2806 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2807 return; 2808 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2809 if (tx_mz == NULL) 2810 return; 2811 ring_tx_descriptor_display(tx_mz, txd_id); 2812 } 2813 2814 void 2815 fwd_lcores_config_display(void) 2816 { 2817 lcoreid_t lc_id; 2818 2819 printf("List of forwarding lcores:"); 2820 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2821 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2822 printf("\n"); 2823 } 2824 void 2825 rxtx_config_display(void) 2826 { 2827 portid_t pid; 2828 queueid_t qid; 2829 2830 printf(" %s packet forwarding%s packets/burst=%d\n", 2831 cur_fwd_eng->fwd_mode_name, 2832 retry_enabled == 0 ? 
"" : " with retry", 2833 nb_pkt_per_burst); 2834 2835 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2836 printf(" packet len=%u - nb packet segments=%d\n", 2837 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2838 2839 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2840 nb_fwd_lcores, nb_fwd_ports); 2841 2842 RTE_ETH_FOREACH_DEV(pid) { 2843 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2844 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2845 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2846 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2847 struct rte_eth_rxq_info rx_qinfo; 2848 struct rte_eth_txq_info tx_qinfo; 2849 uint16_t rx_free_thresh_tmp; 2850 uint16_t tx_free_thresh_tmp; 2851 uint16_t tx_rs_thresh_tmp; 2852 uint16_t nb_rx_desc_tmp; 2853 uint16_t nb_tx_desc_tmp; 2854 uint64_t offloads_tmp; 2855 uint8_t pthresh_tmp; 2856 uint8_t hthresh_tmp; 2857 uint8_t wthresh_tmp; 2858 int32_t rc; 2859 2860 /* per port config */ 2861 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2862 (unsigned int)pid, nb_rxq, nb_txq); 2863 2864 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2865 ports[pid].dev_conf.rxmode.offloads, 2866 ports[pid].dev_conf.txmode.offloads); 2867 2868 /* per rx queue config only for first queue to be less verbose */ 2869 for (qid = 0; qid < 1; qid++) { 2870 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2871 if (rc) { 2872 nb_rx_desc_tmp = nb_rx_desc[qid]; 2873 rx_free_thresh_tmp = 2874 rx_conf[qid].rx_free_thresh; 2875 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2876 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2877 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2878 offloads_tmp = rx_conf[qid].offloads; 2879 } else { 2880 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2881 rx_free_thresh_tmp = 2882 rx_qinfo.conf.rx_free_thresh; 2883 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2884 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2885 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2886 offloads_tmp = rx_qinfo.conf.offloads; 2887 } 2888 2889 printf(" RX queue: %d\n", qid); 2890 printf(" RX desc=%d - RX free threshold=%d\n", 2891 nb_rx_desc_tmp, rx_free_thresh_tmp); 2892 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2893 " wthresh=%d\n", 2894 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2895 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 2896 if (rx_conf->share_group > 0) 2897 printf(" share_group=%u share_qid=%u", 2898 rx_conf->share_group, 2899 rx_conf->share_qid); 2900 printf("\n"); 2901 } 2902 2903 /* per tx queue config only for first queue to be less verbose */ 2904 for (qid = 0; qid < 1; qid++) { 2905 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2906 if (rc) { 2907 nb_tx_desc_tmp = nb_tx_desc[qid]; 2908 tx_free_thresh_tmp = 2909 tx_conf[qid].tx_free_thresh; 2910 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2911 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2912 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2913 offloads_tmp = tx_conf[qid].offloads; 2914 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2915 } else { 2916 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2917 tx_free_thresh_tmp = 2918 tx_qinfo.conf.tx_free_thresh; 2919 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2920 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2921 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2922 offloads_tmp = tx_qinfo.conf.offloads; 2923 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2924 } 2925 2926 printf(" TX queue: %d\n", qid); 2927 printf(" TX desc=%d - TX free threshold=%d\n", 2928 
nb_tx_desc_tmp, tx_free_thresh_tmp); 2929 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2930 " wthresh=%d\n", 2931 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2932 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2933 offloads_tmp, tx_rs_thresh_tmp); 2934 } 2935 } 2936 } 2937 2938 void 2939 port_rss_reta_info(portid_t port_id, 2940 struct rte_eth_rss_reta_entry64 *reta_conf, 2941 uint16_t nb_entries) 2942 { 2943 uint16_t i, idx, shift; 2944 int ret; 2945 2946 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2947 return; 2948 2949 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2950 if (ret != 0) { 2951 fprintf(stderr, 2952 "Failed to get RSS RETA info, return code = %d\n", 2953 ret); 2954 return; 2955 } 2956 2957 for (i = 0; i < nb_entries; i++) { 2958 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2959 shift = i % RTE_ETH_RETA_GROUP_SIZE; 2960 if (!(reta_conf[idx].mask & (1ULL << shift))) 2961 continue; 2962 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2963 i, reta_conf[idx].reta[shift]); 2964 } 2965 } 2966 2967 /* 2968 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 2969 * key of the port. 2970 */ 2971 void 2972 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2973 { 2974 struct rte_eth_rss_conf rss_conf = {0}; 2975 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2976 uint64_t rss_hf; 2977 uint8_t i; 2978 int diag; 2979 struct rte_eth_dev_info dev_info; 2980 uint8_t hash_key_size; 2981 int ret; 2982 2983 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2984 return; 2985 2986 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2987 if (ret != 0) 2988 return; 2989 2990 if (dev_info.hash_key_size > 0 && 2991 dev_info.hash_key_size <= sizeof(rss_key)) 2992 hash_key_size = dev_info.hash_key_size; 2993 else { 2994 fprintf(stderr, 2995 "dev_info did not provide a valid hash key size\n"); 2996 return; 2997 } 2998 2999 /* Get RSS hash key if asked to display it */ 3000 rss_conf.rss_key = (show_rss_key) ?
rss_key : NULL; 3001 rss_conf.rss_key_len = hash_key_size; 3002 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3003 if (diag != 0) { 3004 switch (diag) { 3005 case -ENODEV: 3006 fprintf(stderr, "port index %d invalid\n", port_id); 3007 break; 3008 case -ENOTSUP: 3009 fprintf(stderr, "operation not supported by device\n"); 3010 break; 3011 default: 3012 fprintf(stderr, "operation failed - diag=%d\n", diag); 3013 break; 3014 } 3015 return; 3016 } 3017 rss_hf = rss_conf.rss_hf; 3018 if (rss_hf == 0) { 3019 printf("RSS disabled\n"); 3020 return; 3021 } 3022 printf("RSS functions:\n "); 3023 for (i = 0; rss_type_table[i].str; i++) { 3024 if (rss_type_table[i].rss_type == 0) 3025 continue; 3026 if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type) 3027 printf("%s ", rss_type_table[i].str); 3028 } 3029 printf("\n"); 3030 if (!show_rss_key) 3031 return; 3032 printf("RSS key:\n"); 3033 for (i = 0; i < hash_key_size; i++) 3034 printf("%02X", rss_key[i]); 3035 printf("\n"); 3036 } 3037 3038 void 3039 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3040 uint8_t hash_key_len) 3041 { 3042 struct rte_eth_rss_conf rss_conf; 3043 int diag; 3044 unsigned int i; 3045 3046 rss_conf.rss_key = NULL; 3047 rss_conf.rss_key_len = 0; 3048 rss_conf.rss_hf = 0; 3049 for (i = 0; rss_type_table[i].str; i++) { 3050 if (!strcmp(rss_type_table[i].str, rss_type)) 3051 rss_conf.rss_hf = rss_type_table[i].rss_type; 3052 } 3053 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3054 if (diag == 0) { 3055 rss_conf.rss_key = hash_key; 3056 rss_conf.rss_key_len = hash_key_len; 3057 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3058 } 3059 if (diag == 0) 3060 return; 3061 3062 switch (diag) { 3063 case -ENODEV: 3064 fprintf(stderr, "port index %d invalid\n", port_id); 3065 break; 3066 case -ENOTSUP: 3067 fprintf(stderr, "operation not supported by device\n"); 3068 break; 3069 default: 3070 fprintf(stderr, "operation failed - diag=%d\n", diag); 3071 break; 3072 } 3073 } 3074 3075 /* 3076 * Check whether a shared rxq is scheduled on other lcores. 3077 */ 3078 static bool 3079 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 3080 portid_t src_port, queueid_t src_rxq, 3081 uint32_t share_group, queueid_t share_rxq) 3082 { 3083 streamid_t sm_id; 3084 streamid_t nb_fs_per_lcore; 3085 lcoreid_t nb_fc; 3086 lcoreid_t lc_id; 3087 struct fwd_stream *fs; 3088 struct rte_port *port; 3089 struct rte_eth_dev_info *dev_info; 3090 struct rte_eth_rxconf *rxq_conf; 3091 3092 nb_fc = cur_fwd_config.nb_fwd_lcores; 3093 /* Check remaining cores. */ 3094 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 3095 sm_id = fwd_lcores[lc_id]->stream_idx; 3096 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3097 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3098 sm_id++) { 3099 fs = fwd_streams[sm_id]; 3100 port = &ports[fs->rx_port]; 3101 dev_info = &port->dev_info; 3102 rxq_conf = &port->rx_conf[fs->rx_queue]; 3103 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3104 == 0 || rxq_conf->share_group == 0) 3105 /* Not shared rxq.
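 * (either the device does not advertise RTE_ETH_DEV_CAPA_RXQ_SHARE or the queue's share_group is 0).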
*/ 3106 continue; 3107 if (domain_id != port->dev_info.switch_info.domain_id) 3108 continue; 3109 if (rxq_conf->share_group != share_group) 3110 continue; 3111 if (rxq_conf->share_qid != share_rxq) 3112 continue; 3113 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 3114 share_group, share_rxq); 3115 printf(" lcore %hhu Port %hu queue %hu\n", 3116 src_lc, src_port, src_rxq); 3117 printf(" lcore %hhu Port %hu queue %hu\n", 3118 lc_id, fs->rx_port, fs->rx_queue); 3119 printf("Please use --nb-cores=%hu to limit the number of forwarding cores\n", 3120 nb_rxq); 3121 return true; 3122 } 3123 } 3124 return false; 3125 } 3126 3127 /* 3128 * Check shared rxq configuration. 3129 * 3130 * A shared group must not be scheduled on different cores. 3131 */ 3132 bool 3133 pkt_fwd_shared_rxq_check(void) 3134 { 3135 streamid_t sm_id; 3136 streamid_t nb_fs_per_lcore; 3137 lcoreid_t nb_fc; 3138 lcoreid_t lc_id; 3139 struct fwd_stream *fs; 3140 uint16_t domain_id; 3141 struct rte_port *port; 3142 struct rte_eth_dev_info *dev_info; 3143 struct rte_eth_rxconf *rxq_conf; 3144 3145 if (rxq_share == 0) 3146 return true; 3147 nb_fc = cur_fwd_config.nb_fwd_lcores; 3148 /* 3149 * Check streams on each core, make sure the same switch domain + 3150 * group + queue doesn't get scheduled on other cores. 3151 */ 3152 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 3153 sm_id = fwd_lcores[lc_id]->stream_idx; 3154 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3155 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3156 sm_id++) { 3157 fs = fwd_streams[sm_id]; 3158 /* Update lcore info for the stream being scheduled. */ 3159 fs->lcore = fwd_lcores[lc_id]; 3160 port = &ports[fs->rx_port]; 3161 dev_info = &port->dev_info; 3162 rxq_conf = &port->rx_conf[fs->rx_queue]; 3163 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3164 == 0 || rxq_conf->share_group == 0) 3165 /* Not shared rxq. */ 3166 continue; 3167 /* Check the shared rxq is not scheduled on the remaining cores. */ 3168 domain_id = port->dev_info.switch_info.domain_id; 3169 if (fwd_stream_on_other_lcores(domain_id, lc_id, 3170 fs->rx_port, 3171 fs->rx_queue, 3172 rxq_conf->share_group, 3173 rxq_conf->share_qid)) 3174 return false; 3175 } 3176 } 3177 return true; 3178 } 3179 3180 /* 3181 * Setup forwarding configuration for each logical core. 3182 */ 3183 static void 3184 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 3185 { 3186 streamid_t nb_fs_per_lcore; 3187 streamid_t nb_fs; 3188 streamid_t sm_id; 3189 lcoreid_t nb_extra; 3190 lcoreid_t nb_fc; 3191 lcoreid_t nb_lc; 3192 lcoreid_t lc_id; 3193 3194 nb_fs = cfg->nb_fwd_streams; 3195 nb_fc = cfg->nb_fwd_lcores; 3196 if (nb_fs <= nb_fc) { 3197 nb_fs_per_lcore = 1; 3198 nb_extra = 0; 3199 } else { 3200 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3201 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3202 } 3203 3204 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3205 sm_id = 0; 3206 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3207 fwd_lcores[lc_id]->stream_idx = sm_id; 3208 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3209 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3210 } 3211 3212 /* 3213 * Assign extra remaining streams, if any.
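 * For example, 10 streams on 4 cores gives nb_fs_per_lcore = 2 and nb_extra = 2: the first two lcores get 2 streams each and the last two lcores get 3 streams each.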
3214 */ 3215 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3216 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3217 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3218 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3219 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3220 } 3221 } 3222 3223 static portid_t 3224 fwd_topology_tx_port_get(portid_t rxp) 3225 { 3226 static int warning_once = 1; 3227 3228 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 3229 3230 switch (port_topology) { 3231 default: 3232 case PORT_TOPOLOGY_PAIRED: 3233 if ((rxp & 0x1) == 0) { 3234 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 3235 return rxp + 1; 3236 if (warning_once) { 3237 fprintf(stderr, 3238 "\nWarning! port-topology=paired with an odd number of forwarding ports, the last port will pair with itself.\n\n"); 3239 warning_once = 0; 3240 } 3241 return rxp; 3242 } 3243 return rxp - 1; 3244 case PORT_TOPOLOGY_CHAINED: 3245 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 3246 case PORT_TOPOLOGY_LOOP: 3247 return rxp; 3248 } 3249 } 3250 3251 static void 3252 simple_fwd_config_setup(void) 3253 { 3254 portid_t i; 3255 3256 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 3257 cur_fwd_config.nb_fwd_streams = 3258 (streamid_t) cur_fwd_config.nb_fwd_ports; 3259 3260 /* reinitialize forwarding streams */ 3261 init_fwd_streams(); 3262 3263 /* 3264 * In the simple forwarding test, the number of forwarding cores 3265 * must be lower than or equal to the number of forwarding ports. 3266 */ 3267 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3268 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 3269 cur_fwd_config.nb_fwd_lcores = 3270 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 3271 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3272 3273 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 3274 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 3275 fwd_streams[i]->rx_queue = 0; 3276 fwd_streams[i]->tx_port = 3277 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 3278 fwd_streams[i]->tx_queue = 0; 3279 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 3280 fwd_streams[i]->retry_enabled = retry_enabled; 3281 } 3282 } 3283 3284 /** 3285 * For the RSS forwarding test, all streams are distributed over the lcores. 3286 * Each stream is composed of an RX queue to poll on an RX port for input 3287 * packets, associated with a TX queue of a TX port to which forwarded packets are sent. 3288 */ 3289 static void 3290 rss_fwd_config_setup(void) 3291 { 3292 portid_t rxp; 3293 portid_t txp; 3294 queueid_t rxq; 3295 queueid_t nb_q; 3296 streamid_t sm_id; 3297 int start; 3298 int end; 3299 3300 nb_q = nb_rxq; 3301 if (nb_q > nb_txq) 3302 nb_q = nb_txq; 3303 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3304 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3305 cur_fwd_config.nb_fwd_streams = 3306 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 3307 3308 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3309 cur_fwd_config.nb_fwd_lcores = 3310 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3311 3312 /* reinitialize forwarding streams */ 3313 init_fwd_streams(); 3314 3315 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3316 3317 if (proc_id > 0 && nb_q % num_procs != 0) 3318 printf("Warning! The number of queues should be a multiple of the number of processes, or packet loss will happen.\n"); 3319 3320 /** 3321 * In multi-process mode, all queues are allocated to different 3322 * processes based on num_procs and proc_id. For example, 3323 * with 4 queues (nb_q) and 2 processes (num_procs): 3324 * queues 0~1 go to the primary process,
* queues 2~3 go to the secondary process. 3326 */ 3327 start = proc_id * nb_q / num_procs; 3328 end = start + nb_q / num_procs; 3329 rxp = 0; 3330 rxq = start; 3331 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 3332 struct fwd_stream *fs; 3333 3334 fs = fwd_streams[sm_id]; 3335 txp = fwd_topology_tx_port_get(rxp); 3336 fs->rx_port = fwd_ports_ids[rxp]; 3337 fs->rx_queue = rxq; 3338 fs->tx_port = fwd_ports_ids[txp]; 3339 fs->tx_queue = rxq; 3340 fs->peer_addr = fs->tx_port; 3341 fs->retry_enabled = retry_enabled; 3342 rxp++; 3343 if (rxp < nb_fwd_ports) 3344 continue; 3345 rxp = 0; 3346 rxq++; 3347 if (rxq >= end) 3348 rxq = start; 3349 } 3350 } 3351 3352 static uint16_t 3353 get_fwd_port_total_tc_num(void) 3354 { 3355 struct rte_eth_dcb_info dcb_info; 3356 uint16_t total_tc_num = 0; 3357 unsigned int i; 3358 3359 for (i = 0; i < nb_fwd_ports; i++) { 3360 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 3361 total_tc_num += dcb_info.nb_tcs; 3362 } 3363 3364 return total_tc_num; 3365 } 3366 3367 /** 3368 * For the DCB forwarding test, each core is assigned to a traffic class. 3369 * 3370 * Each core is assigned multiple streams; each stream is composed of 3371 * an RX queue to poll on an RX port for input packets, associated with 3372 * a TX queue of a TX port to which forwarded packets are sent. All RX and 3373 * TX queues map to the same traffic class. 3374 * If VMDQ and DCB co-exist, the same traffic class on different pools shares 3375 * the same core. 3376 */ 3377 static void 3378 dcb_fwd_config_setup(void) 3379 { 3380 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3381 portid_t txp, rxp = 0; 3382 queueid_t txq, rxq = 0; 3383 lcoreid_t lc_id; 3384 uint16_t nb_rx_queue, nb_tx_queue; 3385 uint16_t i, j, k, sm_id = 0; 3386 uint16_t total_tc_num; 3387 struct rte_port *port; 3388 uint8_t tc = 0; 3389 portid_t pid; 3390 int ret; 3391 3392 /* 3393 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 3394 * or RTE_PORT_STOPPED. 3395 * 3396 * Re-configure ports to get an updated mapping between TC and queue in 3397 * case the queue number of the port has changed. Skip started ports, 3398 * since modifying the queue number and calling dev_configure require 3399 * the ports to be stopped first.
*/ 3400 for (pid = 0; pid < nb_fwd_ports; pid++) { 3401 if (port_is_started(pid) == 1) 3402 continue; 3403 3404 port = &ports[pid]; 3405 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 3406 &port->dev_conf); 3407 if (ret < 0) { 3408 fprintf(stderr, 3409 "Failed to re-configure port %d, ret = %d.\n", 3410 pid, ret); 3411 return; 3412 } 3413 } 3414 3415 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3416 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3417 cur_fwd_config.nb_fwd_streams = 3418 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3419 total_tc_num = get_fwd_port_total_tc_num(); 3420 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 3421 cur_fwd_config.nb_fwd_lcores = total_tc_num; 3422 3423 /* reinitialize forwarding streams */ 3424 init_fwd_streams(); 3425 sm_id = 0; 3426 txp = 1; 3427 /* get the dcb info on the first RX and TX ports */ 3428 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3429 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3430 3431 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3432 fwd_lcores[lc_id]->stream_nb = 0; 3433 fwd_lcores[lc_id]->stream_idx = sm_id; 3434 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 3435 /* if nb_queue is zero, this TC is 3436 * not enabled on the pool 3437 */ 3438 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3439 break; 3440 k = fwd_lcores[lc_id]->stream_nb + 3441 fwd_lcores[lc_id]->stream_idx; 3442 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3443 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3444 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3445 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3446 for (j = 0; j < nb_rx_queue; j++) { 3447 struct fwd_stream *fs; 3448 3449 fs = fwd_streams[k + j]; 3450 fs->rx_port = fwd_ports_ids[rxp]; 3451 fs->rx_queue = rxq + j; 3452 fs->tx_port = fwd_ports_ids[txp]; 3453 fs->tx_queue = txq + j % nb_tx_queue; 3454 fs->peer_addr = fs->tx_port; 3455 fs->retry_enabled = retry_enabled; 3456 } 3457 fwd_lcores[lc_id]->stream_nb += 3458 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3459 } 3460 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3461 3462 tc++; 3463 if (tc < rxp_dcb_info.nb_tcs) 3464 continue; 3465 /* Restart from TC 0 on next RX port */ 3466 tc = 0; 3467 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3468 rxp = (portid_t) 3469 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3470 else 3471 rxp++; 3472 if (rxp >= nb_fwd_ports) 3473 return; 3474 /* get the dcb information on the next RX and TX ports */ 3475 if ((rxp & 0x1) == 0) 3476 txp = (portid_t) (rxp + 1); 3477 else 3478 txp = (portid_t) (rxp - 1); 3479 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3480 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3481 } 3482 } 3483 3484 static void 3485 icmp_echo_config_setup(void) 3486 { 3487 portid_t rxp; 3488 queueid_t rxq; 3489 lcoreid_t lc_id; 3490 uint16_t sm_id; 3491 3492 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3493 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3494 (nb_txq * nb_fwd_ports); 3495 else 3496 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3497 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3498 cur_fwd_config.nb_fwd_streams = 3499 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3500 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3501 cur_fwd_config.nb_fwd_lcores = 3502 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3503 if (verbose_level > 0) { 3504 printf("%s fwd_cores=%d fwd_ports=%d
fwd_streams=%d\n", 3506 __FUNCTION__, 3507 cur_fwd_config.nb_fwd_lcores, 3508 cur_fwd_config.nb_fwd_ports, 3509 cur_fwd_config.nb_fwd_streams); 3510 } 3511 3512 /* reinitialize forwarding streams */ 3513 init_fwd_streams(); 3514 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3515 rxp = 0; rxq = 0; 3516 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3517 if (verbose_level > 0) 3518 printf(" core=%d: \n", lc_id); 3519 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3520 struct fwd_stream *fs; 3521 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3522 fs->rx_port = fwd_ports_ids[rxp]; 3523 fs->rx_queue = rxq; 3524 fs->tx_port = fs->rx_port; 3525 fs->tx_queue = rxq; 3526 fs->peer_addr = fs->tx_port; 3527 fs->retry_enabled = retry_enabled; 3528 if (verbose_level > 0) 3529 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3530 sm_id, fs->rx_port, fs->rx_queue, 3531 fs->tx_queue); 3532 rxq = (queueid_t) (rxq + 1); 3533 if (rxq == nb_rxq) { 3534 rxq = 0; 3535 rxp = (portid_t) (rxp + 1); 3536 } 3537 } 3538 } 3539 } 3540 3541 void 3542 fwd_config_setup(void) 3543 { 3544 struct rte_port *port; 3545 portid_t pt_id; 3546 unsigned int i; 3547 3548 cur_fwd_config.fwd_eng = cur_fwd_eng; 3549 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3550 icmp_echo_config_setup(); 3551 return; 3552 } 3553 3554 if ((nb_rxq > 1) && (nb_txq > 1)) { 3555 if (dcb_config) { 3556 for (i = 0; i < nb_fwd_ports; i++) { 3557 pt_id = fwd_ports_ids[i]; 3558 port = &ports[pt_id]; 3559 if (!port->dcb_flag) { 3560 fprintf(stderr, 3561 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 3562 return; 3563 } 3564 } 3565 if (nb_fwd_lcores == 1) { 3566 fprintf(stderr, 3567 "In DCB mode, the number of forwarding cores should be larger than 1.\n"); 3568 return; 3569 } 3570 3571 dcb_fwd_config_setup(); 3572 } else 3573 rss_fwd_config_setup(); 3574 } 3575 else 3576 simple_fwd_config_setup(); 3577 } 3578 3579 static const char * 3580 mp_alloc_to_str(uint8_t mode) 3581 { 3582 switch (mode) { 3583 case MP_ALLOC_NATIVE: 3584 return "native"; 3585 case MP_ALLOC_ANON: 3586 return "anon"; 3587 case MP_ALLOC_XMEM: 3588 return "xmem"; 3589 case MP_ALLOC_XMEM_HUGE: 3590 return "xmemhuge"; 3591 case MP_ALLOC_XBUF: 3592 return "xbuf"; 3593 default: 3594 return "invalid"; 3595 } 3596 } 3597 3598 void 3599 pkt_fwd_config_display(struct fwd_config *cfg) 3600 { 3601 struct fwd_stream *fs; 3602 lcoreid_t lc_id; 3603 streamid_t sm_id; 3604 3605 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3606 "NUMA support %s, MP allocation mode: %s\n", 3607 cfg->fwd_eng->fwd_mode_name, 3608 retry_enabled == 0 ? "" : " with retry", 3609 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3610 numa_support == 1 ?
"enabled" : "disabled", 3611 mp_alloc_to_str(mp_alloc_type)); 3612 3613 if (retry_enabled) 3614 printf("TX retry num: %u, delay between TX retries: %uus\n", 3615 burst_tx_retry_num, burst_tx_delay_time); 3616 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3617 printf("Logical Core %u (socket %u) forwards packets on " 3618 "%d streams:", 3619 fwd_lcores_cpuids[lc_id], 3620 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3621 fwd_lcores[lc_id]->stream_nb); 3622 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3623 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3624 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3625 "P=%d/Q=%d (socket %u) ", 3626 fs->rx_port, fs->rx_queue, 3627 ports[fs->rx_port].socket_id, 3628 fs->tx_port, fs->tx_queue, 3629 ports[fs->tx_port].socket_id); 3630 print_ethaddr("peer=", 3631 &peer_eth_addrs[fs->peer_addr]); 3632 } 3633 printf("\n"); 3634 } 3635 printf("\n"); 3636 } 3637 3638 void 3639 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3640 { 3641 struct rte_ether_addr new_peer_addr; 3642 if (!rte_eth_dev_is_valid_port(port_id)) { 3643 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 3644 return; 3645 } 3646 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3647 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 3648 peer_addr); 3649 return; 3650 } 3651 peer_eth_addrs[port_id] = new_peer_addr; 3652 } 3653 3654 int 3655 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3656 { 3657 unsigned int i; 3658 unsigned int lcore_cpuid; 3659 int record_now; 3660 3661 record_now = 0; 3662 again: 3663 for (i = 0; i < nb_lc; i++) { 3664 lcore_cpuid = lcorelist[i]; 3665 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3666 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 3667 return -1; 3668 } 3669 if (lcore_cpuid == rte_get_main_lcore()) { 3670 fprintf(stderr, 3671 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 3672 lcore_cpuid); 3673 return -1; 3674 } 3675 if (record_now) 3676 fwd_lcores_cpuids[i] = lcore_cpuid; 3677 } 3678 if (record_now == 0) { 3679 record_now = 1; 3680 goto again; 3681 } 3682 nb_cfg_lcores = (lcoreid_t) nb_lc; 3683 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3684 printf("previous number of forwarding cores %u - changed to " 3685 "number of configured cores %u\n", 3686 (unsigned int) nb_fwd_lcores, nb_lc); 3687 nb_fwd_lcores = (lcoreid_t) nb_lc; 3688 } 3689 3690 return 0; 3691 } 3692 3693 int 3694 set_fwd_lcores_mask(uint64_t lcoremask) 3695 { 3696 unsigned int lcorelist[64]; 3697 unsigned int nb_lc; 3698 unsigned int i; 3699 3700 if (lcoremask == 0) { 3701 fprintf(stderr, "Invalid NULL mask of cores\n"); 3702 return -1; 3703 } 3704 nb_lc = 0; 3705 for (i = 0; i < 64; i++) { 3706 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3707 continue; 3708 lcorelist[nb_lc++] = i; 3709 } 3710 return set_fwd_lcores_list(lcorelist, nb_lc); 3711 } 3712 3713 void 3714 set_fwd_lcores_number(uint16_t nb_lc) 3715 { 3716 if (test_done == 0) { 3717 fprintf(stderr, "Please stop forwarding first\n"); 3718 return; 3719 } 3720 if (nb_lc > nb_cfg_lcores) { 3721 fprintf(stderr, 3722 "nb fwd cores %u > %u (max. 
number of configured lcores) - ignored\n", 3723 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3724 return; 3725 } 3726 nb_fwd_lcores = (lcoreid_t) nb_lc; 3727 printf("Number of forwarding cores set to %u\n", 3728 (unsigned int) nb_fwd_lcores); 3729 } 3730 3731 void 3732 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3733 { 3734 unsigned int i; 3735 portid_t port_id; 3736 int record_now; 3737 3738 record_now = 0; 3739 again: 3740 for (i = 0; i < nb_pt; i++) { 3741 port_id = (portid_t) portlist[i]; 3742 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3743 return; 3744 if (record_now) 3745 fwd_ports_ids[i] = port_id; 3746 } 3747 if (record_now == 0) { 3748 record_now = 1; 3749 goto again; 3750 } 3751 nb_cfg_ports = (portid_t) nb_pt; 3752 if (nb_fwd_ports != (portid_t) nb_pt) { 3753 printf("previous number of forwarding ports %u - changed to " 3754 "number of configured ports %u\n", 3755 (unsigned int) nb_fwd_ports, nb_pt); 3756 nb_fwd_ports = (portid_t) nb_pt; 3757 } 3758 } 3759 3760 /** 3761 * Parse the user input and obtain the list of forwarding ports 3762 * 3763 * @param[in] list 3764 * String containing the user input. The user can specify the ports 3765 * in these formats: 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3766 * For example, if the user wants to use all the available 3767 * 4 ports in the system, then the input can be 0-3 or 0,1,2,3. 3768 * If the user wants to use only the ports 1,2 then the input 3769 * is 1,2. 3770 * Valid separator characters are '-' and ','. 3771 * @param[out] values 3772 * This array will be filled with a list of port IDs 3773 * based on the user input. 3774 * Note that duplicate entries are discarded; only the first 3775 * count entries in this array are port IDs and all the rest 3776 * keep their default values. 3777 * @param[in] maxsize 3778 * This parameter denotes two things: 3779 * 1) Number of elements in the values array 3780 * 2) Maximum value of each element in the values array 3781 * @return 3782 * On success, returns total count of parsed port IDs 3783 * On failure, returns 0 3784 */ 3785 static unsigned int 3786 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3787 { 3788 unsigned int count = 0; 3789 char *end = NULL; 3790 int min, max; 3791 int value, i; 3792 unsigned int marked[maxsize]; 3793 3794 if (list == NULL || values == NULL) 3795 return 0; 3796 3797 for (i = 0; i < (int)maxsize; i++) 3798 marked[i] = 0; 3799 3800 min = INT_MAX; 3801 3802 do { 3803 /* Remove blank spaces, if any */ 3804 while (isblank(*list)) 3805 list++; 3806 if (*list == '\0') 3807 break; 3808 errno = 0; 3809 value = strtol(list, &end, 10); 3810 if (errno || end == NULL) 3811 return 0; 3812 if (value < 0 || value >= (int)maxsize) 3813 return 0; 3814 while (isblank(*end)) 3815 end++; 3816 if (*end == '-' && min == INT_MAX) { 3817 min = value; 3818 } else if ((*end == ',') || (*end == '\0')) { 3819 max = value; 3820 if (min == INT_MAX) 3821 min = value; 3822 for (i = min; i <= max; i++) { 3823 if (count < maxsize) { 3824 if (marked[i]) 3825 continue; 3826 values[count] = i; 3827 marked[i] = 1; 3828 count++; 3829 } 3830 } 3831 min = INT_MAX; 3832 } else 3833 return 0; 3834 list = end + 1; 3835 } while (*end != '\0'); 3836 3837 return count; 3838 } 3839 3840 void 3841 parse_fwd_portlist(const char *portlist) 3842 { 3843 unsigned int portcount; 3844 unsigned int portindex[RTE_MAX_ETHPORTS]; 3845 unsigned int i, valid_port_count = 0; 3846 3847 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3848 if (!portcount) 3849
rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3850 3851 /* 3852 * Here we verify the validity of the ports 3853 * and thereby calculate the total number of 3854 * valid ports 3855 */ 3856 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3857 if (rte_eth_dev_is_valid_port(portindex[i])) { 3858 portindex[valid_port_count] = portindex[i]; 3859 valid_port_count++; 3860 } 3861 } 3862 3863 set_fwd_ports_list(portindex, valid_port_count); 3864 } 3865 3866 void 3867 set_fwd_ports_mask(uint64_t portmask) 3868 { 3869 unsigned int portlist[64]; 3870 unsigned int nb_pt; 3871 unsigned int i; 3872 3873 if (portmask == 0) { 3874 fprintf(stderr, "Invalid empty port mask\n"); 3875 return; 3876 } 3877 nb_pt = 0; 3878 RTE_ETH_FOREACH_DEV(i) { 3879 if (! ((uint64_t)(1ULL << i) & portmask)) 3880 continue; 3881 portlist[nb_pt++] = i; 3882 } 3883 set_fwd_ports_list(portlist, nb_pt); 3884 } 3885 3886 void 3887 set_fwd_ports_number(uint16_t nb_pt) 3888 { 3889 if (nb_pt > nb_cfg_ports) { 3890 fprintf(stderr, 3891 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 3892 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3893 return; 3894 } 3895 nb_fwd_ports = (portid_t) nb_pt; 3896 printf("Number of forwarding ports set to %u\n", 3897 (unsigned int) nb_fwd_ports); 3898 } 3899 3900 int 3901 port_is_forwarding(portid_t port_id) 3902 { 3903 unsigned int i; 3904 3905 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3906 return -1; 3907 3908 for (i = 0; i < nb_fwd_ports; i++) { 3909 if (fwd_ports_ids[i] == port_id) 3910 return 1; 3911 } 3912 3913 return 0; 3914 } 3915 3916 void 3917 set_nb_pkt_per_burst(uint16_t nb) 3918 { 3919 if (nb > MAX_PKT_BURST) { 3920 fprintf(stderr, 3921 "nb pkt per burst: %u > %u (maximum packets per burst) - ignored\n", 3922 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3923 return; 3924 } 3925 nb_pkt_per_burst = nb; 3926 printf("Number of packets per burst set to %u\n", 3927 (unsigned int) nb_pkt_per_burst); 3928 } 3929 3930 static const char * 3931 tx_split_get_name(enum tx_pkt_split split) 3932 { 3933 uint32_t i; 3934 3935 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3936 if (tx_split_name[i].split == split) 3937 return tx_split_name[i].name; 3938 } 3939 return NULL; 3940 } 3941 3942 void 3943 set_tx_pkt_split(const char *name) 3944 { 3945 uint32_t i; 3946 3947 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3948 if (strcmp(tx_split_name[i].name, name) == 0) { 3949 tx_pkt_split = tx_split_name[i].split; 3950 return; 3951 } 3952 } 3953 fprintf(stderr, "unknown value: \"%s\"\n", name); 3954 } 3955 3956 int 3957 parse_fec_mode(const char *name, uint32_t *fec_capa) 3958 { 3959 uint8_t i; 3960 3961 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3962 if (strcmp(fec_mode_name[i].name, name) == 0) { 3963 *fec_capa = 3964 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3965 return 0; 3966 } 3967 } 3968 return -1; 3969 } 3970 3971 void 3972 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3973 { 3974 unsigned int i, j; 3975 3976 printf("FEC capabilities:\n"); 3977 3978 for (i = 0; i < num; i++) { 3979 printf("%s : ", 3980 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3981 3982 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 3983 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3984 speed_fec_capa[i].capa) 3985 printf("%s ", fec_mode_name[j].name); 3986 } 3987 printf("\n"); 3988 } 3989 } 3990 3991 void 3992 show_rx_pkt_offsets(void) 3993 { 3994 uint32_t i, n; 3995 3996 n = rx_pkt_nb_offs; 3997 printf("Number of offsets: %u\n", n); 3998 if (n) {
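/* offsets are printed comma-separated; the last entry has no trailing comma */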
3991 void 3992 show_rx_pkt_offsets(void) 3993 { 3994 uint32_t i, n; 3995 3996 n = rx_pkt_nb_offs; 3997 printf("Number of offsets: %u\n", n); 3998 if (n) { 3999 printf("Segment offsets: "); 4000 for (i = 0; i != n - 1; i++) 4001 printf("%hu,", rx_pkt_seg_offsets[i]); 4002 printf("%hu\n", rx_pkt_seg_offsets[i]); 4003 } 4004 } 4005 4006 void 4007 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 4008 { 4009 unsigned int i; 4010 4011 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 4012 printf("nb segments per RX packet=%u >= " 4013 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 4014 return; 4015 } 4016 4017 /* 4018 * No extra check here, the segment offsets will be checked by the PMD 4019 * in the extended queue setup. 4020 */ 4021 for (i = 0; i < nb_offs; i++) { 4022 if (seg_offsets[i] >= UINT16_MAX) { 4023 printf("offset[%u]=%u >= UINT16_MAX - give up\n", 4024 i, seg_offsets[i]); 4025 return; 4026 } 4027 } 4028 4029 for (i = 0; i < nb_offs; i++) 4030 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 4031 4032 rx_pkt_nb_offs = (uint8_t) nb_offs; 4033 } 4034 4035 void 4036 show_rx_pkt_segments(void) 4037 { 4038 uint32_t i, n; 4039 4040 n = rx_pkt_nb_segs; 4041 printf("Number of segments: %u\n", n); 4042 if (n) { 4043 printf("Segment sizes: "); 4044 for (i = 0; i != n - 1; i++) 4045 printf("%hu,", rx_pkt_seg_lengths[i]); 4046 printf("%hu\n", rx_pkt_seg_lengths[i]); 4047 } 4048 } 4049 4050 void 4051 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4052 { 4053 unsigned int i; 4054 4055 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 4056 printf("nb segments per RX packet=%u >= " 4057 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 4058 return; 4059 } 4060 4061 /* 4062 * No extra check here, the segment lengths will be checked by the PMD 4063 * in the extended queue setup. 4064 */ 4065 for (i = 0; i < nb_segs; i++) { 4066 if (seg_lengths[i] >= UINT16_MAX) { 4067 printf("length[%u]=%u >= UINT16_MAX - give up\n", 4068 i, seg_lengths[i]); 4069 return; 4070 } 4071 } 4072 4073 for (i = 0; i < nb_segs; i++) 4074 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4075 4076 rx_pkt_nb_segs = (uint8_t) nb_segs; 4077 } 4078 4079 void 4080 show_tx_pkt_segments(void) 4081 { 4082 uint32_t i, n; 4083 const char *split; 4084 4085 n = tx_pkt_nb_segs; 4086 split = tx_split_get_name(tx_pkt_split); 4087 4088 printf("Number of segments: %u\n", n); 4089 printf("Segment sizes: "); 4090 for (i = 0; i != n - 1; i++) 4091 printf("%hu,", tx_pkt_seg_lengths[i]); 4092 printf("%hu\n", tx_pkt_seg_lengths[i]); 4093 printf("Split packet: %s\n", split); 4094 } 4095 4096 static bool 4097 nb_segs_is_invalid(unsigned int nb_segs) 4098 { 4099 uint16_t ring_size; 4100 uint16_t queue_id; 4101 uint16_t port_id; 4102 int ret; 4103 4104 RTE_ETH_FOREACH_DEV(port_id) { 4105 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 4106 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 4107 if (ret) { 4108 /* Port may not be initialized yet, can't say 4109 * the port is invalid at this stage. 4110 */ 4111 continue; 4112 } 4113 if (ring_size < nb_segs) { 4114 printf("nb segments per TX packet=%u > TX " 4115 "queue(%u) ring_size=%u - txpkts ignored\n", 4116 nb_segs, queue_id, ring_size); 4117 return true; 4118 } 4119 } 4120 } 4121 4122 return false; 4123 } 4124
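/* Illustrative sketch (hypothetical values, not part of the testpmd sources): splitting received packets into a 64-byte and a 1452-byte segment: unsigned int lens[] = { 64, 1452 }; set_rx_pkt_segments(lens, 2); afterwards rx_pkt_seg_lengths[0..1] == {64, 1452} and rx_pkt_nb_segs == 2. */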
4125 void 4126 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4127 { 4128 uint16_t tx_pkt_len; 4129 unsigned int i; 4130 4131 /* 4132 * For single-segment settings a failed check is ignored: 4133 * sending single-segment packets is a very basic capability, 4134 * so assume it is always supported. 4135 */ 4136 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 4137 fprintf(stderr, 4138 "Tx segment count (%u) is not supported - txpkts ignored\n", 4139 nb_segs); 4140 return; 4141 } 4142 4143 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 4144 fprintf(stderr, 4145 "Tx segment count (%u) is bigger than the max number of segments (%u)\n", 4146 nb_segs, RTE_MAX_SEGS_PER_PKT); 4147 return; 4148 } 4149 4150 /* 4151 * Check that each segment length does not exceed 4152 * the mbuf data size. 4153 * Check also that the total packet length is greater than or equal to 4154 * the size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 4155 * 20 + 8). 4156 */ 4157 tx_pkt_len = 0; 4158 for (i = 0; i < nb_segs; i++) { 4159 if (seg_lengths[i] > mbuf_data_size[0]) { 4160 fprintf(stderr, 4161 "length[%u]=%u > mbuf_data_size=%u - give up\n", 4162 i, seg_lengths[i], mbuf_data_size[0]); 4163 return; 4164 } 4165 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 4166 } 4167 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 4168 fprintf(stderr, "total packet length=%u < %d - give up\n", 4169 (unsigned) tx_pkt_len, 4170 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 4171 return; 4172 } 4173 4174 for (i = 0; i < nb_segs; i++) 4175 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4176 4177 tx_pkt_length = tx_pkt_len; 4178 tx_pkt_nb_segs = (uint8_t) nb_segs; 4179 } 4180 4181 void 4182 show_tx_pkt_times(void) 4183 { 4184 printf("Interburst gap: %u\n", tx_pkt_times_inter); 4185 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 4186 } 4187 4188 void 4189 set_tx_pkt_times(unsigned int *tx_times) 4190 { 4191 tx_pkt_times_inter = tx_times[0]; 4192 tx_pkt_times_intra = tx_times[1]; 4193 } 4194 4195 void 4196 setup_gro(const char *onoff, portid_t port_id) 4197 { 4198 if (!rte_eth_dev_is_valid_port(port_id)) { 4199 fprintf(stderr, "Invalid port id %u\n", port_id); 4200 return; 4201 } 4202 if (test_done == 0) { 4203 fprintf(stderr, 4204 "Before enabling/disabling GRO, please stop forwarding first\n"); 4205 return; 4206 } 4207 if (strcmp(onoff, "on") == 0) { 4208 if (gro_ports[port_id].enable != 0) { 4209 fprintf(stderr, 4210 "GRO is already enabled on port %u. Please disable it first\n", 4211 port_id); 4212 return; 4213 } 4214 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4215 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 4216 gro_ports[port_id].param.max_flow_num = 4217 GRO_DEFAULT_FLOW_NUM; 4218 gro_ports[port_id].param.max_item_per_flow = 4219 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 4220 } 4221 gro_ports[port_id].enable = 1; 4222 } else { 4223 if (gro_ports[port_id].enable == 0) { 4224 fprintf(stderr, "GRO is already disabled on port %u\n", port_id); 4225 return; 4226 } 4227 gro_ports[port_id].enable = 0; 4228 } 4229 } 4230
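/* Illustrative sketch (hypothetical call, not part of the testpmd sources): setup_gro("on", 0); with gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES, port 0 is left with the defaults: RTE_GRO_TCP_IPV4, GRO_DEFAULT_FLOW_NUM flows and GRO_DEFAULT_ITEM_NUM_PER_FLOW items per flow. */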
4231 void 4232 setup_gro_flush_cycles(uint8_t cycles) 4233 { 4234 if (test_done == 0) { 4235 fprintf(stderr, 4236 "Before changing the GRO flush interval, please stop forwarding first.\n"); 4237 return; 4238 } 4239 4240 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 4241 GRO_DEFAULT_FLUSH_CYCLES) { 4242 fprintf(stderr, 4243 "The flushing cycle must be in the range of 1 to %u. Reverting to the default value %u.\n", 4244 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 4245 cycles = GRO_DEFAULT_FLUSH_CYCLES; 4246 } 4247 4248 gro_flush_cycles = cycles; 4249 } 4250 4251 void 4252 show_gro(portid_t port_id) 4253 { 4254 struct rte_gro_param *param; 4255 uint32_t max_pkts_num; 4256 4257 if (!rte_eth_dev_is_valid_port(port_id)) { 4258 fprintf(stderr, "Invalid port id %u.\n", port_id); 4259 return; 4260 } 4261 4262 param = &gro_ports[port_id].param; 4263 if (gro_ports[port_id].enable) { 4264 printf("GRO type: TCP/IPv4\n"); 4265 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4266 max_pkts_num = param->max_flow_num * 4267 param->max_item_per_flow; 4268 } else 4269 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 4270 printf("Max number of packets to perform GRO: %u\n", 4271 max_pkts_num); 4272 printf("Flushing cycles: %u\n", gro_flush_cycles); 4273 } else 4274 printf("GRO is not enabled on port %u.\n", port_id); 4275 } 4276 4277 void 4278 setup_gso(const char *mode, portid_t port_id) 4279 { 4280 if (!rte_eth_dev_is_valid_port(port_id)) { 4281 fprintf(stderr, "Invalid port id %u\n", port_id); 4282 return; 4283 } 4284 if (strcmp(mode, "on") == 0) { 4285 if (test_done == 0) { 4286 fprintf(stderr, 4287 "Before enabling GSO, please stop forwarding first\n"); 4288 return; 4289 } 4290 gso_ports[port_id].enable = 1; 4291 } else if (strcmp(mode, "off") == 0) { 4292 if (test_done == 0) { 4293 fprintf(stderr, 4294 "Before disabling GSO, please stop forwarding first\n"); 4295 return; 4296 } 4297 gso_ports[port_id].enable = 0; 4298 } 4299 } 4300 4301 char* 4302 list_pkt_forwarding_modes(void) 4303 { 4304 static char fwd_modes[128] = ""; 4305 const char *separator = "|"; 4306 struct fwd_engine *fwd_eng; 4307 unsigned i = 0; 4308 4309 if (strlen(fwd_modes) == 0) { 4310 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4311 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4312 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4313 strncat(fwd_modes, separator, 4314 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4315 } 4316 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4317 } 4318 4319 return fwd_modes; 4320 } 4321 4322 char* 4323 list_pkt_forwarding_retry_modes(void) 4324 { 4325 static char fwd_modes[128] = ""; 4326 const char *separator = "|"; 4327 struct fwd_engine *fwd_eng; 4328 unsigned i = 0; 4329 4330 if (strlen(fwd_modes) == 0) { 4331 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4332 if (fwd_eng == &rx_only_engine) 4333 continue; 4334 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4335 sizeof(fwd_modes) - 4336 strlen(fwd_modes) - 1); 4337 strncat(fwd_modes, separator, 4338 sizeof(fwd_modes) - 4339 strlen(fwd_modes) - 1); 4340 } 4341 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4342 } 4343 4344 return fwd_modes; 4345 } 4346 4347 void 4348 set_pkt_forwarding_mode(const char *fwd_mode_name) 4349 { 4350 struct fwd_engine *fwd_eng; 4351 unsigned i; 4352 4353 i = 0; 4354 while ((fwd_eng = fwd_engines[i]) != NULL) { 4355 if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 4356 printf("Set %s packet forwarding mode%s\n", 4357 fwd_mode_name, 4358 retry_enabled == 0 ?
"" : " with retry"); 4359 cur_fwd_eng = fwd_eng; 4360 return; 4361 } 4362 i++; 4363 } 4364 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 4365 } 4366 4367 void 4368 add_rx_dump_callbacks(portid_t portid) 4369 { 4370 struct rte_eth_dev_info dev_info; 4371 uint16_t queue; 4372 int ret; 4373 4374 if (port_id_is_invalid(portid, ENABLED_WARN)) 4375 return; 4376 4377 ret = eth_dev_info_get_print_err(portid, &dev_info); 4378 if (ret != 0) 4379 return; 4380 4381 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4382 if (!ports[portid].rx_dump_cb[queue]) 4383 ports[portid].rx_dump_cb[queue] = 4384 rte_eth_add_rx_callback(portid, queue, 4385 dump_rx_pkts, NULL); 4386 } 4387 4388 void 4389 add_tx_dump_callbacks(portid_t portid) 4390 { 4391 struct rte_eth_dev_info dev_info; 4392 uint16_t queue; 4393 int ret; 4394 4395 if (port_id_is_invalid(portid, ENABLED_WARN)) 4396 return; 4397 4398 ret = eth_dev_info_get_print_err(portid, &dev_info); 4399 if (ret != 0) 4400 return; 4401 4402 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4403 if (!ports[portid].tx_dump_cb[queue]) 4404 ports[portid].tx_dump_cb[queue] = 4405 rte_eth_add_tx_callback(portid, queue, 4406 dump_tx_pkts, NULL); 4407 } 4408 4409 void 4410 remove_rx_dump_callbacks(portid_t portid) 4411 { 4412 struct rte_eth_dev_info dev_info; 4413 uint16_t queue; 4414 int ret; 4415 4416 if (port_id_is_invalid(portid, ENABLED_WARN)) 4417 return; 4418 4419 ret = eth_dev_info_get_print_err(portid, &dev_info); 4420 if (ret != 0) 4421 return; 4422 4423 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4424 if (ports[portid].rx_dump_cb[queue]) { 4425 rte_eth_remove_rx_callback(portid, queue, 4426 ports[portid].rx_dump_cb[queue]); 4427 ports[portid].rx_dump_cb[queue] = NULL; 4428 } 4429 } 4430 4431 void 4432 remove_tx_dump_callbacks(portid_t portid) 4433 { 4434 struct rte_eth_dev_info dev_info; 4435 uint16_t queue; 4436 int ret; 4437 4438 if (port_id_is_invalid(portid, ENABLED_WARN)) 4439 return; 4440 4441 ret = eth_dev_info_get_print_err(portid, &dev_info); 4442 if (ret != 0) 4443 return; 4444 4445 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4446 if (ports[portid].tx_dump_cb[queue]) { 4447 rte_eth_remove_tx_callback(portid, queue, 4448 ports[portid].tx_dump_cb[queue]); 4449 ports[portid].tx_dump_cb[queue] = NULL; 4450 } 4451 } 4452 4453 void 4454 configure_rxtx_dump_callbacks(uint16_t verbose) 4455 { 4456 portid_t portid; 4457 4458 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4459 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4460 return; 4461 #endif 4462 4463 RTE_ETH_FOREACH_DEV(portid) 4464 { 4465 if (verbose == 1 || verbose > 2) 4466 add_rx_dump_callbacks(portid); 4467 else 4468 remove_rx_dump_callbacks(portid); 4469 if (verbose >= 2) 4470 add_tx_dump_callbacks(portid); 4471 else 4472 remove_tx_dump_callbacks(portid); 4473 } 4474 } 4475 4476 void 4477 set_verbose_level(uint16_t vb_level) 4478 { 4479 printf("Change verbose level from %u to %u\n", 4480 (unsigned int) verbose_level, (unsigned int) vb_level); 4481 verbose_level = vb_level; 4482 configure_rxtx_dump_callbacks(verbose_level); 4483 } 4484 4485 void 4486 vlan_extend_set(portid_t port_id, int on) 4487 { 4488 int diag; 4489 int vlan_offload; 4490 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4491 4492 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4493 return; 4494 4495 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4496 4497 if (on) { 4498 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4499 port_rx_offloads |= 
RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4500 } else { 4501 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 4502 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4503 } 4504 4505 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4506 if (diag < 0) { 4507 fprintf(stderr, 4508 "%s(port_id=%d, on=%d) failed diag=%d\n", 4509 __func__, port_id, on, diag); 4510 return; 4511 } 4512 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4513 } 4514 4515 void 4516 rx_vlan_strip_set(portid_t port_id, int on) 4517 { 4518 int diag; 4519 int vlan_offload; 4520 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4521 4522 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4523 return; 4524 4525 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4526 4527 if (on) { 4528 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4529 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4530 } else { 4531 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 4532 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4533 } 4534 4535 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4536 if (diag < 0) { 4537 fprintf(stderr, 4538 "%s(port_id=%d, on=%d) failed diag=%d\n", 4539 __func__, port_id, on, diag); 4540 return; 4541 } 4542 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4543 } 4544 4545 void 4546 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 4547 { 4548 int diag; 4549 4550 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4551 return; 4552 4553 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 4554 if (diag < 0) 4555 fprintf(stderr, 4556 "%s(port_id=%d, queue_id=%d, on=%d) failed diag=%d\n", 4557 __func__, port_id, queue_id, on, diag); 4558 } 4559 4560 void 4561 rx_vlan_filter_set(portid_t port_id, int on) 4562 { 4563 int diag; 4564 int vlan_offload; 4565 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4566 4567 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4568 return; 4569 4570 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4571 4572 if (on) { 4573 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4574 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4575 } else { 4576 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 4577 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4578 } 4579 4580 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4581 if (diag < 0) { 4582 fprintf(stderr, 4583 "%s(port_id=%d, on=%d) failed diag=%d\n", 4584 __func__, port_id, on, diag); 4585 return; 4586 } 4587 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4588 } 4589 4590 void 4591 rx_vlan_qinq_strip_set(portid_t port_id, int on) 4592 { 4593 int diag; 4594 int vlan_offload; 4595 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4596 4597 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4598 return; 4599 4600 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4601 4602 if (on) { 4603 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4604 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4605 } else { 4606 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 4607 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4608 } 4609 4610 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4611 if (diag < 0) { 4612 fprintf(stderr, "%s(port_id=%d, on=%d) failed diag=%d\n", 4613 __func__, port_id, on, diag); 4614 return; 4615 } 4616 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4617 } 4618
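/* Illustrative sketch (not part of the testpmd sources): the helpers above share one pattern; e.g. rx_vlan_strip_set(0, 1); after the call, rte_eth_dev_get_vlan_offload(0) reports RTE_ETH_VLAN_STRIP_OFFLOAD and the cached ports[0].dev_conf.rxmode.offloads contains RTE_ETH_RX_OFFLOAD_VLAN_STRIP, keeping the per-port config mirror in sync with the device. */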
4619 int 4620 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 4621 { 4622 int diag; 4623 4624 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4625 return 1; 4626 if (vlan_id_is_invalid(vlan_id)) 4627 return 1; 4628 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 4629 if (diag == 0) 4630 return 0; 4631 fprintf(stderr, 4632 "rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed diag=%d\n", 4633 port_id, vlan_id, on, diag); 4634 return -1; 4635 } 4636 4637 void 4638 rx_vlan_all_filter_set(portid_t port_id, int on) 4639 { 4640 uint16_t vlan_id; 4641 4642 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4643 return; 4644 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 4645 if (rx_vft_set(port_id, vlan_id, on)) 4646 break; 4647 } 4648 } 4649 4650 void 4651 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 4652 { 4653 int diag; 4654 4655 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4656 return; 4657 4658 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 4659 if (diag == 0) 4660 return; 4661 4662 fprintf(stderr, 4663 "%s(port_id=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 4664 __func__, port_id, vlan_type, tp_id, diag); 4665 } 4666 4667 void 4668 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 4669 { 4670 struct rte_eth_dev_info dev_info; 4671 int ret; 4672 4673 if (vlan_id_is_invalid(vlan_id)) 4674 return; 4675 4676 if (ports[port_id].dev_conf.txmode.offloads & 4677 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 4678 fprintf(stderr, "Error: cannot set VLAN insert while QinQ insert is enabled.\n"); 4679 return; 4680 } 4681 4682 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4683 if (ret != 0) 4684 return; 4685 4686 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 4687 fprintf(stderr, 4688 "Error: VLAN insert is not supported by port %d\n", 4689 port_id); 4690 return; 4691 } 4692 4693 tx_vlan_reset(port_id); 4694 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 4695 ports[port_id].tx_vlan_id = vlan_id; 4696 } 4697 4698 void 4699 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 4700 { 4701 struct rte_eth_dev_info dev_info; 4702 int ret; 4703 4704 if (vlan_id_is_invalid(vlan_id)) 4705 return; 4706 if (vlan_id_is_invalid(vlan_id_outer)) 4707 return; 4708 4709 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4710 if (ret != 0) 4711 return; 4712 4713 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 4714 fprintf(stderr, 4715 "Error: QinQ insert is not supported by port %d\n", 4716 port_id); 4717 return; 4718 } 4719 4720 tx_vlan_reset(port_id); 4721 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 4722 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 4723 ports[port_id].tx_vlan_id = vlan_id; 4724 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 4725 } 4726 4727 void 4728 tx_vlan_reset(portid_t port_id) 4729 { 4730 ports[port_id].dev_conf.txmode.offloads &= 4731 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 4732 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 4733 ports[port_id].tx_vlan_id = 0; 4734 ports[port_id].tx_vlan_id_outer = 0; 4735 } 4736 4737 void 4738 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 4739 { 4740 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4741 return; 4742 4743 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 4744 } 4745 4746 void 4747 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 4748 { 4749 int ret; 4750 4751 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4752 return; 4753 4754 if (is_rx ?
(rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 4755 return; 4756 4757 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 4758 fprintf(stderr, "map_value not in required range 0..%d\n", 4759 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 4760 return; 4761 } 4762 4763 if (!is_rx) { /* tx */ 4764 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 4765 map_value); 4766 if (ret) { 4767 fprintf(stderr, 4768 "failed to set tx queue stats mapping.\n"); 4769 return; 4770 } 4771 } else { /* rx */ 4772 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 4773 map_value); 4774 if (ret) { 4775 fprintf(stderr, 4776 "failed to set rx queue stats mapping.\n"); 4777 return; 4778 } 4779 } 4780 } 4781 4782 void 4783 set_xstats_hide_zero(uint8_t on_off) 4784 { 4785 xstats_hide_zero = on_off; 4786 } 4787 4788 void 4789 set_record_core_cycles(uint8_t on_off) 4790 { 4791 record_core_cycles = on_off; 4792 } 4793 4794 void 4795 set_record_burst_stats(uint8_t on_off) 4796 { 4797 record_burst_stats = on_off; 4798 } 4799 4800 static char* 4801 flowtype_to_str(uint16_t flow_type) 4802 { 4803 struct flow_type_info { 4804 char str[32]; 4805 uint16_t ftype; 4806 }; 4807 4808 uint8_t i; 4809 static struct flow_type_info flowtype_str_table[] = { 4810 {"raw", RTE_ETH_FLOW_RAW}, 4811 {"ipv4", RTE_ETH_FLOW_IPV4}, 4812 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4813 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4814 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4815 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4816 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4817 {"ipv6", RTE_ETH_FLOW_IPV6}, 4818 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4819 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4820 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4821 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4822 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4823 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4824 {"port", RTE_ETH_FLOW_PORT}, 4825 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4826 {"geneve", RTE_ETH_FLOW_GENEVE}, 4827 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4828 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4829 }; 4830 4831 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4832 if (flowtype_str_table[i].ftype == flow_type) 4833 return flowtype_str_table[i].str; 4834 } 4835 4836 return NULL; 4837 } 4838 4839 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 4840 4841 static inline void 4842 print_fdir_mask(struct rte_eth_fdir_masks *mask) 4843 { 4844 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 4845 4846 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4847 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 4848 " tunnel_id: 0x%08x", 4849 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 4850 rte_be_to_cpu_32(mask->tunnel_id_mask)); 4851 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 4852 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 4853 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 4854 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 4855 4856 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 4857 rte_be_to_cpu_16(mask->src_port_mask), 4858 rte_be_to_cpu_16(mask->dst_port_mask)); 4859 4860 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4861 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 4862 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 4863 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 4864 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 4865 4866 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4867 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 4868 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 
4869 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 4870 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 4871 } 4872 4873 printf("\n"); 4874 } 4875 4876 static inline void 4877 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4878 { 4879 struct rte_eth_flex_payload_cfg *cfg; 4880 uint32_t i, j; 4881 4882 for (i = 0; i < flex_conf->nb_payloads; i++) { 4883 cfg = &flex_conf->flex_set[i]; 4884 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 4885 printf("\n RAW: "); 4886 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 4887 printf("\n L2_PAYLOAD: "); 4888 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 4889 printf("\n L3_PAYLOAD: "); 4890 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 4891 printf("\n L4_PAYLOAD: "); 4892 else 4893 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 4894 for (j = 0; j < num; j++) 4895 printf(" %-5u", cfg->src_offset[j]); 4896 } 4897 printf("\n"); 4898 } 4899 4900 static inline void 4901 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4902 { 4903 struct rte_eth_fdir_flex_mask *mask; 4904 uint32_t i, j; 4905 char *p; 4906 4907 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4908 mask = &flex_conf->flex_mask[i]; 4909 p = flowtype_to_str(mask->flow_type); 4910 printf("\n %s:\t", p ? p : "unknown"); 4911 for (j = 0; j < num; j++) 4912 printf(" %02x", mask->mask[j]); 4913 } 4914 printf("\n"); 4915 } 4916 4917 static inline void 4918 print_fdir_flow_type(uint32_t flow_types_mask) 4919 { 4920 int i; 4921 char *p; 4922 4923 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4924 if (!(flow_types_mask & (1 << i))) 4925 continue; 4926 p = flowtype_to_str(i); 4927 if (p) 4928 printf(" %s", p); 4929 else 4930 printf(" unknown"); 4931 } 4932 printf("\n"); 4933 } 4934 4935 static int 4936 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4937 struct rte_eth_fdir_stats *fdir_stat) 4938 { 4939 int ret = -ENOTSUP; 4940 4941 #ifdef RTE_NET_I40E 4942 if (ret == -ENOTSUP) { 4943 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4944 if (!ret) 4945 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4946 } 4947 #endif 4948 #ifdef RTE_NET_IXGBE 4949 if (ret == -ENOTSUP) { 4950 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4951 if (!ret) 4952 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4953 } 4954 #endif 4955 switch (ret) { 4956 case 0: 4957 break; 4958 case -ENOTSUP: 4959 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 4960 port_id); 4961 break; 4962 default: 4963 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 4964 break; 4965 } 4966 return ret; 4967 } 4968 4969 void 4970 fdir_get_infos(portid_t port_id) 4971 { 4972 struct rte_eth_fdir_stats fdir_stat; 4973 struct rte_eth_fdir_info fdir_info; 4974 4975 static const char *fdir_stats_border = "########################"; 4976 4977 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4978 return; 4979 4980 memset(&fdir_info, 0, sizeof(fdir_info)); 4981 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4982 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4983 return; 4984 4985 printf("\n %s FDIR infos for port %-2d %s\n", 4986 fdir_stats_border, port_id, fdir_stats_border); 4987 printf(" MODE: "); 4988 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4989 printf(" PERFECT\n"); 4990 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4991 printf(" PERFECT-MAC-VLAN\n"); 4992 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4993 printf(" PERFECT-TUNNEL\n"); 4994 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4995 printf(" 
SIGNATURE\n"); 4996 else 4997 printf(" DISABLED\n"); 4998 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4999 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 5000 printf(" SUPPORTED FLOW TYPE: "); 5001 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 5002 } 5003 printf(" FLEX PAYLOAD INFO:\n"); 5004 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 5005 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 5006 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 5007 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 5008 fdir_info.flex_payload_unit, 5009 fdir_info.max_flex_payload_segment_num, 5010 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 5011 printf(" MASK: "); 5012 print_fdir_mask(&fdir_info.mask); 5013 if (fdir_info.flex_conf.nb_payloads > 0) { 5014 printf(" FLEX PAYLOAD SRC OFFSET:"); 5015 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5016 } 5017 if (fdir_info.flex_conf.nb_flexmasks > 0) { 5018 printf(" FLEX MASK CFG:"); 5019 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5020 } 5021 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 5022 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 5023 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 5024 fdir_info.guarant_spc, fdir_info.best_spc); 5025 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 5026 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 5027 " add: %-10"PRIu64" remove: %"PRIu64"\n" 5028 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 5029 fdir_stat.collision, fdir_stat.free, 5030 fdir_stat.maxhash, fdir_stat.maxlen, 5031 fdir_stat.add, fdir_stat.remove, 5032 fdir_stat.f_add, fdir_stat.f_remove); 5033 printf(" %s############################%s\n", 5034 fdir_stats_border, fdir_stats_border); 5035 } 5036 5037 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 5038 5039 void 5040 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 5041 { 5042 struct rte_port *port; 5043 struct rte_eth_fdir_flex_conf *flex_conf; 5044 int i, idx = 0; 5045 5046 port = &ports[port_id]; 5047 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5048 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 5049 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 5050 idx = i; 5051 break; 5052 } 5053 } 5054 if (i >= RTE_ETH_FLOW_MAX) { 5055 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 5056 idx = flex_conf->nb_flexmasks; 5057 flex_conf->nb_flexmasks++; 5058 } else { 5059 fprintf(stderr, 5060 "The flex mask table is full. Cannot set flex mask for flow_type(%u).\n", 5061 cfg->flow_type); 5062 return; 5063 } 5064 } 5065 rte_memcpy(&flex_conf->flex_mask[idx], 5066 cfg, 5067 sizeof(struct rte_eth_fdir_flex_mask)); 5068 } 5069
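/* Illustrative sketch (hypothetical values, not part of the testpmd sources): fdir_set_flex_mask() overwrites the entry whose flow_type already matches, or appends a new one: struct rte_eth_fdir_flex_mask m = { .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP }; fdir_set_flex_mask(0, &m); the first call appends, and a second call with the same flow_type replaces the recorded mask in place. */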
5070 void 5071 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 5072 { 5073 struct rte_port *port; 5074 struct rte_eth_fdir_flex_conf *flex_conf; 5075 int i, idx = 0; 5076 5077 port = &ports[port_id]; 5078 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5079 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 5080 if (cfg->type == flex_conf->flex_set[i].type) { 5081 idx = i; 5082 break; 5083 } 5084 } 5085 if (i >= RTE_ETH_PAYLOAD_MAX) { 5086 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 5087 idx = flex_conf->nb_payloads; 5088 flex_conf->nb_payloads++; 5089 } else { 5090 fprintf(stderr, 5091 "The flex payload table is full. Cannot set flex payload for type(%u).\n", 5092 cfg->type); 5093 return; 5094 } 5095 } 5096 rte_memcpy(&flex_conf->flex_set[idx], 5097 cfg, 5098 sizeof(struct rte_eth_flex_payload_cfg)); 5099 5100 } 5101 5102 void 5103 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 5104 { 5105 #ifdef RTE_NET_IXGBE 5106 int diag; 5107 5108 if (is_rx) 5109 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 5110 else 5111 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 5112 5113 if (diag == 0) 5114 return; 5115 fprintf(stderr, 5116 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 5117 is_rx ? "rx" : "tx", port_id, diag); 5118 return; 5119 #endif 5120 fprintf(stderr, "VF %s setting not supported for port %d\n", 5121 is_rx ? "Rx" : "Tx", port_id); 5122 RTE_SET_USED(vf); 5123 RTE_SET_USED(on); 5124 } 5125 5126 int 5127 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 5128 { 5129 int diag; 5130 struct rte_eth_link link; 5131 int ret; 5132 5133 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5134 return 1; 5135 ret = eth_link_get_nowait_print_err(port_id, &link); 5136 if (ret < 0) 5137 return 1; 5138 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 5139 rate > link.link_speed) { 5140 fprintf(stderr, 5141 "Invalid rate value: %u, bigger than link speed: %u\n", 5142 rate, link.link_speed); 5143 return 1; 5144 } 5145 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 5146 if (diag == 0) 5147 return diag; 5148 fprintf(stderr, 5149 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 5150 port_id, diag); 5151 return diag; 5152 } 5153 5154 int 5155 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 5156 { 5157 int diag = -ENOTSUP; 5158 5159 RTE_SET_USED(vf); 5160 RTE_SET_USED(rate); 5161 RTE_SET_USED(q_msk); 5162 5163 #ifdef RTE_NET_IXGBE 5164 if (diag == -ENOTSUP) 5165 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 5166 q_msk); 5167 #endif 5168 #ifdef RTE_NET_BNXT 5169 if (diag == -ENOTSUP) 5170 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 5171 #endif 5172 if (diag == 0) 5173 return diag; 5174 5175 fprintf(stderr, 5176 "%s for port_id=%d failed diag=%d\n", 5177 __func__, port_id, diag); 5178 return diag; 5179 } 5180 5181 /* 5182 * Functions to manage the set of filtered Multicast MAC addresses. 5183 * 5184 * A pool of filtered multicast MAC addresses is associated with each port. 5185 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 5186 * The address of the pool and the number of valid multicast MAC addresses 5187 * recorded in the pool are stored in the fields "mc_addr_pool" and 5188 * "mc_addr_nb" of the "rte_port" data structure. 5189 * 5190 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires 5191 * that it be supplied a contiguous array of multicast MAC addresses. 5192 * To comply with this constraint, the set of multicast addresses recorded 5193 * in the pool is kept compacted at the beginning of the pool. 5194 * Hence, when a multicast address is removed from the pool, all following 5195 * addresses, if any, are copied back to keep the set contiguous. 5196 */ 5197 #define MCAST_POOL_INC 32 5198
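/* Illustrative sketch (hypothetical numbers, not part of the testpmd sources): the pool grows in steps of MCAST_POOL_INC entries. With mc_addr_nb == 31, mcast_addr_pool_extend() only bumps the count to 32; with mc_addr_nb == 32 (a multiple of MCAST_POOL_INC) it first reallocs the pool to hold 64 entries and then bumps the count to 33. */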
5199 static int 5200 mcast_addr_pool_extend(struct rte_port *port) 5201 { 5202 struct rte_ether_addr *mc_pool; 5203 size_t mc_pool_size; 5204 5205 /* 5206 * If a free entry is available at the end of the pool, just 5207 * increment the number of recorded multicast addresses. 5208 */ 5209 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5210 port->mc_addr_nb++; 5211 return 0; 5212 } 5213 5214 /* 5215 * [re]allocate a pool with MCAST_POOL_INC more entries. 5216 * The previous test guarantees that port->mc_addr_nb is a multiple 5217 * of MCAST_POOL_INC. 5218 */ 5219 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 5220 MCAST_POOL_INC); 5221 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 5222 mc_pool_size); 5223 if (mc_pool == NULL) { 5224 fprintf(stderr, 5225 "allocation of pool of %u multicast addresses failed\n", 5226 port->mc_addr_nb + MCAST_POOL_INC); 5227 return -ENOMEM; 5228 } 5229 5230 port->mc_addr_pool = mc_pool; 5231 port->mc_addr_nb++; 5232 return 0; 5233 } 5234 5235 5236 static void 5237 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 5238 { 5239 if (mcast_addr_pool_extend(port) != 0) 5240 return; 5241 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 5242 } 5243 5244 static void 5245 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 5246 { 5247 port->mc_addr_nb--; 5248 if (addr_idx == port->mc_addr_nb) { 5249 /* No need to recompact the set of multicast addresses. */ 5250 if (port->mc_addr_nb == 0) { 5251 /* Free the pool of multicast addresses. */ 5252 free(port->mc_addr_pool); 5253 port->mc_addr_pool = NULL; 5254 } 5255 return; 5256 } 5257 memmove(&port->mc_addr_pool[addr_idx], 5258 &port->mc_addr_pool[addr_idx + 1], 5259 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 5260 } 5261 5262 static int 5263 eth_port_multicast_addr_list_set(portid_t port_id) 5264 { 5265 struct rte_port *port; 5266 int diag; 5267 5268 port = &ports[port_id]; 5269 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 5270 port->mc_addr_nb); 5271 if (diag < 0) 5272 fprintf(stderr, 5273 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 5274 port_id, port->mc_addr_nb, diag); 5275 5276 return diag; 5277 } 5278 5279 void 5280 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 5281 { 5282 struct rte_port *port; 5283 uint32_t i; 5284 5285 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5286 return; 5287 5288 port = &ports[port_id]; 5289 5290 /* 5291 * Check that the added multicast MAC address is not already recorded 5292 * in the pool of multicast addresses. 5293 */ 5294 for (i = 0; i < port->mc_addr_nb; i++) { 5295 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 5296 fprintf(stderr, 5297 "multicast address already filtered by port\n"); 5298 return; 5299 } 5300 } 5301 5302 mcast_addr_pool_append(port, mc_addr); 5303 if (eth_port_multicast_addr_list_set(port_id) < 0) 5304 /* Roll back on failure: remove the address from the pool. */ 5305 mcast_addr_pool_remove(port, i); 5306 } 5307
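/* Illustrative sketch (hypothetical state, not part of the testpmd sources): with mc_addr_pool == {A, B, C} and mc_addr_nb == 3, removing B (addr_idx == 1) memmoves C down one slot, leaving {A, C} and mc_addr_nb == 2; the set stays contiguous, as rte_eth_dev_set_mc_addr_list() requires. */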
5308 void 5309 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 5310 { 5311 struct rte_port *port; 5312 uint32_t i; 5313 5314 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5315 return; 5316 5317 port = &ports[port_id]; 5318 5319 /* 5320 * Search the pool of multicast MAC addresses for the removed address. 5321 */ 5322 for (i = 0; i < port->mc_addr_nb; i++) { 5323 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 5324 break; 5325 } 5326 if (i == port->mc_addr_nb) { 5327 fprintf(stderr, "multicast address not filtered by port %d\n", 5328 port_id); 5329 return; 5330 } 5331 5332 mcast_addr_pool_remove(port, i); 5333 if (eth_port_multicast_addr_list_set(port_id) < 0) 5334 /* Roll back on failure: add the address back into the pool. */ 5335 mcast_addr_pool_append(port, mc_addr); 5336 } 5337 5338 void 5339 port_dcb_info_display(portid_t port_id) 5340 { 5341 struct rte_eth_dcb_info dcb_info; 5342 uint16_t i; 5343 int ret; 5344 static const char *border = "================"; 5345 5346 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5347 return; 5348 5349 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 5350 if (ret) { 5351 fprintf(stderr, "\n Failed to get DCB info on port %-2d\n", 5352 port_id); 5353 return; 5354 } 5355 printf("\n %s DCB info for port %-2d %s\n", border, port_id, border); 5356 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 5357 printf("\n TC : "); 5358 for (i = 0; i < dcb_info.nb_tcs; i++) 5359 printf("\t%4d", i); 5360 printf("\n Priority : "); 5361 for (i = 0; i < dcb_info.nb_tcs; i++) 5362 printf("\t%4d", dcb_info.prio_tc[i]); 5363 printf("\n BW percent :"); 5364 for (i = 0; i < dcb_info.nb_tcs; i++) 5365 printf("\t%4d%%", dcb_info.tc_bws[i]); 5366 printf("\n RXQ base : "); 5367 for (i = 0; i < dcb_info.nb_tcs; i++) 5368 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 5369 printf("\n RXQ number :"); 5370 for (i = 0; i < dcb_info.nb_tcs; i++) 5371 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 5372 printf("\n TXQ base : "); 5373 for (i = 0; i < dcb_info.nb_tcs; i++) 5374 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 5375 printf("\n TXQ number :"); 5376 for (i = 0; i < dcb_info.nb_tcs; i++) 5377 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 5378 printf("\n"); 5379 } 5380 5381 uint8_t * 5382 open_file(const char *file_path, uint32_t *size) 5383 { 5384 int fd = open(file_path, O_RDONLY); 5385 off_t pkg_size; 5386 uint8_t *buf = NULL; 5387 int ret = 0; 5388 struct stat st_buf; 5389 5390 if (size) 5391 *size = 0; 5392 5393 if (fd == -1) { 5394 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5395 return buf; 5396 } 5397 5398 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 5399 close(fd); 5400 fprintf(stderr, "%s: File operations failed\n", __func__); 5401 return buf; 5402 } 5403 5404 pkg_size = st_buf.st_size; 5405 if (pkg_size < 0) { 5406 close(fd); 5407 fprintf(stderr, "%s: File operations failed\n", __func__); 5408 return buf; 5409 } 5410 5411 buf = (uint8_t *)malloc(pkg_size); 5412 if (!buf) { 5413 close(fd); 5414 fprintf(stderr, "%s: Failed to allocate memory\n", __func__); 5415 return buf; 5416 } 5417 5418 ret = read(fd, buf, pkg_size); 5419 if (ret != pkg_size) { /* Treat short reads as failure too. */ 5420 close(fd); 5421 fprintf(stderr, "%s: File read operation failed\n", __func__); 5422 close_file(buf); 5423 return NULL; 5424 } 5425 5426 if (size) 5427 *size = pkg_size; 5428 5429 close(fd); 5430 5431 return buf; 5432 } 5433
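/* Illustrative sketch (hypothetical paths, not part of the testpmd sources): uint32_t size; uint8_t *pkg = open_file("/tmp/pkg.bin", &size); if (pkg != NULL) { save_file("/tmp/pkg.copy", pkg, size); close_file(pkg); } */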
5434 int 5435 save_file(const char *file_path, uint8_t *buf, uint32_t size) 5436 { 5437 FILE *fh = fopen(file_path, "wb"); 5438 5439 if (fh == NULL) { 5440 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5441 return -1; 5442 } 5443 5444 if (fwrite(buf, 1, size, fh) != size) { 5445 fclose(fh); 5446 fprintf(stderr, "%s: File write operation failed\n", __func__); 5447 return -1; 5448 } 5449 5450 fclose(fh); 5451 5452 return 0; 5453 } 5454 5455 int 5456 close_file(uint8_t *buf) 5457 { 5458 if (buf) { 5459 free((void *)buf); 5460 return 0; 5461 } 5462 5463 return -1; 5464 } 5465 5466 void 5467 port_queue_region_info_display(portid_t port_id, void *buf) 5468 { 5469 #ifdef RTE_NET_I40E 5470 uint16_t i, j; 5471 struct rte_pmd_i40e_queue_regions *info = 5472 (struct rte_pmd_i40e_queue_regions *)buf; 5473 static const char *queue_region_info_stats_border = "-------"; 5474 5475 if (!info->queue_region_number) 5476 printf("No queue region has been set before"); 5477 5478 printf("\n %s All queue region info for port=%2d %s", 5479 queue_region_info_stats_border, port_id, 5480 queue_region_info_stats_border); 5481 printf("\n queue_region_number: %-14u\n", 5482 info->queue_region_number); 5483 5484 for (i = 0; i < info->queue_region_number; i++) { 5485 printf("\n region_id: %-14u queue_number: %-14u " 5486 "queue_start_index: %-14u\n", 5487 info->region[i].region_id, 5488 info->region[i].queue_num, 5489 info->region[i].queue_start_index); 5490 5491 printf(" user_priority_num is %-14u :", 5492 info->region[i].user_priority_num); 5493 for (j = 0; j < info->region[i].user_priority_num; j++) 5494 printf(" %-14u ", info->region[i].user_priority[j]); 5495 5496 printf("\n flowtype_num is %-14u :", 5497 info->region[i].flowtype_num); 5498 for (j = 0; j < info->region[i].flowtype_num; j++) 5499 printf(" %-14u ", info->region[i].hw_flowtype[j]); 5500 } 5501 #else 5502 RTE_SET_USED(port_id); 5503 RTE_SET_USED(buf); 5504 #endif 5505 5506 printf("\n\n"); 5507 } 5508 5509 void 5510 show_macs(portid_t port_id) 5511 { 5512 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5513 struct rte_eth_dev_info dev_info; 5514 int32_t i, rc, num_macs = 0; 5515 5516 if (eth_dev_info_get_print_err(port_id, &dev_info)) 5517 return; 5518 5519 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 5520 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 5521 if (rc < 0) 5522 return; 5523 5524 for (i = 0; i < rc; i++) { 5525 5526 /* Skip zero (unused) addresses. */ 5527 if (rte_is_zero_ether_addr(&addr[i])) 5528 continue; 5529 5530 num_macs++; 5531 } 5532 5533 printf("Number of MAC addresses added: %d\n", num_macs); 5534 5535 for (i = 0; i < rc; i++) { 5536 5537 /* Skip zero (unused) addresses. */ 5538 if (rte_is_zero_ether_addr(&addr[i])) 5539 continue; 5540 5541 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 5542 printf(" %s\n", buf); 5543 } 5544 } 5545 5546 void 5547 show_mcast_macs(portid_t port_id) 5548 { 5549 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5550 struct rte_ether_addr *addr; 5551 struct rte_port *port; 5552 uint32_t i; 5553 5554 port = &ports[port_id]; 5555 5556 printf("Number of multicast MAC addresses added: %d\n", port->mc_addr_nb); 5557 5558 for (i = 0; i < port->mc_addr_nb; i++) { 5559 addr = &port->mc_addr_pool[i]; 5560 5561 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5562 printf(" %s\n", buf); 5563 } 5564 } 5565
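/* Illustrative sketch (not part of the testpmd sources): show_macs() relies on rte_eth_macaddrs_get() reporting how many address slots were filled: struct rte_ether_addr addrs[4]; int rc = rte_eth_macaddrs_get(0, addrs, 4); rc < 0 on error; otherwise addrs[0..rc-1] holds the configured addresses, with all-zero entries marking unused slots. */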