1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_mtr.h> 42 #include <rte_errno.h> 43 #ifdef RTE_NET_IXGBE 44 #include <rte_pmd_ixgbe.h> 45 #endif 46 #ifdef RTE_NET_I40E 47 #include <rte_pmd_i40e.h> 48 #endif 49 #ifdef RTE_NET_BNXT 50 #include <rte_pmd_bnxt.h> 51 #endif 52 #include <rte_gro.h> 53 #include <rte_hexdump.h> 54 55 #include "testpmd.h" 56 #include "cmdline_mtr.h" 57 58 #define ETHDEV_FWVERS_LEN 32 59 60 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 62 #else 63 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 64 #endif 65 66 #define NS_PER_SEC 1E9 67 68 static char *flowtype_to_str(uint16_t flow_type); 69 70 static const struct { 71 enum tx_pkt_split split; 72 const char *name; 73 } tx_split_name[] = { 74 { 75 .split = TX_PKT_SPLIT_OFF, 76 .name = "off", 77 }, 78 { 79 .split = TX_PKT_SPLIT_ON, 80 .name = "on", 81 }, 82 { 83 .split = TX_PKT_SPLIT_RND, 84 .name = "rand", 85 }, 86 }; 87 88 const struct rss_type_info rss_type_table[] = { 89 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 90 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 91 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 92 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS}, 93 { "none", 0 }, 94 { "eth", RTE_ETH_RSS_ETH }, 95 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 96 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 97 { "vlan", RTE_ETH_RSS_VLAN }, 98 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 99 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 100 { "ipv4", RTE_ETH_RSS_IPV4 }, 101 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 102 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 103 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 104 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 105 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 106 { "ipv6", RTE_ETH_RSS_IPV6 }, 107 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 108 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 109 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 110 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 111 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 112 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 113 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 114 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 115 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 116 { "port", RTE_ETH_RSS_PORT }, 117 { "vxlan", RTE_ETH_RSS_VXLAN }, 118 { "geneve", RTE_ETH_RSS_GENEVE }, 119 { "nvgre", RTE_ETH_RSS_NVGRE }, 120 { "ip", RTE_ETH_RSS_IP }, 121 { "udp", RTE_ETH_RSS_UDP }, 122 { "tcp", RTE_ETH_RSS_TCP }, 123 { "sctp", RTE_ETH_RSS_SCTP }, 124 { "tunnel", RTE_ETH_RSS_TUNNEL }, 125 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 126 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 127 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 128 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 129 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 130 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 131 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 132 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 133 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 134 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 135 { "esp", RTE_ETH_RSS_ESP }, 136 { "ah", RTE_ETH_RSS_AH }, 137 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 138 { "pfcp", RTE_ETH_RSS_PFCP }, 139 { "pppoe", RTE_ETH_RSS_PPPOE }, 140 { "gtpu", RTE_ETH_RSS_GTPU }, 141 { "ecpri", RTE_ETH_RSS_ECPRI }, 142 { "mpls", RTE_ETH_RSS_MPLS }, 143 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 144 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 145 { NULL, 0 }, 146 }; 147 148 static const struct { 149 enum rte_eth_fec_mode mode; 150 const char *name; 151 } fec_mode_name[] = { 152 { 153 .mode = RTE_ETH_FEC_NOFEC, 154 .name = "off", 155 }, 156 { 157 .mode = RTE_ETH_FEC_AUTO, 158 .name = "auto", 159 }, 160 { 161 .mode = RTE_ETH_FEC_BASER, 162 .name = "baser", 163 }, 164 { 165 .mode = RTE_ETH_FEC_RS, 166 .name = "rs", 167 }, 168 }; 169 170 static void 171 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 172 { 173 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 174 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 175 printf("%s%s", name, buf); 176 } 177 178 static void 179 nic_xstats_display_periodic(portid_t port_id) 180 { 181 struct xstat_display_info *xstats_info; 182 uint64_t *prev_values, *curr_values; 183 uint64_t diff_value, value_rate; 184 struct timespec cur_time; 185 uint64_t *ids_supp; 186 size_t ids_supp_sz; 187 uint64_t diff_ns; 188 unsigned int i; 189 int rc; 190 191 xstats_info = &ports[port_id].xstats_info; 192 193 ids_supp_sz = xstats_info->ids_supp_sz; 194 if (ids_supp_sz == 0) 195 return; 196 197 printf("\n"); 198 199 ids_supp = xstats_info->ids_supp; 200 prev_values = xstats_info->prev_values; 201 curr_values = xstats_info->curr_values; 202 203 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 204 ids_supp_sz); 205 if (rc != (int)ids_supp_sz) { 206 fprintf(stderr, 207 "Failed to get values of %zu xstats for port %u - return code %d\n", 208 ids_supp_sz, port_id, rc); 209 return; 210 } 211 212 diff_ns = 0; 213 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 214 uint64_t ns; 215 216 ns = cur_time.tv_sec * NS_PER_SEC; 217 ns += cur_time.tv_nsec; 218 219 if (xstats_info->prev_ns != 0) 220 diff_ns = ns - xstats_info->prev_ns; 221 xstats_info->prev_ns = ns; 222 } 223 224 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 225 for (i = 0; i < ids_supp_sz; i++) { 226 diff_value = (curr_values[i] > prev_values[i]) ? 227 (curr_values[i] - prev_values[i]) : 0; 228 prev_values[i] = curr_values[i]; 229 value_rate = diff_ns > 0 ? 230 (double)diff_value / diff_ns * NS_PER_SEC : 0; 231 232 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 233 xstats_display[i].name, curr_values[i], value_rate); 234 } 235 } 236 237 void 238 nic_stats_display(portid_t port_id) 239 { 240 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 241 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 242 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 243 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 244 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 245 struct timespec cur_time; 246 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 247 diff_ns; 248 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 249 struct rte_eth_stats stats; 250 251 static const char *nic_stats_border = "########################"; 252 253 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 254 print_valid_ports(); 255 return; 256 } 257 rte_eth_stats_get(port_id, &stats); 258 printf("\n %s NIC statistics for port %-2d %s\n", 259 nic_stats_border, port_id, nic_stats_border); 260 261 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 262 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 263 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 264 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 265 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 266 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 267 268 diff_ns = 0; 269 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 270 uint64_t ns; 271 272 ns = cur_time.tv_sec * NS_PER_SEC; 273 ns += cur_time.tv_nsec; 274 275 if (prev_ns[port_id] != 0) 276 diff_ns = ns - prev_ns[port_id]; 277 prev_ns[port_id] = ns; 278 } 279 280 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 281 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 282 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 283 (stats.opackets - prev_pkts_tx[port_id]) : 0; 284 prev_pkts_rx[port_id] = stats.ipackets; 285 prev_pkts_tx[port_id] = stats.opackets; 286 mpps_rx = diff_ns > 0 ? 287 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 288 mpps_tx = diff_ns > 0 ? 289 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 290 291 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 292 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 293 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 294 (stats.obytes - prev_bytes_tx[port_id]) : 0; 295 prev_bytes_rx[port_id] = stats.ibytes; 296 prev_bytes_tx[port_id] = stats.obytes; 297 mbps_rx = diff_ns > 0 ? 298 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 299 mbps_tx = diff_ns > 0 ? 300 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 301 302 printf("\n Throughput (since last show)\n"); 303 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 304 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 305 mpps_tx, mbps_tx * 8); 306 307 if (xstats_display_num > 0) 308 nic_xstats_display_periodic(port_id); 309 310 printf(" %s############################%s\n", 311 nic_stats_border, nic_stats_border); 312 } 313 314 void 315 nic_stats_clear(portid_t port_id) 316 { 317 int ret; 318 319 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 320 print_valid_ports(); 321 return; 322 } 323 324 ret = rte_eth_stats_reset(port_id); 325 if (ret != 0) { 326 fprintf(stderr, 327 "%s: Error: failed to reset stats (port %u): %s", 328 __func__, port_id, strerror(-ret)); 329 return; 330 } 331 332 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 333 if (ret != 0) { 334 if (ret < 0) 335 ret = -ret; 336 fprintf(stderr, 337 "%s: Error: failed to get stats (port %u): %s", 338 __func__, port_id, strerror(ret)); 339 return; 340 } 341 printf("\n NIC statistics for port %d cleared\n", port_id); 342 } 343 344 void 345 nic_xstats_display(portid_t port_id) 346 { 347 struct rte_eth_xstat *xstats; 348 int cnt_xstats, idx_xstat; 349 struct rte_eth_xstat_name *xstats_names; 350 351 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 352 print_valid_ports(); 353 return; 354 } 355 printf("###### NIC extended statistics for port %-2d\n", port_id); 356 if (!rte_eth_dev_is_valid_port(port_id)) { 357 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 358 return; 359 } 360 361 /* Get count */ 362 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 363 if (cnt_xstats < 0) { 364 fprintf(stderr, "Error: Cannot get count of xstats\n"); 365 return; 366 } 367 368 /* Get id-name lookup table */ 369 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 370 if (xstats_names == NULL) { 371 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 372 return; 373 } 374 if (cnt_xstats != rte_eth_xstats_get_names( 375 port_id, xstats_names, cnt_xstats)) { 376 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 377 free(xstats_names); 378 return; 379 } 380 381 /* Get stats themselves */ 382 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 383 if (xstats == NULL) { 384 fprintf(stderr, "Cannot allocate memory for xstats\n"); 385 free(xstats_names); 386 return; 387 } 388 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 389 fprintf(stderr, "Error: Unable to get xstats\n"); 390 free(xstats_names); 391 free(xstats); 392 return; 393 } 394 395 /* Display xstats */ 396 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 397 if (xstats_hide_zero && !xstats[idx_xstat].value) 398 continue; 399 printf("%s: %"PRIu64"\n", 400 xstats_names[idx_xstat].name, 401 xstats[idx_xstat].value); 402 } 403 free(xstats_names); 404 free(xstats); 405 } 406 407 void 408 nic_xstats_clear(portid_t port_id) 409 { 410 int ret; 411 412 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 413 print_valid_ports(); 414 return; 415 } 416 417 ret = rte_eth_xstats_reset(port_id); 418 if (ret != 0) { 419 fprintf(stderr, 420 "%s: Error: failed to reset xstats (port %u): %s\n", 421 __func__, port_id, strerror(-ret)); 422 return; 423 } 424 425 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 426 if (ret != 0) { 427 if (ret < 0) 428 ret = -ret; 429 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 430 __func__, port_id, strerror(ret)); 431 return; 432 } 433 } 434 435 static const char * 436 get_queue_state_name(uint8_t queue_state) 437 { 438 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 439 return "stopped"; 440 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 441 return "started"; 442 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 443 return "hairpin"; 444 else 445 return "unknown"; 446 } 447 448 void 449 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 450 { 451 struct rte_eth_burst_mode mode; 452 struct rte_eth_rxq_info qinfo; 453 int32_t rc; 454 static const char *info_border = "*********************"; 455 456 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 457 if (rc != 0) { 458 fprintf(stderr, 459 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 460 port_id, queue_id, strerror(-rc), rc); 461 return; 462 } 463 464 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 465 info_border, port_id, queue_id, info_border); 466 467 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 468 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 469 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 470 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 471 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 472 printf("\nRX drop packets: %s", 473 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 474 printf("\nRX deferred start: %s", 475 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 476 printf("\nRX scattered packets: %s", 477 (qinfo.scattered_rx != 0) ? "on" : "off"); 478 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 479 if (qinfo.rx_buf_size != 0) 480 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 481 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 482 483 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 484 printf("\nBurst mode: %s%s", 485 mode.info, 486 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 487 " (per queue)" : ""); 488 489 printf("\n"); 490 } 491 492 void 493 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 494 { 495 struct rte_eth_burst_mode mode; 496 struct rte_eth_txq_info qinfo; 497 int32_t rc; 498 static const char *info_border = "*********************"; 499 500 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 501 if (rc != 0) { 502 fprintf(stderr, 503 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 504 port_id, queue_id, strerror(-rc), rc); 505 return; 506 } 507 508 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 509 info_border, port_id, queue_id, info_border); 510 511 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 512 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 513 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 514 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 515 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 516 printf("\nTX deferred start: %s", 517 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 518 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 519 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 520 521 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 522 printf("\nBurst mode: %s%s", 523 mode.info, 524 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 525 " (per queue)" : ""); 526 527 printf("\n"); 528 } 529 530 static int bus_match_all(const struct rte_bus *bus, const void *data) 531 { 532 RTE_SET_USED(bus); 533 RTE_SET_USED(data); 534 return 0; 535 } 536 537 static void 538 device_infos_display_speeds(uint32_t speed_capa) 539 { 540 printf("\n\tDevice speed capability:"); 541 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 542 printf(" Autonegotiate (all speeds)"); 543 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 544 printf(" Disable autonegotiate (fixed speed) "); 545 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 546 printf(" 10 Mbps half-duplex "); 547 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 548 printf(" 10 Mbps full-duplex "); 549 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 550 printf(" 100 Mbps half-duplex "); 551 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 552 printf(" 100 Mbps full-duplex "); 553 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 554 printf(" 1 Gbps "); 555 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 556 printf(" 2.5 Gbps "); 557 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 558 printf(" 5 Gbps "); 559 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 560 printf(" 10 Gbps "); 561 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 562 printf(" 20 Gbps "); 563 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 564 printf(" 25 Gbps "); 565 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 566 printf(" 40 Gbps "); 567 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 568 printf(" 50 Gbps "); 569 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 570 printf(" 56 Gbps "); 571 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 572 printf(" 100 Gbps "); 573 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 574 printf(" 200 Gbps "); 575 } 576 577 void 578 device_infos_display(const char *identifier) 579 { 580 static const char *info_border = "*********************"; 581 struct rte_bus *start = NULL, *next; 582 struct rte_dev_iterator dev_iter; 583 char name[RTE_ETH_NAME_MAX_LEN]; 584 struct rte_ether_addr mac_addr; 585 struct rte_device *dev; 586 struct rte_devargs da; 587 portid_t port_id; 588 struct rte_eth_dev_info dev_info; 589 char devstr[128]; 590 591 memset(&da, 0, sizeof(da)); 592 if (!identifier) 593 goto skip_parse; 594 595 if (rte_devargs_parsef(&da, "%s", identifier)) { 596 fprintf(stderr, "cannot parse identifier\n"); 597 return; 598 } 599 600 skip_parse: 601 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 602 603 start = next; 604 if (identifier && da.bus != next) 605 continue; 606 607 /* Skip buses that don't have iterate method */ 608 if (!next->dev_iterate) 609 continue; 610 611 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 612 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 613 614 if (!dev->driver) 615 continue; 616 /* Check for matching device if identifier is present */ 617 if (identifier && 618 strncmp(da.name, dev->name, strlen(dev->name))) 619 continue; 620 printf("\n%s Infos for device %s %s\n", 621 info_border, dev->name, info_border); 622 printf("Bus name: %s", dev->bus->name); 623 printf("\nDriver name: %s", dev->driver->name); 624 printf("\nDevargs: %s", 625 dev->devargs ? dev->devargs->args : ""); 626 printf("\nConnect to socket: %d", dev->numa_node); 627 printf("\n"); 628 629 /* List ports with matching device name */ 630 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 631 printf("\n\tPort id: %-2d", port_id); 632 if (eth_macaddr_get_print_err(port_id, 633 &mac_addr) == 0) 634 print_ethaddr("\n\tMAC address: ", 635 &mac_addr); 636 rte_eth_dev_get_name_by_port(port_id, name); 637 printf("\n\tDevice name: %s", name); 638 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 639 device_infos_display_speeds(dev_info.speed_capa); 640 printf("\n"); 641 } 642 } 643 }; 644 rte_devargs_reset(&da); 645 } 646 647 static void 648 print_dev_capabilities(uint64_t capabilities) 649 { 650 uint64_t single_capa; 651 int begin; 652 int end; 653 int bit; 654 655 if (capabilities == 0) 656 return; 657 658 begin = __builtin_ctzll(capabilities); 659 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 660 661 single_capa = 1ULL << begin; 662 for (bit = begin; bit < end; bit++) { 663 if (capabilities & single_capa) 664 printf(" %s", 665 rte_eth_dev_capability_name(single_capa)); 666 single_capa <<= 1; 667 } 668 } 669 670 void 671 port_infos_display(portid_t port_id) 672 { 673 struct rte_port *port; 674 struct rte_ether_addr mac_addr; 675 struct rte_eth_link link; 676 struct rte_eth_dev_info dev_info; 677 int vlan_offload; 678 struct rte_mempool * mp; 679 static const char *info_border = "*********************"; 680 uint16_t mtu; 681 char name[RTE_ETH_NAME_MAX_LEN]; 682 int ret; 683 char fw_version[ETHDEV_FWVERS_LEN]; 684 685 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 686 print_valid_ports(); 687 return; 688 } 689 port = &ports[port_id]; 690 ret = eth_link_get_nowait_print_err(port_id, &link); 691 if (ret < 0) 692 return; 693 694 ret = eth_dev_info_get_print_err(port_id, &dev_info); 695 if (ret != 0) 696 return; 697 698 printf("\n%s Infos for port %-2d %s\n", 699 info_border, port_id, info_border); 700 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 701 print_ethaddr("MAC address: ", &mac_addr); 702 rte_eth_dev_get_name_by_port(port_id, name); 703 printf("\nDevice name: %s", name); 704 printf("\nDriver name: %s", dev_info.driver_name); 705 706 if (rte_eth_dev_fw_version_get(port_id, fw_version, 707 ETHDEV_FWVERS_LEN) == 0) 708 printf("\nFirmware-version: %s", fw_version); 709 else 710 printf("\nFirmware-version: %s", "not available"); 711 712 if (dev_info.device->devargs && dev_info.device->devargs->args) 713 printf("\nDevargs: %s", dev_info.device->devargs->args); 714 printf("\nConnect to socket: %u", port->socket_id); 715 716 if (port_numa[port_id] != NUMA_NO_CONFIG) { 717 mp = mbuf_pool_find(port_numa[port_id], 0); 718 if (mp) 719 printf("\nmemory allocation on the socket: %d", 720 port_numa[port_id]); 721 } else 722 printf("\nmemory allocation on the socket: %u",port->socket_id); 723 724 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 725 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 726 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 727 ("full-duplex") : ("half-duplex")); 728 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 729 ("On") : ("Off")); 730 731 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 732 printf("MTU: %u\n", mtu); 733 734 printf("Promiscuous mode: %s\n", 735 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 736 printf("Allmulticast mode: %s\n", 737 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 738 printf("Maximum number of MAC addresses: %u\n", 739 (unsigned int)(port->dev_info.max_mac_addrs)); 740 printf("Maximum number of MAC addresses of hash filtering: %u\n", 741 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 742 743 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 744 if (vlan_offload >= 0){ 745 printf("VLAN offload: \n"); 746 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 747 printf(" strip on, "); 748 else 749 printf(" strip off, "); 750 751 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 752 printf("filter on, "); 753 else 754 printf("filter off, "); 755 756 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 757 printf("extend on, "); 758 else 759 printf("extend off, "); 760 761 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 762 printf("qinq strip on\n"); 763 else 764 printf("qinq strip off\n"); 765 } 766 767 if (dev_info.hash_key_size > 0) 768 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 769 if (dev_info.reta_size > 0) 770 printf("Redirection table size: %u\n", dev_info.reta_size); 771 if (!dev_info.flow_type_rss_offloads) 772 printf("No RSS offload flow type is supported.\n"); 773 else { 774 uint16_t i; 775 char *p; 776 777 printf("Supported RSS offload flow types:\n"); 778 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 779 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 780 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 781 continue; 782 p = flowtype_to_str(i); 783 if (p) 784 printf(" %s\n", p); 785 else 786 printf(" user defined %d\n", i); 787 } 788 } 789 790 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 791 printf("Maximum configurable length of RX packet: %u\n", 792 dev_info.max_rx_pktlen); 793 printf("Maximum configurable size of LRO aggregated packet: %u\n", 794 dev_info.max_lro_pkt_size); 795 if (dev_info.max_vfs) 796 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 797 if (dev_info.max_vmdq_pools) 798 printf("Maximum number of VMDq pools: %u\n", 799 dev_info.max_vmdq_pools); 800 801 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 802 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 803 printf("Max possible number of RXDs per queue: %hu\n", 804 dev_info.rx_desc_lim.nb_max); 805 printf("Min possible number of RXDs per queue: %hu\n", 806 dev_info.rx_desc_lim.nb_min); 807 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 808 809 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 810 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 811 printf("Max possible number of TXDs per queue: %hu\n", 812 dev_info.tx_desc_lim.nb_max); 813 printf("Min possible number of TXDs per queue: %hu\n", 814 dev_info.tx_desc_lim.nb_min); 815 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 816 printf("Max segment number per packet: %hu\n", 817 dev_info.tx_desc_lim.nb_seg_max); 818 printf("Max segment number per MTU/TSO: %hu\n", 819 dev_info.tx_desc_lim.nb_mtu_seg_max); 820 821 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 822 print_dev_capabilities(dev_info.dev_capa); 823 printf(" )\n"); 824 /* Show switch info only if valid switch domain and port id is set */ 825 if (dev_info.switch_info.domain_id != 826 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 827 if (dev_info.switch_info.name) 828 printf("Switch name: %s\n", dev_info.switch_info.name); 829 830 printf("Switch domain Id: %u\n", 831 dev_info.switch_info.domain_id); 832 printf("Switch Port Id: %u\n", 833 dev_info.switch_info.port_id); 834 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 835 printf("Switch Rx domain: %u\n", 836 dev_info.switch_info.rx_domain); 837 } 838 } 839 840 void 841 port_summary_header_display(void) 842 { 843 uint16_t port_number; 844 845 port_number = rte_eth_dev_count_avail(); 846 printf("Number of available ports: %i\n", port_number); 847 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 848 "Driver", "Status", "Link"); 849 } 850 851 void 852 port_summary_display(portid_t port_id) 853 { 854 struct rte_ether_addr mac_addr; 855 struct rte_eth_link link; 856 struct rte_eth_dev_info dev_info; 857 char name[RTE_ETH_NAME_MAX_LEN]; 858 int ret; 859 860 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 861 print_valid_ports(); 862 return; 863 } 864 865 ret = eth_link_get_nowait_print_err(port_id, &link); 866 if (ret < 0) 867 return; 868 869 ret = eth_dev_info_get_print_err(port_id, &dev_info); 870 if (ret != 0) 871 return; 872 873 rte_eth_dev_get_name_by_port(port_id, name); 874 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 875 if (ret != 0) 876 return; 877 878 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 879 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 880 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 881 rte_eth_link_speed_to_str(link.link_speed)); 882 } 883 884 void 885 port_eeprom_display(portid_t port_id) 886 { 887 struct rte_dev_eeprom_info einfo; 888 int ret; 889 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 890 print_valid_ports(); 891 return; 892 } 893 894 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 895 if (len_eeprom < 0) { 896 switch (len_eeprom) { 897 case -ENODEV: 898 fprintf(stderr, "port index %d invalid\n", port_id); 899 break; 900 case -ENOTSUP: 901 fprintf(stderr, "operation not supported by device\n"); 902 break; 903 case -EIO: 904 fprintf(stderr, "device is removed\n"); 905 break; 906 default: 907 fprintf(stderr, "Unable to get EEPROM: %d\n", 908 len_eeprom); 909 break; 910 } 911 return; 912 } 913 914 char buf[len_eeprom]; 915 einfo.offset = 0; 916 einfo.length = len_eeprom; 917 einfo.data = buf; 918 919 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 920 if (ret != 0) { 921 switch (ret) { 922 case -ENODEV: 923 fprintf(stderr, "port index %d invalid\n", port_id); 924 break; 925 case -ENOTSUP: 926 fprintf(stderr, "operation not supported by device\n"); 927 break; 928 case -EIO: 929 fprintf(stderr, "device is removed\n"); 930 break; 931 default: 932 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 933 break; 934 } 935 return; 936 } 937 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 938 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 939 } 940 941 void 942 port_module_eeprom_display(portid_t port_id) 943 { 944 struct rte_eth_dev_module_info minfo; 945 struct rte_dev_eeprom_info einfo; 946 int ret; 947 948 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 949 print_valid_ports(); 950 return; 951 } 952 953 954 ret = rte_eth_dev_get_module_info(port_id, &minfo); 955 if (ret != 0) { 956 switch (ret) { 957 case -ENODEV: 958 fprintf(stderr, "port index %d invalid\n", port_id); 959 break; 960 case -ENOTSUP: 961 fprintf(stderr, "operation not supported by device\n"); 962 break; 963 case -EIO: 964 fprintf(stderr, "device is removed\n"); 965 break; 966 default: 967 fprintf(stderr, "Unable to get module EEPROM: %d\n", 968 ret); 969 break; 970 } 971 return; 972 } 973 974 char buf[minfo.eeprom_len]; 975 einfo.offset = 0; 976 einfo.length = minfo.eeprom_len; 977 einfo.data = buf; 978 979 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 980 if (ret != 0) { 981 switch (ret) { 982 case -ENODEV: 983 fprintf(stderr, "port index %d invalid\n", port_id); 984 break; 985 case -ENOTSUP: 986 fprintf(stderr, "operation not supported by device\n"); 987 break; 988 case -EIO: 989 fprintf(stderr, "device is removed\n"); 990 break; 991 default: 992 fprintf(stderr, "Unable to get module EEPROM: %d\n", 993 ret); 994 break; 995 } 996 return; 997 } 998 999 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1000 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1001 } 1002 1003 int 1004 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1005 { 1006 uint16_t pid; 1007 1008 if (port_id == (portid_t)RTE_PORT_ALL) 1009 return 0; 1010 1011 RTE_ETH_FOREACH_DEV(pid) 1012 if (port_id == pid) 1013 return 0; 1014 1015 if (warning == ENABLED_WARN) 1016 fprintf(stderr, "Invalid port %d\n", port_id); 1017 1018 return 1; 1019 } 1020 1021 void print_valid_ports(void) 1022 { 1023 portid_t pid; 1024 1025 printf("The valid ports array is ["); 1026 RTE_ETH_FOREACH_DEV(pid) { 1027 printf(" %d", pid); 1028 } 1029 printf(" ]\n"); 1030 } 1031 1032 static int 1033 vlan_id_is_invalid(uint16_t vlan_id) 1034 { 1035 if (vlan_id < 4096) 1036 return 0; 1037 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1038 return 1; 1039 } 1040 1041 static int 1042 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1043 { 1044 const struct rte_pci_device *pci_dev; 1045 const struct rte_bus *bus; 1046 uint64_t pci_len; 1047 1048 if (reg_off & 0x3) { 1049 fprintf(stderr, 1050 "Port register offset 0x%X not aligned on a 4-byte boundary\n", 1051 (unsigned int)reg_off); 1052 return 1; 1053 } 1054 1055 if (!ports[port_id].dev_info.device) { 1056 fprintf(stderr, "Invalid device\n"); 1057 return 0; 1058 } 1059 1060 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1061 if (bus && !strcmp(bus->name, "pci")) { 1062 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1063 } else { 1064 fprintf(stderr, "Not a PCI device\n"); 1065 return 1; 1066 } 1067 1068 pci_len = pci_dev->mem_resource[0].len; 1069 if (reg_off >= pci_len) { 1070 fprintf(stderr, 1071 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", 1072 port_id, (unsigned int)reg_off, (unsigned int)reg_off, 1073 pci_len); 1074 return 1; 1075 } 1076 return 0; 1077 } 1078 1079 static int 1080 reg_bit_pos_is_invalid(uint8_t bit_pos) 1081 { 1082 if (bit_pos <= 31) 1083 return 0; 1084 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); 1085 return 1; 1086 } 1087 1088 #define display_port_and_reg_off(port_id, reg_off) \ 1089 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1090 1091 static inline void 1092 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1093 { 1094 display_port_and_reg_off(port_id, (unsigned)reg_off); 1095 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1096 } 1097 1098 void 1099 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1100 { 1101 uint32_t reg_v; 1102 1103 1104 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1105 return; 1106 if (port_reg_off_is_invalid(port_id, reg_off)) 1107 return; 1108 if (reg_bit_pos_is_invalid(bit_x)) 1109 return; 1110 reg_v = port_id_pci_reg_read(port_id, reg_off); 1111 display_port_and_reg_off(port_id, (unsigned)reg_off); 1112 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1113 } 1114 1115 void 1116 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1117 uint8_t bit1_pos, uint8_t bit2_pos) 1118 { 1119 uint32_t reg_v; 1120 uint8_t l_bit; 1121 uint8_t h_bit; 1122 1123 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1124 return; 1125 if (port_reg_off_is_invalid(port_id, reg_off)) 1126 return; 1127 if (reg_bit_pos_is_invalid(bit1_pos)) 1128 return; 1129 if (reg_bit_pos_is_invalid(bit2_pos)) 1130 return; 1131 if (bit1_pos > bit2_pos) 1132 l_bit = bit2_pos, h_bit = bit1_pos; 1133 else 1134 l_bit = bit1_pos, h_bit = bit2_pos; 1135 1136 reg_v = port_id_pci_reg_read(port_id, reg_off); 1137 reg_v >>= l_bit; 1138 if (h_bit < 31) 1139 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1140 display_port_and_reg_off(port_id, (unsigned)reg_off); 1141 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1142 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1143 } 1144 1145 void 1146 port_reg_display(portid_t port_id, uint32_t reg_off) 1147 { 1148 uint32_t reg_v; 1149 1150 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1151 return; 1152 if (port_reg_off_is_invalid(port_id, reg_off)) 1153 return; 1154 reg_v = port_id_pci_reg_read(port_id, reg_off); 1155 display_port_reg_value(port_id, reg_off, reg_v); 1156 } 1157 1158 void 1159 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1160 uint8_t bit_v) 1161 { 1162 uint32_t reg_v; 1163 1164 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1165 return; 1166 if (port_reg_off_is_invalid(port_id, reg_off)) 1167 return; 1168 if (reg_bit_pos_is_invalid(bit_pos)) 1169 return; 1170 if (bit_v > 1) { 1171 fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n", 1172 (int) bit_v); 1173 return; 1174 } 1175 reg_v = port_id_pci_reg_read(port_id, reg_off); 1176 if (bit_v == 0) 1177 reg_v &= ~(1 << bit_pos); 1178 else 1179 reg_v |= (1 << bit_pos); 1180 port_id_pci_reg_write(port_id, reg_off, reg_v); 1181 display_port_reg_value(port_id, reg_off, reg_v); 1182 } 1183 1184 void 1185 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1186 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1187 { 1188 uint32_t max_v; 1189 uint32_t reg_v; 1190 uint8_t l_bit; 1191 uint8_t h_bit; 1192 1193 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1194 return; 1195 if (port_reg_off_is_invalid(port_id, reg_off)) 1196 return; 1197 if (reg_bit_pos_is_invalid(bit1_pos)) 1198 return; 1199 if (reg_bit_pos_is_invalid(bit2_pos)) 1200 return; 1201 if (bit1_pos > bit2_pos) 1202 l_bit = bit2_pos, h_bit = bit1_pos; 1203 else 1204 l_bit = bit1_pos, h_bit = bit2_pos; 1205 1206 if ((h_bit - l_bit) < 31) 1207 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1208 else 1209 max_v = 0xFFFFFFFF; 1210 1211 if (value > max_v) { 1212 fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n", 1213 (unsigned)value, (unsigned)value, 1214 (unsigned)max_v, (unsigned)max_v); 1215 return; 1216 } 1217 reg_v = port_id_pci_reg_read(port_id, reg_off); 1218 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1219 reg_v |= (value << l_bit); /* Set changed bits */ 1220 port_id_pci_reg_write(port_id, reg_off, reg_v); 1221 display_port_reg_value(port_id, reg_off, reg_v); 1222 } 1223 1224 void 1225 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1226 { 1227 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1228 return; 1229 if (port_reg_off_is_invalid(port_id, reg_off)) 1230 return; 1231 port_id_pci_reg_write(port_id, reg_off, reg_v); 1232 display_port_reg_value(port_id, reg_off, reg_v); 1233 } 1234 1235 void 1236 port_mtu_set(portid_t port_id, uint16_t mtu) 1237 { 1238 struct rte_port *port = &ports[port_id]; 1239 int diag; 1240 1241 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1242 return; 1243 1244 diag = rte_eth_dev_set_mtu(port_id, mtu); 1245 if (diag != 0) { 1246 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1247 return; 1248 } 1249 1250 port->dev_conf.rxmode.mtu = mtu; 1251 } 1252 1253 /* Generic flow management functions. */ 1254 1255 static struct port_flow_tunnel * 1256 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1257 { 1258 struct port_flow_tunnel *flow_tunnel; 1259 1260 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1261 if (flow_tunnel->id == port_tunnel_id) 1262 goto out; 1263 } 1264 flow_tunnel = NULL; 1265 1266 out: 1267 return flow_tunnel; 1268 } 1269 1270 const char * 1271 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1272 { 1273 const char *type; 1274 switch (tunnel->type) { 1275 default: 1276 type = "unknown"; 1277 break; 1278 case RTE_FLOW_ITEM_TYPE_VXLAN: 1279 type = "vxlan"; 1280 break; 1281 case RTE_FLOW_ITEM_TYPE_GRE: 1282 type = "gre"; 1283 break; 1284 case RTE_FLOW_ITEM_TYPE_NVGRE: 1285 type = "nvgre"; 1286 break; 1287 case RTE_FLOW_ITEM_TYPE_GENEVE: 1288 type = "geneve"; 1289 break; 1290 } 1291 1292 return type; 1293 } 1294 1295 struct port_flow_tunnel * 1296 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1297 { 1298 struct rte_port *port = &ports[port_id]; 1299 struct port_flow_tunnel *flow_tunnel; 1300 1301 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1302 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1303 goto out; 1304 } 1305 flow_tunnel = NULL; 1306 1307 out: 1308 return flow_tunnel; 1309 } 1310 1311 void port_flow_tunnel_list(portid_t port_id) 1312 { 1313 struct rte_port *port = &ports[port_id]; 1314 struct port_flow_tunnel *flt; 1315 1316 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1317 printf("port %u tunnel #%u type=%s", 1318 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1319 if (flt->tunnel.tun_id) 1320 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1321 printf("\n"); 1322 } 1323 } 1324 1325 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1326 { 1327 struct rte_port *port = &ports[port_id]; 1328 struct port_flow_tunnel *flt; 1329 1330 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1331 if (flt->id == tunnel_id) 1332 break; 1333 } 1334 if (flt) { 1335 LIST_REMOVE(flt, chain); 1336 free(flt); 1337 printf("port %u: flow tunnel #%u destroyed\n", 1338 port_id, tunnel_id); 1339 } 1340 } 1341 1342 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1343 { 1344 struct rte_port *port = &ports[port_id]; 1345 enum rte_flow_item_type type; 1346 struct port_flow_tunnel *flt; 1347 1348 if (!strcmp(ops->type, "vxlan")) 1349 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1350 else if (!strcmp(ops->type, "gre")) 1351 type = RTE_FLOW_ITEM_TYPE_GRE; 1352 else if (!strcmp(ops->type, "nvgre")) 1353 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1354 else if (!strcmp(ops->type, "geneve")) 1355 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1356 else { 1357 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1358 ops->type); 1359 return; 1360 } 1361 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1362 if (flt->tunnel.type == type) 1363 break; 1364 } 1365 if (!flt) { 1366 flt = calloc(1, sizeof(*flt)); 1367 if (!flt) { 1368 fprintf(stderr, "failed to allocate port flt object\n"); 1369 return; 1370 } 1371 flt->tunnel.type = type; 1372 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1373 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1374 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1375 } 1376 printf("port %d: flow tunnel #%u type %s\n", 1377 port_id, flt->id, ops->type); 1378 } 1379 1380 /** Generate a port_flow entry from attributes/pattern/actions. */ 1381 static struct port_flow * 1382 port_flow_new(const struct rte_flow_attr *attr, 1383 const struct rte_flow_item *pattern, 1384 const struct rte_flow_action *actions, 1385 struct rte_flow_error *error) 1386 { 1387 const struct rte_flow_conv_rule rule = { 1388 .attr_ro = attr, 1389 .pattern_ro = pattern, 1390 .actions_ro = actions, 1391 }; 1392 struct port_flow *pf; 1393 int ret; 1394 1395 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1396 if (ret < 0) 1397 return NULL; 1398 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1399 if (!pf) { 1400 rte_flow_error_set 1401 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1402 "calloc() failed"); 1403 return NULL; 1404 } 1405 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1406 error) >= 0) 1407 return pf; 1408 free(pf); 1409 return NULL; 1410 } 1411 1412 /** Print a message out of a flow error. */ 1413 static int 1414 port_flow_complain(struct rte_flow_error *error) 1415 { 1416 static const char *const errstrlist[] = { 1417 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1418 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1419 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1420 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1421 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1422 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1423 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1424 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1425 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1426 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1427 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1428 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1429 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1430 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1431 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1432 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1433 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1434 }; 1435 const char *errstr; 1436 char buf[32]; 1437 int err = rte_errno; 1438 1439 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1440 !errstrlist[error->type]) 1441 errstr = "unknown type"; 1442 else 1443 errstr = errstrlist[error->type]; 1444 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1445 __func__, error->type, errstr, 1446 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1447 error->cause), buf) : "", 1448 error->message ? error->message : "(no stated reason)", 1449 rte_strerror(err)); 1450 return -err; 1451 } 1452 1453 static void 1454 rss_config_display(struct rte_flow_action_rss *rss_conf) 1455 { 1456 uint8_t i; 1457 1458 if (rss_conf == NULL) { 1459 fprintf(stderr, "Invalid rule\n"); 1460 return; 1461 } 1462 1463 printf("RSS:\n" 1464 " queues:"); 1465 if (rss_conf->queue_num == 0) 1466 printf(" none"); 1467 for (i = 0; i < rss_conf->queue_num; i++) 1468 printf(" %d", rss_conf->queue[i]); 1469 printf("\n"); 1470 1471 printf(" function: "); 1472 switch (rss_conf->func) { 1473 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1474 printf("default\n"); 1475 break; 1476 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1477 printf("toeplitz\n"); 1478 break; 1479 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1480 printf("simple_xor\n"); 1481 break; 1482 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1483 printf("symmetric_toeplitz\n"); 1484 break; 1485 default: 1486 printf("Unknown function\n"); 1487 return; 1488 } 1489 1490 printf(" types:\n"); 1491 if (rss_conf->types == 0) { 1492 printf(" none\n"); 1493 return; 1494 } 1495 for (i = 0; rss_type_table[i].str; i++) { 1496 if ((rss_conf->types & 1497 rss_type_table[i].rss_type) == 1498 rss_type_table[i].rss_type && 1499 rss_type_table[i].rss_type != 0) 1500 printf(" %s\n", rss_type_table[i].str); 1501 } 1502 } 1503 1504 static struct port_indirect_action * 1505 action_get_by_id(portid_t port_id, uint32_t id) 1506 { 1507 struct rte_port *port; 1508 struct port_indirect_action **ppia; 1509 struct port_indirect_action *pia = NULL; 1510 1511 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1512 port_id == (portid_t)RTE_PORT_ALL) 1513 return NULL; 1514 port = &ports[port_id]; 1515 ppia = &port->actions_list; 1516 while (*ppia) { 1517 if ((*ppia)->id == id) { 1518 pia = *ppia; 1519 break; 1520 } 1521 ppia = &(*ppia)->next; 1522 } 1523 if (!pia) 1524 fprintf(stderr, 1525 "Failed to find indirect action #%u on port %u\n", 1526 id, port_id); 1527 return pia; 1528 } 1529 1530 static int 1531 action_alloc(portid_t port_id, uint32_t id, 1532 struct port_indirect_action **action) 1533 { 1534 struct rte_port *port; 1535 struct port_indirect_action **ppia; 1536 struct port_indirect_action *pia = NULL; 1537 1538 *action = NULL; 1539 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1540 port_id == (portid_t)RTE_PORT_ALL) 1541 return -EINVAL; 1542 port = &ports[port_id]; 1543 if (id == UINT32_MAX) { 1544 /* taking first available ID */ 1545 if (port->actions_list) { 1546 if (port->actions_list->id == UINT32_MAX - 1) { 1547 fprintf(stderr, 1548 "Highest indirect action ID is already assigned, delete it first\n"); 1549 return -ENOMEM; 1550 } 1551 id = port->actions_list->id + 1; 1552 } else { 1553 id = 0; 1554 } 1555 } 1556 pia = calloc(1, sizeof(*pia)); 1557 if (!pia) { 1558 fprintf(stderr, 1559 "Allocation of port %u indirect action failed\n", 1560 port_id); 1561 return -ENOMEM; 1562 } 1563 ppia = &port->actions_list; 1564 while (*ppia && (*ppia)->id > id) 1565 ppia = &(*ppia)->next; 1566 if (*ppia && (*ppia)->id == id) { 1567 fprintf(stderr, 1568 "Indirect action #%u is already assigned, delete it first\n", 1569 id); 1570 free(pia); 1571 return -EINVAL; 1572 } 1573 pia->next = *ppia; 1574 pia->id = id; 1575 *ppia = pia; 1576 *action = pia; 1577 return 0; 1578 } 1579 1580 /** Create indirect action */ 1581 int 1582 port_action_handle_create(portid_t port_id, uint32_t id, 1583 const struct rte_flow_indir_action_conf *conf, 1584 const struct rte_flow_action *action) 1585 { 1586 struct port_indirect_action *pia; 1587 int ret; 1588 struct rte_flow_error error; 1589 struct rte_port *port; 1590 1591 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1592 port_id == (portid_t)RTE_PORT_ALL) 1593 return -EINVAL; 1594 1595 ret = action_alloc(port_id, id, &pia); 1596 if (ret) 1597 return ret; 1598 1599 port = &ports[port_id]; 1600 1601 if (conf->transfer) 1602 port_id = port->flow_transfer_proxy; 1603 1604 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1605 port_id == (portid_t)RTE_PORT_ALL) 1606 return -EINVAL; 1607 1608 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1609 struct rte_flow_action_age *age = 1610 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1611 1612 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1613 age->context = &pia->age_type; 1614 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1615 struct rte_flow_action_conntrack *ct = 1616 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1617 1618 memcpy(ct, &conntrack_context, sizeof(*ct)); 1619 } 1620 /* Poisoning to make sure PMDs update it in case of error. */ 1621 memset(&error, 0x22, sizeof(error)); 1622 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1623 &error); 1624 if (!pia->handle) { 1625 uint32_t destroy_id = pia->id; 1626 port_action_handle_destroy(port_id, 1, &destroy_id); 1627 return port_flow_complain(&error); 1628 } 1629 pia->type = action->type; 1630 pia->transfer = conf->transfer; 1631 printf("Indirect action #%u created\n", pia->id); 1632 return 0; 1633 } 1634 1635 /** Destroy indirect action */ 1636 int 1637 port_action_handle_destroy(portid_t port_id, 1638 uint32_t n, 1639 const uint32_t *actions) 1640 { 1641 struct rte_port *port; 1642 struct port_indirect_action **tmp; 1643 uint32_t c = 0; 1644 int ret = 0; 1645 1646 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1647 port_id == (portid_t)RTE_PORT_ALL) 1648 return -EINVAL; 1649 port = &ports[port_id]; 1650 tmp = &port->actions_list; 1651 while (*tmp) { 1652 uint32_t i; 1653 1654 for (i = 0; i != n; ++i) { 1655 struct rte_flow_error error; 1656 struct port_indirect_action *pia = *tmp; 1657 portid_t port_id_eff = port_id; 1658 1659 if (actions[i] != pia->id) 1660 continue; 1661 1662 if (pia->transfer) 1663 port_id_eff = port->flow_transfer_proxy; 1664 1665 if (port_id_is_invalid(port_id_eff, ENABLED_WARN) || 1666 port_id_eff == (portid_t)RTE_PORT_ALL) 1667 return -EINVAL; 1668 1669 /* 1670 * Poisoning to make sure PMDs update it in case 1671 * of error. 1672 */ 1673 memset(&error, 0x33, sizeof(error)); 1674 1675 if (pia->handle && rte_flow_action_handle_destroy( 1676 port_id_eff, pia->handle, &error)) { 1677 ret = port_flow_complain(&error); 1678 continue; 1679 } 1680 *tmp = pia->next; 1681 printf("Indirect action #%u destroyed\n", pia->id); 1682 free(pia); 1683 break; 1684 } 1685 if (i == n) 1686 tmp = &(*tmp)->next; 1687 ++c; 1688 } 1689 return ret; 1690 } 1691 1692 1693 /** Get indirect action by port + id */ 1694 struct rte_flow_action_handle * 1695 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1696 { 1697 1698 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1699 1700 return (pia) ? pia->handle : NULL; 1701 } 1702 1703 /** Update indirect action */ 1704 int 1705 port_action_handle_update(portid_t port_id, uint32_t id, 1706 const struct rte_flow_action *action) 1707 { 1708 struct rte_flow_error error; 1709 struct rte_flow_action_handle *action_handle; 1710 struct port_indirect_action *pia; 1711 struct rte_port *port; 1712 const void *update; 1713 1714 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1715 port_id == (portid_t)RTE_PORT_ALL) 1716 return -EINVAL; 1717 1718 port = &ports[port_id]; 1719 1720 action_handle = port_action_handle_get_by_id(port_id, id); 1721 if (!action_handle) 1722 return -EINVAL; 1723 pia = action_get_by_id(port_id, id); 1724 if (!pia) 1725 return -EINVAL; 1726 switch (pia->type) { 1727 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1728 update = action->conf; 1729 break; 1730 default: 1731 update = action; 1732 break; 1733 } 1734 1735 if (pia->transfer) 1736 port_id = port->flow_transfer_proxy; 1737 1738 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1739 port_id == (portid_t)RTE_PORT_ALL) 1740 return -EINVAL; 1741 1742 if (rte_flow_action_handle_update(port_id, action_handle, update, 1743 &error)) { 1744 return port_flow_complain(&error); 1745 } 1746 printf("Indirect action #%u updated\n", id); 1747 return 0; 1748 } 1749 1750 int 1751 port_action_handle_query(portid_t port_id, uint32_t id) 1752 { 1753 struct rte_flow_error error; 1754 struct port_indirect_action *pia; 1755 union { 1756 struct rte_flow_query_count count; 1757 struct rte_flow_query_age age; 1758 struct rte_flow_action_conntrack ct; 1759 } query; 1760 portid_t port_id_eff = port_id; 1761 struct rte_port *port; 1762 1763 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1764 port_id == (portid_t)RTE_PORT_ALL) 1765 return -EINVAL; 1766 1767 port = &ports[port_id]; 1768 1769 pia = action_get_by_id(port_id, id); 1770 if (!pia) 1771 return -EINVAL; 1772 switch (pia->type) { 1773 case RTE_FLOW_ACTION_TYPE_AGE: 1774 case RTE_FLOW_ACTION_TYPE_COUNT: 1775 break; 1776 default: 1777 fprintf(stderr, 1778 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1779 id, pia->type, port_id); 1780 return -ENOTSUP; 1781 } 1782 1783 if (pia->transfer) 1784 port_id_eff = port->flow_transfer_proxy; 1785 1786 if (port_id_is_invalid(port_id_eff, ENABLED_WARN) || 1787 port_id_eff == (portid_t)RTE_PORT_ALL) 1788 return -EINVAL; 1789 1790 /* Poisoning to make sure PMDs update it in case of error. */ 1791 memset(&error, 0x55, sizeof(error)); 1792 memset(&query, 0, sizeof(query)); 1793 if (rte_flow_action_handle_query(port_id_eff, pia->handle, &query, 1794 &error)) 1795 return port_flow_complain(&error); 1796 switch (pia->type) { 1797 case RTE_FLOW_ACTION_TYPE_AGE: 1798 printf("Indirect AGE action:\n" 1799 " aged: %u\n" 1800 " sec_since_last_hit_valid: %u\n" 1801 " sec_since_last_hit: %" PRIu32 "\n", 1802 query.age.aged, 1803 query.age.sec_since_last_hit_valid, 1804 query.age.sec_since_last_hit); 1805 break; 1806 case RTE_FLOW_ACTION_TYPE_COUNT: 1807 printf("Indirect COUNT action:\n" 1808 " hits_set: %u\n" 1809 " bytes_set: %u\n" 1810 " hits: %" PRIu64 "\n" 1811 " bytes: %" PRIu64 "\n", 1812 query.count.hits_set, 1813 query.count.bytes_set, 1814 query.count.hits, 1815 query.count.bytes); 1816 break; 1817 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1818 printf("Conntrack Context:\n" 1819 " Peer: %u, Flow dir: %s, Enable: %u\n" 1820 " Live: %u, SACK: %u, CACK: %u\n" 1821 " Packet dir: %s, Liberal: %u, State: %u\n" 1822 " Factor: %u, Retrans: %u, TCP flags: %u\n" 1823 " Last Seq: %u, Last ACK: %u\n" 1824 " Last Win: %u, Last End: %u\n", 1825 query.ct.peer_port, 1826 query.ct.is_original_dir ? "Original" : "Reply", 1827 query.ct.enable, query.ct.live_connection, 1828 query.ct.selective_ack, query.ct.challenge_ack_passed, 1829 query.ct.last_direction ? "Original" : "Reply", 1830 query.ct.liberal_mode, query.ct.state, 1831 query.ct.max_ack_window, query.ct.retransmission_limit, 1832 query.ct.last_index, query.ct.last_seq, 1833 query.ct.last_ack, query.ct.last_window, 1834 query.ct.last_end); 1835 printf(" Original Dir:\n" 1836 " scale: %u, fin: %u, ack seen: %u\n" 1837 " unacked data: %u\n Sent end: %u," 1838 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1839 query.ct.original_dir.scale, 1840 query.ct.original_dir.close_initiated, 1841 query.ct.original_dir.last_ack_seen, 1842 query.ct.original_dir.data_unacked, 1843 query.ct.original_dir.sent_end, 1844 query.ct.original_dir.reply_end, 1845 query.ct.original_dir.max_win, 1846 query.ct.original_dir.max_ack); 1847 printf(" Reply Dir:\n" 1848 " scale: %u, fin: %u, ack seen: %u\n" 1849 " unacked data: %u\n Sent end: %u," 1850 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1851 query.ct.reply_dir.scale, 1852 query.ct.reply_dir.close_initiated, 1853 query.ct.reply_dir.last_ack_seen, 1854 query.ct.reply_dir.data_unacked, 1855 query.ct.reply_dir.sent_end, 1856 query.ct.reply_dir.reply_end, 1857 query.ct.reply_dir.max_win, 1858 query.ct.reply_dir.max_ack); 1859 break; 1860 default: 1861 fprintf(stderr, 1862 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1863 id, pia->type, port_id); 1864 break; 1865 } 1866 return 0; 1867 } 1868 1869 static struct port_flow_tunnel * 1870 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1871 const struct rte_flow_item *pattern, 1872 const struct rte_flow_action *actions, 1873 const struct tunnel_ops *tunnel_ops) 1874 { 1875 int ret; 1876 struct rte_port *port; 1877 struct port_flow_tunnel *pft; 1878 struct rte_flow_error error; 1879 1880 port = &ports[port_id]; 1881 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 1882 if (!pft) { 1883 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 1884 tunnel_ops->id); 1885 return NULL; 1886 } 1887 if (tunnel_ops->actions) { 1888 uint32_t num_actions; 1889 const struct rte_flow_action *aptr; 1890 1891 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 1892 &pft->pmd_actions, 1893 &pft->num_pmd_actions, 1894 &error); 1895 if (ret) { 1896 port_flow_complain(&error); 1897 return NULL; 1898 } 1899 for (aptr = actions, num_actions = 1; 1900 aptr->type != RTE_FLOW_ACTION_TYPE_END; 1901 aptr++, num_actions++); 1902 pft->actions = malloc( 1903 (num_actions + pft->num_pmd_actions) * 1904 sizeof(actions[0])); 1905 if (!pft->actions) { 1906 rte_flow_tunnel_action_decap_release( 1907 port_id, pft->actions, 1908 pft->num_pmd_actions, &error); 1909 return NULL; 1910 } 1911 rte_memcpy(pft->actions, pft->pmd_actions, 1912 pft->num_pmd_actions * sizeof(actions[0])); 1913 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 1914 num_actions * sizeof(actions[0])); 1915 } 1916 if (tunnel_ops->items) { 1917 uint32_t num_items; 1918 const struct rte_flow_item *iptr; 1919 1920 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 1921 &pft->pmd_items, 1922 &pft->num_pmd_items, 1923 &error); 1924 if (ret) { 1925 port_flow_complain(&error); 1926 return NULL; 1927 } 1928 for (iptr = pattern, num_items = 1; 1929 iptr->type != RTE_FLOW_ITEM_TYPE_END; 1930 iptr++, num_items++); 1931 pft->items = malloc((num_items + pft->num_pmd_items) * 1932 sizeof(pattern[0])); 1933 if (!pft->items) { 1934 rte_flow_tunnel_item_release( 1935 port_id, pft->pmd_items, 1936 pft->num_pmd_items, &error); 1937 return NULL; 1938 } 1939 rte_memcpy(pft->items, pft->pmd_items, 1940 pft->num_pmd_items * sizeof(pattern[0])); 1941 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 1942 num_items * sizeof(pattern[0])); 1943 } 1944 1945 return pft; 1946 } 1947 1948 static void 1949 port_flow_tunnel_offload_cmd_release(portid_t port_id, 1950 const struct tunnel_ops *tunnel_ops, 1951 struct port_flow_tunnel *pft) 1952 { 1953 struct rte_flow_error error; 1954 1955 if (tunnel_ops->actions) { 1956 free(pft->actions); 1957 rte_flow_tunnel_action_decap_release( 1958 port_id, pft->pmd_actions, 1959 pft->num_pmd_actions, &error); 1960 pft->actions = NULL; 1961 pft->pmd_actions = NULL; 1962 } 1963 if (tunnel_ops->items) { 1964 free(pft->items); 1965 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 1966 pft->num_pmd_items, 1967 &error); 1968 pft->items = NULL; 1969 pft->pmd_items = NULL; 1970 } 1971 } 1972 1973 /** Add port meter policy */ 1974 int 1975 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 1976 const struct rte_flow_action *actions) 1977 { 1978 struct rte_mtr_error error; 1979 const struct rte_flow_action *act = actions; 1980 const struct rte_flow_action *start; 1981 struct rte_mtr_meter_policy_params policy; 1982 uint32_t i = 0, act_n; 1983 int ret; 1984 1985 for (i = 0; i < RTE_COLORS; i++) { 1986 for (act_n = 0, start = act; 1987 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 1988 act_n++; 1989 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 1990 policy.actions[i] = start; 1991 else 1992 policy.actions[i] = NULL; 1993 act++; 1994 } 1995 ret = rte_mtr_meter_policy_add(port_id, 1996 policy_id, 1997 &policy, &error); 1998 if (ret) 1999 print_mtr_err_msg(&error); 2000 return ret; 2001 } 2002 2003 /** Validate flow rule. */ 2004 int 2005 port_flow_validate(portid_t port_id, 2006 const struct rte_flow_attr *attr, 2007 const struct rte_flow_item *pattern, 2008 const struct rte_flow_action *actions, 2009 const struct tunnel_ops *tunnel_ops) 2010 { 2011 struct rte_flow_error error; 2012 struct port_flow_tunnel *pft = NULL; 2013 struct rte_port *port; 2014 2015 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2016 port_id == (portid_t)RTE_PORT_ALL) 2017 return -EINVAL; 2018 2019 port = &ports[port_id]; 2020 2021 if (attr->transfer) 2022 port_id = port->flow_transfer_proxy; 2023 2024 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2025 port_id == (portid_t)RTE_PORT_ALL) 2026 return -EINVAL; 2027 2028 /* Poisoning to make sure PMDs update it in case of error. */ 2029 memset(&error, 0x11, sizeof(error)); 2030 if (tunnel_ops->enabled) { 2031 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2032 actions, tunnel_ops); 2033 if (!pft) 2034 return -ENOENT; 2035 if (pft->items) 2036 pattern = pft->items; 2037 if (pft->actions) 2038 actions = pft->actions; 2039 } 2040 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 2041 return port_flow_complain(&error); 2042 if (tunnel_ops->enabled) 2043 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2044 printf("Flow rule validated\n"); 2045 return 0; 2046 } 2047 2048 /** Return age action structure if exists, otherwise NULL. */ 2049 static struct rte_flow_action_age * 2050 age_action_get(const struct rte_flow_action *actions) 2051 { 2052 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2053 switch (actions->type) { 2054 case RTE_FLOW_ACTION_TYPE_AGE: 2055 return (struct rte_flow_action_age *) 2056 (uintptr_t)actions->conf; 2057 default: 2058 break; 2059 } 2060 } 2061 return NULL; 2062 } 2063 2064 /** Create flow rule. */ 2065 int 2066 port_flow_create(portid_t port_id, 2067 const struct rte_flow_attr *attr, 2068 const struct rte_flow_item *pattern, 2069 const struct rte_flow_action *actions, 2070 const struct tunnel_ops *tunnel_ops) 2071 { 2072 struct rte_flow *flow; 2073 struct rte_port *port; 2074 struct port_flow *pf; 2075 uint32_t id = 0; 2076 struct rte_flow_error error; 2077 struct port_flow_tunnel *pft = NULL; 2078 struct rte_flow_action_age *age = age_action_get(actions); 2079 2080 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2081 port_id == (portid_t)RTE_PORT_ALL) 2082 return -EINVAL; 2083 2084 port = &ports[port_id]; 2085 2086 if (attr->transfer) 2087 port_id = port->flow_transfer_proxy; 2088 2089 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2090 port_id == (portid_t)RTE_PORT_ALL) 2091 return -EINVAL; 2092 2093 if (port->flow_list) { 2094 if (port->flow_list->id == UINT32_MAX) { 2095 fprintf(stderr, 2096 "Highest rule ID is already assigned, delete it first"); 2097 return -ENOMEM; 2098 } 2099 id = port->flow_list->id + 1; 2100 } 2101 if (tunnel_ops->enabled) { 2102 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2103 actions, tunnel_ops); 2104 if (!pft) 2105 return -ENOENT; 2106 if (pft->items) 2107 pattern = pft->items; 2108 if (pft->actions) 2109 actions = pft->actions; 2110 } 2111 pf = port_flow_new(attr, pattern, actions, &error); 2112 if (!pf) 2113 return port_flow_complain(&error); 2114 if (age) { 2115 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2116 age->context = &pf->age_type; 2117 } 2118 /* Poisoning to make sure PMDs update it in case of error. */ 2119 memset(&error, 0x22, sizeof(error)); 2120 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2121 if (!flow) { 2122 if (tunnel_ops->enabled) 2123 port_flow_tunnel_offload_cmd_release(port_id, 2124 tunnel_ops, pft); 2125 free(pf); 2126 return port_flow_complain(&error); 2127 } 2128 pf->next = port->flow_list; 2129 pf->id = id; 2130 pf->flow = flow; 2131 port->flow_list = pf; 2132 if (tunnel_ops->enabled) 2133 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2134 printf("Flow rule #%u created\n", pf->id); 2135 return 0; 2136 } 2137 2138 /** Destroy a number of flow rules. */ 2139 int 2140 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2141 { 2142 struct rte_port *port; 2143 struct port_flow **tmp; 2144 uint32_t c = 0; 2145 int ret = 0; 2146 2147 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2148 port_id == (portid_t)RTE_PORT_ALL) 2149 return -EINVAL; 2150 port = &ports[port_id]; 2151 tmp = &port->flow_list; 2152 while (*tmp) { 2153 uint32_t i; 2154 2155 for (i = 0; i != n; ++i) { 2156 portid_t port_id_eff = port_id; 2157 struct rte_flow_error error; 2158 struct port_flow *pf = *tmp; 2159 2160 if (rule[i] != pf->id) 2161 continue; 2162 /* 2163 * Poisoning to make sure PMDs update it in case 2164 * of error. 2165 */ 2166 memset(&error, 0x33, sizeof(error)); 2167 2168 if (pf->rule.attr->transfer) 2169 port_id_eff = port->flow_transfer_proxy; 2170 2171 if (port_id_is_invalid(port_id_eff, ENABLED_WARN) || 2172 port_id_eff == (portid_t)RTE_PORT_ALL) 2173 return -EINVAL; 2174 2175 if (rte_flow_destroy(port_id_eff, pf->flow, &error)) { 2176 ret = port_flow_complain(&error); 2177 continue; 2178 } 2179 printf("Flow rule #%u destroyed\n", pf->id); 2180 *tmp = pf->next; 2181 free(pf); 2182 break; 2183 } 2184 if (i == n) 2185 tmp = &(*tmp)->next; 2186 ++c; 2187 } 2188 return ret; 2189 } 2190 2191 /** Remove all flow rules. */ 2192 int 2193 port_flow_flush(portid_t port_id) 2194 { 2195 struct rte_flow_error error; 2196 struct rte_port *port; 2197 int ret = 0; 2198 2199 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2200 port_id == (portid_t)RTE_PORT_ALL) 2201 return -EINVAL; 2202 2203 port = &ports[port_id]; 2204 2205 if (port->flow_list == NULL) 2206 return ret; 2207 2208 /* Poisoning to make sure PMDs update it in case of error. */ 2209 memset(&error, 0x44, sizeof(error)); 2210 if (rte_flow_flush(port_id, &error)) { 2211 port_flow_complain(&error); 2212 } 2213 2214 while (port->flow_list) { 2215 struct port_flow *pf = port->flow_list->next; 2216 2217 free(port->flow_list); 2218 port->flow_list = pf; 2219 } 2220 return ret; 2221 } 2222 2223 /** Dump flow rules. */ 2224 int 2225 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 2226 const char *file_name) 2227 { 2228 int ret = 0; 2229 FILE *file = stdout; 2230 struct rte_flow_error error; 2231 struct rte_port *port; 2232 struct port_flow *pflow; 2233 struct rte_flow *tmpFlow = NULL; 2234 bool found = false; 2235 2236 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2237 port_id == (portid_t)RTE_PORT_ALL) 2238 return -EINVAL; 2239 2240 if (!dump_all) { 2241 port = &ports[port_id]; 2242 pflow = port->flow_list; 2243 while (pflow) { 2244 if (rule_id != pflow->id) { 2245 pflow = pflow->next; 2246 } else { 2247 tmpFlow = pflow->flow; 2248 if (tmpFlow) 2249 found = true; 2250 break; 2251 } 2252 } 2253 if (found == false) { 2254 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 2255 return -EINVAL; 2256 } 2257 } 2258 2259 if (file_name && strlen(file_name)) { 2260 file = fopen(file_name, "w"); 2261 if (!file) { 2262 fprintf(stderr, "Failed to create file %s: %s\n", 2263 file_name, strerror(errno)); 2264 return -errno; 2265 } 2266 } 2267 2268 if (!dump_all) 2269 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 2270 else 2271 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 2272 if (ret) { 2273 port_flow_complain(&error); 2274 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 2275 } else 2276 printf("Flow dump finished\n"); 2277 if (file_name && strlen(file_name)) 2278 fclose(file); 2279 return ret; 2280 } 2281 2282 /** Query a flow rule. */ 2283 int 2284 port_flow_query(portid_t port_id, uint32_t rule, 2285 const struct rte_flow_action *action) 2286 { 2287 struct rte_flow_error error; 2288 struct rte_port *port; 2289 struct port_flow *pf; 2290 const char *name; 2291 union { 2292 struct rte_flow_query_count count; 2293 struct rte_flow_action_rss rss_conf; 2294 struct rte_flow_query_age age; 2295 } query; 2296 int ret; 2297 2298 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2299 port_id == (portid_t)RTE_PORT_ALL) 2300 return -EINVAL; 2301 port = &ports[port_id]; 2302 for (pf = port->flow_list; pf; pf = pf->next) 2303 if (pf->id == rule) 2304 break; 2305 if (!pf) { 2306 fprintf(stderr, "Flow rule #%u not found\n", rule); 2307 return -ENOENT; 2308 } 2309 2310 if (pf->rule.attr->transfer) 2311 port_id = port->flow_transfer_proxy; 2312 2313 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2314 port_id == (portid_t)RTE_PORT_ALL) 2315 return -EINVAL; 2316 2317 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2318 &name, sizeof(name), 2319 (void *)(uintptr_t)action->type, &error); 2320 if (ret < 0) 2321 return port_flow_complain(&error); 2322 switch (action->type) { 2323 case RTE_FLOW_ACTION_TYPE_COUNT: 2324 case RTE_FLOW_ACTION_TYPE_RSS: 2325 case RTE_FLOW_ACTION_TYPE_AGE: 2326 break; 2327 default: 2328 fprintf(stderr, "Cannot query action type %d (%s)\n", 2329 action->type, name); 2330 return -ENOTSUP; 2331 } 2332 /* Poisoning to make sure PMDs update it in case of error. */ 2333 memset(&error, 0x55, sizeof(error)); 2334 memset(&query, 0, sizeof(query)); 2335 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2336 return port_flow_complain(&error); 2337 switch (action->type) { 2338 case RTE_FLOW_ACTION_TYPE_COUNT: 2339 printf("%s:\n" 2340 " hits_set: %u\n" 2341 " bytes_set: %u\n" 2342 " hits: %" PRIu64 "\n" 2343 " bytes: %" PRIu64 "\n", 2344 name, 2345 query.count.hits_set, 2346 query.count.bytes_set, 2347 query.count.hits, 2348 query.count.bytes); 2349 break; 2350 case RTE_FLOW_ACTION_TYPE_RSS: 2351 rss_config_display(&query.rss_conf); 2352 break; 2353 case RTE_FLOW_ACTION_TYPE_AGE: 2354 printf("%s:\n" 2355 " aged: %u\n" 2356 " sec_since_last_hit_valid: %u\n" 2357 " sec_since_last_hit: %" PRIu32 "\n", 2358 name, 2359 query.age.aged, 2360 query.age.sec_since_last_hit_valid, 2361 query.age.sec_since_last_hit); 2362 break; 2363 default: 2364 fprintf(stderr, 2365 "Cannot display result for action type %d (%s)\n", 2366 action->type, name); 2367 break; 2368 } 2369 return 0; 2370 } 2371 2372 /** List simply and destroy all aged flows. */ 2373 void 2374 port_flow_aged(portid_t port_id, uint8_t destroy) 2375 { 2376 void **contexts; 2377 int nb_context, total = 0, idx; 2378 struct rte_flow_error error; 2379 enum age_action_context_type *type; 2380 union { 2381 struct port_flow *pf; 2382 struct port_indirect_action *pia; 2383 } ctx; 2384 2385 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2386 port_id == (portid_t)RTE_PORT_ALL) 2387 return; 2388 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2389 printf("Port %u total aged flows: %d\n", port_id, total); 2390 if (total < 0) { 2391 port_flow_complain(&error); 2392 return; 2393 } 2394 if (total == 0) 2395 return; 2396 contexts = malloc(sizeof(void *) * total); 2397 if (contexts == NULL) { 2398 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 2399 return; 2400 } 2401 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 2402 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2403 if (nb_context != total) { 2404 fprintf(stderr, 2405 "Port:%d get aged flows count(%d) != total(%d)\n", 2406 port_id, nb_context, total); 2407 free(contexts); 2408 return; 2409 } 2410 total = 0; 2411 for (idx = 0; idx < nb_context; idx++) { 2412 if (!contexts[idx]) { 2413 fprintf(stderr, "Error: get Null context in port %u\n", 2414 port_id); 2415 continue; 2416 } 2417 type = (enum age_action_context_type *)contexts[idx]; 2418 switch (*type) { 2419 case ACTION_AGE_CONTEXT_TYPE_FLOW: 2420 ctx.pf = container_of(type, struct port_flow, age_type); 2421 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 2422 "\t%c%c%c\t\n", 2423 "Flow", 2424 ctx.pf->id, 2425 ctx.pf->rule.attr->group, 2426 ctx.pf->rule.attr->priority, 2427 ctx.pf->rule.attr->ingress ? 'i' : '-', 2428 ctx.pf->rule.attr->egress ? 'e' : '-', 2429 ctx.pf->rule.attr->transfer ? 't' : '-'); 2430 if (destroy && !port_flow_destroy(port_id, 1, 2431 &ctx.pf->id)) 2432 total++; 2433 break; 2434 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 2435 ctx.pia = container_of(type, 2436 struct port_indirect_action, age_type); 2437 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 2438 ctx.pia->id); 2439 break; 2440 default: 2441 fprintf(stderr, "Error: invalid context type %u\n", 2442 port_id); 2443 break; 2444 } 2445 } 2446 printf("\n%d flows destroyed\n", total); 2447 free(contexts); 2448 } 2449 2450 /** List flow rules. */ 2451 void 2452 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2453 { 2454 struct rte_port *port; 2455 struct port_flow *pf; 2456 struct port_flow *list = NULL; 2457 uint32_t i; 2458 2459 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2460 port_id == (portid_t)RTE_PORT_ALL) 2461 return; 2462 port = &ports[port_id]; 2463 if (!port->flow_list) 2464 return; 2465 /* Sort flows by group, priority and ID. */ 2466 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2467 struct port_flow **tmp; 2468 const struct rte_flow_attr *curr = pf->rule.attr; 2469 2470 if (n) { 2471 /* Filter out unwanted groups. */ 2472 for (i = 0; i != n; ++i) 2473 if (curr->group == group[i]) 2474 break; 2475 if (i == n) 2476 continue; 2477 } 2478 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2479 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2480 2481 if (curr->group > comp->group || 2482 (curr->group == comp->group && 2483 curr->priority > comp->priority) || 2484 (curr->group == comp->group && 2485 curr->priority == comp->priority && 2486 pf->id > (*tmp)->id)) 2487 continue; 2488 break; 2489 } 2490 pf->tmp = *tmp; 2491 *tmp = pf; 2492 } 2493 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2494 for (pf = list; pf != NULL; pf = pf->tmp) { 2495 const struct rte_flow_item *item = pf->rule.pattern; 2496 const struct rte_flow_action *action = pf->rule.actions; 2497 const char *name; 2498 2499 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2500 pf->id, 2501 pf->rule.attr->group, 2502 pf->rule.attr->priority, 2503 pf->rule.attr->ingress ? 'i' : '-', 2504 pf->rule.attr->egress ? 'e' : '-', 2505 pf->rule.attr->transfer ? 't' : '-'); 2506 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2507 if ((uint32_t)item->type > INT_MAX) 2508 name = "PMD_INTERNAL"; 2509 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2510 &name, sizeof(name), 2511 (void *)(uintptr_t)item->type, 2512 NULL) <= 0) 2513 name = "[UNKNOWN]"; 2514 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2515 printf("%s ", name); 2516 ++item; 2517 } 2518 printf("=>"); 2519 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2520 if ((uint32_t)action->type > INT_MAX) 2521 name = "PMD_INTERNAL"; 2522 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2523 &name, sizeof(name), 2524 (void *)(uintptr_t)action->type, 2525 NULL) <= 0) 2526 name = "[UNKNOWN]"; 2527 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2528 printf(" %s", name); 2529 ++action; 2530 } 2531 printf("\n"); 2532 } 2533 } 2534 2535 /** Restrict ingress traffic to the defined flow rules. */ 2536 int 2537 port_flow_isolate(portid_t port_id, int set) 2538 { 2539 struct rte_flow_error error; 2540 2541 /* Poisoning to make sure PMDs update it in case of error. */ 2542 memset(&error, 0x66, sizeof(error)); 2543 if (rte_flow_isolate(port_id, set, &error)) 2544 return port_flow_complain(&error); 2545 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2546 port_id, 2547 set ? "now restricted" : "not restricted anymore"); 2548 return 0; 2549 } 2550 2551 /* 2552 * RX/TX ring descriptors display functions. 2553 */ 2554 int 2555 rx_queue_id_is_invalid(queueid_t rxq_id) 2556 { 2557 if (rxq_id < nb_rxq) 2558 return 0; 2559 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 2560 rxq_id, nb_rxq); 2561 return 1; 2562 } 2563 2564 int 2565 tx_queue_id_is_invalid(queueid_t txq_id) 2566 { 2567 if (txq_id < nb_txq) 2568 return 0; 2569 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 2570 txq_id, nb_txq); 2571 return 1; 2572 } 2573 2574 static int 2575 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2576 { 2577 struct rte_port *port = &ports[port_id]; 2578 struct rte_eth_rxq_info rx_qinfo; 2579 int ret; 2580 2581 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2582 if (ret == 0) { 2583 *ring_size = rx_qinfo.nb_desc; 2584 return ret; 2585 } 2586 2587 if (ret != -ENOTSUP) 2588 return ret; 2589 /* 2590 * If the rte_eth_rx_queue_info_get is not support for this PMD, 2591 * ring_size stored in testpmd will be used for validity verification. 2592 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 2593 * being 0, it will use a default value provided by PMDs to setup this 2594 * rxq. If the default value is 0, it will use the 2595 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 2596 */ 2597 if (port->nb_rx_desc[rxq_id]) 2598 *ring_size = port->nb_rx_desc[rxq_id]; 2599 else if (port->dev_info.default_rxportconf.ring_size) 2600 *ring_size = port->dev_info.default_rxportconf.ring_size; 2601 else 2602 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2603 return 0; 2604 } 2605 2606 static int 2607 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2608 { 2609 struct rte_port *port = &ports[port_id]; 2610 struct rte_eth_txq_info tx_qinfo; 2611 int ret; 2612 2613 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2614 if (ret == 0) { 2615 *ring_size = tx_qinfo.nb_desc; 2616 return ret; 2617 } 2618 2619 if (ret != -ENOTSUP) 2620 return ret; 2621 /* 2622 * If the rte_eth_tx_queue_info_get is not support for this PMD, 2623 * ring_size stored in testpmd will be used for validity verification. 2624 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 2625 * being 0, it will use a default value provided by PMDs to setup this 2626 * txq. If the default value is 0, it will use the 2627 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 2628 */ 2629 if (port->nb_tx_desc[txq_id]) 2630 *ring_size = port->nb_tx_desc[txq_id]; 2631 else if (port->dev_info.default_txportconf.ring_size) 2632 *ring_size = port->dev_info.default_txportconf.ring_size; 2633 else 2634 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2635 return 0; 2636 } 2637 2638 static int 2639 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2640 { 2641 uint16_t ring_size; 2642 int ret; 2643 2644 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2645 if (ret) 2646 return 1; 2647 2648 if (rxdesc_id < ring_size) 2649 return 0; 2650 2651 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 2652 rxdesc_id, ring_size); 2653 return 1; 2654 } 2655 2656 static int 2657 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2658 { 2659 uint16_t ring_size; 2660 int ret; 2661 2662 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2663 if (ret) 2664 return 1; 2665 2666 if (txdesc_id < ring_size) 2667 return 0; 2668 2669 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 2670 txdesc_id, ring_size); 2671 return 1; 2672 } 2673 2674 static const struct rte_memzone * 2675 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2676 { 2677 char mz_name[RTE_MEMZONE_NAMESIZE]; 2678 const struct rte_memzone *mz; 2679 2680 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2681 port_id, q_id, ring_name); 2682 mz = rte_memzone_lookup(mz_name); 2683 if (mz == NULL) 2684 fprintf(stderr, 2685 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 2686 ring_name, port_id, q_id, mz_name); 2687 return mz; 2688 } 2689 2690 union igb_ring_dword { 2691 uint64_t dword; 2692 struct { 2693 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2694 uint32_t lo; 2695 uint32_t hi; 2696 #else 2697 uint32_t hi; 2698 uint32_t lo; 2699 #endif 2700 } words; 2701 }; 2702 2703 struct igb_ring_desc_32_bytes { 2704 union igb_ring_dword lo_dword; 2705 union igb_ring_dword hi_dword; 2706 union igb_ring_dword resv1; 2707 union igb_ring_dword resv2; 2708 }; 2709 2710 struct igb_ring_desc_16_bytes { 2711 union igb_ring_dword lo_dword; 2712 union igb_ring_dword hi_dword; 2713 }; 2714 2715 static void 2716 ring_rxd_display_dword(union igb_ring_dword dword) 2717 { 2718 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2719 (unsigned)dword.words.hi); 2720 } 2721 2722 static void 2723 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2724 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2725 portid_t port_id, 2726 #else 2727 __rte_unused portid_t port_id, 2728 #endif 2729 uint16_t desc_id) 2730 { 2731 struct igb_ring_desc_16_bytes *ring = 2732 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2733 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2734 int ret; 2735 struct rte_eth_dev_info dev_info; 2736 2737 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2738 if (ret != 0) 2739 return; 2740 2741 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2742 /* 32 bytes RX descriptor, i40e only */ 2743 struct igb_ring_desc_32_bytes *ring = 2744 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2745 ring[desc_id].lo_dword.dword = 2746 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2747 ring_rxd_display_dword(ring[desc_id].lo_dword); 2748 ring[desc_id].hi_dword.dword = 2749 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2750 ring_rxd_display_dword(ring[desc_id].hi_dword); 2751 ring[desc_id].resv1.dword = 2752 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2753 ring_rxd_display_dword(ring[desc_id].resv1); 2754 ring[desc_id].resv2.dword = 2755 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2756 ring_rxd_display_dword(ring[desc_id].resv2); 2757 2758 return; 2759 } 2760 #endif 2761 /* 16 bytes RX descriptor */ 2762 ring[desc_id].lo_dword.dword = 2763 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2764 ring_rxd_display_dword(ring[desc_id].lo_dword); 2765 ring[desc_id].hi_dword.dword = 2766 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2767 ring_rxd_display_dword(ring[desc_id].hi_dword); 2768 } 2769 2770 static void 2771 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2772 { 2773 struct igb_ring_desc_16_bytes *ring; 2774 struct igb_ring_desc_16_bytes txd; 2775 2776 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2777 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2778 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2779 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2780 (unsigned)txd.lo_dword.words.lo, 2781 (unsigned)txd.lo_dword.words.hi, 2782 (unsigned)txd.hi_dword.words.lo, 2783 (unsigned)txd.hi_dword.words.hi); 2784 } 2785 2786 void 2787 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2788 { 2789 const struct rte_memzone *rx_mz; 2790 2791 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2792 return; 2793 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2794 if (rx_mz == NULL) 2795 return; 2796 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2797 } 2798 2799 void 2800 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2801 { 2802 const struct rte_memzone *tx_mz; 2803 2804 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2805 return; 2806 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2807 if (tx_mz == NULL) 2808 return; 2809 ring_tx_descriptor_display(tx_mz, txd_id); 2810 } 2811 2812 void 2813 fwd_lcores_config_display(void) 2814 { 2815 lcoreid_t lc_id; 2816 2817 printf("List of forwarding lcores:"); 2818 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2819 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2820 printf("\n"); 2821 } 2822 void 2823 rxtx_config_display(void) 2824 { 2825 portid_t pid; 2826 queueid_t qid; 2827 2828 printf(" %s packet forwarding%s packets/burst=%d\n", 2829 cur_fwd_eng->fwd_mode_name, 2830 retry_enabled == 0 ? "" : " with retry", 2831 nb_pkt_per_burst); 2832 2833 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2834 printf(" packet len=%u - nb packet segments=%d\n", 2835 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2836 2837 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2838 nb_fwd_lcores, nb_fwd_ports); 2839 2840 RTE_ETH_FOREACH_DEV(pid) { 2841 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2842 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2843 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2844 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2845 struct rte_eth_rxq_info rx_qinfo; 2846 struct rte_eth_txq_info tx_qinfo; 2847 uint16_t rx_free_thresh_tmp; 2848 uint16_t tx_free_thresh_tmp; 2849 uint16_t tx_rs_thresh_tmp; 2850 uint16_t nb_rx_desc_tmp; 2851 uint16_t nb_tx_desc_tmp; 2852 uint64_t offloads_tmp; 2853 uint8_t pthresh_tmp; 2854 uint8_t hthresh_tmp; 2855 uint8_t wthresh_tmp; 2856 int32_t rc; 2857 2858 /* per port config */ 2859 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2860 (unsigned int)pid, nb_rxq, nb_txq); 2861 2862 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2863 ports[pid].dev_conf.rxmode.offloads, 2864 ports[pid].dev_conf.txmode.offloads); 2865 2866 /* per rx queue config only for first queue to be less verbose */ 2867 for (qid = 0; qid < 1; qid++) { 2868 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2869 if (rc) { 2870 nb_rx_desc_tmp = nb_rx_desc[qid]; 2871 rx_free_thresh_tmp = 2872 rx_conf[qid].rx_free_thresh; 2873 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2874 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2875 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2876 offloads_tmp = rx_conf[qid].offloads; 2877 } else { 2878 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2879 rx_free_thresh_tmp = 2880 rx_qinfo.conf.rx_free_thresh; 2881 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2882 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2883 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2884 offloads_tmp = rx_qinfo.conf.offloads; 2885 } 2886 2887 printf(" RX queue: %d\n", qid); 2888 printf(" RX desc=%d - RX free threshold=%d\n", 2889 nb_rx_desc_tmp, rx_free_thresh_tmp); 2890 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2891 " wthresh=%d\n", 2892 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2893 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 2894 if (rx_conf->share_group > 0) 2895 printf(" share_group=%u share_qid=%u", 2896 rx_conf->share_group, 2897 rx_conf->share_qid); 2898 printf("\n"); 2899 } 2900 2901 /* per tx queue config only for first queue to be less verbose */ 2902 for (qid = 0; qid < 1; qid++) { 2903 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2904 if (rc) { 2905 nb_tx_desc_tmp = nb_tx_desc[qid]; 2906 tx_free_thresh_tmp = 2907 tx_conf[qid].tx_free_thresh; 2908 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2909 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2910 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2911 offloads_tmp = tx_conf[qid].offloads; 2912 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2913 } else { 2914 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2915 tx_free_thresh_tmp = 2916 tx_qinfo.conf.tx_free_thresh; 2917 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2918 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2919 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2920 offloads_tmp = tx_qinfo.conf.offloads; 2921 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2922 } 2923 2924 printf(" TX queue: %d\n", qid); 2925 printf(" TX desc=%d - TX free threshold=%d\n", 2926 nb_tx_desc_tmp, tx_free_thresh_tmp); 2927 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2928 " wthresh=%d\n", 2929 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2930 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2931 offloads_tmp, tx_rs_thresh_tmp); 2932 } 2933 } 2934 } 2935 2936 void 2937 port_rss_reta_info(portid_t port_id, 2938 struct rte_eth_rss_reta_entry64 *reta_conf, 2939 uint16_t nb_entries) 2940 { 2941 uint16_t i, idx, shift; 2942 int ret; 2943 2944 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2945 return; 2946 2947 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2948 if (ret != 0) { 2949 fprintf(stderr, 2950 "Failed to get RSS RETA info, return code = %d\n", 2951 ret); 2952 return; 2953 } 2954 2955 for (i = 0; i < nb_entries; i++) { 2956 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2957 shift = i % RTE_ETH_RETA_GROUP_SIZE; 2958 if (!(reta_conf[idx].mask & (1ULL << shift))) 2959 continue; 2960 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2961 i, reta_conf[idx].reta[shift]); 2962 } 2963 } 2964 2965 /* 2966 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 2967 * key of the port. 2968 */ 2969 void 2970 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2971 { 2972 struct rte_eth_rss_conf rss_conf = {0}; 2973 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2974 uint64_t rss_hf; 2975 uint8_t i; 2976 int diag; 2977 struct rte_eth_dev_info dev_info; 2978 uint8_t hash_key_size; 2979 int ret; 2980 2981 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2982 return; 2983 2984 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2985 if (ret != 0) 2986 return; 2987 2988 if (dev_info.hash_key_size > 0 && 2989 dev_info.hash_key_size <= sizeof(rss_key)) 2990 hash_key_size = dev_info.hash_key_size; 2991 else { 2992 fprintf(stderr, 2993 "dev_info did not provide a valid hash key size\n"); 2994 return; 2995 } 2996 2997 /* Get RSS hash key if asked to display it */ 2998 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 2999 rss_conf.rss_key_len = hash_key_size; 3000 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3001 if (diag != 0) { 3002 switch (diag) { 3003 case -ENODEV: 3004 fprintf(stderr, "port index %d invalid\n", port_id); 3005 break; 3006 case -ENOTSUP: 3007 fprintf(stderr, "operation not supported by device\n"); 3008 break; 3009 default: 3010 fprintf(stderr, "operation failed - diag=%d\n", diag); 3011 break; 3012 } 3013 return; 3014 } 3015 rss_hf = rss_conf.rss_hf; 3016 if (rss_hf == 0) { 3017 printf("RSS disabled\n"); 3018 return; 3019 } 3020 printf("RSS functions:\n "); 3021 for (i = 0; rss_type_table[i].str; i++) { 3022 if (rss_hf & rss_type_table[i].rss_type) 3023 printf("%s ", rss_type_table[i].str); 3024 } 3025 printf("\n"); 3026 if (!show_rss_key) 3027 return; 3028 printf("RSS key:\n"); 3029 for (i = 0; i < hash_key_size; i++) 3030 printf("%02X", rss_key[i]); 3031 printf("\n"); 3032 } 3033 3034 void 3035 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3036 uint8_t hash_key_len) 3037 { 3038 struct rte_eth_rss_conf rss_conf; 3039 int diag; 3040 unsigned int i; 3041 3042 rss_conf.rss_key = NULL; 3043 rss_conf.rss_key_len = hash_key_len; 3044 rss_conf.rss_hf = 0; 3045 for (i = 0; rss_type_table[i].str; i++) { 3046 if (!strcmp(rss_type_table[i].str, rss_type)) 3047 rss_conf.rss_hf = rss_type_table[i].rss_type; 3048 } 3049 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3050 if (diag == 0) { 3051 rss_conf.rss_key = hash_key; 3052 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3053 } 3054 if (diag == 0) 3055 return; 3056 3057 switch (diag) { 3058 case -ENODEV: 3059 fprintf(stderr, "port index %d invalid\n", port_id); 3060 break; 3061 case -ENOTSUP: 3062 fprintf(stderr, "operation not supported by device\n"); 3063 break; 3064 default: 3065 fprintf(stderr, "operation failed - diag=%d\n", diag); 3066 break; 3067 } 3068 } 3069 3070 /* 3071 * Check whether a shared rxq scheduled on other lcores. 3072 */ 3073 static bool 3074 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 3075 portid_t src_port, queueid_t src_rxq, 3076 uint32_t share_group, queueid_t share_rxq) 3077 { 3078 streamid_t sm_id; 3079 streamid_t nb_fs_per_lcore; 3080 lcoreid_t nb_fc; 3081 lcoreid_t lc_id; 3082 struct fwd_stream *fs; 3083 struct rte_port *port; 3084 struct rte_eth_dev_info *dev_info; 3085 struct rte_eth_rxconf *rxq_conf; 3086 3087 nb_fc = cur_fwd_config.nb_fwd_lcores; 3088 /* Check remaining cores. */ 3089 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 3090 sm_id = fwd_lcores[lc_id]->stream_idx; 3091 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3092 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3093 sm_id++) { 3094 fs = fwd_streams[sm_id]; 3095 port = &ports[fs->rx_port]; 3096 dev_info = &port->dev_info; 3097 rxq_conf = &port->rx_conf[fs->rx_queue]; 3098 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3099 == 0 || rxq_conf->share_group == 0) 3100 /* Not shared rxq. */ 3101 continue; 3102 if (domain_id != port->dev_info.switch_info.domain_id) 3103 continue; 3104 if (rxq_conf->share_group != share_group) 3105 continue; 3106 if (rxq_conf->share_qid != share_rxq) 3107 continue; 3108 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 3109 share_group, share_rxq); 3110 printf(" lcore %hhu Port %hu queue %hu\n", 3111 src_lc, src_port, src_rxq); 3112 printf(" lcore %hhu Port %hu queue %hu\n", 3113 lc_id, fs->rx_port, fs->rx_queue); 3114 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 3115 nb_rxq); 3116 return true; 3117 } 3118 } 3119 return false; 3120 } 3121 3122 /* 3123 * Check shared rxq configuration. 3124 * 3125 * Shared group must not being scheduled on different core. 3126 */ 3127 bool 3128 pkt_fwd_shared_rxq_check(void) 3129 { 3130 streamid_t sm_id; 3131 streamid_t nb_fs_per_lcore; 3132 lcoreid_t nb_fc; 3133 lcoreid_t lc_id; 3134 struct fwd_stream *fs; 3135 uint16_t domain_id; 3136 struct rte_port *port; 3137 struct rte_eth_dev_info *dev_info; 3138 struct rte_eth_rxconf *rxq_conf; 3139 3140 if (rxq_share == 0) 3141 return true; 3142 nb_fc = cur_fwd_config.nb_fwd_lcores; 3143 /* 3144 * Check streams on each core, make sure the same switch domain + 3145 * group + queue doesn't get scheduled on other cores. 3146 */ 3147 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 3148 sm_id = fwd_lcores[lc_id]->stream_idx; 3149 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3150 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3151 sm_id++) { 3152 fs = fwd_streams[sm_id]; 3153 /* Update lcore info stream being scheduled. */ 3154 fs->lcore = fwd_lcores[lc_id]; 3155 port = &ports[fs->rx_port]; 3156 dev_info = &port->dev_info; 3157 rxq_conf = &port->rx_conf[fs->rx_queue]; 3158 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3159 == 0 || rxq_conf->share_group == 0) 3160 /* Not shared rxq. */ 3161 continue; 3162 /* Check shared rxq not scheduled on remaining cores. */ 3163 domain_id = port->dev_info.switch_info.domain_id; 3164 if (fwd_stream_on_other_lcores(domain_id, lc_id, 3165 fs->rx_port, 3166 fs->rx_queue, 3167 rxq_conf->share_group, 3168 rxq_conf->share_qid)) 3169 return false; 3170 } 3171 } 3172 return true; 3173 } 3174 3175 /* 3176 * Setup forwarding configuration for each logical core. 3177 */ 3178 static void 3179 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 3180 { 3181 streamid_t nb_fs_per_lcore; 3182 streamid_t nb_fs; 3183 streamid_t sm_id; 3184 lcoreid_t nb_extra; 3185 lcoreid_t nb_fc; 3186 lcoreid_t nb_lc; 3187 lcoreid_t lc_id; 3188 3189 nb_fs = cfg->nb_fwd_streams; 3190 nb_fc = cfg->nb_fwd_lcores; 3191 if (nb_fs <= nb_fc) { 3192 nb_fs_per_lcore = 1; 3193 nb_extra = 0; 3194 } else { 3195 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3196 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3197 } 3198 3199 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3200 sm_id = 0; 3201 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3202 fwd_lcores[lc_id]->stream_idx = sm_id; 3203 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3204 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3205 } 3206 3207 /* 3208 * Assign extra remaining streams, if any. 3209 */ 3210 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3211 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3212 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3213 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3214 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3215 } 3216 } 3217 3218 static portid_t 3219 fwd_topology_tx_port_get(portid_t rxp) 3220 { 3221 static int warning_once = 1; 3222 3223 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 3224 3225 switch (port_topology) { 3226 default: 3227 case PORT_TOPOLOGY_PAIRED: 3228 if ((rxp & 0x1) == 0) { 3229 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 3230 return rxp + 1; 3231 if (warning_once) { 3232 fprintf(stderr, 3233 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 3234 warning_once = 0; 3235 } 3236 return rxp; 3237 } 3238 return rxp - 1; 3239 case PORT_TOPOLOGY_CHAINED: 3240 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 3241 case PORT_TOPOLOGY_LOOP: 3242 return rxp; 3243 } 3244 } 3245 3246 static void 3247 simple_fwd_config_setup(void) 3248 { 3249 portid_t i; 3250 3251 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 3252 cur_fwd_config.nb_fwd_streams = 3253 (streamid_t) cur_fwd_config.nb_fwd_ports; 3254 3255 /* reinitialize forwarding streams */ 3256 init_fwd_streams(); 3257 3258 /* 3259 * In the simple forwarding test, the number of forwarding cores 3260 * must be lower or equal to the number of forwarding ports. 3261 */ 3262 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3263 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 3264 cur_fwd_config.nb_fwd_lcores = 3265 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 3266 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3267 3268 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 3269 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 3270 fwd_streams[i]->rx_queue = 0; 3271 fwd_streams[i]->tx_port = 3272 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 3273 fwd_streams[i]->tx_queue = 0; 3274 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 3275 fwd_streams[i]->retry_enabled = retry_enabled; 3276 } 3277 } 3278 3279 /** 3280 * For the RSS forwarding test all streams distributed over lcores. Each stream 3281 * being composed of a RX queue to poll on a RX port for input messages, 3282 * associated with a TX queue of a TX port where to send forwarded packets. 3283 */ 3284 static void 3285 rss_fwd_config_setup(void) 3286 { 3287 portid_t rxp; 3288 portid_t txp; 3289 queueid_t rxq; 3290 queueid_t nb_q; 3291 streamid_t sm_id; 3292 int start; 3293 int end; 3294 3295 nb_q = nb_rxq; 3296 if (nb_q > nb_txq) 3297 nb_q = nb_txq; 3298 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3299 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3300 cur_fwd_config.nb_fwd_streams = 3301 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 3302 3303 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3304 cur_fwd_config.nb_fwd_lcores = 3305 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3306 3307 /* reinitialize forwarding streams */ 3308 init_fwd_streams(); 3309 3310 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3311 3312 if (proc_id > 0 && nb_q % num_procs != 0) 3313 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 3314 3315 /** 3316 * In multi-process, All queues are allocated to different 3317 * processes based on num_procs and proc_id. For example: 3318 * if supports 4 queues(nb_q), 2 processes(num_procs), 3319 * the 0~1 queue for primary process. 3320 * the 2~3 queue for secondary process. 3321 */ 3322 start = proc_id * nb_q / num_procs; 3323 end = start + nb_q / num_procs; 3324 rxp = 0; 3325 rxq = start; 3326 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 3327 struct fwd_stream *fs; 3328 3329 fs = fwd_streams[sm_id]; 3330 txp = fwd_topology_tx_port_get(rxp); 3331 fs->rx_port = fwd_ports_ids[rxp]; 3332 fs->rx_queue = rxq; 3333 fs->tx_port = fwd_ports_ids[txp]; 3334 fs->tx_queue = rxq; 3335 fs->peer_addr = fs->tx_port; 3336 fs->retry_enabled = retry_enabled; 3337 rxp++; 3338 if (rxp < nb_fwd_ports) 3339 continue; 3340 rxp = 0; 3341 rxq++; 3342 if (rxq >= end) 3343 rxq = start; 3344 } 3345 } 3346 3347 static uint16_t 3348 get_fwd_port_total_tc_num(void) 3349 { 3350 struct rte_eth_dcb_info dcb_info; 3351 uint16_t total_tc_num = 0; 3352 unsigned int i; 3353 3354 for (i = 0; i < nb_fwd_ports; i++) { 3355 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 3356 total_tc_num += dcb_info.nb_tcs; 3357 } 3358 3359 return total_tc_num; 3360 } 3361 3362 /** 3363 * For the DCB forwarding test, each core is assigned on each traffic class. 3364 * 3365 * Each core is assigned a multi-stream, each stream being composed of 3366 * a RX queue to poll on a RX port for input messages, associated with 3367 * a TX queue of a TX port where to send forwarded packets. All RX and 3368 * TX queues are mapping to the same traffic class. 3369 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 3370 * the same core 3371 */ 3372 static void 3373 dcb_fwd_config_setup(void) 3374 { 3375 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3376 portid_t txp, rxp = 0; 3377 queueid_t txq, rxq = 0; 3378 lcoreid_t lc_id; 3379 uint16_t nb_rx_queue, nb_tx_queue; 3380 uint16_t i, j, k, sm_id = 0; 3381 uint16_t total_tc_num; 3382 struct rte_port *port; 3383 uint8_t tc = 0; 3384 portid_t pid; 3385 int ret; 3386 3387 /* 3388 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 3389 * or RTE_PORT_STOPPED. 3390 * 3391 * Re-configure ports to get updated mapping between tc and queue in 3392 * case the queue number of the port is changed. Skip for started ports 3393 * since modifying queue number and calling dev_configure need to stop 3394 * ports first. 3395 */ 3396 for (pid = 0; pid < nb_fwd_ports; pid++) { 3397 if (port_is_started(pid) == 1) 3398 continue; 3399 3400 port = &ports[pid]; 3401 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 3402 &port->dev_conf); 3403 if (ret < 0) { 3404 fprintf(stderr, 3405 "Failed to re-configure port %d, ret = %d.\n", 3406 pid, ret); 3407 return; 3408 } 3409 } 3410 3411 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3412 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3413 cur_fwd_config.nb_fwd_streams = 3414 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3415 total_tc_num = get_fwd_port_total_tc_num(); 3416 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 3417 cur_fwd_config.nb_fwd_lcores = total_tc_num; 3418 3419 /* reinitialize forwarding streams */ 3420 init_fwd_streams(); 3421 sm_id = 0; 3422 txp = 1; 3423 /* get the dcb info on the first RX and TX ports */ 3424 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3425 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3426 3427 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3428 fwd_lcores[lc_id]->stream_nb = 0; 3429 fwd_lcores[lc_id]->stream_idx = sm_id; 3430 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 3431 /* if the nb_queue is zero, means this tc is 3432 * not enabled on the POOL 3433 */ 3434 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3435 break; 3436 k = fwd_lcores[lc_id]->stream_nb + 3437 fwd_lcores[lc_id]->stream_idx; 3438 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3439 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3440 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3441 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3442 for (j = 0; j < nb_rx_queue; j++) { 3443 struct fwd_stream *fs; 3444 3445 fs = fwd_streams[k + j]; 3446 fs->rx_port = fwd_ports_ids[rxp]; 3447 fs->rx_queue = rxq + j; 3448 fs->tx_port = fwd_ports_ids[txp]; 3449 fs->tx_queue = txq + j % nb_tx_queue; 3450 fs->peer_addr = fs->tx_port; 3451 fs->retry_enabled = retry_enabled; 3452 } 3453 fwd_lcores[lc_id]->stream_nb += 3454 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3455 } 3456 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3457 3458 tc++; 3459 if (tc < rxp_dcb_info.nb_tcs) 3460 continue; 3461 /* Restart from TC 0 on next RX port */ 3462 tc = 0; 3463 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3464 rxp = (portid_t) 3465 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3466 else 3467 rxp++; 3468 if (rxp >= nb_fwd_ports) 3469 return; 3470 /* get the dcb information on next RX and TX ports */ 3471 if ((rxp & 0x1) == 0) 3472 txp = (portid_t) (rxp + 1); 3473 else 3474 txp = (portid_t) (rxp - 1); 3475 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3476 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3477 } 3478 } 3479 3480 static void 3481 icmp_echo_config_setup(void) 3482 { 3483 portid_t rxp; 3484 queueid_t rxq; 3485 lcoreid_t lc_id; 3486 uint16_t sm_id; 3487 3488 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3489 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3490 (nb_txq * nb_fwd_ports); 3491 else 3492 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3493 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3494 cur_fwd_config.nb_fwd_streams = 3495 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3496 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3497 cur_fwd_config.nb_fwd_lcores = 3498 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3499 if (verbose_level > 0) { 3500 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 3501 __FUNCTION__, 3502 cur_fwd_config.nb_fwd_lcores, 3503 cur_fwd_config.nb_fwd_ports, 3504 cur_fwd_config.nb_fwd_streams); 3505 } 3506 3507 /* reinitialize forwarding streams */ 3508 init_fwd_streams(); 3509 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3510 rxp = 0; rxq = 0; 3511 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3512 if (verbose_level > 0) 3513 printf(" core=%d: \n", lc_id); 3514 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3515 struct fwd_stream *fs; 3516 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3517 fs->rx_port = fwd_ports_ids[rxp]; 3518 fs->rx_queue = rxq; 3519 fs->tx_port = fs->rx_port; 3520 fs->tx_queue = rxq; 3521 fs->peer_addr = fs->tx_port; 3522 fs->retry_enabled = retry_enabled; 3523 if (verbose_level > 0) 3524 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3525 sm_id, fs->rx_port, fs->rx_queue, 3526 fs->tx_queue); 3527 rxq = (queueid_t) (rxq + 1); 3528 if (rxq == nb_rxq) { 3529 rxq = 0; 3530 rxp = (portid_t) (rxp + 1); 3531 } 3532 } 3533 } 3534 } 3535 3536 void 3537 fwd_config_setup(void) 3538 { 3539 struct rte_port *port; 3540 portid_t pt_id; 3541 unsigned int i; 3542 3543 cur_fwd_config.fwd_eng = cur_fwd_eng; 3544 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3545 icmp_echo_config_setup(); 3546 return; 3547 } 3548 3549 if ((nb_rxq > 1) && (nb_txq > 1)){ 3550 if (dcb_config) { 3551 for (i = 0; i < nb_fwd_ports; i++) { 3552 pt_id = fwd_ports_ids[i]; 3553 port = &ports[pt_id]; 3554 if (!port->dcb_flag) { 3555 fprintf(stderr, 3556 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 3557 return; 3558 } 3559 } 3560 if (nb_fwd_lcores == 1) { 3561 fprintf(stderr, 3562 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 3563 return; 3564 } 3565 3566 dcb_fwd_config_setup(); 3567 } else 3568 rss_fwd_config_setup(); 3569 } 3570 else 3571 simple_fwd_config_setup(); 3572 } 3573 3574 static const char * 3575 mp_alloc_to_str(uint8_t mode) 3576 { 3577 switch (mode) { 3578 case MP_ALLOC_NATIVE: 3579 return "native"; 3580 case MP_ALLOC_ANON: 3581 return "anon"; 3582 case MP_ALLOC_XMEM: 3583 return "xmem"; 3584 case MP_ALLOC_XMEM_HUGE: 3585 return "xmemhuge"; 3586 case MP_ALLOC_XBUF: 3587 return "xbuf"; 3588 default: 3589 return "invalid"; 3590 } 3591 } 3592 3593 void 3594 pkt_fwd_config_display(struct fwd_config *cfg) 3595 { 3596 struct fwd_stream *fs; 3597 lcoreid_t lc_id; 3598 streamid_t sm_id; 3599 3600 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3601 "NUMA support %s, MP allocation mode: %s\n", 3602 cfg->fwd_eng->fwd_mode_name, 3603 retry_enabled == 0 ? "" : " with retry", 3604 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3605 numa_support == 1 ? "enabled" : "disabled", 3606 mp_alloc_to_str(mp_alloc_type)); 3607 3608 if (retry_enabled) 3609 printf("TX retry num: %u, delay between TX retries: %uus\n", 3610 burst_tx_retry_num, burst_tx_delay_time); 3611 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3612 printf("Logical Core %u (socket %u) forwards packets on " 3613 "%d streams:", 3614 fwd_lcores_cpuids[lc_id], 3615 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3616 fwd_lcores[lc_id]->stream_nb); 3617 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3618 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3619 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3620 "P=%d/Q=%d (socket %u) ", 3621 fs->rx_port, fs->rx_queue, 3622 ports[fs->rx_port].socket_id, 3623 fs->tx_port, fs->tx_queue, 3624 ports[fs->tx_port].socket_id); 3625 print_ethaddr("peer=", 3626 &peer_eth_addrs[fs->peer_addr]); 3627 } 3628 printf("\n"); 3629 } 3630 printf("\n"); 3631 } 3632 3633 void 3634 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3635 { 3636 struct rte_ether_addr new_peer_addr; 3637 if (!rte_eth_dev_is_valid_port(port_id)) { 3638 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 3639 return; 3640 } 3641 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3642 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 3643 peer_addr); 3644 return; 3645 } 3646 peer_eth_addrs[port_id] = new_peer_addr; 3647 } 3648 3649 int 3650 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3651 { 3652 unsigned int i; 3653 unsigned int lcore_cpuid; 3654 int record_now; 3655 3656 record_now = 0; 3657 again: 3658 for (i = 0; i < nb_lc; i++) { 3659 lcore_cpuid = lcorelist[i]; 3660 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3661 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 3662 return -1; 3663 } 3664 if (lcore_cpuid == rte_get_main_lcore()) { 3665 fprintf(stderr, 3666 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 3667 lcore_cpuid); 3668 return -1; 3669 } 3670 if (record_now) 3671 fwd_lcores_cpuids[i] = lcore_cpuid; 3672 } 3673 if (record_now == 0) { 3674 record_now = 1; 3675 goto again; 3676 } 3677 nb_cfg_lcores = (lcoreid_t) nb_lc; 3678 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3679 printf("previous number of forwarding cores %u - changed to " 3680 "number of configured cores %u\n", 3681 (unsigned int) nb_fwd_lcores, nb_lc); 3682 nb_fwd_lcores = (lcoreid_t) nb_lc; 3683 } 3684 3685 return 0; 3686 } 3687 3688 int 3689 set_fwd_lcores_mask(uint64_t lcoremask) 3690 { 3691 unsigned int lcorelist[64]; 3692 unsigned int nb_lc; 3693 unsigned int i; 3694 3695 if (lcoremask == 0) { 3696 fprintf(stderr, "Invalid NULL mask of cores\n"); 3697 return -1; 3698 } 3699 nb_lc = 0; 3700 for (i = 0; i < 64; i++) { 3701 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3702 continue; 3703 lcorelist[nb_lc++] = i; 3704 } 3705 return set_fwd_lcores_list(lcorelist, nb_lc); 3706 } 3707 3708 void 3709 set_fwd_lcores_number(uint16_t nb_lc) 3710 { 3711 if (test_done == 0) { 3712 fprintf(stderr, "Please stop forwarding first\n"); 3713 return; 3714 } 3715 if (nb_lc > nb_cfg_lcores) { 3716 fprintf(stderr, 3717 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 3718 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3719 return; 3720 } 3721 nb_fwd_lcores = (lcoreid_t) nb_lc; 3722 printf("Number of forwarding cores set to %u\n", 3723 (unsigned int) nb_fwd_lcores); 3724 } 3725 3726 void 3727 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3728 { 3729 unsigned int i; 3730 portid_t port_id; 3731 int record_now; 3732 3733 record_now = 0; 3734 again: 3735 for (i = 0; i < nb_pt; i++) { 3736 port_id = (portid_t) portlist[i]; 3737 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3738 return; 3739 if (record_now) 3740 fwd_ports_ids[i] = port_id; 3741 } 3742 if (record_now == 0) { 3743 record_now = 1; 3744 goto again; 3745 } 3746 nb_cfg_ports = (portid_t) nb_pt; 3747 if (nb_fwd_ports != (portid_t) nb_pt) { 3748 printf("previous number of forwarding ports %u - changed to " 3749 "number of configured ports %u\n", 3750 (unsigned int) nb_fwd_ports, nb_pt); 3751 nb_fwd_ports = (portid_t) nb_pt; 3752 } 3753 } 3754 3755 /** 3756 * Parse the user input and obtain the list of forwarding ports 3757 * 3758 * @param[in] list 3759 * String containing the user input. User can specify 3760 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3761 * For example, if the user wants to use all the available 3762 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3763 * If the user wants to use only the ports 1,2 then the input 3764 * is 1,2. 3765 * valid characters are '-' and ',' 3766 * @param[out] values 3767 * This array will be filled with a list of port IDs 3768 * based on the user input 3769 * Note that duplicate entries are discarded and only the first 3770 * count entries in this array are port IDs and all the rest 3771 * will contain default values 3772 * @param[in] maxsize 3773 * This parameter denotes 2 things 3774 * 1) Number of elements in the values array 3775 * 2) Maximum value of each element in the values array 3776 * @return 3777 * On success, returns total count of parsed port IDs 3778 * On failure, returns 0 3779 */ 3780 static unsigned int 3781 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3782 { 3783 unsigned int count = 0; 3784 char *end = NULL; 3785 int min, max; 3786 int value, i; 3787 unsigned int marked[maxsize]; 3788 3789 if (list == NULL || values == NULL) 3790 return 0; 3791 3792 for (i = 0; i < (int)maxsize; i++) 3793 marked[i] = 0; 3794 3795 min = INT_MAX; 3796 3797 do { 3798 /*Remove the blank spaces if any*/ 3799 while (isblank(*list)) 3800 list++; 3801 if (*list == '\0') 3802 break; 3803 errno = 0; 3804 value = strtol(list, &end, 10); 3805 if (errno || end == NULL) 3806 return 0; 3807 if (value < 0 || value >= (int)maxsize) 3808 return 0; 3809 while (isblank(*end)) 3810 end++; 3811 if (*end == '-' && min == INT_MAX) { 3812 min = value; 3813 } else if ((*end == ',') || (*end == '\0')) { 3814 max = value; 3815 if (min == INT_MAX) 3816 min = value; 3817 for (i = min; i <= max; i++) { 3818 if (count < maxsize) { 3819 if (marked[i]) 3820 continue; 3821 values[count] = i; 3822 marked[i] = 1; 3823 count++; 3824 } 3825 } 3826 min = INT_MAX; 3827 } else 3828 return 0; 3829 list = end + 1; 3830 } while (*end != '\0'); 3831 3832 return count; 3833 } 3834 3835 void 3836 parse_fwd_portlist(const char *portlist) 3837 { 3838 unsigned int portcount; 3839 unsigned int portindex[RTE_MAX_ETHPORTS]; 3840 unsigned int i, valid_port_count = 0; 3841 3842 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3843 if (!portcount) 3844 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3845 3846 /* 3847 * Here we verify the validity of the ports 3848 * and thereby calculate the total number of 3849 * valid ports 3850 */ 3851 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3852 if (rte_eth_dev_is_valid_port(portindex[i])) { 3853 portindex[valid_port_count] = portindex[i]; 3854 valid_port_count++; 3855 } 3856 } 3857 3858 set_fwd_ports_list(portindex, valid_port_count); 3859 } 3860 3861 void 3862 set_fwd_ports_mask(uint64_t portmask) 3863 { 3864 unsigned int portlist[64]; 3865 unsigned int nb_pt; 3866 unsigned int i; 3867 3868 if (portmask == 0) { 3869 fprintf(stderr, "Invalid NULL mask of ports\n"); 3870 return; 3871 } 3872 nb_pt = 0; 3873 RTE_ETH_FOREACH_DEV(i) { 3874 if (! ((uint64_t)(1ULL << i) & portmask)) 3875 continue; 3876 portlist[nb_pt++] = i; 3877 } 3878 set_fwd_ports_list(portlist, nb_pt); 3879 } 3880 3881 void 3882 set_fwd_ports_number(uint16_t nb_pt) 3883 { 3884 if (nb_pt > nb_cfg_ports) { 3885 fprintf(stderr, 3886 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 3887 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3888 return; 3889 } 3890 nb_fwd_ports = (portid_t) nb_pt; 3891 printf("Number of forwarding ports set to %u\n", 3892 (unsigned int) nb_fwd_ports); 3893 } 3894 3895 int 3896 port_is_forwarding(portid_t port_id) 3897 { 3898 unsigned int i; 3899 3900 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3901 return -1; 3902 3903 for (i = 0; i < nb_fwd_ports; i++) { 3904 if (fwd_ports_ids[i] == port_id) 3905 return 1; 3906 } 3907 3908 return 0; 3909 } 3910 3911 void 3912 set_nb_pkt_per_burst(uint16_t nb) 3913 { 3914 if (nb > MAX_PKT_BURST) { 3915 fprintf(stderr, 3916 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 3917 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3918 return; 3919 } 3920 nb_pkt_per_burst = nb; 3921 printf("Number of packets per burst set to %u\n", 3922 (unsigned int) nb_pkt_per_burst); 3923 } 3924 3925 static const char * 3926 tx_split_get_name(enum tx_pkt_split split) 3927 { 3928 uint32_t i; 3929 3930 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3931 if (tx_split_name[i].split == split) 3932 return tx_split_name[i].name; 3933 } 3934 return NULL; 3935 } 3936 3937 void 3938 set_tx_pkt_split(const char *name) 3939 { 3940 uint32_t i; 3941 3942 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3943 if (strcmp(tx_split_name[i].name, name) == 0) { 3944 tx_pkt_split = tx_split_name[i].split; 3945 return; 3946 } 3947 } 3948 fprintf(stderr, "unknown value: \"%s\"\n", name); 3949 } 3950 3951 int 3952 parse_fec_mode(const char *name, uint32_t *fec_capa) 3953 { 3954 uint8_t i; 3955 3956 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3957 if (strcmp(fec_mode_name[i].name, name) == 0) { 3958 *fec_capa = 3959 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3960 return 0; 3961 } 3962 } 3963 return -1; 3964 } 3965 3966 void 3967 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3968 { 3969 unsigned int i, j; 3970 3971 printf("FEC capabilities:\n"); 3972 3973 for (i = 0; i < num; i++) { 3974 printf("%s : ", 3975 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3976 3977 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 3978 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3979 speed_fec_capa[i].capa) 3980 printf("%s ", fec_mode_name[j].name); 3981 } 3982 printf("\n"); 3983 } 3984 } 3985 3986 void 3987 show_rx_pkt_offsets(void) 3988 { 3989 uint32_t i, n; 3990 3991 n = rx_pkt_nb_offs; 3992 printf("Number of offsets: %u\n", n); 3993 if (n) { 3994 printf("Segment offsets: "); 3995 for (i = 0; i != n - 1; i++) 3996 printf("%hu,", rx_pkt_seg_offsets[i]); 3997 printf("%hu\n", rx_pkt_seg_lengths[i]); 3998 } 3999 } 4000 4001 void 4002 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 4003 { 4004 unsigned int i; 4005 4006 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 4007 printf("nb segments per RX packets=%u >= " 4008 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 4009 return; 4010 } 4011 4012 /* 4013 * No extra check here, the segment length will be checked by PMD 4014 * in the extended queue setup. 4015 */ 4016 for (i = 0; i < nb_offs; i++) { 4017 if (seg_offsets[i] >= UINT16_MAX) { 4018 printf("offset[%u]=%u > UINT16_MAX - give up\n", 4019 i, seg_offsets[i]); 4020 return; 4021 } 4022 } 4023 4024 for (i = 0; i < nb_offs; i++) 4025 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 4026 4027 rx_pkt_nb_offs = (uint8_t) nb_offs; 4028 } 4029 4030 void 4031 show_rx_pkt_segments(void) 4032 { 4033 uint32_t i, n; 4034 4035 n = rx_pkt_nb_segs; 4036 printf("Number of segments: %u\n", n); 4037 if (n) { 4038 printf("Segment sizes: "); 4039 for (i = 0; i != n - 1; i++) 4040 printf("%hu,", rx_pkt_seg_lengths[i]); 4041 printf("%hu\n", rx_pkt_seg_lengths[i]); 4042 } 4043 } 4044 4045 void 4046 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4047 { 4048 unsigned int i; 4049 4050 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 4051 printf("nb segments per RX packets=%u >= " 4052 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 4053 return; 4054 } 4055 4056 /* 4057 * No extra check here, the segment length will be checked by PMD 4058 * in the extended queue setup. 4059 */ 4060 for (i = 0; i < nb_segs; i++) { 4061 if (seg_lengths[i] >= UINT16_MAX) { 4062 printf("length[%u]=%u > UINT16_MAX - give up\n", 4063 i, seg_lengths[i]); 4064 return; 4065 } 4066 } 4067 4068 for (i = 0; i < nb_segs; i++) 4069 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4070 4071 rx_pkt_nb_segs = (uint8_t) nb_segs; 4072 } 4073 4074 void 4075 show_tx_pkt_segments(void) 4076 { 4077 uint32_t i, n; 4078 const char *split; 4079 4080 n = tx_pkt_nb_segs; 4081 split = tx_split_get_name(tx_pkt_split); 4082 4083 printf("Number of segments: %u\n", n); 4084 printf("Segment sizes: "); 4085 for (i = 0; i != n - 1; i++) 4086 printf("%hu,", tx_pkt_seg_lengths[i]); 4087 printf("%hu\n", tx_pkt_seg_lengths[i]); 4088 printf("Split packet: %s\n", split); 4089 } 4090 4091 static bool 4092 nb_segs_is_invalid(unsigned int nb_segs) 4093 { 4094 uint16_t ring_size; 4095 uint16_t queue_id; 4096 uint16_t port_id; 4097 int ret; 4098 4099 RTE_ETH_FOREACH_DEV(port_id) { 4100 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 4101 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 4102 if (ret) { 4103 /* Port may not be initialized yet, can't say 4104 * the port is invalid in this stage. 4105 */ 4106 continue; 4107 } 4108 if (ring_size < nb_segs) { 4109 printf("nb segments per TX packets=%u >= TX " 4110 "queue(%u) ring_size=%u - txpkts ignored\n", 4111 nb_segs, queue_id, ring_size); 4112 return true; 4113 } 4114 } 4115 } 4116 4117 return false; 4118 } 4119 4120 void 4121 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4122 { 4123 uint16_t tx_pkt_len; 4124 unsigned int i; 4125 4126 /* 4127 * For single segment settings failed check is ignored. 4128 * It is a very basic capability to send the single segment 4129 * packets, suppose it is always supported. 4130 */ 4131 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 4132 fprintf(stderr, 4133 "Tx segment size(%u) is not supported - txpkts ignored\n", 4134 nb_segs); 4135 return; 4136 } 4137 4138 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 4139 fprintf(stderr, 4140 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 4141 nb_segs, RTE_MAX_SEGS_PER_PKT); 4142 return; 4143 } 4144 4145 /* 4146 * Check that each segment length is greater or equal than 4147 * the mbuf data size. 4148 * Check also that the total packet length is greater or equal than the 4149 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 4150 * 20 + 8). 4151 */ 4152 tx_pkt_len = 0; 4153 for (i = 0; i < nb_segs; i++) { 4154 if (seg_lengths[i] > mbuf_data_size[0]) { 4155 fprintf(stderr, 4156 "length[%u]=%u > mbuf_data_size=%u - give up\n", 4157 i, seg_lengths[i], mbuf_data_size[0]); 4158 return; 4159 } 4160 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 4161 } 4162 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 4163 fprintf(stderr, "total packet length=%u < %d - give up\n", 4164 (unsigned) tx_pkt_len, 4165 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 4166 return; 4167 } 4168 4169 for (i = 0; i < nb_segs; i++) 4170 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4171 4172 tx_pkt_length = tx_pkt_len; 4173 tx_pkt_nb_segs = (uint8_t) nb_segs; 4174 } 4175 4176 void 4177 show_tx_pkt_times(void) 4178 { 4179 printf("Interburst gap: %u\n", tx_pkt_times_inter); 4180 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 4181 } 4182 4183 void 4184 set_tx_pkt_times(unsigned int *tx_times) 4185 { 4186 tx_pkt_times_inter = tx_times[0]; 4187 tx_pkt_times_intra = tx_times[1]; 4188 } 4189 4190 void 4191 setup_gro(const char *onoff, portid_t port_id) 4192 { 4193 if (!rte_eth_dev_is_valid_port(port_id)) { 4194 fprintf(stderr, "invalid port id %u\n", port_id); 4195 return; 4196 } 4197 if (test_done == 0) { 4198 fprintf(stderr, 4199 "Before enable/disable GRO, please stop forwarding first\n"); 4200 return; 4201 } 4202 if (strcmp(onoff, "on") == 0) { 4203 if (gro_ports[port_id].enable != 0) { 4204 fprintf(stderr, 4205 "Port %u has enabled GRO. Please disable GRO first\n", 4206 port_id); 4207 return; 4208 } 4209 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4210 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 4211 gro_ports[port_id].param.max_flow_num = 4212 GRO_DEFAULT_FLOW_NUM; 4213 gro_ports[port_id].param.max_item_per_flow = 4214 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 4215 } 4216 gro_ports[port_id].enable = 1; 4217 } else { 4218 if (gro_ports[port_id].enable == 0) { 4219 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 4220 return; 4221 } 4222 gro_ports[port_id].enable = 0; 4223 } 4224 } 4225 4226 void 4227 setup_gro_flush_cycles(uint8_t cycles) 4228 { 4229 if (test_done == 0) { 4230 fprintf(stderr, 4231 "Before change flush interval for GRO, please stop forwarding first.\n"); 4232 return; 4233 } 4234 4235 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 4236 GRO_DEFAULT_FLUSH_CYCLES) { 4237 fprintf(stderr, 4238 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 4239 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 4240 cycles = GRO_DEFAULT_FLUSH_CYCLES; 4241 } 4242 4243 gro_flush_cycles = cycles; 4244 } 4245 4246 void 4247 show_gro(portid_t port_id) 4248 { 4249 struct rte_gro_param *param; 4250 uint32_t max_pkts_num; 4251 4252 param = &gro_ports[port_id].param; 4253 4254 if (!rte_eth_dev_is_valid_port(port_id)) { 4255 fprintf(stderr, "Invalid port id %u.\n", port_id); 4256 return; 4257 } 4258 if (gro_ports[port_id].enable) { 4259 printf("GRO type: TCP/IPv4\n"); 4260 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4261 max_pkts_num = param->max_flow_num * 4262 param->max_item_per_flow; 4263 } else 4264 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 4265 printf("Max number of packets to perform GRO: %u\n", 4266 max_pkts_num); 4267 printf("Flushing cycles: %u\n", gro_flush_cycles); 4268 } else 4269 printf("Port %u doesn't enable GRO.\n", port_id); 4270 } 4271 4272 void 4273 setup_gso(const char *mode, portid_t port_id) 4274 { 4275 if (!rte_eth_dev_is_valid_port(port_id)) { 4276 fprintf(stderr, "invalid port id %u\n", port_id); 4277 return; 4278 } 4279 if (strcmp(mode, "on") == 0) { 4280 if (test_done == 0) { 4281 fprintf(stderr, 4282 "before enabling GSO, please stop forwarding first\n"); 4283 return; 4284 } 4285 gso_ports[port_id].enable = 1; 4286 } else if (strcmp(mode, "off") == 0) { 4287 if (test_done == 0) { 4288 fprintf(stderr, 4289 "before disabling GSO, please stop forwarding first\n"); 4290 return; 4291 } 4292 gso_ports[port_id].enable = 0; 4293 } 4294 } 4295 4296 char* 4297 list_pkt_forwarding_modes(void) 4298 { 4299 static char fwd_modes[128] = ""; 4300 const char *separator = "|"; 4301 struct fwd_engine *fwd_eng; 4302 unsigned i = 0; 4303 4304 if (strlen (fwd_modes) == 0) { 4305 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4306 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4307 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4308 strncat(fwd_modes, separator, 4309 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4310 } 4311 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4312 } 4313 4314 return fwd_modes; 4315 } 4316 4317 char* 4318 list_pkt_forwarding_retry_modes(void) 4319 { 4320 static char fwd_modes[128] = ""; 4321 const char *separator = "|"; 4322 struct fwd_engine *fwd_eng; 4323 unsigned i = 0; 4324 4325 if (strlen(fwd_modes) == 0) { 4326 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4327 if (fwd_eng == &rx_only_engine) 4328 continue; 4329 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4330 sizeof(fwd_modes) - 4331 strlen(fwd_modes) - 1); 4332 strncat(fwd_modes, separator, 4333 sizeof(fwd_modes) - 4334 strlen(fwd_modes) - 1); 4335 } 4336 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4337 } 4338 4339 return fwd_modes; 4340 } 4341 4342 void 4343 set_pkt_forwarding_mode(const char *fwd_mode_name) 4344 { 4345 struct fwd_engine *fwd_eng; 4346 unsigned i; 4347 4348 i = 0; 4349 while ((fwd_eng = fwd_engines[i]) != NULL) { 4350 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 4351 printf("Set %s packet forwarding mode%s\n", 4352 fwd_mode_name, 4353 retry_enabled == 0 ? "" : " with retry"); 4354 cur_fwd_eng = fwd_eng; 4355 return; 4356 } 4357 i++; 4358 } 4359 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 4360 } 4361 4362 void 4363 add_rx_dump_callbacks(portid_t portid) 4364 { 4365 struct rte_eth_dev_info dev_info; 4366 uint16_t queue; 4367 int ret; 4368 4369 if (port_id_is_invalid(portid, ENABLED_WARN)) 4370 return; 4371 4372 ret = eth_dev_info_get_print_err(portid, &dev_info); 4373 if (ret != 0) 4374 return; 4375 4376 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4377 if (!ports[portid].rx_dump_cb[queue]) 4378 ports[portid].rx_dump_cb[queue] = 4379 rte_eth_add_rx_callback(portid, queue, 4380 dump_rx_pkts, NULL); 4381 } 4382 4383 void 4384 add_tx_dump_callbacks(portid_t portid) 4385 { 4386 struct rte_eth_dev_info dev_info; 4387 uint16_t queue; 4388 int ret; 4389 4390 if (port_id_is_invalid(portid, ENABLED_WARN)) 4391 return; 4392 4393 ret = eth_dev_info_get_print_err(portid, &dev_info); 4394 if (ret != 0) 4395 return; 4396 4397 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4398 if (!ports[portid].tx_dump_cb[queue]) 4399 ports[portid].tx_dump_cb[queue] = 4400 rte_eth_add_tx_callback(portid, queue, 4401 dump_tx_pkts, NULL); 4402 } 4403 4404 void 4405 remove_rx_dump_callbacks(portid_t portid) 4406 { 4407 struct rte_eth_dev_info dev_info; 4408 uint16_t queue; 4409 int ret; 4410 4411 if (port_id_is_invalid(portid, ENABLED_WARN)) 4412 return; 4413 4414 ret = eth_dev_info_get_print_err(portid, &dev_info); 4415 if (ret != 0) 4416 return; 4417 4418 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4419 if (ports[portid].rx_dump_cb[queue]) { 4420 rte_eth_remove_rx_callback(portid, queue, 4421 ports[portid].rx_dump_cb[queue]); 4422 ports[portid].rx_dump_cb[queue] = NULL; 4423 } 4424 } 4425 4426 void 4427 remove_tx_dump_callbacks(portid_t portid) 4428 { 4429 struct rte_eth_dev_info dev_info; 4430 uint16_t queue; 4431 int ret; 4432 4433 if (port_id_is_invalid(portid, ENABLED_WARN)) 4434 return; 4435 4436 ret = eth_dev_info_get_print_err(portid, &dev_info); 4437 if (ret != 0) 4438 return; 4439 4440 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4441 if (ports[portid].tx_dump_cb[queue]) { 4442 rte_eth_remove_tx_callback(portid, queue, 4443 ports[portid].tx_dump_cb[queue]); 4444 ports[portid].tx_dump_cb[queue] = NULL; 4445 } 4446 } 4447 4448 void 4449 configure_rxtx_dump_callbacks(uint16_t verbose) 4450 { 4451 portid_t portid; 4452 4453 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4454 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4455 return; 4456 #endif 4457 4458 RTE_ETH_FOREACH_DEV(portid) 4459 { 4460 if (verbose == 1 || verbose > 2) 4461 add_rx_dump_callbacks(portid); 4462 else 4463 remove_rx_dump_callbacks(portid); 4464 if (verbose >= 2) 4465 add_tx_dump_callbacks(portid); 4466 else 4467 remove_tx_dump_callbacks(portid); 4468 } 4469 } 4470 4471 void 4472 set_verbose_level(uint16_t vb_level) 4473 { 4474 printf("Change verbose level from %u to %u\n", 4475 (unsigned int) verbose_level, (unsigned int) vb_level); 4476 verbose_level = vb_level; 4477 configure_rxtx_dump_callbacks(verbose_level); 4478 } 4479 4480 void 4481 vlan_extend_set(portid_t port_id, int on) 4482 { 4483 int diag; 4484 int vlan_offload; 4485 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4486 4487 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4488 return; 4489 4490 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4491 4492 if (on) { 4493 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4494 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4495 } else { 4496 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 4497 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4498 } 4499 4500 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4501 if (diag < 0) { 4502 fprintf(stderr, 4503 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 4504 port_id, on, diag); 4505 return; 4506 } 4507 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4508 } 4509 4510 void 4511 rx_vlan_strip_set(portid_t port_id, int on) 4512 { 4513 int diag; 4514 int vlan_offload; 4515 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4516 4517 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4518 return; 4519 4520 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4521 4522 if (on) { 4523 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4524 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4525 } else { 4526 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 4527 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4528 } 4529 4530 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4531 if (diag < 0) { 4532 fprintf(stderr, 4533 "%s(port_pi=%d, on=%d) failed diag=%d\n", 4534 __func__, port_id, on, diag); 4535 return; 4536 } 4537 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4538 } 4539 4540 void 4541 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 4542 { 4543 int diag; 4544 4545 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4546 return; 4547 4548 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 4549 if (diag < 0) 4550 fprintf(stderr, 4551 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 4552 __func__, port_id, queue_id, on, diag); 4553 } 4554 4555 void 4556 rx_vlan_filter_set(portid_t port_id, int on) 4557 { 4558 int diag; 4559 int vlan_offload; 4560 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4561 4562 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4563 return; 4564 4565 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4566 4567 if (on) { 4568 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4569 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4570 } else { 4571 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 4572 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4573 } 4574 4575 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4576 if (diag < 0) { 4577 fprintf(stderr, 4578 "%s(port_pi=%d, on=%d) failed diag=%d\n", 4579 __func__, port_id, on, diag); 4580 return; 4581 } 4582 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4583 } 4584 4585 void 4586 rx_vlan_qinq_strip_set(portid_t port_id, int on) 4587 { 4588 int diag; 4589 int vlan_offload; 4590 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4591 4592 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4593 return; 4594 4595 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4596 4597 if (on) { 4598 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4599 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4600 } else { 4601 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 4602 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4603 } 4604 4605 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4606 if (diag < 0) { 4607 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 4608 __func__, port_id, on, diag); 4609 return; 4610 } 4611 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4612 } 4613 4614 int 4615 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 4616 { 4617 int diag; 4618 4619 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4620 return 1; 4621 if (vlan_id_is_invalid(vlan_id)) 4622 return 1; 4623 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 4624 if (diag == 0) 4625 return 0; 4626 fprintf(stderr, 4627 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 4628 port_id, vlan_id, on, diag); 4629 return -1; 4630 } 4631 4632 void 4633 rx_vlan_all_filter_set(portid_t port_id, int on) 4634 { 4635 uint16_t vlan_id; 4636 4637 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4638 return; 4639 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 4640 if (rx_vft_set(port_id, vlan_id, on)) 4641 break; 4642 } 4643 } 4644 4645 void 4646 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 4647 { 4648 int diag; 4649 4650 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4651 return; 4652 4653 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 4654 if (diag == 0) 4655 return; 4656 4657 fprintf(stderr, 4658 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 4659 port_id, vlan_type, tp_id, diag); 4660 } 4661 4662 void 4663 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 4664 { 4665 struct rte_eth_dev_info dev_info; 4666 int ret; 4667 4668 if (vlan_id_is_invalid(vlan_id)) 4669 return; 4670 4671 if (ports[port_id].dev_conf.txmode.offloads & 4672 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 4673 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 4674 return; 4675 } 4676 4677 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4678 if (ret != 0) 4679 return; 4680 4681 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 4682 fprintf(stderr, 4683 "Error: vlan insert is not supported by port %d\n", 4684 port_id); 4685 return; 4686 } 4687 4688 tx_vlan_reset(port_id); 4689 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 4690 ports[port_id].tx_vlan_id = vlan_id; 4691 } 4692 4693 void 4694 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 4695 { 4696 struct rte_eth_dev_info dev_info; 4697 int ret; 4698 4699 if (vlan_id_is_invalid(vlan_id)) 4700 return; 4701 if (vlan_id_is_invalid(vlan_id_outer)) 4702 return; 4703 4704 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4705 if (ret != 0) 4706 return; 4707 4708 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 4709 fprintf(stderr, 4710 "Error: qinq insert not supported by port %d\n", 4711 port_id); 4712 return; 4713 } 4714 4715 tx_vlan_reset(port_id); 4716 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 4717 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 4718 ports[port_id].tx_vlan_id = vlan_id; 4719 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 4720 } 4721 4722 void 4723 tx_vlan_reset(portid_t port_id) 4724 { 4725 ports[port_id].dev_conf.txmode.offloads &= 4726 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 4727 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 4728 ports[port_id].tx_vlan_id = 0; 4729 ports[port_id].tx_vlan_id_outer = 0; 4730 } 4731 4732 void 4733 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 4734 { 4735 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4736 return; 4737 4738 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 4739 } 4740 4741 void 4742 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 4743 { 4744 int ret; 4745 4746 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4747 return; 4748 4749 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 4750 return; 4751 4752 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 4753 fprintf(stderr, "map_value not in required range 0..%d\n", 4754 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 4755 return; 4756 } 4757 4758 if (!is_rx) { /* tx */ 4759 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 4760 map_value); 4761 if (ret) { 4762 fprintf(stderr, 4763 "failed to set tx queue stats mapping.\n"); 4764 return; 4765 } 4766 } else { /* rx */ 4767 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 4768 map_value); 4769 if (ret) { 4770 fprintf(stderr, 4771 "failed to set rx queue stats mapping.\n"); 4772 return; 4773 } 4774 } 4775 } 4776 4777 void 4778 set_xstats_hide_zero(uint8_t on_off) 4779 { 4780 xstats_hide_zero = on_off; 4781 } 4782 4783 void 4784 set_record_core_cycles(uint8_t on_off) 4785 { 4786 record_core_cycles = on_off; 4787 } 4788 4789 void 4790 set_record_burst_stats(uint8_t on_off) 4791 { 4792 record_burst_stats = on_off; 4793 } 4794 4795 static char* 4796 flowtype_to_str(uint16_t flow_type) 4797 { 4798 struct flow_type_info { 4799 char str[32]; 4800 uint16_t ftype; 4801 }; 4802 4803 uint8_t i; 4804 static struct flow_type_info flowtype_str_table[] = { 4805 {"raw", RTE_ETH_FLOW_RAW}, 4806 {"ipv4", RTE_ETH_FLOW_IPV4}, 4807 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4808 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4809 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4810 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4811 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4812 {"ipv6", RTE_ETH_FLOW_IPV6}, 4813 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4814 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4815 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4816 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4817 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4818 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4819 {"port", RTE_ETH_FLOW_PORT}, 4820 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4821 {"geneve", RTE_ETH_FLOW_GENEVE}, 4822 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4823 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4824 }; 4825 4826 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4827 if (flowtype_str_table[i].ftype == flow_type) 4828 return flowtype_str_table[i].str; 4829 } 4830 4831 return NULL; 4832 } 4833 4834 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 4835 4836 static inline void 4837 print_fdir_mask(struct rte_eth_fdir_masks *mask) 4838 { 4839 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 4840 4841 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4842 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 4843 " tunnel_id: 0x%08x", 4844 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 4845 rte_be_to_cpu_32(mask->tunnel_id_mask)); 4846 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 4847 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 4848 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 4849 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 4850 4851 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 4852 rte_be_to_cpu_16(mask->src_port_mask), 4853 rte_be_to_cpu_16(mask->dst_port_mask)); 4854 4855 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4856 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 4857 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 4858 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 4859 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 4860 4861 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4862 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 4863 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 4864 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 4865 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 4866 } 4867 4868 printf("\n"); 4869 } 4870 4871 static inline void 4872 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4873 { 4874 struct rte_eth_flex_payload_cfg *cfg; 4875 uint32_t i, j; 4876 4877 for (i = 0; i < flex_conf->nb_payloads; i++) { 4878 cfg = &flex_conf->flex_set[i]; 4879 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 4880 printf("\n RAW: "); 4881 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 4882 printf("\n L2_PAYLOAD: "); 4883 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 4884 printf("\n L3_PAYLOAD: "); 4885 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 4886 printf("\n L4_PAYLOAD: "); 4887 else 4888 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 4889 for (j = 0; j < num; j++) 4890 printf(" %-5u", cfg->src_offset[j]); 4891 } 4892 printf("\n"); 4893 } 4894 4895 static inline void 4896 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4897 { 4898 struct rte_eth_fdir_flex_mask *mask; 4899 uint32_t i, j; 4900 char *p; 4901 4902 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4903 mask = &flex_conf->flex_mask[i]; 4904 p = flowtype_to_str(mask->flow_type); 4905 printf("\n %s:\t", p ? p : "unknown"); 4906 for (j = 0; j < num; j++) 4907 printf(" %02x", mask->mask[j]); 4908 } 4909 printf("\n"); 4910 } 4911 4912 static inline void 4913 print_fdir_flow_type(uint32_t flow_types_mask) 4914 { 4915 int i; 4916 char *p; 4917 4918 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4919 if (!(flow_types_mask & (1 << i))) 4920 continue; 4921 p = flowtype_to_str(i); 4922 if (p) 4923 printf(" %s", p); 4924 else 4925 printf(" unknown"); 4926 } 4927 printf("\n"); 4928 } 4929 4930 static int 4931 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4932 struct rte_eth_fdir_stats *fdir_stat) 4933 { 4934 int ret = -ENOTSUP; 4935 4936 #ifdef RTE_NET_I40E 4937 if (ret == -ENOTSUP) { 4938 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4939 if (!ret) 4940 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4941 } 4942 #endif 4943 #ifdef RTE_NET_IXGBE 4944 if (ret == -ENOTSUP) { 4945 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4946 if (!ret) 4947 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4948 } 4949 #endif 4950 switch (ret) { 4951 case 0: 4952 break; 4953 case -ENOTSUP: 4954 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 4955 port_id); 4956 break; 4957 default: 4958 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 4959 break; 4960 } 4961 return ret; 4962 } 4963 4964 void 4965 fdir_get_infos(portid_t port_id) 4966 { 4967 struct rte_eth_fdir_stats fdir_stat; 4968 struct rte_eth_fdir_info fdir_info; 4969 4970 static const char *fdir_stats_border = "########################"; 4971 4972 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4973 return; 4974 4975 memset(&fdir_info, 0, sizeof(fdir_info)); 4976 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4977 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4978 return; 4979 4980 printf("\n %s FDIR infos for port %-2d %s\n", 4981 fdir_stats_border, port_id, fdir_stats_border); 4982 printf(" MODE: "); 4983 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4984 printf(" PERFECT\n"); 4985 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4986 printf(" PERFECT-MAC-VLAN\n"); 4987 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4988 printf(" PERFECT-TUNNEL\n"); 4989 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4990 printf(" SIGNATURE\n"); 4991 else 4992 printf(" DISABLE\n"); 4993 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4994 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 4995 printf(" SUPPORTED FLOW TYPE: "); 4996 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 4997 } 4998 printf(" FLEX PAYLOAD INFO:\n"); 4999 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 5000 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 5001 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 5002 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 5003 fdir_info.flex_payload_unit, 5004 fdir_info.max_flex_payload_segment_num, 5005 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 5006 printf(" MASK: "); 5007 print_fdir_mask(&fdir_info.mask); 5008 if (fdir_info.flex_conf.nb_payloads > 0) { 5009 printf(" FLEX PAYLOAD SRC OFFSET:"); 5010 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5011 } 5012 if (fdir_info.flex_conf.nb_flexmasks > 0) { 5013 printf(" FLEX MASK CFG:"); 5014 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5015 } 5016 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 5017 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 5018 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 5019 fdir_info.guarant_spc, fdir_info.best_spc); 5020 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 5021 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 5022 " add: %-10"PRIu64" remove: %"PRIu64"\n" 5023 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 5024 fdir_stat.collision, fdir_stat.free, 5025 fdir_stat.maxhash, fdir_stat.maxlen, 5026 fdir_stat.add, fdir_stat.remove, 5027 fdir_stat.f_add, fdir_stat.f_remove); 5028 printf(" %s############################%s\n", 5029 fdir_stats_border, fdir_stats_border); 5030 } 5031 5032 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 5033 5034 void 5035 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 5036 { 5037 struct rte_port *port; 5038 struct rte_eth_fdir_flex_conf *flex_conf; 5039 int i, idx = 0; 5040 5041 port = &ports[port_id]; 5042 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5043 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 5044 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 5045 idx = i; 5046 break; 5047 } 5048 } 5049 if (i >= RTE_ETH_FLOW_MAX) { 5050 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 5051 idx = flex_conf->nb_flexmasks; 5052 flex_conf->nb_flexmasks++; 5053 } else { 5054 fprintf(stderr, 5055 "The flex mask table is full. Can not set flex mask for flow_type(%u).", 5056 cfg->flow_type); 5057 return; 5058 } 5059 } 5060 rte_memcpy(&flex_conf->flex_mask[idx], 5061 cfg, 5062 sizeof(struct rte_eth_fdir_flex_mask)); 5063 } 5064 5065 void 5066 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 5067 { 5068 struct rte_port *port; 5069 struct rte_eth_fdir_flex_conf *flex_conf; 5070 int i, idx = 0; 5071 5072 port = &ports[port_id]; 5073 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5074 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 5075 if (cfg->type == flex_conf->flex_set[i].type) { 5076 idx = i; 5077 break; 5078 } 5079 } 5080 if (i >= RTE_ETH_PAYLOAD_MAX) { 5081 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 5082 idx = flex_conf->nb_payloads; 5083 flex_conf->nb_payloads++; 5084 } else { 5085 fprintf(stderr, 5086 "The flex payload table is full. Can not set flex payload for type(%u).", 5087 cfg->type); 5088 return; 5089 } 5090 } 5091 rte_memcpy(&flex_conf->flex_set[idx], 5092 cfg, 5093 sizeof(struct rte_eth_flex_payload_cfg)); 5094 5095 } 5096 5097 void 5098 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 5099 { 5100 #ifdef RTE_NET_IXGBE 5101 int diag; 5102 5103 if (is_rx) 5104 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 5105 else 5106 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 5107 5108 if (diag == 0) 5109 return; 5110 fprintf(stderr, 5111 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 5112 is_rx ? "rx" : "tx", port_id, diag); 5113 return; 5114 #endif 5115 fprintf(stderr, "VF %s setting not supported for port %d\n", 5116 is_rx ? "Rx" : "Tx", port_id); 5117 RTE_SET_USED(vf); 5118 RTE_SET_USED(on); 5119 } 5120 5121 int 5122 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 5123 { 5124 int diag; 5125 struct rte_eth_link link; 5126 int ret; 5127 5128 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5129 return 1; 5130 ret = eth_link_get_nowait_print_err(port_id, &link); 5131 if (ret < 0) 5132 return 1; 5133 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 5134 rate > link.link_speed) { 5135 fprintf(stderr, 5136 "Invalid rate value:%u bigger than link speed: %u\n", 5137 rate, link.link_speed); 5138 return 1; 5139 } 5140 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 5141 if (diag == 0) 5142 return diag; 5143 fprintf(stderr, 5144 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 5145 port_id, diag); 5146 return diag; 5147 } 5148 5149 int 5150 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 5151 { 5152 int diag = -ENOTSUP; 5153 5154 RTE_SET_USED(vf); 5155 RTE_SET_USED(rate); 5156 RTE_SET_USED(q_msk); 5157 5158 #ifdef RTE_NET_IXGBE 5159 if (diag == -ENOTSUP) 5160 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 5161 q_msk); 5162 #endif 5163 #ifdef RTE_NET_BNXT 5164 if (diag == -ENOTSUP) 5165 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 5166 #endif 5167 if (diag == 0) 5168 return diag; 5169 5170 fprintf(stderr, 5171 "%s for port_id=%d failed diag=%d\n", 5172 __func__, port_id, diag); 5173 return diag; 5174 } 5175 5176 /* 5177 * Functions to manage the set of filtered Multicast MAC addresses. 5178 * 5179 * A pool of filtered multicast MAC addresses is associated with each port. 5180 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 5181 * The address of the pool and the number of valid multicast MAC addresses 5182 * recorded in the pool are stored in the fields "mc_addr_pool" and 5183 * "mc_addr_nb" of the "rte_port" data structure. 5184 * 5185 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 5186 * to be supplied a contiguous array of multicast MAC addresses. 5187 * To comply with this constraint, the set of multicast addresses recorded 5188 * into the pool are systematically compacted at the beginning of the pool. 5189 * Hence, when a multicast address is removed from the pool, all following 5190 * addresses, if any, are copied back to keep the set contiguous. 5191 */ 5192 #define MCAST_POOL_INC 32 5193 5194 static int 5195 mcast_addr_pool_extend(struct rte_port *port) 5196 { 5197 struct rte_ether_addr *mc_pool; 5198 size_t mc_pool_size; 5199 5200 /* 5201 * If a free entry is available at the end of the pool, just 5202 * increment the number of recorded multicast addresses. 5203 */ 5204 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5205 port->mc_addr_nb++; 5206 return 0; 5207 } 5208 5209 /* 5210 * [re]allocate a pool with MCAST_POOL_INC more entries. 5211 * The previous test guarantees that port->mc_addr_nb is a multiple 5212 * of MCAST_POOL_INC. 5213 */ 5214 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 5215 MCAST_POOL_INC); 5216 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 5217 mc_pool_size); 5218 if (mc_pool == NULL) { 5219 fprintf(stderr, 5220 "allocation of pool of %u multicast addresses failed\n", 5221 port->mc_addr_nb + MCAST_POOL_INC); 5222 return -ENOMEM; 5223 } 5224 5225 port->mc_addr_pool = mc_pool; 5226 port->mc_addr_nb++; 5227 return 0; 5228 5229 } 5230 5231 static void 5232 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 5233 { 5234 if (mcast_addr_pool_extend(port) != 0) 5235 return; 5236 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 5237 } 5238 5239 static void 5240 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 5241 { 5242 port->mc_addr_nb--; 5243 if (addr_idx == port->mc_addr_nb) { 5244 /* No need to recompact the set of multicast addressses. */ 5245 if (port->mc_addr_nb == 0) { 5246 /* free the pool of multicast addresses. */ 5247 free(port->mc_addr_pool); 5248 port->mc_addr_pool = NULL; 5249 } 5250 return; 5251 } 5252 memmove(&port->mc_addr_pool[addr_idx], 5253 &port->mc_addr_pool[addr_idx + 1], 5254 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 5255 } 5256 5257 static int 5258 eth_port_multicast_addr_list_set(portid_t port_id) 5259 { 5260 struct rte_port *port; 5261 int diag; 5262 5263 port = &ports[port_id]; 5264 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 5265 port->mc_addr_nb); 5266 if (diag < 0) 5267 fprintf(stderr, 5268 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 5269 port_id, port->mc_addr_nb, diag); 5270 5271 return diag; 5272 } 5273 5274 void 5275 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 5276 { 5277 struct rte_port *port; 5278 uint32_t i; 5279 5280 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5281 return; 5282 5283 port = &ports[port_id]; 5284 5285 /* 5286 * Check that the added multicast MAC address is not already recorded 5287 * in the pool of multicast addresses. 5288 */ 5289 for (i = 0; i < port->mc_addr_nb; i++) { 5290 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 5291 fprintf(stderr, 5292 "multicast address already filtered by port\n"); 5293 return; 5294 } 5295 } 5296 5297 mcast_addr_pool_append(port, mc_addr); 5298 if (eth_port_multicast_addr_list_set(port_id) < 0) 5299 /* Rollback on failure, remove the address from the pool */ 5300 mcast_addr_pool_remove(port, i); 5301 } 5302 5303 void 5304 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 5305 { 5306 struct rte_port *port; 5307 uint32_t i; 5308 5309 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5310 return; 5311 5312 port = &ports[port_id]; 5313 5314 /* 5315 * Search the pool of multicast MAC addresses for the removed address. 5316 */ 5317 for (i = 0; i < port->mc_addr_nb; i++) { 5318 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 5319 break; 5320 } 5321 if (i == port->mc_addr_nb) { 5322 fprintf(stderr, "multicast address not filtered by port %d\n", 5323 port_id); 5324 return; 5325 } 5326 5327 mcast_addr_pool_remove(port, i); 5328 if (eth_port_multicast_addr_list_set(port_id) < 0) 5329 /* Rollback on failure, add the address back into the pool */ 5330 mcast_addr_pool_append(port, mc_addr); 5331 } 5332 5333 void 5334 port_dcb_info_display(portid_t port_id) 5335 { 5336 struct rte_eth_dcb_info dcb_info; 5337 uint16_t i; 5338 int ret; 5339 static const char *border = "================"; 5340 5341 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5342 return; 5343 5344 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 5345 if (ret) { 5346 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 5347 port_id); 5348 return; 5349 } 5350 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 5351 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 5352 printf("\n TC : "); 5353 for (i = 0; i < dcb_info.nb_tcs; i++) 5354 printf("\t%4d", i); 5355 printf("\n Priority : "); 5356 for (i = 0; i < dcb_info.nb_tcs; i++) 5357 printf("\t%4d", dcb_info.prio_tc[i]); 5358 printf("\n BW percent :"); 5359 for (i = 0; i < dcb_info.nb_tcs; i++) 5360 printf("\t%4d%%", dcb_info.tc_bws[i]); 5361 printf("\n RXQ base : "); 5362 for (i = 0; i < dcb_info.nb_tcs; i++) 5363 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 5364 printf("\n RXQ number :"); 5365 for (i = 0; i < dcb_info.nb_tcs; i++) 5366 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 5367 printf("\n TXQ base : "); 5368 for (i = 0; i < dcb_info.nb_tcs; i++) 5369 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 5370 printf("\n TXQ number :"); 5371 for (i = 0; i < dcb_info.nb_tcs; i++) 5372 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 5373 printf("\n"); 5374 } 5375 5376 uint8_t * 5377 open_file(const char *file_path, uint32_t *size) 5378 { 5379 int fd = open(file_path, O_RDONLY); 5380 off_t pkg_size; 5381 uint8_t *buf = NULL; 5382 int ret = 0; 5383 struct stat st_buf; 5384 5385 if (size) 5386 *size = 0; 5387 5388 if (fd == -1) { 5389 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5390 return buf; 5391 } 5392 5393 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 5394 close(fd); 5395 fprintf(stderr, "%s: File operations failed\n", __func__); 5396 return buf; 5397 } 5398 5399 pkg_size = st_buf.st_size; 5400 if (pkg_size < 0) { 5401 close(fd); 5402 fprintf(stderr, "%s: File operations failed\n", __func__); 5403 return buf; 5404 } 5405 5406 buf = (uint8_t *)malloc(pkg_size); 5407 if (!buf) { 5408 close(fd); 5409 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 5410 return buf; 5411 } 5412 5413 ret = read(fd, buf, pkg_size); 5414 if (ret < 0) { 5415 close(fd); 5416 fprintf(stderr, "%s: File read operation failed\n", __func__); 5417 close_file(buf); 5418 return NULL; 5419 } 5420 5421 if (size) 5422 *size = pkg_size; 5423 5424 close(fd); 5425 5426 return buf; 5427 } 5428 5429 int 5430 save_file(const char *file_path, uint8_t *buf, uint32_t size) 5431 { 5432 FILE *fh = fopen(file_path, "wb"); 5433 5434 if (fh == NULL) { 5435 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 5436 return -1; 5437 } 5438 5439 if (fwrite(buf, 1, size, fh) != size) { 5440 fclose(fh); 5441 fprintf(stderr, "%s: File write operation failed\n", __func__); 5442 return -1; 5443 } 5444 5445 fclose(fh); 5446 5447 return 0; 5448 } 5449 5450 int 5451 close_file(uint8_t *buf) 5452 { 5453 if (buf) { 5454 free((void *)buf); 5455 return 0; 5456 } 5457 5458 return -1; 5459 } 5460 5461 void 5462 port_queue_region_info_display(portid_t port_id, void *buf) 5463 { 5464 #ifdef RTE_NET_I40E 5465 uint16_t i, j; 5466 struct rte_pmd_i40e_queue_regions *info = 5467 (struct rte_pmd_i40e_queue_regions *)buf; 5468 static const char *queue_region_info_stats_border = "-------"; 5469 5470 if (!info->queue_region_number) 5471 printf("there is no region has been set before"); 5472 5473 printf("\n %s All queue region info for port=%2d %s", 5474 queue_region_info_stats_border, port_id, 5475 queue_region_info_stats_border); 5476 printf("\n queue_region_number: %-14u \n", 5477 info->queue_region_number); 5478 5479 for (i = 0; i < info->queue_region_number; i++) { 5480 printf("\n region_id: %-14u queue_number: %-14u " 5481 "queue_start_index: %-14u \n", 5482 info->region[i].region_id, 5483 info->region[i].queue_num, 5484 info->region[i].queue_start_index); 5485 5486 printf(" user_priority_num is %-14u :", 5487 info->region[i].user_priority_num); 5488 for (j = 0; j < info->region[i].user_priority_num; j++) 5489 printf(" %-14u ", info->region[i].user_priority[j]); 5490 5491 printf("\n flowtype_num is %-14u :", 5492 info->region[i].flowtype_num); 5493 for (j = 0; j < info->region[i].flowtype_num; j++) 5494 printf(" %-14u ", info->region[i].hw_flowtype[j]); 5495 } 5496 #else 5497 RTE_SET_USED(port_id); 5498 RTE_SET_USED(buf); 5499 #endif 5500 5501 printf("\n\n"); 5502 } 5503 5504 void 5505 show_macs(portid_t port_id) 5506 { 5507 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5508 struct rte_eth_dev_info dev_info; 5509 int32_t i, rc, num_macs = 0; 5510 5511 if (eth_dev_info_get_print_err(port_id, &dev_info)) 5512 return; 5513 5514 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 5515 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 5516 if (rc < 0) 5517 return; 5518 5519 for (i = 0; i < rc; i++) { 5520 5521 /* skip zero address */ 5522 if (rte_is_zero_ether_addr(&addr[i])) 5523 continue; 5524 5525 num_macs++; 5526 } 5527 5528 printf("Number of MAC address added: %d\n", num_macs); 5529 5530 for (i = 0; i < rc; i++) { 5531 5532 /* skip zero address */ 5533 if (rte_is_zero_ether_addr(&addr[i])) 5534 continue; 5535 5536 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 5537 printf(" %s\n", buf); 5538 } 5539 } 5540 5541 void 5542 show_mcast_macs(portid_t port_id) 5543 { 5544 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5545 struct rte_ether_addr *addr; 5546 struct rte_port *port; 5547 uint32_t i; 5548 5549 port = &ports[port_id]; 5550 5551 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 5552 5553 for (i = 0; i < port->mc_addr_nb; i++) { 5554 addr = &port->mc_addr_pool[i]; 5555 5556 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5557 printf(" %s\n", buf); 5558 } 5559 } 5560