1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_branch_prediction.h> 31 #include <rte_mempool.h> 32 #include <rte_mbuf.h> 33 #include <rte_interrupts.h> 34 #include <rte_pci.h> 35 #include <rte_ether.h> 36 #include <rte_ethdev.h> 37 #include <rte_string_fns.h> 38 #include <rte_cycles.h> 39 #include <rte_flow.h> 40 #include <rte_mtr.h> 41 #include <rte_errno.h> 42 #ifdef RTE_NET_IXGBE 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_NET_I40E 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_NET_BNXT 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #ifdef RTE_LIB_GRO 52 #include <rte_gro.h> 53 #endif 54 #include <rte_hexdump.h> 55 56 #include "testpmd.h" 57 #include "cmdline_mtr.h" 58 59 #define ETHDEV_FWVERS_LEN 32 60 61 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 62 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 63 #else 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 65 #endif 66 67 #define NS_PER_SEC 1E9 68 69 static char *flowtype_to_str(uint16_t flow_type); 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 91 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 92 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 93 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 94 { "none", 0 }, 95 { "eth", RTE_ETH_RSS_ETH }, 96 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 97 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 98 { "vlan", RTE_ETH_RSS_VLAN }, 99 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 100 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 101 { "ipv4", RTE_ETH_RSS_IPV4 }, 102 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 103 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 104 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 105 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 106 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 107 { "ipv6", RTE_ETH_RSS_IPV6 }, 108 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 109 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 110 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 111 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 112 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 113 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 114 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 115 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 116 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 117 { "port", RTE_ETH_RSS_PORT }, 118 { "vxlan", RTE_ETH_RSS_VXLAN }, 119 { "geneve", RTE_ETH_RSS_GENEVE }, 120 { "nvgre", RTE_ETH_RSS_NVGRE }, 121 { "ip", RTE_ETH_RSS_IP }, 122 { "udp", RTE_ETH_RSS_UDP }, 123 { "tcp", RTE_ETH_RSS_TCP }, 124 { "sctp", RTE_ETH_RSS_SCTP }, 125 { "tunnel", RTE_ETH_RSS_TUNNEL }, 126 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 127 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 128 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 129 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 130 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 131 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 132 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 133 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 134 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 135 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 136 { "esp", RTE_ETH_RSS_ESP }, 137 { "ah", RTE_ETH_RSS_AH }, 138 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 139 { "pfcp", RTE_ETH_RSS_PFCP }, 140 { "pppoe", RTE_ETH_RSS_PPPOE }, 141 { "gtpu", RTE_ETH_RSS_GTPU }, 142 { "ecpri", RTE_ETH_RSS_ECPRI }, 143 { "mpls", RTE_ETH_RSS_MPLS }, 144 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 145 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 146 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 147 { NULL, 0 }, 148 }; 149 150 static const struct { 151 enum rte_eth_fec_mode mode; 152 const char *name; 153 } fec_mode_name[] = { 154 { 155 .mode = RTE_ETH_FEC_NOFEC, 156 .name = "off", 157 }, 158 { 159 .mode = RTE_ETH_FEC_AUTO, 160 .name = "auto", 161 }, 162 { 163 .mode = RTE_ETH_FEC_BASER, 164 .name = "baser", 165 }, 166 { 167 .mode = RTE_ETH_FEC_RS, 168 .name = "rs", 169 }, 170 }; 171 172 static void 173 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 174 { 175 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 176 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 177 printf("%s%s", name, buf); 178 } 179 180 static void 181 nic_xstats_display_periodic(portid_t port_id) 182 { 183 struct xstat_display_info *xstats_info; 184 uint64_t *prev_values, *curr_values; 185 uint64_t diff_value, value_rate; 186 struct timespec cur_time; 187 uint64_t *ids_supp; 188 size_t ids_supp_sz; 189 uint64_t diff_ns; 190 unsigned int i; 191 int rc; 192 193 xstats_info = &ports[port_id].xstats_info; 194 195 ids_supp_sz = xstats_info->ids_supp_sz; 196 if (ids_supp_sz == 0) 197 return; 198 199 printf("\n"); 200 201 ids_supp = xstats_info->ids_supp; 202 prev_values = xstats_info->prev_values; 203 curr_values = xstats_info->curr_values; 204 205 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 206 ids_supp_sz); 207 if (rc != (int)ids_supp_sz) { 208 fprintf(stderr, 209 "Failed to get values of %zu xstats for port %u - return code %d\n", 210 ids_supp_sz, port_id, rc); 211 return; 212 } 213 214 diff_ns = 0; 215 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 216 uint64_t ns; 217 218 ns = cur_time.tv_sec * NS_PER_SEC; 219 ns += cur_time.tv_nsec; 220 221 if (xstats_info->prev_ns != 0) 222 diff_ns = ns - xstats_info->prev_ns; 223 xstats_info->prev_ns = ns; 224 } 225 226 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 227 for (i = 0; i < ids_supp_sz; i++) { 228 diff_value = (curr_values[i] > prev_values[i]) ? 229 (curr_values[i] - prev_values[i]) : 0; 230 prev_values[i] = curr_values[i]; 231 value_rate = diff_ns > 0 ? 232 (double)diff_value / diff_ns * NS_PER_SEC : 0; 233 234 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 235 xstats_display[i].name, curr_values[i], value_rate); 236 } 237 } 238 239 void 240 nic_stats_display(portid_t port_id) 241 { 242 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 243 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 244 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 245 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 246 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 247 struct timespec cur_time; 248 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 249 diff_ns; 250 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 251 struct rte_eth_stats stats; 252 static const char *nic_stats_border = "########################"; 253 int ret; 254 255 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 256 print_valid_ports(); 257 return; 258 } 259 ret = rte_eth_stats_get(port_id, &stats); 260 if (ret != 0) { 261 fprintf(stderr, 262 "%s: Error: failed to get stats (port %u): %d", 263 __func__, port_id, ret); 264 return; 265 } 266 printf("\n %s NIC statistics for port %-2d %s\n", 267 nic_stats_border, port_id, nic_stats_border); 268 269 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 270 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 271 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 272 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 273 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 274 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 275 276 diff_ns = 0; 277 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 278 uint64_t ns; 279 280 ns = cur_time.tv_sec * NS_PER_SEC; 281 ns += cur_time.tv_nsec; 282 283 if (prev_ns[port_id] != 0) 284 diff_ns = ns - prev_ns[port_id]; 285 prev_ns[port_id] = ns; 286 } 287 288 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 289 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 290 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 291 (stats.opackets - prev_pkts_tx[port_id]) : 0; 292 prev_pkts_rx[port_id] = stats.ipackets; 293 prev_pkts_tx[port_id] = stats.opackets; 294 mpps_rx = diff_ns > 0 ? 295 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 296 mpps_tx = diff_ns > 0 ? 297 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 298 299 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 300 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 301 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 302 (stats.obytes - prev_bytes_tx[port_id]) : 0; 303 prev_bytes_rx[port_id] = stats.ibytes; 304 prev_bytes_tx[port_id] = stats.obytes; 305 mbps_rx = diff_ns > 0 ? 306 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 307 mbps_tx = diff_ns > 0 ? 308 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 309 310 printf("\n Throughput (since last show)\n"); 311 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 312 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 313 mpps_tx, mbps_tx * 8); 314 315 if (xstats_display_num > 0) 316 nic_xstats_display_periodic(port_id); 317 318 printf(" %s############################%s\n", 319 nic_stats_border, nic_stats_border); 320 } 321 322 void 323 nic_stats_clear(portid_t port_id) 324 { 325 int ret; 326 327 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 328 print_valid_ports(); 329 return; 330 } 331 332 ret = rte_eth_stats_reset(port_id); 333 if (ret != 0) { 334 fprintf(stderr, 335 "%s: Error: failed to reset stats (port %u): %s", 336 __func__, port_id, strerror(-ret)); 337 return; 338 } 339 340 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 341 if (ret != 0) { 342 if (ret < 0) 343 ret = -ret; 344 fprintf(stderr, 345 "%s: Error: failed to get stats (port %u): %s", 346 __func__, port_id, strerror(ret)); 347 return; 348 } 349 printf("\n NIC statistics for port %d cleared\n", port_id); 350 } 351 352 void 353 nic_xstats_display(portid_t port_id) 354 { 355 struct rte_eth_xstat *xstats; 356 int cnt_xstats, idx_xstat; 357 struct rte_eth_xstat_name *xstats_names; 358 359 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 360 print_valid_ports(); 361 return; 362 } 363 printf("###### NIC extended statistics for port %-2d\n", port_id); 364 if (!rte_eth_dev_is_valid_port(port_id)) { 365 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 366 return; 367 } 368 369 /* Get count */ 370 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 371 if (cnt_xstats < 0) { 372 fprintf(stderr, "Error: Cannot get count of xstats\n"); 373 return; 374 } 375 376 /* Get id-name lookup table */ 377 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 378 if (xstats_names == NULL) { 379 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 380 return; 381 } 382 if (cnt_xstats != rte_eth_xstats_get_names( 383 port_id, xstats_names, cnt_xstats)) { 384 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 385 free(xstats_names); 386 return; 387 } 388 389 /* Get stats themselves */ 390 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 391 if (xstats == NULL) { 392 fprintf(stderr, "Cannot allocate memory for xstats\n"); 393 free(xstats_names); 394 return; 395 } 396 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 397 fprintf(stderr, "Error: Unable to get xstats\n"); 398 free(xstats_names); 399 free(xstats); 400 return; 401 } 402 403 /* Display xstats */ 404 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 405 if (xstats_hide_zero && !xstats[idx_xstat].value) 406 continue; 407 printf("%s: %"PRIu64"\n", 408 xstats_names[idx_xstat].name, 409 xstats[idx_xstat].value); 410 } 411 free(xstats_names); 412 free(xstats); 413 } 414 415 void 416 nic_xstats_clear(portid_t port_id) 417 { 418 int ret; 419 420 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 421 print_valid_ports(); 422 return; 423 } 424 425 ret = rte_eth_xstats_reset(port_id); 426 if (ret != 0) { 427 fprintf(stderr, 428 "%s: Error: failed to reset xstats (port %u): %s\n", 429 __func__, port_id, strerror(-ret)); 430 return; 431 } 432 433 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 434 if (ret != 0) { 435 if (ret < 0) 436 ret = -ret; 437 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 438 __func__, port_id, strerror(ret)); 439 return; 440 } 441 } 442 443 static const char * 444 get_queue_state_name(uint8_t queue_state) 445 { 446 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 447 return "stopped"; 448 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 449 return "started"; 450 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 451 return "hairpin"; 452 else 453 return "unknown"; 454 } 455 456 void 457 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 458 { 459 struct rte_eth_burst_mode mode; 460 struct rte_eth_rxq_info qinfo; 461 int32_t rc; 462 static const char *info_border = "*********************"; 463 464 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 465 if (rc != 0) { 466 fprintf(stderr, 467 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 468 port_id, queue_id, strerror(-rc), rc); 469 return; 470 } 471 472 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 473 info_border, port_id, queue_id, info_border); 474 475 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 476 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 477 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 478 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 479 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 480 printf("\nRX drop packets: %s", 481 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 482 printf("\nRX deferred start: %s", 483 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 484 printf("\nRX scattered packets: %s", 485 (qinfo.scattered_rx != 0) ? "on" : "off"); 486 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 487 if (qinfo.rx_buf_size != 0) 488 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 489 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 490 491 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 492 printf("\nBurst mode: %s%s", 493 mode.info, 494 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 495 " (per queue)" : ""); 496 497 printf("\n"); 498 } 499 500 void 501 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 502 { 503 struct rte_eth_burst_mode mode; 504 struct rte_eth_txq_info qinfo; 505 int32_t rc; 506 static const char *info_border = "*********************"; 507 508 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 509 if (rc != 0) { 510 fprintf(stderr, 511 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 512 port_id, queue_id, strerror(-rc), rc); 513 return; 514 } 515 516 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 517 info_border, port_id, queue_id, info_border); 518 519 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 520 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 521 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 522 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 523 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 524 printf("\nTX deferred start: %s", 525 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 526 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 527 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 528 529 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 530 printf("\nBurst mode: %s%s", 531 mode.info, 532 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 533 " (per queue)" : ""); 534 535 printf("\n"); 536 } 537 538 static int bus_match_all(const struct rte_bus *bus, const void *data) 539 { 540 RTE_SET_USED(bus); 541 RTE_SET_USED(data); 542 return 0; 543 } 544 545 static void 546 device_infos_display_speeds(uint32_t speed_capa) 547 { 548 printf("\n\tDevice speed capability:"); 549 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 550 printf(" Autonegotiate (all speeds)"); 551 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 552 printf(" Disable autonegotiate (fixed speed) "); 553 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 554 printf(" 10 Mbps half-duplex "); 555 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 556 printf(" 10 Mbps full-duplex "); 557 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 558 printf(" 100 Mbps half-duplex "); 559 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 560 printf(" 100 Mbps full-duplex "); 561 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 562 printf(" 1 Gbps "); 563 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 564 printf(" 2.5 Gbps "); 565 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 566 printf(" 5 Gbps "); 567 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 568 printf(" 10 Gbps "); 569 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 570 printf(" 20 Gbps "); 571 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 572 printf(" 25 Gbps "); 573 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 574 printf(" 40 Gbps "); 575 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 576 printf(" 50 Gbps "); 577 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 578 printf(" 56 Gbps "); 579 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 580 printf(" 100 Gbps "); 581 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 582 printf(" 200 Gbps "); 583 } 584 585 void 586 device_infos_display(const char *identifier) 587 { 588 static const char *info_border = "*********************"; 589 struct rte_bus *start = NULL, *next; 590 struct rte_dev_iterator dev_iter; 591 char name[RTE_ETH_NAME_MAX_LEN]; 592 struct rte_ether_addr mac_addr; 593 struct rte_device *dev; 594 struct rte_devargs da; 595 portid_t port_id; 596 struct rte_eth_dev_info dev_info; 597 char devstr[128]; 598 599 memset(&da, 0, sizeof(da)); 600 if (!identifier) 601 goto skip_parse; 602 603 if (rte_devargs_parsef(&da, "%s", identifier)) { 604 fprintf(stderr, "cannot parse identifier\n"); 605 return; 606 } 607 608 skip_parse: 609 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 610 611 start = next; 612 if (identifier && da.bus != next) 613 continue; 614 615 /* Skip buses that don't have iterate method */ 616 if (!next->dev_iterate) 617 continue; 618 619 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 620 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 621 622 if (!dev->driver) 623 continue; 624 /* Check for matching device if identifier is present */ 625 if (identifier && 626 strncmp(da.name, dev->name, strlen(dev->name))) 627 continue; 628 printf("\n%s Infos for device %s %s\n", 629 info_border, dev->name, info_border); 630 printf("Bus name: %s", dev->bus->name); 631 printf("\nDriver name: %s", dev->driver->name); 632 printf("\nDevargs: %s", 633 dev->devargs ? dev->devargs->args : ""); 634 printf("\nConnect to socket: %d", dev->numa_node); 635 printf("\n"); 636 637 /* List ports with matching device name */ 638 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 639 printf("\n\tPort id: %-2d", port_id); 640 if (eth_macaddr_get_print_err(port_id, 641 &mac_addr) == 0) 642 print_ethaddr("\n\tMAC address: ", 643 &mac_addr); 644 rte_eth_dev_get_name_by_port(port_id, name); 645 printf("\n\tDevice name: %s", name); 646 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 647 device_infos_display_speeds(dev_info.speed_capa); 648 printf("\n"); 649 } 650 } 651 }; 652 rte_devargs_reset(&da); 653 } 654 655 static void 656 print_dev_capabilities(uint64_t capabilities) 657 { 658 uint64_t single_capa; 659 int begin; 660 int end; 661 int bit; 662 663 if (capabilities == 0) 664 return; 665 666 begin = __builtin_ctzll(capabilities); 667 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 668 669 single_capa = 1ULL << begin; 670 for (bit = begin; bit < end; bit++) { 671 if (capabilities & single_capa) 672 printf(" %s", 673 rte_eth_dev_capability_name(single_capa)); 674 single_capa <<= 1; 675 } 676 } 677 678 void 679 port_infos_display(portid_t port_id) 680 { 681 struct rte_port *port; 682 struct rte_ether_addr mac_addr; 683 struct rte_eth_link link; 684 struct rte_eth_dev_info dev_info; 685 int vlan_offload; 686 struct rte_mempool * mp; 687 static const char *info_border = "*********************"; 688 uint16_t mtu; 689 char name[RTE_ETH_NAME_MAX_LEN]; 690 int ret; 691 char fw_version[ETHDEV_FWVERS_LEN]; 692 693 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 694 print_valid_ports(); 695 return; 696 } 697 port = &ports[port_id]; 698 ret = eth_link_get_nowait_print_err(port_id, &link); 699 if (ret < 0) 700 return; 701 702 ret = eth_dev_info_get_print_err(port_id, &dev_info); 703 if (ret != 0) 704 return; 705 706 printf("\n%s Infos for port %-2d %s\n", 707 info_border, port_id, info_border); 708 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 709 print_ethaddr("MAC address: ", &mac_addr); 710 rte_eth_dev_get_name_by_port(port_id, name); 711 printf("\nDevice name: %s", name); 712 printf("\nDriver name: %s", dev_info.driver_name); 713 714 if (rte_eth_dev_fw_version_get(port_id, fw_version, 715 ETHDEV_FWVERS_LEN) == 0) 716 printf("\nFirmware-version: %s", fw_version); 717 else 718 printf("\nFirmware-version: %s", "not available"); 719 720 if (dev_info.device->devargs && dev_info.device->devargs->args) 721 printf("\nDevargs: %s", dev_info.device->devargs->args); 722 printf("\nConnect to socket: %u", port->socket_id); 723 724 if (port_numa[port_id] != NUMA_NO_CONFIG) { 725 mp = mbuf_pool_find(port_numa[port_id], 0); 726 if (mp) 727 printf("\nmemory allocation on the socket: %d", 728 port_numa[port_id]); 729 } else 730 printf("\nmemory allocation on the socket: %u",port->socket_id); 731 732 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 733 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 734 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 735 ("full-duplex") : ("half-duplex")); 736 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 737 ("On") : ("Off")); 738 739 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 740 printf("MTU: %u\n", mtu); 741 742 printf("Promiscuous mode: %s\n", 743 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 744 printf("Allmulticast mode: %s\n", 745 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 746 printf("Maximum number of MAC addresses: %u\n", 747 (unsigned int)(port->dev_info.max_mac_addrs)); 748 printf("Maximum number of MAC addresses of hash filtering: %u\n", 749 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 750 751 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 752 if (vlan_offload >= 0){ 753 printf("VLAN offload: \n"); 754 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 755 printf(" strip on, "); 756 else 757 printf(" strip off, "); 758 759 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 760 printf("filter on, "); 761 else 762 printf("filter off, "); 763 764 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 765 printf("extend on, "); 766 else 767 printf("extend off, "); 768 769 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 770 printf("qinq strip on\n"); 771 else 772 printf("qinq strip off\n"); 773 } 774 775 if (dev_info.hash_key_size > 0) 776 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 777 if (dev_info.reta_size > 0) 778 printf("Redirection table size: %u\n", dev_info.reta_size); 779 if (!dev_info.flow_type_rss_offloads) 780 printf("No RSS offload flow type is supported.\n"); 781 else { 782 uint16_t i; 783 char *p; 784 785 printf("Supported RSS offload flow types:\n"); 786 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 787 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 788 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 789 continue; 790 p = flowtype_to_str(i); 791 if (p) 792 printf(" %s\n", p); 793 else 794 printf(" user defined %d\n", i); 795 } 796 } 797 798 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 799 printf("Maximum configurable length of RX packet: %u\n", 800 dev_info.max_rx_pktlen); 801 printf("Maximum configurable size of LRO aggregated packet: %u\n", 802 dev_info.max_lro_pkt_size); 803 if (dev_info.max_vfs) 804 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 805 if (dev_info.max_vmdq_pools) 806 printf("Maximum number of VMDq pools: %u\n", 807 dev_info.max_vmdq_pools); 808 809 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 810 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 811 printf("Max possible number of RXDs per queue: %hu\n", 812 dev_info.rx_desc_lim.nb_max); 813 printf("Min possible number of RXDs per queue: %hu\n", 814 dev_info.rx_desc_lim.nb_min); 815 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 816 817 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 818 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 819 printf("Max possible number of TXDs per queue: %hu\n", 820 dev_info.tx_desc_lim.nb_max); 821 printf("Min possible number of TXDs per queue: %hu\n", 822 dev_info.tx_desc_lim.nb_min); 823 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 824 printf("Max segment number per packet: %hu\n", 825 dev_info.tx_desc_lim.nb_seg_max); 826 printf("Max segment number per MTU/TSO: %hu\n", 827 dev_info.tx_desc_lim.nb_mtu_seg_max); 828 829 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 830 print_dev_capabilities(dev_info.dev_capa); 831 printf(" )\n"); 832 /* Show switch info only if valid switch domain and port id is set */ 833 if (dev_info.switch_info.domain_id != 834 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 835 if (dev_info.switch_info.name) 836 printf("Switch name: %s\n", dev_info.switch_info.name); 837 838 printf("Switch domain Id: %u\n", 839 dev_info.switch_info.domain_id); 840 printf("Switch Port Id: %u\n", 841 dev_info.switch_info.port_id); 842 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 843 printf("Switch Rx domain: %u\n", 844 dev_info.switch_info.rx_domain); 845 } 846 } 847 848 void 849 port_summary_header_display(void) 850 { 851 uint16_t port_number; 852 853 port_number = rte_eth_dev_count_avail(); 854 printf("Number of available ports: %i\n", port_number); 855 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 856 "Driver", "Status", "Link"); 857 } 858 859 void 860 port_summary_display(portid_t port_id) 861 { 862 struct rte_ether_addr mac_addr; 863 struct rte_eth_link link; 864 struct rte_eth_dev_info dev_info; 865 char name[RTE_ETH_NAME_MAX_LEN]; 866 int ret; 867 868 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 869 print_valid_ports(); 870 return; 871 } 872 873 ret = eth_link_get_nowait_print_err(port_id, &link); 874 if (ret < 0) 875 return; 876 877 ret = eth_dev_info_get_print_err(port_id, &dev_info); 878 if (ret != 0) 879 return; 880 881 rte_eth_dev_get_name_by_port(port_id, name); 882 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 883 if (ret != 0) 884 return; 885 886 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 887 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 888 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 889 rte_eth_link_speed_to_str(link.link_speed)); 890 } 891 892 void 893 port_eeprom_display(portid_t port_id) 894 { 895 struct rte_dev_eeprom_info einfo; 896 int ret; 897 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 898 print_valid_ports(); 899 return; 900 } 901 902 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 903 if (len_eeprom < 0) { 904 switch (len_eeprom) { 905 case -ENODEV: 906 fprintf(stderr, "port index %d invalid\n", port_id); 907 break; 908 case -ENOTSUP: 909 fprintf(stderr, "operation not supported by device\n"); 910 break; 911 case -EIO: 912 fprintf(stderr, "device is removed\n"); 913 break; 914 default: 915 fprintf(stderr, "Unable to get EEPROM: %d\n", 916 len_eeprom); 917 break; 918 } 919 return; 920 } 921 922 einfo.offset = 0; 923 einfo.length = len_eeprom; 924 einfo.data = calloc(1, len_eeprom); 925 if (!einfo.data) { 926 fprintf(stderr, 927 "Allocation of port %u eeprom data failed\n", 928 port_id); 929 return; 930 } 931 932 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 933 if (ret != 0) { 934 switch (ret) { 935 case -ENODEV: 936 fprintf(stderr, "port index %d invalid\n", port_id); 937 break; 938 case -ENOTSUP: 939 fprintf(stderr, "operation not supported by device\n"); 940 break; 941 case -EIO: 942 fprintf(stderr, "device is removed\n"); 943 break; 944 default: 945 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 946 break; 947 } 948 free(einfo.data); 949 return; 950 } 951 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 952 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 953 free(einfo.data); 954 } 955 956 void 957 port_module_eeprom_display(portid_t port_id) 958 { 959 struct rte_eth_dev_module_info minfo; 960 struct rte_dev_eeprom_info einfo; 961 int ret; 962 963 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 964 print_valid_ports(); 965 return; 966 } 967 968 969 ret = rte_eth_dev_get_module_info(port_id, &minfo); 970 if (ret != 0) { 971 switch (ret) { 972 case -ENODEV: 973 fprintf(stderr, "port index %d invalid\n", port_id); 974 break; 975 case -ENOTSUP: 976 fprintf(stderr, "operation not supported by device\n"); 977 break; 978 case -EIO: 979 fprintf(stderr, "device is removed\n"); 980 break; 981 default: 982 fprintf(stderr, "Unable to get module EEPROM: %d\n", 983 ret); 984 break; 985 } 986 return; 987 } 988 989 einfo.offset = 0; 990 einfo.length = minfo.eeprom_len; 991 einfo.data = calloc(1, minfo.eeprom_len); 992 if (!einfo.data) { 993 fprintf(stderr, 994 "Allocation of port %u eeprom data failed\n", 995 port_id); 996 return; 997 } 998 999 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1000 if (ret != 0) { 1001 switch (ret) { 1002 case -ENODEV: 1003 fprintf(stderr, "port index %d invalid\n", port_id); 1004 break; 1005 case -ENOTSUP: 1006 fprintf(stderr, "operation not supported by device\n"); 1007 break; 1008 case -EIO: 1009 fprintf(stderr, "device is removed\n"); 1010 break; 1011 default: 1012 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1013 ret); 1014 break; 1015 } 1016 free(einfo.data); 1017 return; 1018 } 1019 1020 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1021 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1022 free(einfo.data); 1023 } 1024 1025 int 1026 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1027 { 1028 uint16_t pid; 1029 1030 if (port_id == (portid_t)RTE_PORT_ALL) 1031 return 0; 1032 1033 RTE_ETH_FOREACH_DEV(pid) 1034 if (port_id == pid) 1035 return 0; 1036 1037 if (warning == ENABLED_WARN) 1038 fprintf(stderr, "Invalid port %d\n", port_id); 1039 1040 return 1; 1041 } 1042 1043 void print_valid_ports(void) 1044 { 1045 portid_t pid; 1046 1047 printf("The valid ports array is ["); 1048 RTE_ETH_FOREACH_DEV(pid) { 1049 printf(" %d", pid); 1050 } 1051 printf(" ]\n"); 1052 } 1053 1054 static int 1055 vlan_id_is_invalid(uint16_t vlan_id) 1056 { 1057 if (vlan_id < 4096) 1058 return 0; 1059 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1060 return 1; 1061 } 1062 1063 static int 1064 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1065 { 1066 const struct rte_pci_device *pci_dev; 1067 const struct rte_bus *bus; 1068 uint64_t pci_len; 1069 1070 if (reg_off & 0x3) { 1071 fprintf(stderr, 1072 "Port register offset 0x%X not aligned on a 4-byte boundary\n", 1073 (unsigned int)reg_off); 1074 return 1; 1075 } 1076 1077 if (!ports[port_id].dev_info.device) { 1078 fprintf(stderr, "Invalid device\n"); 1079 return 0; 1080 } 1081 1082 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1083 if (bus && !strcmp(bus->name, "pci")) { 1084 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1085 } else { 1086 fprintf(stderr, "Not a PCI device\n"); 1087 return 1; 1088 } 1089 1090 pci_len = pci_dev->mem_resource[0].len; 1091 if (reg_off >= pci_len) { 1092 fprintf(stderr, 1093 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", 1094 port_id, (unsigned int)reg_off, (unsigned int)reg_off, 1095 pci_len); 1096 return 1; 1097 } 1098 return 0; 1099 } 1100 1101 static int 1102 reg_bit_pos_is_invalid(uint8_t bit_pos) 1103 { 1104 if (bit_pos <= 31) 1105 return 0; 1106 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); 1107 return 1; 1108 } 1109 1110 #define display_port_and_reg_off(port_id, reg_off) \ 1111 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1112 1113 static inline void 1114 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1115 { 1116 display_port_and_reg_off(port_id, (unsigned)reg_off); 1117 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1118 } 1119 1120 void 1121 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1122 { 1123 uint32_t reg_v; 1124 1125 1126 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1127 return; 1128 if (port_reg_off_is_invalid(port_id, reg_off)) 1129 return; 1130 if (reg_bit_pos_is_invalid(bit_x)) 1131 return; 1132 reg_v = port_id_pci_reg_read(port_id, reg_off); 1133 display_port_and_reg_off(port_id, (unsigned)reg_off); 1134 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1135 } 1136 1137 void 1138 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1139 uint8_t bit1_pos, uint8_t bit2_pos) 1140 { 1141 uint32_t reg_v; 1142 uint8_t l_bit; 1143 uint8_t h_bit; 1144 1145 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1146 return; 1147 if (port_reg_off_is_invalid(port_id, reg_off)) 1148 return; 1149 if (reg_bit_pos_is_invalid(bit1_pos)) 1150 return; 1151 if (reg_bit_pos_is_invalid(bit2_pos)) 1152 return; 1153 if (bit1_pos > bit2_pos) 1154 l_bit = bit2_pos, h_bit = bit1_pos; 1155 else 1156 l_bit = bit1_pos, h_bit = bit2_pos; 1157 1158 reg_v = port_id_pci_reg_read(port_id, reg_off); 1159 reg_v >>= l_bit; 1160 if (h_bit < 31) 1161 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1162 display_port_and_reg_off(port_id, (unsigned)reg_off); 1163 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1164 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1165 } 1166 1167 void 1168 port_reg_display(portid_t port_id, uint32_t reg_off) 1169 { 1170 uint32_t reg_v; 1171 1172 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1173 return; 1174 if (port_reg_off_is_invalid(port_id, reg_off)) 1175 return; 1176 reg_v = port_id_pci_reg_read(port_id, reg_off); 1177 display_port_reg_value(port_id, reg_off, reg_v); 1178 } 1179 1180 void 1181 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1182 uint8_t bit_v) 1183 { 1184 uint32_t reg_v; 1185 1186 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1187 return; 1188 if (port_reg_off_is_invalid(port_id, reg_off)) 1189 return; 1190 if (reg_bit_pos_is_invalid(bit_pos)) 1191 return; 1192 if (bit_v > 1) { 1193 fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n", 1194 (int) bit_v); 1195 return; 1196 } 1197 reg_v = port_id_pci_reg_read(port_id, reg_off); 1198 if (bit_v == 0) 1199 reg_v &= ~(1 << bit_pos); 1200 else 1201 reg_v |= (1 << bit_pos); 1202 port_id_pci_reg_write(port_id, reg_off, reg_v); 1203 display_port_reg_value(port_id, reg_off, reg_v); 1204 } 1205 1206 void 1207 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1208 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1209 { 1210 uint32_t max_v; 1211 uint32_t reg_v; 1212 uint8_t l_bit; 1213 uint8_t h_bit; 1214 1215 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1216 return; 1217 if (port_reg_off_is_invalid(port_id, reg_off)) 1218 return; 1219 if (reg_bit_pos_is_invalid(bit1_pos)) 1220 return; 1221 if (reg_bit_pos_is_invalid(bit2_pos)) 1222 return; 1223 if (bit1_pos > bit2_pos) 1224 l_bit = bit2_pos, h_bit = bit1_pos; 1225 else 1226 l_bit = bit1_pos, h_bit = bit2_pos; 1227 1228 if ((h_bit - l_bit) < 31) 1229 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1230 else 1231 max_v = 0xFFFFFFFF; 1232 1233 if (value > max_v) { 1234 fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n", 1235 (unsigned)value, (unsigned)value, 1236 (unsigned)max_v, (unsigned)max_v); 1237 return; 1238 } 1239 reg_v = port_id_pci_reg_read(port_id, reg_off); 1240 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1241 reg_v |= (value << l_bit); /* Set changed bits */ 1242 port_id_pci_reg_write(port_id, reg_off, reg_v); 1243 display_port_reg_value(port_id, reg_off, reg_v); 1244 } 1245 1246 void 1247 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1248 { 1249 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1250 return; 1251 if (port_reg_off_is_invalid(port_id, reg_off)) 1252 return; 1253 port_id_pci_reg_write(port_id, reg_off, reg_v); 1254 display_port_reg_value(port_id, reg_off, reg_v); 1255 } 1256 1257 static uint32_t 1258 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1259 { 1260 uint32_t overhead_len; 1261 1262 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1263 overhead_len = max_rx_pktlen - max_mtu; 1264 else 1265 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1266 1267 return overhead_len; 1268 } 1269 1270 static int 1271 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1272 { 1273 struct rte_eth_dev_info dev_info; 1274 uint32_t overhead_len; 1275 uint32_t frame_size; 1276 int ret; 1277 1278 ret = rte_eth_dev_info_get(port_id, &dev_info); 1279 if (ret != 0) 1280 return ret; 1281 1282 if (mtu < dev_info.min_mtu) { 1283 fprintf(stderr, 1284 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1285 mtu, dev_info.min_mtu, port_id); 1286 return -EINVAL; 1287 } 1288 if (mtu > dev_info.max_mtu) { 1289 fprintf(stderr, 1290 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1291 mtu, dev_info.max_mtu, port_id); 1292 return -EINVAL; 1293 } 1294 1295 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1296 dev_info.max_mtu); 1297 frame_size = mtu + overhead_len; 1298 if (frame_size > dev_info.max_rx_pktlen) { 1299 fprintf(stderr, 1300 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1301 frame_size, dev_info.max_rx_pktlen, port_id); 1302 return -EINVAL; 1303 } 1304 1305 return 0; 1306 } 1307 1308 void 1309 port_mtu_set(portid_t port_id, uint16_t mtu) 1310 { 1311 struct rte_port *port = &ports[port_id]; 1312 int diag; 1313 1314 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1315 return; 1316 1317 diag = eth_dev_validate_mtu(port_id, mtu); 1318 if (diag != 0) 1319 return; 1320 1321 if (port->need_reconfig == 0) { 1322 diag = rte_eth_dev_set_mtu(port_id, mtu); 1323 if (diag != 0) { 1324 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1325 return; 1326 } 1327 } 1328 1329 port->dev_conf.rxmode.mtu = mtu; 1330 } 1331 1332 /* Generic flow management functions. */ 1333 1334 static struct port_flow_tunnel * 1335 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1336 { 1337 struct port_flow_tunnel *flow_tunnel; 1338 1339 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1340 if (flow_tunnel->id == port_tunnel_id) 1341 goto out; 1342 } 1343 flow_tunnel = NULL; 1344 1345 out: 1346 return flow_tunnel; 1347 } 1348 1349 const char * 1350 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1351 { 1352 const char *type; 1353 switch (tunnel->type) { 1354 default: 1355 type = "unknown"; 1356 break; 1357 case RTE_FLOW_ITEM_TYPE_VXLAN: 1358 type = "vxlan"; 1359 break; 1360 case RTE_FLOW_ITEM_TYPE_GRE: 1361 type = "gre"; 1362 break; 1363 case RTE_FLOW_ITEM_TYPE_NVGRE: 1364 type = "nvgre"; 1365 break; 1366 case RTE_FLOW_ITEM_TYPE_GENEVE: 1367 type = "geneve"; 1368 break; 1369 } 1370 1371 return type; 1372 } 1373 1374 struct port_flow_tunnel * 1375 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1376 { 1377 struct rte_port *port = &ports[port_id]; 1378 struct port_flow_tunnel *flow_tunnel; 1379 1380 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1381 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1382 goto out; 1383 } 1384 flow_tunnel = NULL; 1385 1386 out: 1387 return flow_tunnel; 1388 } 1389 1390 void port_flow_tunnel_list(portid_t port_id) 1391 { 1392 struct rte_port *port = &ports[port_id]; 1393 struct port_flow_tunnel *flt; 1394 1395 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1396 printf("port %u tunnel #%u type=%s", 1397 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1398 if (flt->tunnel.tun_id) 1399 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1400 printf("\n"); 1401 } 1402 } 1403 1404 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1405 { 1406 struct rte_port *port = &ports[port_id]; 1407 struct port_flow_tunnel *flt; 1408 1409 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1410 if (flt->id == tunnel_id) 1411 break; 1412 } 1413 if (flt) { 1414 LIST_REMOVE(flt, chain); 1415 free(flt); 1416 printf("port %u: flow tunnel #%u destroyed\n", 1417 port_id, tunnel_id); 1418 } 1419 } 1420 1421 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1422 { 1423 struct rte_port *port = &ports[port_id]; 1424 enum rte_flow_item_type type; 1425 struct port_flow_tunnel *flt; 1426 1427 if (!strcmp(ops->type, "vxlan")) 1428 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1429 else if (!strcmp(ops->type, "gre")) 1430 type = RTE_FLOW_ITEM_TYPE_GRE; 1431 else if (!strcmp(ops->type, "nvgre")) 1432 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1433 else if (!strcmp(ops->type, "geneve")) 1434 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1435 else { 1436 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1437 ops->type); 1438 return; 1439 } 1440 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1441 if (flt->tunnel.type == type) 1442 break; 1443 } 1444 if (!flt) { 1445 flt = calloc(1, sizeof(*flt)); 1446 if (!flt) { 1447 fprintf(stderr, "failed to allocate port flt object\n"); 1448 return; 1449 } 1450 flt->tunnel.type = type; 1451 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1452 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1453 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1454 } 1455 printf("port %d: flow tunnel #%u type %s\n", 1456 port_id, flt->id, ops->type); 1457 } 1458 1459 /** Generate a port_flow entry from attributes/pattern/actions. */ 1460 static struct port_flow * 1461 port_flow_new(const struct rte_flow_attr *attr, 1462 const struct rte_flow_item *pattern, 1463 const struct rte_flow_action *actions, 1464 struct rte_flow_error *error) 1465 { 1466 const struct rte_flow_conv_rule rule = { 1467 .attr_ro = attr, 1468 .pattern_ro = pattern, 1469 .actions_ro = actions, 1470 }; 1471 struct port_flow *pf; 1472 int ret; 1473 1474 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1475 if (ret < 0) 1476 return NULL; 1477 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1478 if (!pf) { 1479 rte_flow_error_set 1480 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1481 "calloc() failed"); 1482 return NULL; 1483 } 1484 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1485 error) >= 0) 1486 return pf; 1487 free(pf); 1488 return NULL; 1489 } 1490 1491 /** Print a message out of a flow error. */ 1492 static int 1493 port_flow_complain(struct rte_flow_error *error) 1494 { 1495 static const char *const errstrlist[] = { 1496 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1497 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1498 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1499 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1500 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1501 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1502 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1503 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1504 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1505 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1506 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1507 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1508 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1509 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1510 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1511 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1512 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1513 }; 1514 const char *errstr; 1515 char buf[32]; 1516 int err = rte_errno; 1517 1518 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1519 !errstrlist[error->type]) 1520 errstr = "unknown type"; 1521 else 1522 errstr = errstrlist[error->type]; 1523 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1524 __func__, error->type, errstr, 1525 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1526 error->cause), buf) : "", 1527 error->message ? error->message : "(no stated reason)", 1528 rte_strerror(err)); 1529 1530 switch (error->type) { 1531 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1532 fprintf(stderr, "The status suggests the use of \"transfer\" " 1533 "as the possible cause of the failure. Make " 1534 "sure that the flow in question and its " 1535 "indirect components (if any) are managed " 1536 "via \"transfer\" proxy port. Use command " 1537 "\"show port (port_id) flow transfer proxy\" " 1538 "to figure out the proxy port ID\n"); 1539 break; 1540 default: 1541 break; 1542 } 1543 1544 return -err; 1545 } 1546 1547 static void 1548 rss_config_display(struct rte_flow_action_rss *rss_conf) 1549 { 1550 uint8_t i; 1551 1552 if (rss_conf == NULL) { 1553 fprintf(stderr, "Invalid rule\n"); 1554 return; 1555 } 1556 1557 printf("RSS:\n" 1558 " queues:"); 1559 if (rss_conf->queue_num == 0) 1560 printf(" none"); 1561 for (i = 0; i < rss_conf->queue_num; i++) 1562 printf(" %d", rss_conf->queue[i]); 1563 printf("\n"); 1564 1565 printf(" function: "); 1566 switch (rss_conf->func) { 1567 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1568 printf("default\n"); 1569 break; 1570 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1571 printf("toeplitz\n"); 1572 break; 1573 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1574 printf("simple_xor\n"); 1575 break; 1576 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1577 printf("symmetric_toeplitz\n"); 1578 break; 1579 default: 1580 printf("Unknown function\n"); 1581 return; 1582 } 1583 1584 printf(" types:\n"); 1585 if (rss_conf->types == 0) { 1586 printf(" none\n"); 1587 return; 1588 } 1589 for (i = 0; rss_type_table[i].str; i++) { 1590 if ((rss_conf->types & 1591 rss_type_table[i].rss_type) == 1592 rss_type_table[i].rss_type && 1593 rss_type_table[i].rss_type != 0) 1594 printf(" %s\n", rss_type_table[i].str); 1595 } 1596 } 1597 1598 static struct port_indirect_action * 1599 action_get_by_id(portid_t port_id, uint32_t id) 1600 { 1601 struct rte_port *port; 1602 struct port_indirect_action **ppia; 1603 struct port_indirect_action *pia = NULL; 1604 1605 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1606 port_id == (portid_t)RTE_PORT_ALL) 1607 return NULL; 1608 port = &ports[port_id]; 1609 ppia = &port->actions_list; 1610 while (*ppia) { 1611 if ((*ppia)->id == id) { 1612 pia = *ppia; 1613 break; 1614 } 1615 ppia = &(*ppia)->next; 1616 } 1617 if (!pia) 1618 fprintf(stderr, 1619 "Failed to find indirect action #%u on port %u\n", 1620 id, port_id); 1621 return pia; 1622 } 1623 1624 static int 1625 action_alloc(portid_t port_id, uint32_t id, 1626 struct port_indirect_action **action) 1627 { 1628 struct rte_port *port; 1629 struct port_indirect_action **ppia; 1630 struct port_indirect_action *pia = NULL; 1631 1632 *action = NULL; 1633 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1634 port_id == (portid_t)RTE_PORT_ALL) 1635 return -EINVAL; 1636 port = &ports[port_id]; 1637 if (id == UINT32_MAX) { 1638 /* taking first available ID */ 1639 if (port->actions_list) { 1640 if (port->actions_list->id == UINT32_MAX - 1) { 1641 fprintf(stderr, 1642 "Highest indirect action ID is already assigned, delete it first\n"); 1643 return -ENOMEM; 1644 } 1645 id = port->actions_list->id + 1; 1646 } else { 1647 id = 0; 1648 } 1649 } 1650 pia = calloc(1, sizeof(*pia)); 1651 if (!pia) { 1652 fprintf(stderr, 1653 "Allocation of port %u indirect action failed\n", 1654 port_id); 1655 return -ENOMEM; 1656 } 1657 ppia = &port->actions_list; 1658 while (*ppia && (*ppia)->id > id) 1659 ppia = &(*ppia)->next; 1660 if (*ppia && (*ppia)->id == id) { 1661 fprintf(stderr, 1662 "Indirect action #%u is already assigned, delete it first\n", 1663 id); 1664 free(pia); 1665 return -EINVAL; 1666 } 1667 pia->next = *ppia; 1668 pia->id = id; 1669 *ppia = pia; 1670 *action = pia; 1671 return 0; 1672 } 1673 1674 static int 1675 template_alloc(uint32_t id, struct port_template **template, 1676 struct port_template **list) 1677 { 1678 struct port_template *lst = *list; 1679 struct port_template **ppt; 1680 struct port_template *pt = NULL; 1681 1682 *template = NULL; 1683 if (id == UINT32_MAX) { 1684 /* taking first available ID */ 1685 if (lst) { 1686 if (lst->id == UINT32_MAX - 1) { 1687 printf("Highest template ID is already" 1688 " assigned, delete it first\n"); 1689 return -ENOMEM; 1690 } 1691 id = lst->id + 1; 1692 } else { 1693 id = 0; 1694 } 1695 } 1696 pt = calloc(1, sizeof(*pt)); 1697 if (!pt) { 1698 printf("Allocation of port template failed\n"); 1699 return -ENOMEM; 1700 } 1701 ppt = list; 1702 while (*ppt && (*ppt)->id > id) 1703 ppt = &(*ppt)->next; 1704 if (*ppt && (*ppt)->id == id) { 1705 printf("Template #%u is already assigned," 1706 " delete it first\n", id); 1707 free(pt); 1708 return -EINVAL; 1709 } 1710 pt->next = *ppt; 1711 pt->id = id; 1712 *ppt = pt; 1713 *template = pt; 1714 return 0; 1715 } 1716 1717 static int 1718 table_alloc(uint32_t id, struct port_table **table, 1719 struct port_table **list) 1720 { 1721 struct port_table *lst = *list; 1722 struct port_table **ppt; 1723 struct port_table *pt = NULL; 1724 1725 *table = NULL; 1726 if (id == UINT32_MAX) { 1727 /* taking first available ID */ 1728 if (lst) { 1729 if (lst->id == UINT32_MAX - 1) { 1730 printf("Highest table ID is already" 1731 " assigned, delete it first\n"); 1732 return -ENOMEM; 1733 } 1734 id = lst->id + 1; 1735 } else { 1736 id = 0; 1737 } 1738 } 1739 pt = calloc(1, sizeof(*pt)); 1740 if (!pt) { 1741 printf("Allocation of table failed\n"); 1742 return -ENOMEM; 1743 } 1744 ppt = list; 1745 while (*ppt && (*ppt)->id > id) 1746 ppt = &(*ppt)->next; 1747 if (*ppt && (*ppt)->id == id) { 1748 printf("Table #%u is already assigned," 1749 " delete it first\n", id); 1750 free(pt); 1751 return -EINVAL; 1752 } 1753 pt->next = *ppt; 1754 pt->id = id; 1755 *ppt = pt; 1756 *table = pt; 1757 return 0; 1758 } 1759 1760 /** Get info about flow management resources. */ 1761 int 1762 port_flow_get_info(portid_t port_id) 1763 { 1764 struct rte_flow_port_info port_info; 1765 struct rte_flow_queue_info queue_info; 1766 struct rte_flow_error error; 1767 1768 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1769 port_id == (portid_t)RTE_PORT_ALL) 1770 return -EINVAL; 1771 /* Poisoning to make sure PMDs update it in case of error. */ 1772 memset(&error, 0x99, sizeof(error)); 1773 memset(&port_info, 0, sizeof(port_info)); 1774 memset(&queue_info, 0, sizeof(queue_info)); 1775 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1776 return port_flow_complain(&error); 1777 printf("Flow engine resources on port %u:\n" 1778 "Number of queues: %d\n" 1779 "Size of queues: %d\n" 1780 "Number of counters: %d\n" 1781 "Number of aging objects: %d\n" 1782 "Number of meter actions: %d\n", 1783 port_id, port_info.max_nb_queues, 1784 queue_info.max_size, 1785 port_info.max_nb_counters, 1786 port_info.max_nb_aging_objects, 1787 port_info.max_nb_meters); 1788 return 0; 1789 } 1790 1791 /** Configure flow management resources. */ 1792 int 1793 port_flow_configure(portid_t port_id, 1794 const struct rte_flow_port_attr *port_attr, 1795 uint16_t nb_queue, 1796 const struct rte_flow_queue_attr *queue_attr) 1797 { 1798 struct rte_port *port; 1799 struct rte_flow_error error; 1800 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1801 int std_queue; 1802 1803 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1804 port_id == (portid_t)RTE_PORT_ALL) 1805 return -EINVAL; 1806 port = &ports[port_id]; 1807 port->queue_nb = nb_queue; 1808 port->queue_sz = queue_attr->size; 1809 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1810 attr_list[std_queue] = queue_attr; 1811 /* Poisoning to make sure PMDs update it in case of error. */ 1812 memset(&error, 0x66, sizeof(error)); 1813 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1814 return port_flow_complain(&error); 1815 printf("Configure flows on port %u: " 1816 "number of queues %d with %d elements\n", 1817 port_id, nb_queue, queue_attr->size); 1818 return 0; 1819 } 1820 1821 /** Create indirect action */ 1822 int 1823 port_action_handle_create(portid_t port_id, uint32_t id, 1824 const struct rte_flow_indir_action_conf *conf, 1825 const struct rte_flow_action *action) 1826 { 1827 struct port_indirect_action *pia; 1828 int ret; 1829 struct rte_flow_error error; 1830 1831 ret = action_alloc(port_id, id, &pia); 1832 if (ret) 1833 return ret; 1834 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1835 struct rte_flow_action_age *age = 1836 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1837 1838 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1839 age->context = &pia->age_type; 1840 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1841 struct rte_flow_action_conntrack *ct = 1842 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1843 1844 memcpy(ct, &conntrack_context, sizeof(*ct)); 1845 } 1846 /* Poisoning to make sure PMDs update it in case of error. */ 1847 memset(&error, 0x22, sizeof(error)); 1848 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1849 &error); 1850 if (!pia->handle) { 1851 uint32_t destroy_id = pia->id; 1852 port_action_handle_destroy(port_id, 1, &destroy_id); 1853 return port_flow_complain(&error); 1854 } 1855 pia->type = action->type; 1856 printf("Indirect action #%u created\n", pia->id); 1857 return 0; 1858 } 1859 1860 /** Destroy indirect action */ 1861 int 1862 port_action_handle_destroy(portid_t port_id, 1863 uint32_t n, 1864 const uint32_t *actions) 1865 { 1866 struct rte_port *port; 1867 struct port_indirect_action **tmp; 1868 uint32_t c = 0; 1869 int ret = 0; 1870 1871 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1872 port_id == (portid_t)RTE_PORT_ALL) 1873 return -EINVAL; 1874 port = &ports[port_id]; 1875 tmp = &port->actions_list; 1876 while (*tmp) { 1877 uint32_t i; 1878 1879 for (i = 0; i != n; ++i) { 1880 struct rte_flow_error error; 1881 struct port_indirect_action *pia = *tmp; 1882 1883 if (actions[i] != pia->id) 1884 continue; 1885 /* 1886 * Poisoning to make sure PMDs update it in case 1887 * of error. 1888 */ 1889 memset(&error, 0x33, sizeof(error)); 1890 1891 if (pia->handle && rte_flow_action_handle_destroy( 1892 port_id, pia->handle, &error)) { 1893 ret = port_flow_complain(&error); 1894 continue; 1895 } 1896 *tmp = pia->next; 1897 printf("Indirect action #%u destroyed\n", pia->id); 1898 free(pia); 1899 break; 1900 } 1901 if (i == n) 1902 tmp = &(*tmp)->next; 1903 ++c; 1904 } 1905 return ret; 1906 } 1907 1908 1909 /** Get indirect action by port + id */ 1910 struct rte_flow_action_handle * 1911 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1912 { 1913 1914 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1915 1916 return (pia) ? pia->handle : NULL; 1917 } 1918 1919 /** Update indirect action */ 1920 int 1921 port_action_handle_update(portid_t port_id, uint32_t id, 1922 const struct rte_flow_action *action) 1923 { 1924 struct rte_flow_error error; 1925 struct rte_flow_action_handle *action_handle; 1926 struct port_indirect_action *pia; 1927 const void *update; 1928 1929 action_handle = port_action_handle_get_by_id(port_id, id); 1930 if (!action_handle) 1931 return -EINVAL; 1932 pia = action_get_by_id(port_id, id); 1933 if (!pia) 1934 return -EINVAL; 1935 switch (pia->type) { 1936 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1937 update = action->conf; 1938 break; 1939 default: 1940 update = action; 1941 break; 1942 } 1943 if (rte_flow_action_handle_update(port_id, action_handle, update, 1944 &error)) { 1945 return port_flow_complain(&error); 1946 } 1947 printf("Indirect action #%u updated\n", id); 1948 return 0; 1949 } 1950 1951 int 1952 port_action_handle_query(portid_t port_id, uint32_t id) 1953 { 1954 struct rte_flow_error error; 1955 struct port_indirect_action *pia; 1956 union { 1957 struct rte_flow_query_count count; 1958 struct rte_flow_query_age age; 1959 struct rte_flow_action_conntrack ct; 1960 } query; 1961 1962 pia = action_get_by_id(port_id, id); 1963 if (!pia) 1964 return -EINVAL; 1965 switch (pia->type) { 1966 case RTE_FLOW_ACTION_TYPE_AGE: 1967 case RTE_FLOW_ACTION_TYPE_COUNT: 1968 break; 1969 default: 1970 fprintf(stderr, 1971 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1972 id, pia->type, port_id); 1973 return -ENOTSUP; 1974 } 1975 /* Poisoning to make sure PMDs update it in case of error. */ 1976 memset(&error, 0x55, sizeof(error)); 1977 memset(&query, 0, sizeof(query)); 1978 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 1979 return port_flow_complain(&error); 1980 switch (pia->type) { 1981 case RTE_FLOW_ACTION_TYPE_AGE: 1982 printf("Indirect AGE action:\n" 1983 " aged: %u\n" 1984 " sec_since_last_hit_valid: %u\n" 1985 " sec_since_last_hit: %" PRIu32 "\n", 1986 query.age.aged, 1987 query.age.sec_since_last_hit_valid, 1988 query.age.sec_since_last_hit); 1989 break; 1990 case RTE_FLOW_ACTION_TYPE_COUNT: 1991 printf("Indirect COUNT action:\n" 1992 " hits_set: %u\n" 1993 " bytes_set: %u\n" 1994 " hits: %" PRIu64 "\n" 1995 " bytes: %" PRIu64 "\n", 1996 query.count.hits_set, 1997 query.count.bytes_set, 1998 query.count.hits, 1999 query.count.bytes); 2000 break; 2001 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 2002 printf("Conntrack Context:\n" 2003 " Peer: %u, Flow dir: %s, Enable: %u\n" 2004 " Live: %u, SACK: %u, CACK: %u\n" 2005 " Packet dir: %s, Liberal: %u, State: %u\n" 2006 " Factor: %u, Retrans: %u, TCP flags: %u\n" 2007 " Last Seq: %u, Last ACK: %u\n" 2008 " Last Win: %u, Last End: %u\n", 2009 query.ct.peer_port, 2010 query.ct.is_original_dir ? "Original" : "Reply", 2011 query.ct.enable, query.ct.live_connection, 2012 query.ct.selective_ack, query.ct.challenge_ack_passed, 2013 query.ct.last_direction ? "Original" : "Reply", 2014 query.ct.liberal_mode, query.ct.state, 2015 query.ct.max_ack_window, query.ct.retransmission_limit, 2016 query.ct.last_index, query.ct.last_seq, 2017 query.ct.last_ack, query.ct.last_window, 2018 query.ct.last_end); 2019 printf(" Original Dir:\n" 2020 " scale: %u, fin: %u, ack seen: %u\n" 2021 " unacked data: %u\n Sent end: %u," 2022 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2023 query.ct.original_dir.scale, 2024 query.ct.original_dir.close_initiated, 2025 query.ct.original_dir.last_ack_seen, 2026 query.ct.original_dir.data_unacked, 2027 query.ct.original_dir.sent_end, 2028 query.ct.original_dir.reply_end, 2029 query.ct.original_dir.max_win, 2030 query.ct.original_dir.max_ack); 2031 printf(" Reply Dir:\n" 2032 " scale: %u, fin: %u, ack seen: %u\n" 2033 " unacked data: %u\n Sent end: %u," 2034 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2035 query.ct.reply_dir.scale, 2036 query.ct.reply_dir.close_initiated, 2037 query.ct.reply_dir.last_ack_seen, 2038 query.ct.reply_dir.data_unacked, 2039 query.ct.reply_dir.sent_end, 2040 query.ct.reply_dir.reply_end, 2041 query.ct.reply_dir.max_win, 2042 query.ct.reply_dir.max_ack); 2043 break; 2044 default: 2045 fprintf(stderr, 2046 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2047 id, pia->type, port_id); 2048 break; 2049 } 2050 return 0; 2051 } 2052 2053 static struct port_flow_tunnel * 2054 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 2055 const struct rte_flow_item *pattern, 2056 const struct rte_flow_action *actions, 2057 const struct tunnel_ops *tunnel_ops) 2058 { 2059 int ret; 2060 struct rte_port *port; 2061 struct port_flow_tunnel *pft; 2062 struct rte_flow_error error; 2063 2064 port = &ports[port_id]; 2065 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2066 if (!pft) { 2067 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2068 tunnel_ops->id); 2069 return NULL; 2070 } 2071 if (tunnel_ops->actions) { 2072 uint32_t num_actions; 2073 const struct rte_flow_action *aptr; 2074 2075 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2076 &pft->pmd_actions, 2077 &pft->num_pmd_actions, 2078 &error); 2079 if (ret) { 2080 port_flow_complain(&error); 2081 return NULL; 2082 } 2083 for (aptr = actions, num_actions = 1; 2084 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2085 aptr++, num_actions++); 2086 pft->actions = malloc( 2087 (num_actions + pft->num_pmd_actions) * 2088 sizeof(actions[0])); 2089 if (!pft->actions) { 2090 rte_flow_tunnel_action_decap_release( 2091 port_id, pft->actions, 2092 pft->num_pmd_actions, &error); 2093 return NULL; 2094 } 2095 rte_memcpy(pft->actions, pft->pmd_actions, 2096 pft->num_pmd_actions * sizeof(actions[0])); 2097 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2098 num_actions * sizeof(actions[0])); 2099 } 2100 if (tunnel_ops->items) { 2101 uint32_t num_items; 2102 const struct rte_flow_item *iptr; 2103 2104 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2105 &pft->pmd_items, 2106 &pft->num_pmd_items, 2107 &error); 2108 if (ret) { 2109 port_flow_complain(&error); 2110 return NULL; 2111 } 2112 for (iptr = pattern, num_items = 1; 2113 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2114 iptr++, num_items++); 2115 pft->items = malloc((num_items + pft->num_pmd_items) * 2116 sizeof(pattern[0])); 2117 if (!pft->items) { 2118 rte_flow_tunnel_item_release( 2119 port_id, pft->pmd_items, 2120 pft->num_pmd_items, &error); 2121 return NULL; 2122 } 2123 rte_memcpy(pft->items, pft->pmd_items, 2124 pft->num_pmd_items * sizeof(pattern[0])); 2125 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2126 num_items * sizeof(pattern[0])); 2127 } 2128 2129 return pft; 2130 } 2131 2132 static void 2133 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2134 const struct tunnel_ops *tunnel_ops, 2135 struct port_flow_tunnel *pft) 2136 { 2137 struct rte_flow_error error; 2138 2139 if (tunnel_ops->actions) { 2140 free(pft->actions); 2141 rte_flow_tunnel_action_decap_release( 2142 port_id, pft->pmd_actions, 2143 pft->num_pmd_actions, &error); 2144 pft->actions = NULL; 2145 pft->pmd_actions = NULL; 2146 } 2147 if (tunnel_ops->items) { 2148 free(pft->items); 2149 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2150 pft->num_pmd_items, 2151 &error); 2152 pft->items = NULL; 2153 pft->pmd_items = NULL; 2154 } 2155 } 2156 2157 /** Add port meter policy */ 2158 int 2159 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2160 const struct rte_flow_action *actions) 2161 { 2162 struct rte_mtr_error error; 2163 const struct rte_flow_action *act = actions; 2164 const struct rte_flow_action *start; 2165 struct rte_mtr_meter_policy_params policy; 2166 uint32_t i = 0, act_n; 2167 int ret; 2168 2169 for (i = 0; i < RTE_COLORS; i++) { 2170 for (act_n = 0, start = act; 2171 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2172 act_n++; 2173 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2174 policy.actions[i] = start; 2175 else 2176 policy.actions[i] = NULL; 2177 act++; 2178 } 2179 ret = rte_mtr_meter_policy_add(port_id, 2180 policy_id, 2181 &policy, &error); 2182 if (ret) 2183 print_mtr_err_msg(&error); 2184 return ret; 2185 } 2186 2187 /** Validate flow rule. */ 2188 int 2189 port_flow_validate(portid_t port_id, 2190 const struct rte_flow_attr *attr, 2191 const struct rte_flow_item *pattern, 2192 const struct rte_flow_action *actions, 2193 const struct tunnel_ops *tunnel_ops) 2194 { 2195 struct rte_flow_error error; 2196 struct port_flow_tunnel *pft = NULL; 2197 int ret; 2198 2199 /* Poisoning to make sure PMDs update it in case of error. */ 2200 memset(&error, 0x11, sizeof(error)); 2201 if (tunnel_ops->enabled) { 2202 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2203 actions, tunnel_ops); 2204 if (!pft) 2205 return -ENOENT; 2206 if (pft->items) 2207 pattern = pft->items; 2208 if (pft->actions) 2209 actions = pft->actions; 2210 } 2211 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2212 if (tunnel_ops->enabled) 2213 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2214 if (ret) 2215 return port_flow_complain(&error); 2216 printf("Flow rule validated\n"); 2217 return 0; 2218 } 2219 2220 /** Return age action structure if exists, otherwise NULL. */ 2221 static struct rte_flow_action_age * 2222 age_action_get(const struct rte_flow_action *actions) 2223 { 2224 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2225 switch (actions->type) { 2226 case RTE_FLOW_ACTION_TYPE_AGE: 2227 return (struct rte_flow_action_age *) 2228 (uintptr_t)actions->conf; 2229 default: 2230 break; 2231 } 2232 } 2233 return NULL; 2234 } 2235 2236 /** Create pattern template */ 2237 int 2238 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2239 const struct rte_flow_pattern_template_attr *attr, 2240 const struct rte_flow_item *pattern) 2241 { 2242 struct rte_port *port; 2243 struct port_template *pit; 2244 int ret; 2245 struct rte_flow_error error; 2246 2247 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2248 port_id == (portid_t)RTE_PORT_ALL) 2249 return -EINVAL; 2250 port = &ports[port_id]; 2251 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2252 if (ret) 2253 return ret; 2254 /* Poisoning to make sure PMDs update it in case of error. */ 2255 memset(&error, 0x22, sizeof(error)); 2256 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2257 attr, pattern, &error); 2258 if (!pit->template.pattern_template) { 2259 uint32_t destroy_id = pit->id; 2260 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2261 return port_flow_complain(&error); 2262 } 2263 printf("Pattern template #%u created\n", pit->id); 2264 return 0; 2265 } 2266 2267 /** Destroy pattern template */ 2268 int 2269 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2270 const uint32_t *template) 2271 { 2272 struct rte_port *port; 2273 struct port_template **tmp; 2274 uint32_t c = 0; 2275 int ret = 0; 2276 2277 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2278 port_id == (portid_t)RTE_PORT_ALL) 2279 return -EINVAL; 2280 port = &ports[port_id]; 2281 tmp = &port->pattern_templ_list; 2282 while (*tmp) { 2283 uint32_t i; 2284 2285 for (i = 0; i != n; ++i) { 2286 struct rte_flow_error error; 2287 struct port_template *pit = *tmp; 2288 2289 if (template[i] != pit->id) 2290 continue; 2291 /* 2292 * Poisoning to make sure PMDs update it in case 2293 * of error. 2294 */ 2295 memset(&error, 0x33, sizeof(error)); 2296 2297 if (pit->template.pattern_template && 2298 rte_flow_pattern_template_destroy(port_id, 2299 pit->template.pattern_template, 2300 &error)) { 2301 ret = port_flow_complain(&error); 2302 continue; 2303 } 2304 *tmp = pit->next; 2305 printf("Pattern template #%u destroyed\n", pit->id); 2306 free(pit); 2307 break; 2308 } 2309 if (i == n) 2310 tmp = &(*tmp)->next; 2311 ++c; 2312 } 2313 return ret; 2314 } 2315 2316 /** Create actions template */ 2317 int 2318 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2319 const struct rte_flow_actions_template_attr *attr, 2320 const struct rte_flow_action *actions, 2321 const struct rte_flow_action *masks) 2322 { 2323 struct rte_port *port; 2324 struct port_template *pat; 2325 int ret; 2326 struct rte_flow_error error; 2327 2328 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2329 port_id == (portid_t)RTE_PORT_ALL) 2330 return -EINVAL; 2331 port = &ports[port_id]; 2332 ret = template_alloc(id, &pat, &port->actions_templ_list); 2333 if (ret) 2334 return ret; 2335 /* Poisoning to make sure PMDs update it in case of error. */ 2336 memset(&error, 0x22, sizeof(error)); 2337 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2338 attr, actions, masks, &error); 2339 if (!pat->template.actions_template) { 2340 uint32_t destroy_id = pat->id; 2341 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2342 return port_flow_complain(&error); 2343 } 2344 printf("Actions template #%u created\n", pat->id); 2345 return 0; 2346 } 2347 2348 /** Destroy actions template */ 2349 int 2350 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2351 const uint32_t *template) 2352 { 2353 struct rte_port *port; 2354 struct port_template **tmp; 2355 uint32_t c = 0; 2356 int ret = 0; 2357 2358 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2359 port_id == (portid_t)RTE_PORT_ALL) 2360 return -EINVAL; 2361 port = &ports[port_id]; 2362 tmp = &port->actions_templ_list; 2363 while (*tmp) { 2364 uint32_t i; 2365 2366 for (i = 0; i != n; ++i) { 2367 struct rte_flow_error error; 2368 struct port_template *pat = *tmp; 2369 2370 if (template[i] != pat->id) 2371 continue; 2372 /* 2373 * Poisoning to make sure PMDs update it in case 2374 * of error. 2375 */ 2376 memset(&error, 0x33, sizeof(error)); 2377 2378 if (pat->template.actions_template && 2379 rte_flow_actions_template_destroy(port_id, 2380 pat->template.actions_template, &error)) { 2381 ret = port_flow_complain(&error); 2382 continue; 2383 } 2384 *tmp = pat->next; 2385 printf("Actions template #%u destroyed\n", pat->id); 2386 free(pat); 2387 break; 2388 } 2389 if (i == n) 2390 tmp = &(*tmp)->next; 2391 ++c; 2392 } 2393 return ret; 2394 } 2395 2396 /** Create table */ 2397 int 2398 port_flow_template_table_create(portid_t port_id, uint32_t id, 2399 const struct rte_flow_template_table_attr *table_attr, 2400 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2401 uint32_t nb_actions_templates, uint32_t *actions_templates) 2402 { 2403 struct rte_port *port; 2404 struct port_table *pt; 2405 struct port_template *temp = NULL; 2406 int ret; 2407 uint32_t i; 2408 struct rte_flow_error error; 2409 struct rte_flow_pattern_template 2410 *flow_pattern_templates[nb_pattern_templates]; 2411 struct rte_flow_actions_template 2412 *flow_actions_templates[nb_actions_templates]; 2413 2414 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2415 port_id == (portid_t)RTE_PORT_ALL) 2416 return -EINVAL; 2417 port = &ports[port_id]; 2418 for (i = 0; i < nb_pattern_templates; ++i) { 2419 bool found = false; 2420 temp = port->pattern_templ_list; 2421 while (temp) { 2422 if (pattern_templates[i] == temp->id) { 2423 flow_pattern_templates[i] = 2424 temp->template.pattern_template; 2425 found = true; 2426 break; 2427 } 2428 temp = temp->next; 2429 } 2430 if (!found) { 2431 printf("Pattern template #%u is invalid\n", 2432 pattern_templates[i]); 2433 return -EINVAL; 2434 } 2435 } 2436 for (i = 0; i < nb_actions_templates; ++i) { 2437 bool found = false; 2438 temp = port->actions_templ_list; 2439 while (temp) { 2440 if (actions_templates[i] == temp->id) { 2441 flow_actions_templates[i] = 2442 temp->template.actions_template; 2443 found = true; 2444 break; 2445 } 2446 temp = temp->next; 2447 } 2448 if (!found) { 2449 printf("Actions template #%u is invalid\n", 2450 actions_templates[i]); 2451 return -EINVAL; 2452 } 2453 } 2454 ret = table_alloc(id, &pt, &port->table_list); 2455 if (ret) 2456 return ret; 2457 /* Poisoning to make sure PMDs update it in case of error. */ 2458 memset(&error, 0x22, sizeof(error)); 2459 pt->table = rte_flow_template_table_create(port_id, table_attr, 2460 flow_pattern_templates, nb_pattern_templates, 2461 flow_actions_templates, nb_actions_templates, 2462 &error); 2463 2464 if (!pt->table) { 2465 uint32_t destroy_id = pt->id; 2466 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2467 return port_flow_complain(&error); 2468 } 2469 pt->nb_pattern_templates = nb_pattern_templates; 2470 pt->nb_actions_templates = nb_actions_templates; 2471 printf("Template table #%u created\n", pt->id); 2472 return 0; 2473 } 2474 2475 /** Destroy table */ 2476 int 2477 port_flow_template_table_destroy(portid_t port_id, 2478 uint32_t n, const uint32_t *table) 2479 { 2480 struct rte_port *port; 2481 struct port_table **tmp; 2482 uint32_t c = 0; 2483 int ret = 0; 2484 2485 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2486 port_id == (portid_t)RTE_PORT_ALL) 2487 return -EINVAL; 2488 port = &ports[port_id]; 2489 tmp = &port->table_list; 2490 while (*tmp) { 2491 uint32_t i; 2492 2493 for (i = 0; i != n; ++i) { 2494 struct rte_flow_error error; 2495 struct port_table *pt = *tmp; 2496 2497 if (table[i] != pt->id) 2498 continue; 2499 /* 2500 * Poisoning to make sure PMDs update it in case 2501 * of error. 2502 */ 2503 memset(&error, 0x33, sizeof(error)); 2504 2505 if (pt->table && 2506 rte_flow_template_table_destroy(port_id, 2507 pt->table, 2508 &error)) { 2509 ret = port_flow_complain(&error); 2510 continue; 2511 } 2512 *tmp = pt->next; 2513 printf("Template table #%u destroyed\n", pt->id); 2514 free(pt); 2515 break; 2516 } 2517 if (i == n) 2518 tmp = &(*tmp)->next; 2519 ++c; 2520 } 2521 return ret; 2522 } 2523 2524 /** Enqueue create flow rule operation. */ 2525 int 2526 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2527 bool postpone, uint32_t table_id, 2528 uint32_t pattern_idx, uint32_t actions_idx, 2529 const struct rte_flow_item *pattern, 2530 const struct rte_flow_action *actions) 2531 { 2532 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2533 struct rte_flow *flow; 2534 struct rte_port *port; 2535 struct port_flow *pf; 2536 struct port_table *pt; 2537 uint32_t id = 0; 2538 bool found; 2539 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2540 struct rte_flow_action_age *age = age_action_get(actions); 2541 2542 port = &ports[port_id]; 2543 if (port->flow_list) { 2544 if (port->flow_list->id == UINT32_MAX) { 2545 printf("Highest rule ID is already assigned," 2546 " delete it first"); 2547 return -ENOMEM; 2548 } 2549 id = port->flow_list->id + 1; 2550 } 2551 2552 if (queue_id >= port->queue_nb) { 2553 printf("Queue #%u is invalid\n", queue_id); 2554 return -EINVAL; 2555 } 2556 2557 found = false; 2558 pt = port->table_list; 2559 while (pt) { 2560 if (table_id == pt->id) { 2561 found = true; 2562 break; 2563 } 2564 pt = pt->next; 2565 } 2566 if (!found) { 2567 printf("Table #%u is invalid\n", table_id); 2568 return -EINVAL; 2569 } 2570 2571 if (pattern_idx >= pt->nb_pattern_templates) { 2572 printf("Pattern template index #%u is invalid," 2573 " %u templates present in the table\n", 2574 pattern_idx, pt->nb_pattern_templates); 2575 return -EINVAL; 2576 } 2577 if (actions_idx >= pt->nb_actions_templates) { 2578 printf("Actions template index #%u is invalid," 2579 " %u templates present in the table\n", 2580 actions_idx, pt->nb_actions_templates); 2581 return -EINVAL; 2582 } 2583 2584 pf = port_flow_new(NULL, pattern, actions, &error); 2585 if (!pf) 2586 return port_flow_complain(&error); 2587 if (age) { 2588 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2589 age->context = &pf->age_type; 2590 } 2591 /* Poisoning to make sure PMDs update it in case of error. */ 2592 memset(&error, 0x11, sizeof(error)); 2593 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2594 pattern, pattern_idx, actions, actions_idx, NULL, &error); 2595 if (!flow) { 2596 uint32_t flow_id = pf->id; 2597 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2598 return port_flow_complain(&error); 2599 } 2600 2601 pf->next = port->flow_list; 2602 pf->id = id; 2603 pf->flow = flow; 2604 port->flow_list = pf; 2605 printf("Flow rule #%u creation enqueued\n", pf->id); 2606 return 0; 2607 } 2608 2609 /** Enqueue number of destroy flow rules operations. */ 2610 int 2611 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2612 bool postpone, uint32_t n, const uint32_t *rule) 2613 { 2614 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2615 struct rte_port *port; 2616 struct port_flow **tmp; 2617 uint32_t c = 0; 2618 int ret = 0; 2619 2620 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2621 port_id == (portid_t)RTE_PORT_ALL) 2622 return -EINVAL; 2623 port = &ports[port_id]; 2624 2625 if (queue_id >= port->queue_nb) { 2626 printf("Queue #%u is invalid\n", queue_id); 2627 return -EINVAL; 2628 } 2629 2630 tmp = &port->flow_list; 2631 while (*tmp) { 2632 uint32_t i; 2633 2634 for (i = 0; i != n; ++i) { 2635 struct rte_flow_error error; 2636 struct port_flow *pf = *tmp; 2637 2638 if (rule[i] != pf->id) 2639 continue; 2640 /* 2641 * Poisoning to make sure PMD 2642 * update it in case of error. 2643 */ 2644 memset(&error, 0x33, sizeof(error)); 2645 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2646 pf->flow, NULL, &error)) { 2647 ret = port_flow_complain(&error); 2648 continue; 2649 } 2650 printf("Flow rule #%u destruction enqueued\n", pf->id); 2651 *tmp = pf->next; 2652 free(pf); 2653 break; 2654 } 2655 if (i == n) 2656 tmp = &(*tmp)->next; 2657 ++c; 2658 } 2659 return ret; 2660 } 2661 2662 /** Enqueue indirect action create operation. */ 2663 int 2664 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2665 bool postpone, uint32_t id, 2666 const struct rte_flow_indir_action_conf *conf, 2667 const struct rte_flow_action *action) 2668 { 2669 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2670 struct rte_port *port; 2671 struct port_indirect_action *pia; 2672 int ret; 2673 struct rte_flow_error error; 2674 2675 ret = action_alloc(port_id, id, &pia); 2676 if (ret) 2677 return ret; 2678 2679 port = &ports[port_id]; 2680 if (queue_id >= port->queue_nb) { 2681 printf("Queue #%u is invalid\n", queue_id); 2682 return -EINVAL; 2683 } 2684 2685 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2686 struct rte_flow_action_age *age = 2687 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2688 2689 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2690 age->context = &pia->age_type; 2691 } 2692 /* Poisoning to make sure PMDs update it in case of error. */ 2693 memset(&error, 0x88, sizeof(error)); 2694 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2695 &attr, conf, action, NULL, &error); 2696 if (!pia->handle) { 2697 uint32_t destroy_id = pia->id; 2698 port_queue_action_handle_destroy(port_id, queue_id, 2699 postpone, 1, &destroy_id); 2700 return port_flow_complain(&error); 2701 } 2702 pia->type = action->type; 2703 printf("Indirect action #%u creation queued\n", pia->id); 2704 return 0; 2705 } 2706 2707 /** Enqueue indirect action destroy operation. */ 2708 int 2709 port_queue_action_handle_destroy(portid_t port_id, 2710 uint32_t queue_id, bool postpone, 2711 uint32_t n, const uint32_t *actions) 2712 { 2713 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2714 struct rte_port *port; 2715 struct port_indirect_action **tmp; 2716 uint32_t c = 0; 2717 int ret = 0; 2718 2719 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2720 port_id == (portid_t)RTE_PORT_ALL) 2721 return -EINVAL; 2722 port = &ports[port_id]; 2723 2724 if (queue_id >= port->queue_nb) { 2725 printf("Queue #%u is invalid\n", queue_id); 2726 return -EINVAL; 2727 } 2728 2729 tmp = &port->actions_list; 2730 while (*tmp) { 2731 uint32_t i; 2732 2733 for (i = 0; i != n; ++i) { 2734 struct rte_flow_error error; 2735 struct port_indirect_action *pia = *tmp; 2736 2737 if (actions[i] != pia->id) 2738 continue; 2739 /* 2740 * Poisoning to make sure PMDs update it in case 2741 * of error. 2742 */ 2743 memset(&error, 0x99, sizeof(error)); 2744 2745 if (pia->handle && 2746 rte_flow_async_action_handle_destroy(port_id, 2747 queue_id, &attr, pia->handle, NULL, &error)) { 2748 ret = port_flow_complain(&error); 2749 continue; 2750 } 2751 *tmp = pia->next; 2752 printf("Indirect action #%u destruction queued\n", 2753 pia->id); 2754 free(pia); 2755 break; 2756 } 2757 if (i == n) 2758 tmp = &(*tmp)->next; 2759 ++c; 2760 } 2761 return ret; 2762 } 2763 2764 /** Enqueue indirect action update operation. */ 2765 int 2766 port_queue_action_handle_update(portid_t port_id, 2767 uint32_t queue_id, bool postpone, uint32_t id, 2768 const struct rte_flow_action *action) 2769 { 2770 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2771 struct rte_port *port; 2772 struct rte_flow_error error; 2773 struct rte_flow_action_handle *action_handle; 2774 2775 action_handle = port_action_handle_get_by_id(port_id, id); 2776 if (!action_handle) 2777 return -EINVAL; 2778 2779 port = &ports[port_id]; 2780 if (queue_id >= port->queue_nb) { 2781 printf("Queue #%u is invalid\n", queue_id); 2782 return -EINVAL; 2783 } 2784 2785 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 2786 action_handle, action, NULL, &error)) { 2787 return port_flow_complain(&error); 2788 } 2789 printf("Indirect action #%u update queued\n", id); 2790 return 0; 2791 } 2792 2793 /** Push all the queue operations in the queue to the NIC. */ 2794 int 2795 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 2796 { 2797 struct rte_port *port; 2798 struct rte_flow_error error; 2799 int ret = 0; 2800 2801 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2802 port_id == (portid_t)RTE_PORT_ALL) 2803 return -EINVAL; 2804 port = &ports[port_id]; 2805 2806 if (queue_id >= port->queue_nb) { 2807 printf("Queue #%u is invalid\n", queue_id); 2808 return -EINVAL; 2809 } 2810 2811 memset(&error, 0x55, sizeof(error)); 2812 ret = rte_flow_push(port_id, queue_id, &error); 2813 if (ret < 0) { 2814 printf("Failed to push operations in the queue\n"); 2815 return -EINVAL; 2816 } 2817 printf("Queue #%u operations pushed\n", queue_id); 2818 return ret; 2819 } 2820 2821 /** Pull queue operation results from the queue. */ 2822 int 2823 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 2824 { 2825 struct rte_port *port; 2826 struct rte_flow_op_result *res; 2827 struct rte_flow_error error; 2828 int ret = 0; 2829 int success = 0; 2830 int i; 2831 2832 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2833 port_id == (portid_t)RTE_PORT_ALL) 2834 return -EINVAL; 2835 port = &ports[port_id]; 2836 2837 if (queue_id >= port->queue_nb) { 2838 printf("Queue #%u is invalid\n", queue_id); 2839 return -EINVAL; 2840 } 2841 2842 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 2843 if (!res) { 2844 printf("Failed to allocate memory for pulled results\n"); 2845 return -ENOMEM; 2846 } 2847 2848 memset(&error, 0x66, sizeof(error)); 2849 ret = rte_flow_pull(port_id, queue_id, res, 2850 port->queue_sz, &error); 2851 if (ret < 0) { 2852 printf("Failed to pull a operation results\n"); 2853 free(res); 2854 return -EINVAL; 2855 } 2856 2857 for (i = 0; i < ret; i++) { 2858 if (res[i].status == RTE_FLOW_OP_SUCCESS) 2859 success++; 2860 } 2861 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 2862 queue_id, ret, ret - success, success); 2863 free(res); 2864 return ret; 2865 } 2866 2867 /** Create flow rule. */ 2868 int 2869 port_flow_create(portid_t port_id, 2870 const struct rte_flow_attr *attr, 2871 const struct rte_flow_item *pattern, 2872 const struct rte_flow_action *actions, 2873 const struct tunnel_ops *tunnel_ops) 2874 { 2875 struct rte_flow *flow; 2876 struct rte_port *port; 2877 struct port_flow *pf; 2878 uint32_t id = 0; 2879 struct rte_flow_error error; 2880 struct port_flow_tunnel *pft = NULL; 2881 struct rte_flow_action_age *age = age_action_get(actions); 2882 2883 port = &ports[port_id]; 2884 if (port->flow_list) { 2885 if (port->flow_list->id == UINT32_MAX) { 2886 fprintf(stderr, 2887 "Highest rule ID is already assigned, delete it first"); 2888 return -ENOMEM; 2889 } 2890 id = port->flow_list->id + 1; 2891 } 2892 if (tunnel_ops->enabled) { 2893 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2894 actions, tunnel_ops); 2895 if (!pft) 2896 return -ENOENT; 2897 if (pft->items) 2898 pattern = pft->items; 2899 if (pft->actions) 2900 actions = pft->actions; 2901 } 2902 pf = port_flow_new(attr, pattern, actions, &error); 2903 if (!pf) 2904 return port_flow_complain(&error); 2905 if (age) { 2906 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2907 age->context = &pf->age_type; 2908 } 2909 /* Poisoning to make sure PMDs update it in case of error. */ 2910 memset(&error, 0x22, sizeof(error)); 2911 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2912 if (!flow) { 2913 if (tunnel_ops->enabled) 2914 port_flow_tunnel_offload_cmd_release(port_id, 2915 tunnel_ops, pft); 2916 free(pf); 2917 return port_flow_complain(&error); 2918 } 2919 pf->next = port->flow_list; 2920 pf->id = id; 2921 pf->flow = flow; 2922 port->flow_list = pf; 2923 if (tunnel_ops->enabled) 2924 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2925 printf("Flow rule #%u created\n", pf->id); 2926 return 0; 2927 } 2928 2929 /** Destroy a number of flow rules. */ 2930 int 2931 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2932 { 2933 struct rte_port *port; 2934 struct port_flow **tmp; 2935 uint32_t c = 0; 2936 int ret = 0; 2937 2938 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2939 port_id == (portid_t)RTE_PORT_ALL) 2940 return -EINVAL; 2941 port = &ports[port_id]; 2942 tmp = &port->flow_list; 2943 while (*tmp) { 2944 uint32_t i; 2945 2946 for (i = 0; i != n; ++i) { 2947 struct rte_flow_error error; 2948 struct port_flow *pf = *tmp; 2949 2950 if (rule[i] != pf->id) 2951 continue; 2952 /* 2953 * Poisoning to make sure PMDs update it in case 2954 * of error. 2955 */ 2956 memset(&error, 0x33, sizeof(error)); 2957 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2958 ret = port_flow_complain(&error); 2959 continue; 2960 } 2961 printf("Flow rule #%u destroyed\n", pf->id); 2962 *tmp = pf->next; 2963 free(pf); 2964 break; 2965 } 2966 if (i == n) 2967 tmp = &(*tmp)->next; 2968 ++c; 2969 } 2970 return ret; 2971 } 2972 2973 /** Remove all flow rules. */ 2974 int 2975 port_flow_flush(portid_t port_id) 2976 { 2977 struct rte_flow_error error; 2978 struct rte_port *port; 2979 int ret = 0; 2980 2981 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2982 port_id == (portid_t)RTE_PORT_ALL) 2983 return -EINVAL; 2984 2985 port = &ports[port_id]; 2986 2987 if (port->flow_list == NULL) 2988 return ret; 2989 2990 /* Poisoning to make sure PMDs update it in case of error. */ 2991 memset(&error, 0x44, sizeof(error)); 2992 if (rte_flow_flush(port_id, &error)) { 2993 port_flow_complain(&error); 2994 } 2995 2996 while (port->flow_list) { 2997 struct port_flow *pf = port->flow_list->next; 2998 2999 free(port->flow_list); 3000 port->flow_list = pf; 3001 } 3002 return ret; 3003 } 3004 3005 /** Dump flow rules. */ 3006 int 3007 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 3008 const char *file_name) 3009 { 3010 int ret = 0; 3011 FILE *file = stdout; 3012 struct rte_flow_error error; 3013 struct rte_port *port; 3014 struct port_flow *pflow; 3015 struct rte_flow *tmpFlow = NULL; 3016 bool found = false; 3017 3018 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3019 port_id == (portid_t)RTE_PORT_ALL) 3020 return -EINVAL; 3021 3022 if (!dump_all) { 3023 port = &ports[port_id]; 3024 pflow = port->flow_list; 3025 while (pflow) { 3026 if (rule_id != pflow->id) { 3027 pflow = pflow->next; 3028 } else { 3029 tmpFlow = pflow->flow; 3030 if (tmpFlow) 3031 found = true; 3032 break; 3033 } 3034 } 3035 if (found == false) { 3036 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 3037 return -EINVAL; 3038 } 3039 } 3040 3041 if (file_name && strlen(file_name)) { 3042 file = fopen(file_name, "w"); 3043 if (!file) { 3044 fprintf(stderr, "Failed to create file %s: %s\n", 3045 file_name, strerror(errno)); 3046 return -errno; 3047 } 3048 } 3049 3050 if (!dump_all) 3051 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3052 else 3053 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3054 if (ret) { 3055 port_flow_complain(&error); 3056 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3057 } else 3058 printf("Flow dump finished\n"); 3059 if (file_name && strlen(file_name)) 3060 fclose(file); 3061 return ret; 3062 } 3063 3064 /** Query a flow rule. */ 3065 int 3066 port_flow_query(portid_t port_id, uint32_t rule, 3067 const struct rte_flow_action *action) 3068 { 3069 struct rte_flow_error error; 3070 struct rte_port *port; 3071 struct port_flow *pf; 3072 const char *name; 3073 union { 3074 struct rte_flow_query_count count; 3075 struct rte_flow_action_rss rss_conf; 3076 struct rte_flow_query_age age; 3077 } query; 3078 int ret; 3079 3080 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3081 port_id == (portid_t)RTE_PORT_ALL) 3082 return -EINVAL; 3083 port = &ports[port_id]; 3084 for (pf = port->flow_list; pf; pf = pf->next) 3085 if (pf->id == rule) 3086 break; 3087 if (!pf) { 3088 fprintf(stderr, "Flow rule #%u not found\n", rule); 3089 return -ENOENT; 3090 } 3091 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3092 &name, sizeof(name), 3093 (void *)(uintptr_t)action->type, &error); 3094 if (ret < 0) 3095 return port_flow_complain(&error); 3096 switch (action->type) { 3097 case RTE_FLOW_ACTION_TYPE_COUNT: 3098 case RTE_FLOW_ACTION_TYPE_RSS: 3099 case RTE_FLOW_ACTION_TYPE_AGE: 3100 break; 3101 default: 3102 fprintf(stderr, "Cannot query action type %d (%s)\n", 3103 action->type, name); 3104 return -ENOTSUP; 3105 } 3106 /* Poisoning to make sure PMDs update it in case of error. */ 3107 memset(&error, 0x55, sizeof(error)); 3108 memset(&query, 0, sizeof(query)); 3109 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3110 return port_flow_complain(&error); 3111 switch (action->type) { 3112 case RTE_FLOW_ACTION_TYPE_COUNT: 3113 printf("%s:\n" 3114 " hits_set: %u\n" 3115 " bytes_set: %u\n" 3116 " hits: %" PRIu64 "\n" 3117 " bytes: %" PRIu64 "\n", 3118 name, 3119 query.count.hits_set, 3120 query.count.bytes_set, 3121 query.count.hits, 3122 query.count.bytes); 3123 break; 3124 case RTE_FLOW_ACTION_TYPE_RSS: 3125 rss_config_display(&query.rss_conf); 3126 break; 3127 case RTE_FLOW_ACTION_TYPE_AGE: 3128 printf("%s:\n" 3129 " aged: %u\n" 3130 " sec_since_last_hit_valid: %u\n" 3131 " sec_since_last_hit: %" PRIu32 "\n", 3132 name, 3133 query.age.aged, 3134 query.age.sec_since_last_hit_valid, 3135 query.age.sec_since_last_hit); 3136 break; 3137 default: 3138 fprintf(stderr, 3139 "Cannot display result for action type %d (%s)\n", 3140 action->type, name); 3141 break; 3142 } 3143 return 0; 3144 } 3145 3146 /** List simply and destroy all aged flows. */ 3147 void 3148 port_flow_aged(portid_t port_id, uint8_t destroy) 3149 { 3150 void **contexts; 3151 int nb_context, total = 0, idx; 3152 struct rte_flow_error error; 3153 enum age_action_context_type *type; 3154 union { 3155 struct port_flow *pf; 3156 struct port_indirect_action *pia; 3157 } ctx; 3158 3159 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3160 port_id == (portid_t)RTE_PORT_ALL) 3161 return; 3162 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3163 printf("Port %u total aged flows: %d\n", port_id, total); 3164 if (total < 0) { 3165 port_flow_complain(&error); 3166 return; 3167 } 3168 if (total == 0) 3169 return; 3170 contexts = malloc(sizeof(void *) * total); 3171 if (contexts == NULL) { 3172 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3173 return; 3174 } 3175 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3176 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3177 if (nb_context != total) { 3178 fprintf(stderr, 3179 "Port:%d get aged flows count(%d) != total(%d)\n", 3180 port_id, nb_context, total); 3181 free(contexts); 3182 return; 3183 } 3184 total = 0; 3185 for (idx = 0; idx < nb_context; idx++) { 3186 if (!contexts[idx]) { 3187 fprintf(stderr, "Error: get Null context in port %u\n", 3188 port_id); 3189 continue; 3190 } 3191 type = (enum age_action_context_type *)contexts[idx]; 3192 switch (*type) { 3193 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3194 ctx.pf = container_of(type, struct port_flow, age_type); 3195 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 3196 "\t%c%c%c\t\n", 3197 "Flow", 3198 ctx.pf->id, 3199 ctx.pf->rule.attr->group, 3200 ctx.pf->rule.attr->priority, 3201 ctx.pf->rule.attr->ingress ? 'i' : '-', 3202 ctx.pf->rule.attr->egress ? 'e' : '-', 3203 ctx.pf->rule.attr->transfer ? 't' : '-'); 3204 if (destroy && !port_flow_destroy(port_id, 1, 3205 &ctx.pf->id)) 3206 total++; 3207 break; 3208 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3209 ctx.pia = container_of(type, 3210 struct port_indirect_action, age_type); 3211 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3212 ctx.pia->id); 3213 break; 3214 default: 3215 fprintf(stderr, "Error: invalid context type %u\n", 3216 port_id); 3217 break; 3218 } 3219 } 3220 printf("\n%d flows destroyed\n", total); 3221 free(contexts); 3222 } 3223 3224 /** List flow rules. */ 3225 void 3226 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3227 { 3228 struct rte_port *port; 3229 struct port_flow *pf; 3230 struct port_flow *list = NULL; 3231 uint32_t i; 3232 3233 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3234 port_id == (portid_t)RTE_PORT_ALL) 3235 return; 3236 port = &ports[port_id]; 3237 if (!port->flow_list) 3238 return; 3239 /* Sort flows by group, priority and ID. */ 3240 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3241 struct port_flow **tmp; 3242 const struct rte_flow_attr *curr = pf->rule.attr; 3243 3244 if (n) { 3245 /* Filter out unwanted groups. */ 3246 for (i = 0; i != n; ++i) 3247 if (curr->group == group[i]) 3248 break; 3249 if (i == n) 3250 continue; 3251 } 3252 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3253 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3254 3255 if (curr->group > comp->group || 3256 (curr->group == comp->group && 3257 curr->priority > comp->priority) || 3258 (curr->group == comp->group && 3259 curr->priority == comp->priority && 3260 pf->id > (*tmp)->id)) 3261 continue; 3262 break; 3263 } 3264 pf->tmp = *tmp; 3265 *tmp = pf; 3266 } 3267 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3268 for (pf = list; pf != NULL; pf = pf->tmp) { 3269 const struct rte_flow_item *item = pf->rule.pattern; 3270 const struct rte_flow_action *action = pf->rule.actions; 3271 const char *name; 3272 3273 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3274 pf->id, 3275 pf->rule.attr->group, 3276 pf->rule.attr->priority, 3277 pf->rule.attr->ingress ? 'i' : '-', 3278 pf->rule.attr->egress ? 'e' : '-', 3279 pf->rule.attr->transfer ? 't' : '-'); 3280 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3281 if ((uint32_t)item->type > INT_MAX) 3282 name = "PMD_INTERNAL"; 3283 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3284 &name, sizeof(name), 3285 (void *)(uintptr_t)item->type, 3286 NULL) <= 0) 3287 name = "[UNKNOWN]"; 3288 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3289 printf("%s ", name); 3290 ++item; 3291 } 3292 printf("=>"); 3293 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3294 if ((uint32_t)action->type > INT_MAX) 3295 name = "PMD_INTERNAL"; 3296 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3297 &name, sizeof(name), 3298 (void *)(uintptr_t)action->type, 3299 NULL) <= 0) 3300 name = "[UNKNOWN]"; 3301 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3302 printf(" %s", name); 3303 ++action; 3304 } 3305 printf("\n"); 3306 } 3307 } 3308 3309 /** Restrict ingress traffic to the defined flow rules. */ 3310 int 3311 port_flow_isolate(portid_t port_id, int set) 3312 { 3313 struct rte_flow_error error; 3314 3315 /* Poisoning to make sure PMDs update it in case of error. */ 3316 memset(&error, 0x66, sizeof(error)); 3317 if (rte_flow_isolate(port_id, set, &error)) 3318 return port_flow_complain(&error); 3319 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3320 port_id, 3321 set ? "now restricted" : "not restricted anymore"); 3322 return 0; 3323 } 3324 3325 /* 3326 * RX/TX ring descriptors display functions. 3327 */ 3328 int 3329 rx_queue_id_is_invalid(queueid_t rxq_id) 3330 { 3331 if (rxq_id < nb_rxq) 3332 return 0; 3333 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3334 rxq_id, nb_rxq); 3335 return 1; 3336 } 3337 3338 int 3339 tx_queue_id_is_invalid(queueid_t txq_id) 3340 { 3341 if (txq_id < nb_txq) 3342 return 0; 3343 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 3344 txq_id, nb_txq); 3345 return 1; 3346 } 3347 3348 static int 3349 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 3350 { 3351 struct rte_port *port = &ports[port_id]; 3352 struct rte_eth_rxq_info rx_qinfo; 3353 int ret; 3354 3355 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 3356 if (ret == 0) { 3357 *ring_size = rx_qinfo.nb_desc; 3358 return ret; 3359 } 3360 3361 if (ret != -ENOTSUP) 3362 return ret; 3363 /* 3364 * If the rte_eth_rx_queue_info_get is not support for this PMD, 3365 * ring_size stored in testpmd will be used for validity verification. 3366 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 3367 * being 0, it will use a default value provided by PMDs to setup this 3368 * rxq. If the default value is 0, it will use the 3369 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 3370 */ 3371 if (port->nb_rx_desc[rxq_id]) 3372 *ring_size = port->nb_rx_desc[rxq_id]; 3373 else if (port->dev_info.default_rxportconf.ring_size) 3374 *ring_size = port->dev_info.default_rxportconf.ring_size; 3375 else 3376 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 3377 return 0; 3378 } 3379 3380 static int 3381 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 3382 { 3383 struct rte_port *port = &ports[port_id]; 3384 struct rte_eth_txq_info tx_qinfo; 3385 int ret; 3386 3387 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 3388 if (ret == 0) { 3389 *ring_size = tx_qinfo.nb_desc; 3390 return ret; 3391 } 3392 3393 if (ret != -ENOTSUP) 3394 return ret; 3395 /* 3396 * If the rte_eth_tx_queue_info_get is not support for this PMD, 3397 * ring_size stored in testpmd will be used for validity verification. 3398 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 3399 * being 0, it will use a default value provided by PMDs to setup this 3400 * txq. If the default value is 0, it will use the 3401 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 3402 */ 3403 if (port->nb_tx_desc[txq_id]) 3404 *ring_size = port->nb_tx_desc[txq_id]; 3405 else if (port->dev_info.default_txportconf.ring_size) 3406 *ring_size = port->dev_info.default_txportconf.ring_size; 3407 else 3408 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 3409 return 0; 3410 } 3411 3412 static int 3413 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 3414 { 3415 uint16_t ring_size; 3416 int ret; 3417 3418 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 3419 if (ret) 3420 return 1; 3421 3422 if (rxdesc_id < ring_size) 3423 return 0; 3424 3425 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 3426 rxdesc_id, ring_size); 3427 return 1; 3428 } 3429 3430 static int 3431 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 3432 { 3433 uint16_t ring_size; 3434 int ret; 3435 3436 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 3437 if (ret) 3438 return 1; 3439 3440 if (txdesc_id < ring_size) 3441 return 0; 3442 3443 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 3444 txdesc_id, ring_size); 3445 return 1; 3446 } 3447 3448 static const struct rte_memzone * 3449 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 3450 { 3451 char mz_name[RTE_MEMZONE_NAMESIZE]; 3452 const struct rte_memzone *mz; 3453 3454 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 3455 port_id, q_id, ring_name); 3456 mz = rte_memzone_lookup(mz_name); 3457 if (mz == NULL) 3458 fprintf(stderr, 3459 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 3460 ring_name, port_id, q_id, mz_name); 3461 return mz; 3462 } 3463 3464 union igb_ring_dword { 3465 uint64_t dword; 3466 struct { 3467 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3468 uint32_t lo; 3469 uint32_t hi; 3470 #else 3471 uint32_t hi; 3472 uint32_t lo; 3473 #endif 3474 } words; 3475 }; 3476 3477 struct igb_ring_desc_32_bytes { 3478 union igb_ring_dword lo_dword; 3479 union igb_ring_dword hi_dword; 3480 union igb_ring_dword resv1; 3481 union igb_ring_dword resv2; 3482 }; 3483 3484 struct igb_ring_desc_16_bytes { 3485 union igb_ring_dword lo_dword; 3486 union igb_ring_dword hi_dword; 3487 }; 3488 3489 static void 3490 ring_rxd_display_dword(union igb_ring_dword dword) 3491 { 3492 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 3493 (unsigned)dword.words.hi); 3494 } 3495 3496 static void 3497 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 3498 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3499 portid_t port_id, 3500 #else 3501 __rte_unused portid_t port_id, 3502 #endif 3503 uint16_t desc_id) 3504 { 3505 struct igb_ring_desc_16_bytes *ring = 3506 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3507 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3508 int ret; 3509 struct rte_eth_dev_info dev_info; 3510 3511 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3512 if (ret != 0) 3513 return; 3514 3515 if (strstr(dev_info.driver_name, "i40e") != NULL) { 3516 /* 32 bytes RX descriptor, i40e only */ 3517 struct igb_ring_desc_32_bytes *ring = 3518 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 3519 ring[desc_id].lo_dword.dword = 3520 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3521 ring_rxd_display_dword(ring[desc_id].lo_dword); 3522 ring[desc_id].hi_dword.dword = 3523 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3524 ring_rxd_display_dword(ring[desc_id].hi_dword); 3525 ring[desc_id].resv1.dword = 3526 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 3527 ring_rxd_display_dword(ring[desc_id].resv1); 3528 ring[desc_id].resv2.dword = 3529 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 3530 ring_rxd_display_dword(ring[desc_id].resv2); 3531 3532 return; 3533 } 3534 #endif 3535 /* 16 bytes RX descriptor */ 3536 ring[desc_id].lo_dword.dword = 3537 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3538 ring_rxd_display_dword(ring[desc_id].lo_dword); 3539 ring[desc_id].hi_dword.dword = 3540 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3541 ring_rxd_display_dword(ring[desc_id].hi_dword); 3542 } 3543 3544 static void 3545 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 3546 { 3547 struct igb_ring_desc_16_bytes *ring; 3548 struct igb_ring_desc_16_bytes txd; 3549 3550 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3551 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3552 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3553 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 3554 (unsigned)txd.lo_dword.words.lo, 3555 (unsigned)txd.lo_dword.words.hi, 3556 (unsigned)txd.hi_dword.words.lo, 3557 (unsigned)txd.hi_dword.words.hi); 3558 } 3559 3560 void 3561 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 3562 { 3563 const struct rte_memzone *rx_mz; 3564 3565 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 3566 return; 3567 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 3568 if (rx_mz == NULL) 3569 return; 3570 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 3571 } 3572 3573 void 3574 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 3575 { 3576 const struct rte_memzone *tx_mz; 3577 3578 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 3579 return; 3580 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 3581 if (tx_mz == NULL) 3582 return; 3583 ring_tx_descriptor_display(tx_mz, txd_id); 3584 } 3585 3586 void 3587 fwd_lcores_config_display(void) 3588 { 3589 lcoreid_t lc_id; 3590 3591 printf("List of forwarding lcores:"); 3592 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 3593 printf(" %2u", fwd_lcores_cpuids[lc_id]); 3594 printf("\n"); 3595 } 3596 void 3597 rxtx_config_display(void) 3598 { 3599 portid_t pid; 3600 queueid_t qid; 3601 3602 printf(" %s packet forwarding%s packets/burst=%d\n", 3603 cur_fwd_eng->fwd_mode_name, 3604 retry_enabled == 0 ? "" : " with retry", 3605 nb_pkt_per_burst); 3606 3607 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 3608 printf(" packet len=%u - nb packet segments=%d\n", 3609 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 3610 3611 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 3612 nb_fwd_lcores, nb_fwd_ports); 3613 3614 RTE_ETH_FOREACH_DEV(pid) { 3615 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 3616 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 3617 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 3618 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 3619 struct rte_eth_rxq_info rx_qinfo; 3620 struct rte_eth_txq_info tx_qinfo; 3621 uint16_t rx_free_thresh_tmp; 3622 uint16_t tx_free_thresh_tmp; 3623 uint16_t tx_rs_thresh_tmp; 3624 uint16_t nb_rx_desc_tmp; 3625 uint16_t nb_tx_desc_tmp; 3626 uint64_t offloads_tmp; 3627 uint8_t pthresh_tmp; 3628 uint8_t hthresh_tmp; 3629 uint8_t wthresh_tmp; 3630 int32_t rc; 3631 3632 /* per port config */ 3633 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 3634 (unsigned int)pid, nb_rxq, nb_txq); 3635 3636 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 3637 ports[pid].dev_conf.rxmode.offloads, 3638 ports[pid].dev_conf.txmode.offloads); 3639 3640 /* per rx queue config only for first queue to be less verbose */ 3641 for (qid = 0; qid < 1; qid++) { 3642 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 3643 if (rc) { 3644 nb_rx_desc_tmp = nb_rx_desc[qid]; 3645 rx_free_thresh_tmp = 3646 rx_conf[qid].rx_free_thresh; 3647 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 3648 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 3649 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 3650 offloads_tmp = rx_conf[qid].offloads; 3651 } else { 3652 nb_rx_desc_tmp = rx_qinfo.nb_desc; 3653 rx_free_thresh_tmp = 3654 rx_qinfo.conf.rx_free_thresh; 3655 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 3656 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 3657 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 3658 offloads_tmp = rx_qinfo.conf.offloads; 3659 } 3660 3661 printf(" RX queue: %d\n", qid); 3662 printf(" RX desc=%d - RX free threshold=%d\n", 3663 nb_rx_desc_tmp, rx_free_thresh_tmp); 3664 printf(" RX threshold registers: pthresh=%d hthresh=%d " 3665 " wthresh=%d\n", 3666 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3667 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 3668 if (rx_conf->share_group > 0) 3669 printf(" share_group=%u share_qid=%u", 3670 rx_conf->share_group, 3671 rx_conf->share_qid); 3672 printf("\n"); 3673 } 3674 3675 /* per tx queue config only for first queue to be less verbose */ 3676 for (qid = 0; qid < 1; qid++) { 3677 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 3678 if (rc) { 3679 nb_tx_desc_tmp = nb_tx_desc[qid]; 3680 tx_free_thresh_tmp = 3681 tx_conf[qid].tx_free_thresh; 3682 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 3683 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 3684 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 3685 offloads_tmp = tx_conf[qid].offloads; 3686 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 3687 } else { 3688 nb_tx_desc_tmp = tx_qinfo.nb_desc; 3689 tx_free_thresh_tmp = 3690 tx_qinfo.conf.tx_free_thresh; 3691 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 3692 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 3693 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 3694 offloads_tmp = tx_qinfo.conf.offloads; 3695 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 3696 } 3697 3698 printf(" TX queue: %d\n", qid); 3699 printf(" TX desc=%d - TX free threshold=%d\n", 3700 nb_tx_desc_tmp, tx_free_thresh_tmp); 3701 printf(" TX threshold registers: pthresh=%d hthresh=%d " 3702 " wthresh=%d\n", 3703 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3704 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 3705 offloads_tmp, tx_rs_thresh_tmp); 3706 } 3707 } 3708 } 3709 3710 void 3711 port_rss_reta_info(portid_t port_id, 3712 struct rte_eth_rss_reta_entry64 *reta_conf, 3713 uint16_t nb_entries) 3714 { 3715 uint16_t i, idx, shift; 3716 int ret; 3717 3718 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3719 return; 3720 3721 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 3722 if (ret != 0) { 3723 fprintf(stderr, 3724 "Failed to get RSS RETA info, return code = %d\n", 3725 ret); 3726 return; 3727 } 3728 3729 for (i = 0; i < nb_entries; i++) { 3730 idx = i / RTE_ETH_RETA_GROUP_SIZE; 3731 shift = i % RTE_ETH_RETA_GROUP_SIZE; 3732 if (!(reta_conf[idx].mask & (1ULL << shift))) 3733 continue; 3734 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 3735 i, reta_conf[idx].reta[shift]); 3736 } 3737 } 3738 3739 /* 3740 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 3741 * key of the port. 3742 */ 3743 void 3744 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 3745 { 3746 struct rte_eth_rss_conf rss_conf = {0}; 3747 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 3748 uint64_t rss_hf; 3749 uint8_t i; 3750 int diag; 3751 struct rte_eth_dev_info dev_info; 3752 uint8_t hash_key_size; 3753 int ret; 3754 3755 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3756 return; 3757 3758 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3759 if (ret != 0) 3760 return; 3761 3762 if (dev_info.hash_key_size > 0 && 3763 dev_info.hash_key_size <= sizeof(rss_key)) 3764 hash_key_size = dev_info.hash_key_size; 3765 else { 3766 fprintf(stderr, 3767 "dev_info did not provide a valid hash key size\n"); 3768 return; 3769 } 3770 3771 /* Get RSS hash key if asked to display it */ 3772 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 3773 rss_conf.rss_key_len = hash_key_size; 3774 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3775 if (diag != 0) { 3776 switch (diag) { 3777 case -ENODEV: 3778 fprintf(stderr, "port index %d invalid\n", port_id); 3779 break; 3780 case -ENOTSUP: 3781 fprintf(stderr, "operation not supported by device\n"); 3782 break; 3783 default: 3784 fprintf(stderr, "operation failed - diag=%d\n", diag); 3785 break; 3786 } 3787 return; 3788 } 3789 rss_hf = rss_conf.rss_hf; 3790 if (rss_hf == 0) { 3791 printf("RSS disabled\n"); 3792 return; 3793 } 3794 printf("RSS functions:\n "); 3795 for (i = 0; rss_type_table[i].str; i++) { 3796 if (rss_type_table[i].rss_type == 0) 3797 continue; 3798 if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type) 3799 printf("%s ", rss_type_table[i].str); 3800 } 3801 printf("\n"); 3802 if (!show_rss_key) 3803 return; 3804 printf("RSS key:\n"); 3805 for (i = 0; i < hash_key_size; i++) 3806 printf("%02X", rss_key[i]); 3807 printf("\n"); 3808 } 3809 3810 void 3811 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3812 uint8_t hash_key_len) 3813 { 3814 struct rte_eth_rss_conf rss_conf; 3815 int diag; 3816 unsigned int i; 3817 3818 rss_conf.rss_key = NULL; 3819 rss_conf.rss_key_len = 0; 3820 rss_conf.rss_hf = 0; 3821 for (i = 0; rss_type_table[i].str; i++) { 3822 if (!strcmp(rss_type_table[i].str, rss_type)) 3823 rss_conf.rss_hf = rss_type_table[i].rss_type; 3824 } 3825 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3826 if (diag == 0) { 3827 rss_conf.rss_key = hash_key; 3828 rss_conf.rss_key_len = hash_key_len; 3829 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3830 } 3831 if (diag == 0) 3832 return; 3833 3834 switch (diag) { 3835 case -ENODEV: 3836 fprintf(stderr, "port index %d invalid\n", port_id); 3837 break; 3838 case -ENOTSUP: 3839 fprintf(stderr, "operation not supported by device\n"); 3840 break; 3841 default: 3842 fprintf(stderr, "operation failed - diag=%d\n", diag); 3843 break; 3844 } 3845 } 3846 3847 /* 3848 * Check whether a shared rxq scheduled on other lcores. 3849 */ 3850 static bool 3851 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 3852 portid_t src_port, queueid_t src_rxq, 3853 uint32_t share_group, queueid_t share_rxq) 3854 { 3855 streamid_t sm_id; 3856 streamid_t nb_fs_per_lcore; 3857 lcoreid_t nb_fc; 3858 lcoreid_t lc_id; 3859 struct fwd_stream *fs; 3860 struct rte_port *port; 3861 struct rte_eth_dev_info *dev_info; 3862 struct rte_eth_rxconf *rxq_conf; 3863 3864 nb_fc = cur_fwd_config.nb_fwd_lcores; 3865 /* Check remaining cores. */ 3866 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 3867 sm_id = fwd_lcores[lc_id]->stream_idx; 3868 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3869 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3870 sm_id++) { 3871 fs = fwd_streams[sm_id]; 3872 port = &ports[fs->rx_port]; 3873 dev_info = &port->dev_info; 3874 rxq_conf = &port->rx_conf[fs->rx_queue]; 3875 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3876 == 0 || rxq_conf->share_group == 0) 3877 /* Not shared rxq. */ 3878 continue; 3879 if (domain_id != port->dev_info.switch_info.domain_id) 3880 continue; 3881 if (rxq_conf->share_group != share_group) 3882 continue; 3883 if (rxq_conf->share_qid != share_rxq) 3884 continue; 3885 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 3886 share_group, share_rxq); 3887 printf(" lcore %hhu Port %hu queue %hu\n", 3888 src_lc, src_port, src_rxq); 3889 printf(" lcore %hhu Port %hu queue %hu\n", 3890 lc_id, fs->rx_port, fs->rx_queue); 3891 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 3892 nb_rxq); 3893 return true; 3894 } 3895 } 3896 return false; 3897 } 3898 3899 /* 3900 * Check shared rxq configuration. 3901 * 3902 * Shared group must not being scheduled on different core. 3903 */ 3904 bool 3905 pkt_fwd_shared_rxq_check(void) 3906 { 3907 streamid_t sm_id; 3908 streamid_t nb_fs_per_lcore; 3909 lcoreid_t nb_fc; 3910 lcoreid_t lc_id; 3911 struct fwd_stream *fs; 3912 uint16_t domain_id; 3913 struct rte_port *port; 3914 struct rte_eth_dev_info *dev_info; 3915 struct rte_eth_rxconf *rxq_conf; 3916 3917 if (rxq_share == 0) 3918 return true; 3919 nb_fc = cur_fwd_config.nb_fwd_lcores; 3920 /* 3921 * Check streams on each core, make sure the same switch domain + 3922 * group + queue doesn't get scheduled on other cores. 3923 */ 3924 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 3925 sm_id = fwd_lcores[lc_id]->stream_idx; 3926 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3927 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3928 sm_id++) { 3929 fs = fwd_streams[sm_id]; 3930 /* Update lcore info stream being scheduled. */ 3931 fs->lcore = fwd_lcores[lc_id]; 3932 port = &ports[fs->rx_port]; 3933 dev_info = &port->dev_info; 3934 rxq_conf = &port->rx_conf[fs->rx_queue]; 3935 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3936 == 0 || rxq_conf->share_group == 0) 3937 /* Not shared rxq. */ 3938 continue; 3939 /* Check shared rxq not scheduled on remaining cores. */ 3940 domain_id = port->dev_info.switch_info.domain_id; 3941 if (fwd_stream_on_other_lcores(domain_id, lc_id, 3942 fs->rx_port, 3943 fs->rx_queue, 3944 rxq_conf->share_group, 3945 rxq_conf->share_qid)) 3946 return false; 3947 } 3948 } 3949 return true; 3950 } 3951 3952 /* 3953 * Setup forwarding configuration for each logical core. 3954 */ 3955 static void 3956 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 3957 { 3958 streamid_t nb_fs_per_lcore; 3959 streamid_t nb_fs; 3960 streamid_t sm_id; 3961 lcoreid_t nb_extra; 3962 lcoreid_t nb_fc; 3963 lcoreid_t nb_lc; 3964 lcoreid_t lc_id; 3965 3966 nb_fs = cfg->nb_fwd_streams; 3967 nb_fc = cfg->nb_fwd_lcores; 3968 if (nb_fs <= nb_fc) { 3969 nb_fs_per_lcore = 1; 3970 nb_extra = 0; 3971 } else { 3972 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3973 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3974 } 3975 3976 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3977 sm_id = 0; 3978 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3979 fwd_lcores[lc_id]->stream_idx = sm_id; 3980 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3981 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3982 } 3983 3984 /* 3985 * Assign extra remaining streams, if any. 3986 */ 3987 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3988 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3989 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3990 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3991 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3992 } 3993 } 3994 3995 static portid_t 3996 fwd_topology_tx_port_get(portid_t rxp) 3997 { 3998 static int warning_once = 1; 3999 4000 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 4001 4002 switch (port_topology) { 4003 default: 4004 case PORT_TOPOLOGY_PAIRED: 4005 if ((rxp & 0x1) == 0) { 4006 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 4007 return rxp + 1; 4008 if (warning_once) { 4009 fprintf(stderr, 4010 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 4011 warning_once = 0; 4012 } 4013 return rxp; 4014 } 4015 return rxp - 1; 4016 case PORT_TOPOLOGY_CHAINED: 4017 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 4018 case PORT_TOPOLOGY_LOOP: 4019 return rxp; 4020 } 4021 } 4022 4023 static void 4024 simple_fwd_config_setup(void) 4025 { 4026 portid_t i; 4027 4028 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 4029 cur_fwd_config.nb_fwd_streams = 4030 (streamid_t) cur_fwd_config.nb_fwd_ports; 4031 4032 /* reinitialize forwarding streams */ 4033 init_fwd_streams(); 4034 4035 /* 4036 * In the simple forwarding test, the number of forwarding cores 4037 * must be lower or equal to the number of forwarding ports. 4038 */ 4039 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4040 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 4041 cur_fwd_config.nb_fwd_lcores = 4042 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 4043 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4044 4045 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 4046 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 4047 fwd_streams[i]->rx_queue = 0; 4048 fwd_streams[i]->tx_port = 4049 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 4050 fwd_streams[i]->tx_queue = 0; 4051 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 4052 fwd_streams[i]->retry_enabled = retry_enabled; 4053 } 4054 } 4055 4056 /** 4057 * For the RSS forwarding test all streams distributed over lcores. Each stream 4058 * being composed of a RX queue to poll on a RX port for input messages, 4059 * associated with a TX queue of a TX port where to send forwarded packets. 4060 */ 4061 static void 4062 rss_fwd_config_setup(void) 4063 { 4064 portid_t rxp; 4065 portid_t txp; 4066 queueid_t rxq; 4067 queueid_t nb_q; 4068 streamid_t sm_id; 4069 int start; 4070 int end; 4071 4072 nb_q = nb_rxq; 4073 if (nb_q > nb_txq) 4074 nb_q = nb_txq; 4075 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4076 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4077 cur_fwd_config.nb_fwd_streams = 4078 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4079 4080 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4081 cur_fwd_config.nb_fwd_lcores = 4082 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4083 4084 /* reinitialize forwarding streams */ 4085 init_fwd_streams(); 4086 4087 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4088 4089 if (proc_id > 0 && nb_q % num_procs != 0) 4090 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4091 4092 /** 4093 * In multi-process, All queues are allocated to different 4094 * processes based on num_procs and proc_id. For example: 4095 * if supports 4 queues(nb_q), 2 processes(num_procs), 4096 * the 0~1 queue for primary process. 4097 * the 2~3 queue for secondary process. 4098 */ 4099 start = proc_id * nb_q / num_procs; 4100 end = start + nb_q / num_procs; 4101 rxp = 0; 4102 rxq = start; 4103 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4104 struct fwd_stream *fs; 4105 4106 fs = fwd_streams[sm_id]; 4107 txp = fwd_topology_tx_port_get(rxp); 4108 fs->rx_port = fwd_ports_ids[rxp]; 4109 fs->rx_queue = rxq; 4110 fs->tx_port = fwd_ports_ids[txp]; 4111 fs->tx_queue = rxq; 4112 fs->peer_addr = fs->tx_port; 4113 fs->retry_enabled = retry_enabled; 4114 rxp++; 4115 if (rxp < nb_fwd_ports) 4116 continue; 4117 rxp = 0; 4118 rxq++; 4119 if (rxq >= end) 4120 rxq = start; 4121 } 4122 } 4123 4124 static uint16_t 4125 get_fwd_port_total_tc_num(void) 4126 { 4127 struct rte_eth_dcb_info dcb_info; 4128 uint16_t total_tc_num = 0; 4129 unsigned int i; 4130 4131 for (i = 0; i < nb_fwd_ports; i++) { 4132 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4133 total_tc_num += dcb_info.nb_tcs; 4134 } 4135 4136 return total_tc_num; 4137 } 4138 4139 /** 4140 * For the DCB forwarding test, each core is assigned on each traffic class. 4141 * 4142 * Each core is assigned a multi-stream, each stream being composed of 4143 * a RX queue to poll on a RX port for input messages, associated with 4144 * a TX queue of a TX port where to send forwarded packets. All RX and 4145 * TX queues are mapping to the same traffic class. 4146 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4147 * the same core 4148 */ 4149 static void 4150 dcb_fwd_config_setup(void) 4151 { 4152 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4153 portid_t txp, rxp = 0; 4154 queueid_t txq, rxq = 0; 4155 lcoreid_t lc_id; 4156 uint16_t nb_rx_queue, nb_tx_queue; 4157 uint16_t i, j, k, sm_id = 0; 4158 uint16_t total_tc_num; 4159 struct rte_port *port; 4160 uint8_t tc = 0; 4161 portid_t pid; 4162 int ret; 4163 4164 /* 4165 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4166 * or RTE_PORT_STOPPED. 4167 * 4168 * Re-configure ports to get updated mapping between tc and queue in 4169 * case the queue number of the port is changed. Skip for started ports 4170 * since modifying queue number and calling dev_configure need to stop 4171 * ports first. 4172 */ 4173 for (pid = 0; pid < nb_fwd_ports; pid++) { 4174 if (port_is_started(pid) == 1) 4175 continue; 4176 4177 port = &ports[pid]; 4178 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4179 &port->dev_conf); 4180 if (ret < 0) { 4181 fprintf(stderr, 4182 "Failed to re-configure port %d, ret = %d.\n", 4183 pid, ret); 4184 return; 4185 } 4186 } 4187 4188 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4189 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4190 cur_fwd_config.nb_fwd_streams = 4191 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4192 total_tc_num = get_fwd_port_total_tc_num(); 4193 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4194 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4195 4196 /* reinitialize forwarding streams */ 4197 init_fwd_streams(); 4198 sm_id = 0; 4199 txp = 1; 4200 /* get the dcb info on the first RX and TX ports */ 4201 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4202 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4203 4204 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4205 fwd_lcores[lc_id]->stream_nb = 0; 4206 fwd_lcores[lc_id]->stream_idx = sm_id; 4207 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4208 /* if the nb_queue is zero, means this tc is 4209 * not enabled on the POOL 4210 */ 4211 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4212 break; 4213 k = fwd_lcores[lc_id]->stream_nb + 4214 fwd_lcores[lc_id]->stream_idx; 4215 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4216 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4217 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4218 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4219 for (j = 0; j < nb_rx_queue; j++) { 4220 struct fwd_stream *fs; 4221 4222 fs = fwd_streams[k + j]; 4223 fs->rx_port = fwd_ports_ids[rxp]; 4224 fs->rx_queue = rxq + j; 4225 fs->tx_port = fwd_ports_ids[txp]; 4226 fs->tx_queue = txq + j % nb_tx_queue; 4227 fs->peer_addr = fs->tx_port; 4228 fs->retry_enabled = retry_enabled; 4229 } 4230 fwd_lcores[lc_id]->stream_nb += 4231 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4232 } 4233 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4234 4235 tc++; 4236 if (tc < rxp_dcb_info.nb_tcs) 4237 continue; 4238 /* Restart from TC 0 on next RX port */ 4239 tc = 0; 4240 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4241 rxp = (portid_t) 4242 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4243 else 4244 rxp++; 4245 if (rxp >= nb_fwd_ports) 4246 return; 4247 /* get the dcb information on next RX and TX ports */ 4248 if ((rxp & 0x1) == 0) 4249 txp = (portid_t) (rxp + 1); 4250 else 4251 txp = (portid_t) (rxp - 1); 4252 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4253 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4254 } 4255 } 4256 4257 static void 4258 icmp_echo_config_setup(void) 4259 { 4260 portid_t rxp; 4261 queueid_t rxq; 4262 lcoreid_t lc_id; 4263 uint16_t sm_id; 4264 4265 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4266 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4267 (nb_txq * nb_fwd_ports); 4268 else 4269 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4270 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4271 cur_fwd_config.nb_fwd_streams = 4272 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4273 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4274 cur_fwd_config.nb_fwd_lcores = 4275 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4276 if (verbose_level > 0) { 4277 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4278 __FUNCTION__, 4279 cur_fwd_config.nb_fwd_lcores, 4280 cur_fwd_config.nb_fwd_ports, 4281 cur_fwd_config.nb_fwd_streams); 4282 } 4283 4284 /* reinitialize forwarding streams */ 4285 init_fwd_streams(); 4286 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4287 rxp = 0; rxq = 0; 4288 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4289 if (verbose_level > 0) 4290 printf(" core=%d: \n", lc_id); 4291 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4292 struct fwd_stream *fs; 4293 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4294 fs->rx_port = fwd_ports_ids[rxp]; 4295 fs->rx_queue = rxq; 4296 fs->tx_port = fs->rx_port; 4297 fs->tx_queue = rxq; 4298 fs->peer_addr = fs->tx_port; 4299 fs->retry_enabled = retry_enabled; 4300 if (verbose_level > 0) 4301 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4302 sm_id, fs->rx_port, fs->rx_queue, 4303 fs->tx_queue); 4304 rxq = (queueid_t) (rxq + 1); 4305 if (rxq == nb_rxq) { 4306 rxq = 0; 4307 rxp = (portid_t) (rxp + 1); 4308 } 4309 } 4310 } 4311 } 4312 4313 void 4314 fwd_config_setup(void) 4315 { 4316 struct rte_port *port; 4317 portid_t pt_id; 4318 unsigned int i; 4319 4320 cur_fwd_config.fwd_eng = cur_fwd_eng; 4321 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4322 icmp_echo_config_setup(); 4323 return; 4324 } 4325 4326 if ((nb_rxq > 1) && (nb_txq > 1)){ 4327 if (dcb_config) { 4328 for (i = 0; i < nb_fwd_ports; i++) { 4329 pt_id = fwd_ports_ids[i]; 4330 port = &ports[pt_id]; 4331 if (!port->dcb_flag) { 4332 fprintf(stderr, 4333 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4334 return; 4335 } 4336 } 4337 if (nb_fwd_lcores == 1) { 4338 fprintf(stderr, 4339 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4340 return; 4341 } 4342 4343 dcb_fwd_config_setup(); 4344 } else 4345 rss_fwd_config_setup(); 4346 } 4347 else 4348 simple_fwd_config_setup(); 4349 } 4350 4351 static const char * 4352 mp_alloc_to_str(uint8_t mode) 4353 { 4354 switch (mode) { 4355 case MP_ALLOC_NATIVE: 4356 return "native"; 4357 case MP_ALLOC_ANON: 4358 return "anon"; 4359 case MP_ALLOC_XMEM: 4360 return "xmem"; 4361 case MP_ALLOC_XMEM_HUGE: 4362 return "xmemhuge"; 4363 case MP_ALLOC_XBUF: 4364 return "xbuf"; 4365 default: 4366 return "invalid"; 4367 } 4368 } 4369 4370 void 4371 pkt_fwd_config_display(struct fwd_config *cfg) 4372 { 4373 struct fwd_stream *fs; 4374 lcoreid_t lc_id; 4375 streamid_t sm_id; 4376 4377 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 4378 "NUMA support %s, MP allocation mode: %s\n", 4379 cfg->fwd_eng->fwd_mode_name, 4380 retry_enabled == 0 ? "" : " with retry", 4381 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 4382 numa_support == 1 ? "enabled" : "disabled", 4383 mp_alloc_to_str(mp_alloc_type)); 4384 4385 if (retry_enabled) 4386 printf("TX retry num: %u, delay between TX retries: %uus\n", 4387 burst_tx_retry_num, burst_tx_delay_time); 4388 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 4389 printf("Logical Core %u (socket %u) forwards packets on " 4390 "%d streams:", 4391 fwd_lcores_cpuids[lc_id], 4392 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 4393 fwd_lcores[lc_id]->stream_nb); 4394 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4395 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4396 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 4397 "P=%d/Q=%d (socket %u) ", 4398 fs->rx_port, fs->rx_queue, 4399 ports[fs->rx_port].socket_id, 4400 fs->tx_port, fs->tx_queue, 4401 ports[fs->tx_port].socket_id); 4402 print_ethaddr("peer=", 4403 &peer_eth_addrs[fs->peer_addr]); 4404 } 4405 printf("\n"); 4406 } 4407 printf("\n"); 4408 } 4409 4410 void 4411 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 4412 { 4413 struct rte_ether_addr new_peer_addr; 4414 if (!rte_eth_dev_is_valid_port(port_id)) { 4415 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 4416 return; 4417 } 4418 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 4419 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 4420 peer_addr); 4421 return; 4422 } 4423 peer_eth_addrs[port_id] = new_peer_addr; 4424 } 4425 4426 int 4427 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 4428 { 4429 unsigned int i; 4430 unsigned int lcore_cpuid; 4431 int record_now; 4432 4433 record_now = 0; 4434 again: 4435 for (i = 0; i < nb_lc; i++) { 4436 lcore_cpuid = lcorelist[i]; 4437 if (! rte_lcore_is_enabled(lcore_cpuid)) { 4438 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 4439 return -1; 4440 } 4441 if (lcore_cpuid == rte_get_main_lcore()) { 4442 fprintf(stderr, 4443 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 4444 lcore_cpuid); 4445 return -1; 4446 } 4447 if (record_now) 4448 fwd_lcores_cpuids[i] = lcore_cpuid; 4449 } 4450 if (record_now == 0) { 4451 record_now = 1; 4452 goto again; 4453 } 4454 nb_cfg_lcores = (lcoreid_t) nb_lc; 4455 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 4456 printf("previous number of forwarding cores %u - changed to " 4457 "number of configured cores %u\n", 4458 (unsigned int) nb_fwd_lcores, nb_lc); 4459 nb_fwd_lcores = (lcoreid_t) nb_lc; 4460 } 4461 4462 return 0; 4463 } 4464 4465 int 4466 set_fwd_lcores_mask(uint64_t lcoremask) 4467 { 4468 unsigned int lcorelist[64]; 4469 unsigned int nb_lc; 4470 unsigned int i; 4471 4472 if (lcoremask == 0) { 4473 fprintf(stderr, "Invalid NULL mask of cores\n"); 4474 return -1; 4475 } 4476 nb_lc = 0; 4477 for (i = 0; i < 64; i++) { 4478 if (! ((uint64_t)(1ULL << i) & lcoremask)) 4479 continue; 4480 lcorelist[nb_lc++] = i; 4481 } 4482 return set_fwd_lcores_list(lcorelist, nb_lc); 4483 } 4484 4485 void 4486 set_fwd_lcores_number(uint16_t nb_lc) 4487 { 4488 if (test_done == 0) { 4489 fprintf(stderr, "Please stop forwarding first\n"); 4490 return; 4491 } 4492 if (nb_lc > nb_cfg_lcores) { 4493 fprintf(stderr, 4494 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 4495 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 4496 return; 4497 } 4498 nb_fwd_lcores = (lcoreid_t) nb_lc; 4499 printf("Number of forwarding cores set to %u\n", 4500 (unsigned int) nb_fwd_lcores); 4501 } 4502 4503 void 4504 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 4505 { 4506 unsigned int i; 4507 portid_t port_id; 4508 int record_now; 4509 4510 record_now = 0; 4511 again: 4512 for (i = 0; i < nb_pt; i++) { 4513 port_id = (portid_t) portlist[i]; 4514 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4515 return; 4516 if (record_now) 4517 fwd_ports_ids[i] = port_id; 4518 } 4519 if (record_now == 0) { 4520 record_now = 1; 4521 goto again; 4522 } 4523 nb_cfg_ports = (portid_t) nb_pt; 4524 if (nb_fwd_ports != (portid_t) nb_pt) { 4525 printf("previous number of forwarding ports %u - changed to " 4526 "number of configured ports %u\n", 4527 (unsigned int) nb_fwd_ports, nb_pt); 4528 nb_fwd_ports = (portid_t) nb_pt; 4529 } 4530 } 4531 4532 /** 4533 * Parse the user input and obtain the list of forwarding ports 4534 * 4535 * @param[in] list 4536 * String containing the user input. User can specify 4537 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 4538 * For example, if the user wants to use all the available 4539 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 4540 * If the user wants to use only the ports 1,2 then the input 4541 * is 1,2. 4542 * valid characters are '-' and ',' 4543 * @param[out] values 4544 * This array will be filled with a list of port IDs 4545 * based on the user input 4546 * Note that duplicate entries are discarded and only the first 4547 * count entries in this array are port IDs and all the rest 4548 * will contain default values 4549 * @param[in] maxsize 4550 * This parameter denotes 2 things 4551 * 1) Number of elements in the values array 4552 * 2) Maximum value of each element in the values array 4553 * @return 4554 * On success, returns total count of parsed port IDs 4555 * On failure, returns 0 4556 */ 4557 static unsigned int 4558 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 4559 { 4560 unsigned int count = 0; 4561 char *end = NULL; 4562 int min, max; 4563 int value, i; 4564 unsigned int marked[maxsize]; 4565 4566 if (list == NULL || values == NULL) 4567 return 0; 4568 4569 for (i = 0; i < (int)maxsize; i++) 4570 marked[i] = 0; 4571 4572 min = INT_MAX; 4573 4574 do { 4575 /*Remove the blank spaces if any*/ 4576 while (isblank(*list)) 4577 list++; 4578 if (*list == '\0') 4579 break; 4580 errno = 0; 4581 value = strtol(list, &end, 10); 4582 if (errno || end == NULL) 4583 return 0; 4584 if (value < 0 || value >= (int)maxsize) 4585 return 0; 4586 while (isblank(*end)) 4587 end++; 4588 if (*end == '-' && min == INT_MAX) { 4589 min = value; 4590 } else if ((*end == ',') || (*end == '\0')) { 4591 max = value; 4592 if (min == INT_MAX) 4593 min = value; 4594 for (i = min; i <= max; i++) { 4595 if (count < maxsize) { 4596 if (marked[i]) 4597 continue; 4598 values[count] = i; 4599 marked[i] = 1; 4600 count++; 4601 } 4602 } 4603 min = INT_MAX; 4604 } else 4605 return 0; 4606 list = end + 1; 4607 } while (*end != '\0'); 4608 4609 return count; 4610 } 4611 4612 void 4613 parse_fwd_portlist(const char *portlist) 4614 { 4615 unsigned int portcount; 4616 unsigned int portindex[RTE_MAX_ETHPORTS]; 4617 unsigned int i, valid_port_count = 0; 4618 4619 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 4620 if (!portcount) 4621 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 4622 4623 /* 4624 * Here we verify the validity of the ports 4625 * and thereby calculate the total number of 4626 * valid ports 4627 */ 4628 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 4629 if (rte_eth_dev_is_valid_port(portindex[i])) { 4630 portindex[valid_port_count] = portindex[i]; 4631 valid_port_count++; 4632 } 4633 } 4634 4635 set_fwd_ports_list(portindex, valid_port_count); 4636 } 4637 4638 void 4639 set_fwd_ports_mask(uint64_t portmask) 4640 { 4641 unsigned int portlist[64]; 4642 unsigned int nb_pt; 4643 unsigned int i; 4644 4645 if (portmask == 0) { 4646 fprintf(stderr, "Invalid NULL mask of ports\n"); 4647 return; 4648 } 4649 nb_pt = 0; 4650 RTE_ETH_FOREACH_DEV(i) { 4651 if (! ((uint64_t)(1ULL << i) & portmask)) 4652 continue; 4653 portlist[nb_pt++] = i; 4654 } 4655 set_fwd_ports_list(portlist, nb_pt); 4656 } 4657 4658 void 4659 set_fwd_ports_number(uint16_t nb_pt) 4660 { 4661 if (nb_pt > nb_cfg_ports) { 4662 fprintf(stderr, 4663 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 4664 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 4665 return; 4666 } 4667 nb_fwd_ports = (portid_t) nb_pt; 4668 printf("Number of forwarding ports set to %u\n", 4669 (unsigned int) nb_fwd_ports); 4670 } 4671 4672 int 4673 port_is_forwarding(portid_t port_id) 4674 { 4675 unsigned int i; 4676 4677 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4678 return -1; 4679 4680 for (i = 0; i < nb_fwd_ports; i++) { 4681 if (fwd_ports_ids[i] == port_id) 4682 return 1; 4683 } 4684 4685 return 0; 4686 } 4687 4688 void 4689 set_nb_pkt_per_burst(uint16_t nb) 4690 { 4691 if (nb > MAX_PKT_BURST) { 4692 fprintf(stderr, 4693 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 4694 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 4695 return; 4696 } 4697 nb_pkt_per_burst = nb; 4698 printf("Number of packets per burst set to %u\n", 4699 (unsigned int) nb_pkt_per_burst); 4700 } 4701 4702 static const char * 4703 tx_split_get_name(enum tx_pkt_split split) 4704 { 4705 uint32_t i; 4706 4707 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4708 if (tx_split_name[i].split == split) 4709 return tx_split_name[i].name; 4710 } 4711 return NULL; 4712 } 4713 4714 void 4715 set_tx_pkt_split(const char *name) 4716 { 4717 uint32_t i; 4718 4719 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4720 if (strcmp(tx_split_name[i].name, name) == 0) { 4721 tx_pkt_split = tx_split_name[i].split; 4722 return; 4723 } 4724 } 4725 fprintf(stderr, "unknown value: \"%s\"\n", name); 4726 } 4727 4728 int 4729 parse_fec_mode(const char *name, uint32_t *fec_capa) 4730 { 4731 uint8_t i; 4732 4733 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 4734 if (strcmp(fec_mode_name[i].name, name) == 0) { 4735 *fec_capa = 4736 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 4737 return 0; 4738 } 4739 } 4740 return -1; 4741 } 4742 4743 void 4744 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 4745 { 4746 unsigned int i, j; 4747 4748 printf("FEC capabilities:\n"); 4749 4750 for (i = 0; i < num; i++) { 4751 printf("%s : ", 4752 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 4753 4754 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 4755 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 4756 speed_fec_capa[i].capa) 4757 printf("%s ", fec_mode_name[j].name); 4758 } 4759 printf("\n"); 4760 } 4761 } 4762 4763 void 4764 show_rx_pkt_offsets(void) 4765 { 4766 uint32_t i, n; 4767 4768 n = rx_pkt_nb_offs; 4769 printf("Number of offsets: %u\n", n); 4770 if (n) { 4771 printf("Segment offsets: "); 4772 for (i = 0; i != n - 1; i++) 4773 printf("%hu,", rx_pkt_seg_offsets[i]); 4774 printf("%hu\n", rx_pkt_seg_lengths[i]); 4775 } 4776 } 4777 4778 void 4779 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 4780 { 4781 unsigned int i; 4782 4783 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 4784 printf("nb segments per RX packets=%u >= " 4785 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 4786 return; 4787 } 4788 4789 /* 4790 * No extra check here, the segment length will be checked by PMD 4791 * in the extended queue setup. 4792 */ 4793 for (i = 0; i < nb_offs; i++) { 4794 if (seg_offsets[i] >= UINT16_MAX) { 4795 printf("offset[%u]=%u > UINT16_MAX - give up\n", 4796 i, seg_offsets[i]); 4797 return; 4798 } 4799 } 4800 4801 for (i = 0; i < nb_offs; i++) 4802 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 4803 4804 rx_pkt_nb_offs = (uint8_t) nb_offs; 4805 } 4806 4807 void 4808 show_rx_pkt_segments(void) 4809 { 4810 uint32_t i, n; 4811 4812 n = rx_pkt_nb_segs; 4813 printf("Number of segments: %u\n", n); 4814 if (n) { 4815 printf("Segment sizes: "); 4816 for (i = 0; i != n - 1; i++) 4817 printf("%hu,", rx_pkt_seg_lengths[i]); 4818 printf("%hu\n", rx_pkt_seg_lengths[i]); 4819 } 4820 } 4821 4822 void 4823 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4824 { 4825 unsigned int i; 4826 4827 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 4828 printf("nb segments per RX packets=%u >= " 4829 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 4830 return; 4831 } 4832 4833 /* 4834 * No extra check here, the segment length will be checked by PMD 4835 * in the extended queue setup. 4836 */ 4837 for (i = 0; i < nb_segs; i++) { 4838 if (seg_lengths[i] >= UINT16_MAX) { 4839 printf("length[%u]=%u > UINT16_MAX - give up\n", 4840 i, seg_lengths[i]); 4841 return; 4842 } 4843 } 4844 4845 for (i = 0; i < nb_segs; i++) 4846 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4847 4848 rx_pkt_nb_segs = (uint8_t) nb_segs; 4849 } 4850 4851 void 4852 show_tx_pkt_segments(void) 4853 { 4854 uint32_t i, n; 4855 const char *split; 4856 4857 n = tx_pkt_nb_segs; 4858 split = tx_split_get_name(tx_pkt_split); 4859 4860 printf("Number of segments: %u\n", n); 4861 printf("Segment sizes: "); 4862 for (i = 0; i != n - 1; i++) 4863 printf("%hu,", tx_pkt_seg_lengths[i]); 4864 printf("%hu\n", tx_pkt_seg_lengths[i]); 4865 printf("Split packet: %s\n", split); 4866 } 4867 4868 static bool 4869 nb_segs_is_invalid(unsigned int nb_segs) 4870 { 4871 uint16_t ring_size; 4872 uint16_t queue_id; 4873 uint16_t port_id; 4874 int ret; 4875 4876 RTE_ETH_FOREACH_DEV(port_id) { 4877 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 4878 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 4879 if (ret) { 4880 /* Port may not be initialized yet, can't say 4881 * the port is invalid in this stage. 4882 */ 4883 continue; 4884 } 4885 if (ring_size < nb_segs) { 4886 printf("nb segments per TX packets=%u >= TX " 4887 "queue(%u) ring_size=%u - txpkts ignored\n", 4888 nb_segs, queue_id, ring_size); 4889 return true; 4890 } 4891 } 4892 } 4893 4894 return false; 4895 } 4896 4897 void 4898 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4899 { 4900 uint16_t tx_pkt_len; 4901 unsigned int i; 4902 4903 /* 4904 * For single segment settings failed check is ignored. 4905 * It is a very basic capability to send the single segment 4906 * packets, suppose it is always supported. 4907 */ 4908 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 4909 fprintf(stderr, 4910 "Tx segment size(%u) is not supported - txpkts ignored\n", 4911 nb_segs); 4912 return; 4913 } 4914 4915 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 4916 fprintf(stderr, 4917 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 4918 nb_segs, RTE_MAX_SEGS_PER_PKT); 4919 return; 4920 } 4921 4922 /* 4923 * Check that each segment length is greater or equal than 4924 * the mbuf data size. 4925 * Check also that the total packet length is greater or equal than the 4926 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 4927 * 20 + 8). 4928 */ 4929 tx_pkt_len = 0; 4930 for (i = 0; i < nb_segs; i++) { 4931 if (seg_lengths[i] > mbuf_data_size[0]) { 4932 fprintf(stderr, 4933 "length[%u]=%u > mbuf_data_size=%u - give up\n", 4934 i, seg_lengths[i], mbuf_data_size[0]); 4935 return; 4936 } 4937 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 4938 } 4939 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 4940 fprintf(stderr, "total packet length=%u < %d - give up\n", 4941 (unsigned) tx_pkt_len, 4942 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 4943 return; 4944 } 4945 4946 for (i = 0; i < nb_segs; i++) 4947 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4948 4949 tx_pkt_length = tx_pkt_len; 4950 tx_pkt_nb_segs = (uint8_t) nb_segs; 4951 } 4952 4953 void 4954 show_tx_pkt_times(void) 4955 { 4956 printf("Interburst gap: %u\n", tx_pkt_times_inter); 4957 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 4958 } 4959 4960 void 4961 set_tx_pkt_times(unsigned int *tx_times) 4962 { 4963 tx_pkt_times_inter = tx_times[0]; 4964 tx_pkt_times_intra = tx_times[1]; 4965 } 4966 4967 #ifdef RTE_LIB_GRO 4968 void 4969 setup_gro(const char *onoff, portid_t port_id) 4970 { 4971 if (!rte_eth_dev_is_valid_port(port_id)) { 4972 fprintf(stderr, "invalid port id %u\n", port_id); 4973 return; 4974 } 4975 if (test_done == 0) { 4976 fprintf(stderr, 4977 "Before enable/disable GRO, please stop forwarding first\n"); 4978 return; 4979 } 4980 if (strcmp(onoff, "on") == 0) { 4981 if (gro_ports[port_id].enable != 0) { 4982 fprintf(stderr, 4983 "Port %u has enabled GRO. Please disable GRO first\n", 4984 port_id); 4985 return; 4986 } 4987 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4988 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 4989 gro_ports[port_id].param.max_flow_num = 4990 GRO_DEFAULT_FLOW_NUM; 4991 gro_ports[port_id].param.max_item_per_flow = 4992 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 4993 } 4994 gro_ports[port_id].enable = 1; 4995 } else { 4996 if (gro_ports[port_id].enable == 0) { 4997 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 4998 return; 4999 } 5000 gro_ports[port_id].enable = 0; 5001 } 5002 } 5003 5004 void 5005 setup_gro_flush_cycles(uint8_t cycles) 5006 { 5007 if (test_done == 0) { 5008 fprintf(stderr, 5009 "Before change flush interval for GRO, please stop forwarding first.\n"); 5010 return; 5011 } 5012 5013 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 5014 GRO_DEFAULT_FLUSH_CYCLES) { 5015 fprintf(stderr, 5016 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 5017 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 5018 cycles = GRO_DEFAULT_FLUSH_CYCLES; 5019 } 5020 5021 gro_flush_cycles = cycles; 5022 } 5023 5024 void 5025 show_gro(portid_t port_id) 5026 { 5027 struct rte_gro_param *param; 5028 uint32_t max_pkts_num; 5029 5030 param = &gro_ports[port_id].param; 5031 5032 if (!rte_eth_dev_is_valid_port(port_id)) { 5033 fprintf(stderr, "Invalid port id %u.\n", port_id); 5034 return; 5035 } 5036 if (gro_ports[port_id].enable) { 5037 printf("GRO type: TCP/IPv4\n"); 5038 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5039 max_pkts_num = param->max_flow_num * 5040 param->max_item_per_flow; 5041 } else 5042 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 5043 printf("Max number of packets to perform GRO: %u\n", 5044 max_pkts_num); 5045 printf("Flushing cycles: %u\n", gro_flush_cycles); 5046 } else 5047 printf("Port %u doesn't enable GRO.\n", port_id); 5048 } 5049 #endif /* RTE_LIB_GRO */ 5050 5051 #ifdef RTE_LIB_GSO 5052 void 5053 setup_gso(const char *mode, portid_t port_id) 5054 { 5055 if (!rte_eth_dev_is_valid_port(port_id)) { 5056 fprintf(stderr, "invalid port id %u\n", port_id); 5057 return; 5058 } 5059 if (strcmp(mode, "on") == 0) { 5060 if (test_done == 0) { 5061 fprintf(stderr, 5062 "before enabling GSO, please stop forwarding first\n"); 5063 return; 5064 } 5065 gso_ports[port_id].enable = 1; 5066 } else if (strcmp(mode, "off") == 0) { 5067 if (test_done == 0) { 5068 fprintf(stderr, 5069 "before disabling GSO, please stop forwarding first\n"); 5070 return; 5071 } 5072 gso_ports[port_id].enable = 0; 5073 } 5074 } 5075 #endif /* RTE_LIB_GSO */ 5076 5077 char* 5078 list_pkt_forwarding_modes(void) 5079 { 5080 static char fwd_modes[128] = ""; 5081 const char *separator = "|"; 5082 struct fwd_engine *fwd_eng; 5083 unsigned i = 0; 5084 5085 if (strlen (fwd_modes) == 0) { 5086 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5087 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5088 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5089 strncat(fwd_modes, separator, 5090 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5091 } 5092 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5093 } 5094 5095 return fwd_modes; 5096 } 5097 5098 char* 5099 list_pkt_forwarding_retry_modes(void) 5100 { 5101 static char fwd_modes[128] = ""; 5102 const char *separator = "|"; 5103 struct fwd_engine *fwd_eng; 5104 unsigned i = 0; 5105 5106 if (strlen(fwd_modes) == 0) { 5107 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5108 if (fwd_eng == &rx_only_engine) 5109 continue; 5110 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5111 sizeof(fwd_modes) - 5112 strlen(fwd_modes) - 1); 5113 strncat(fwd_modes, separator, 5114 sizeof(fwd_modes) - 5115 strlen(fwd_modes) - 1); 5116 } 5117 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5118 } 5119 5120 return fwd_modes; 5121 } 5122 5123 void 5124 set_pkt_forwarding_mode(const char *fwd_mode_name) 5125 { 5126 struct fwd_engine *fwd_eng; 5127 unsigned i; 5128 5129 i = 0; 5130 while ((fwd_eng = fwd_engines[i]) != NULL) { 5131 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5132 printf("Set %s packet forwarding mode%s\n", 5133 fwd_mode_name, 5134 retry_enabled == 0 ? "" : " with retry"); 5135 cur_fwd_eng = fwd_eng; 5136 return; 5137 } 5138 i++; 5139 } 5140 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5141 } 5142 5143 void 5144 add_rx_dump_callbacks(portid_t portid) 5145 { 5146 struct rte_eth_dev_info dev_info; 5147 uint16_t queue; 5148 int ret; 5149 5150 if (port_id_is_invalid(portid, ENABLED_WARN)) 5151 return; 5152 5153 ret = eth_dev_info_get_print_err(portid, &dev_info); 5154 if (ret != 0) 5155 return; 5156 5157 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5158 if (!ports[portid].rx_dump_cb[queue]) 5159 ports[portid].rx_dump_cb[queue] = 5160 rte_eth_add_rx_callback(portid, queue, 5161 dump_rx_pkts, NULL); 5162 } 5163 5164 void 5165 add_tx_dump_callbacks(portid_t portid) 5166 { 5167 struct rte_eth_dev_info dev_info; 5168 uint16_t queue; 5169 int ret; 5170 5171 if (port_id_is_invalid(portid, ENABLED_WARN)) 5172 return; 5173 5174 ret = eth_dev_info_get_print_err(portid, &dev_info); 5175 if (ret != 0) 5176 return; 5177 5178 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5179 if (!ports[portid].tx_dump_cb[queue]) 5180 ports[portid].tx_dump_cb[queue] = 5181 rte_eth_add_tx_callback(portid, queue, 5182 dump_tx_pkts, NULL); 5183 } 5184 5185 void 5186 remove_rx_dump_callbacks(portid_t portid) 5187 { 5188 struct rte_eth_dev_info dev_info; 5189 uint16_t queue; 5190 int ret; 5191 5192 if (port_id_is_invalid(portid, ENABLED_WARN)) 5193 return; 5194 5195 ret = eth_dev_info_get_print_err(portid, &dev_info); 5196 if (ret != 0) 5197 return; 5198 5199 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5200 if (ports[portid].rx_dump_cb[queue]) { 5201 rte_eth_remove_rx_callback(portid, queue, 5202 ports[portid].rx_dump_cb[queue]); 5203 ports[portid].rx_dump_cb[queue] = NULL; 5204 } 5205 } 5206 5207 void 5208 remove_tx_dump_callbacks(portid_t portid) 5209 { 5210 struct rte_eth_dev_info dev_info; 5211 uint16_t queue; 5212 int ret; 5213 5214 if (port_id_is_invalid(portid, ENABLED_WARN)) 5215 return; 5216 5217 ret = eth_dev_info_get_print_err(portid, &dev_info); 5218 if (ret != 0) 5219 return; 5220 5221 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5222 if (ports[portid].tx_dump_cb[queue]) { 5223 rte_eth_remove_tx_callback(portid, queue, 5224 ports[portid].tx_dump_cb[queue]); 5225 ports[portid].tx_dump_cb[queue] = NULL; 5226 } 5227 } 5228 5229 void 5230 configure_rxtx_dump_callbacks(uint16_t verbose) 5231 { 5232 portid_t portid; 5233 5234 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5235 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 5236 return; 5237 #endif 5238 5239 RTE_ETH_FOREACH_DEV(portid) 5240 { 5241 if (verbose == 1 || verbose > 2) 5242 add_rx_dump_callbacks(portid); 5243 else 5244 remove_rx_dump_callbacks(portid); 5245 if (verbose >= 2) 5246 add_tx_dump_callbacks(portid); 5247 else 5248 remove_tx_dump_callbacks(portid); 5249 } 5250 } 5251 5252 void 5253 set_verbose_level(uint16_t vb_level) 5254 { 5255 printf("Change verbose level from %u to %u\n", 5256 (unsigned int) verbose_level, (unsigned int) vb_level); 5257 verbose_level = vb_level; 5258 configure_rxtx_dump_callbacks(verbose_level); 5259 } 5260 5261 void 5262 vlan_extend_set(portid_t port_id, int on) 5263 { 5264 int diag; 5265 int vlan_offload; 5266 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5267 5268 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5269 return; 5270 5271 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5272 5273 if (on) { 5274 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 5275 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5276 } else { 5277 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 5278 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5279 } 5280 5281 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5282 if (diag < 0) { 5283 fprintf(stderr, 5284 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 5285 port_id, on, diag); 5286 return; 5287 } 5288 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5289 } 5290 5291 void 5292 rx_vlan_strip_set(portid_t port_id, int on) 5293 { 5294 int diag; 5295 int vlan_offload; 5296 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5297 5298 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5299 return; 5300 5301 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5302 5303 if (on) { 5304 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 5305 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5306 } else { 5307 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 5308 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5309 } 5310 5311 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5312 if (diag < 0) { 5313 fprintf(stderr, 5314 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5315 __func__, port_id, on, diag); 5316 return; 5317 } 5318 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5319 } 5320 5321 void 5322 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 5323 { 5324 int diag; 5325 5326 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5327 return; 5328 5329 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 5330 if (diag < 0) 5331 fprintf(stderr, 5332 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 5333 __func__, port_id, queue_id, on, diag); 5334 } 5335 5336 void 5337 rx_vlan_filter_set(portid_t port_id, int on) 5338 { 5339 int diag; 5340 int vlan_offload; 5341 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5342 5343 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5344 return; 5345 5346 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5347 5348 if (on) { 5349 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 5350 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5351 } else { 5352 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 5353 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5354 } 5355 5356 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5357 if (diag < 0) { 5358 fprintf(stderr, 5359 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5360 __func__, port_id, on, diag); 5361 return; 5362 } 5363 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5364 } 5365 5366 void 5367 rx_vlan_qinq_strip_set(portid_t port_id, int on) 5368 { 5369 int diag; 5370 int vlan_offload; 5371 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5372 5373 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5374 return; 5375 5376 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5377 5378 if (on) { 5379 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 5380 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5381 } else { 5382 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 5383 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5384 } 5385 5386 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5387 if (diag < 0) { 5388 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 5389 __func__, port_id, on, diag); 5390 return; 5391 } 5392 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5393 } 5394 5395 int 5396 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 5397 { 5398 int diag; 5399 5400 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5401 return 1; 5402 if (vlan_id_is_invalid(vlan_id)) 5403 return 1; 5404 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 5405 if (diag == 0) 5406 return 0; 5407 fprintf(stderr, 5408 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 5409 port_id, vlan_id, on, diag); 5410 return -1; 5411 } 5412 5413 void 5414 rx_vlan_all_filter_set(portid_t port_id, int on) 5415 { 5416 uint16_t vlan_id; 5417 5418 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5419 return; 5420 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 5421 if (rx_vft_set(port_id, vlan_id, on)) 5422 break; 5423 } 5424 } 5425 5426 void 5427 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 5428 { 5429 int diag; 5430 5431 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5432 return; 5433 5434 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 5435 if (diag == 0) 5436 return; 5437 5438 fprintf(stderr, 5439 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 5440 port_id, vlan_type, tp_id, diag); 5441 } 5442 5443 void 5444 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 5445 { 5446 struct rte_eth_dev_info dev_info; 5447 int ret; 5448 5449 if (vlan_id_is_invalid(vlan_id)) 5450 return; 5451 5452 if (ports[port_id].dev_conf.txmode.offloads & 5453 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 5454 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 5455 return; 5456 } 5457 5458 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5459 if (ret != 0) 5460 return; 5461 5462 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 5463 fprintf(stderr, 5464 "Error: vlan insert is not supported by port %d\n", 5465 port_id); 5466 return; 5467 } 5468 5469 tx_vlan_reset(port_id); 5470 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 5471 ports[port_id].tx_vlan_id = vlan_id; 5472 } 5473 5474 void 5475 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 5476 { 5477 struct rte_eth_dev_info dev_info; 5478 int ret; 5479 5480 if (vlan_id_is_invalid(vlan_id)) 5481 return; 5482 if (vlan_id_is_invalid(vlan_id_outer)) 5483 return; 5484 5485 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5486 if (ret != 0) 5487 return; 5488 5489 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 5490 fprintf(stderr, 5491 "Error: qinq insert not supported by port %d\n", 5492 port_id); 5493 return; 5494 } 5495 5496 tx_vlan_reset(port_id); 5497 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5498 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5499 ports[port_id].tx_vlan_id = vlan_id; 5500 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 5501 } 5502 5503 void 5504 tx_vlan_reset(portid_t port_id) 5505 { 5506 ports[port_id].dev_conf.txmode.offloads &= 5507 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5508 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5509 ports[port_id].tx_vlan_id = 0; 5510 ports[port_id].tx_vlan_id_outer = 0; 5511 } 5512 5513 void 5514 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 5515 { 5516 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5517 return; 5518 5519 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 5520 } 5521 5522 void 5523 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 5524 { 5525 int ret; 5526 5527 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5528 return; 5529 5530 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 5531 return; 5532 5533 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 5534 fprintf(stderr, "map_value not in required range 0..%d\n", 5535 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 5536 return; 5537 } 5538 5539 if (!is_rx) { /* tx */ 5540 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 5541 map_value); 5542 if (ret) { 5543 fprintf(stderr, 5544 "failed to set tx queue stats mapping.\n"); 5545 return; 5546 } 5547 } else { /* rx */ 5548 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 5549 map_value); 5550 if (ret) { 5551 fprintf(stderr, 5552 "failed to set rx queue stats mapping.\n"); 5553 return; 5554 } 5555 } 5556 } 5557 5558 void 5559 set_xstats_hide_zero(uint8_t on_off) 5560 { 5561 xstats_hide_zero = on_off; 5562 } 5563 5564 void 5565 set_record_core_cycles(uint8_t on_off) 5566 { 5567 record_core_cycles = on_off; 5568 } 5569 5570 void 5571 set_record_burst_stats(uint8_t on_off) 5572 { 5573 record_burst_stats = on_off; 5574 } 5575 5576 static char* 5577 flowtype_to_str(uint16_t flow_type) 5578 { 5579 struct flow_type_info { 5580 char str[32]; 5581 uint16_t ftype; 5582 }; 5583 5584 uint8_t i; 5585 static struct flow_type_info flowtype_str_table[] = { 5586 {"raw", RTE_ETH_FLOW_RAW}, 5587 {"ipv4", RTE_ETH_FLOW_IPV4}, 5588 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 5589 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 5590 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 5591 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 5592 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 5593 {"ipv6", RTE_ETH_FLOW_IPV6}, 5594 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 5595 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 5596 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 5597 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 5598 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 5599 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 5600 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 5601 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 5602 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 5603 {"port", RTE_ETH_FLOW_PORT}, 5604 {"vxlan", RTE_ETH_FLOW_VXLAN}, 5605 {"geneve", RTE_ETH_FLOW_GENEVE}, 5606 {"nvgre", RTE_ETH_FLOW_NVGRE}, 5607 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 5608 {"gtpu", RTE_ETH_FLOW_GTPU}, 5609 }; 5610 5611 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 5612 if (flowtype_str_table[i].ftype == flow_type) 5613 return flowtype_str_table[i].str; 5614 } 5615 5616 return NULL; 5617 } 5618 5619 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 5620 5621 static inline void 5622 print_fdir_mask(struct rte_eth_fdir_masks *mask) 5623 { 5624 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 5625 5626 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 5627 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 5628 " tunnel_id: 0x%08x", 5629 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 5630 rte_be_to_cpu_32(mask->tunnel_id_mask)); 5631 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5632 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 5633 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 5634 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 5635 5636 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 5637 rte_be_to_cpu_16(mask->src_port_mask), 5638 rte_be_to_cpu_16(mask->dst_port_mask)); 5639 5640 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 5641 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 5642 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 5643 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 5644 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 5645 5646 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 5647 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 5648 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 5649 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 5650 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 5651 } 5652 5653 printf("\n"); 5654 } 5655 5656 static inline void 5657 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5658 { 5659 struct rte_eth_flex_payload_cfg *cfg; 5660 uint32_t i, j; 5661 5662 for (i = 0; i < flex_conf->nb_payloads; i++) { 5663 cfg = &flex_conf->flex_set[i]; 5664 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 5665 printf("\n RAW: "); 5666 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 5667 printf("\n L2_PAYLOAD: "); 5668 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 5669 printf("\n L3_PAYLOAD: "); 5670 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 5671 printf("\n L4_PAYLOAD: "); 5672 else 5673 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 5674 for (j = 0; j < num; j++) 5675 printf(" %-5u", cfg->src_offset[j]); 5676 } 5677 printf("\n"); 5678 } 5679 5680 static inline void 5681 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5682 { 5683 struct rte_eth_fdir_flex_mask *mask; 5684 uint32_t i, j; 5685 char *p; 5686 5687 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 5688 mask = &flex_conf->flex_mask[i]; 5689 p = flowtype_to_str(mask->flow_type); 5690 printf("\n %s:\t", p ? p : "unknown"); 5691 for (j = 0; j < num; j++) 5692 printf(" %02x", mask->mask[j]); 5693 } 5694 printf("\n"); 5695 } 5696 5697 static inline void 5698 print_fdir_flow_type(uint32_t flow_types_mask) 5699 { 5700 int i; 5701 char *p; 5702 5703 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 5704 if (!(flow_types_mask & (1 << i))) 5705 continue; 5706 p = flowtype_to_str(i); 5707 if (p) 5708 printf(" %s", p); 5709 else 5710 printf(" unknown"); 5711 } 5712 printf("\n"); 5713 } 5714 5715 static int 5716 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 5717 struct rte_eth_fdir_stats *fdir_stat) 5718 { 5719 int ret = -ENOTSUP; 5720 5721 #ifdef RTE_NET_I40E 5722 if (ret == -ENOTSUP) { 5723 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 5724 if (!ret) 5725 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 5726 } 5727 #endif 5728 #ifdef RTE_NET_IXGBE 5729 if (ret == -ENOTSUP) { 5730 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 5731 if (!ret) 5732 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 5733 } 5734 #endif 5735 switch (ret) { 5736 case 0: 5737 break; 5738 case -ENOTSUP: 5739 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 5740 port_id); 5741 break; 5742 default: 5743 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 5744 break; 5745 } 5746 return ret; 5747 } 5748 5749 void 5750 fdir_get_infos(portid_t port_id) 5751 { 5752 struct rte_eth_fdir_stats fdir_stat; 5753 struct rte_eth_fdir_info fdir_info; 5754 5755 static const char *fdir_stats_border = "########################"; 5756 5757 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5758 return; 5759 5760 memset(&fdir_info, 0, sizeof(fdir_info)); 5761 memset(&fdir_stat, 0, sizeof(fdir_stat)); 5762 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 5763 return; 5764 5765 printf("\n %s FDIR infos for port %-2d %s\n", 5766 fdir_stats_border, port_id, fdir_stats_border); 5767 printf(" MODE: "); 5768 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 5769 printf(" PERFECT\n"); 5770 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 5771 printf(" PERFECT-MAC-VLAN\n"); 5772 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 5773 printf(" PERFECT-TUNNEL\n"); 5774 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 5775 printf(" SIGNATURE\n"); 5776 else 5777 printf(" DISABLE\n"); 5778 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 5779 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 5780 printf(" SUPPORTED FLOW TYPE: "); 5781 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 5782 } 5783 printf(" FLEX PAYLOAD INFO:\n"); 5784 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 5785 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 5786 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 5787 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 5788 fdir_info.flex_payload_unit, 5789 fdir_info.max_flex_payload_segment_num, 5790 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 5791 printf(" MASK: "); 5792 print_fdir_mask(&fdir_info.mask); 5793 if (fdir_info.flex_conf.nb_payloads > 0) { 5794 printf(" FLEX PAYLOAD SRC OFFSET:"); 5795 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5796 } 5797 if (fdir_info.flex_conf.nb_flexmasks > 0) { 5798 printf(" FLEX MASK CFG:"); 5799 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5800 } 5801 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 5802 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 5803 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 5804 fdir_info.guarant_spc, fdir_info.best_spc); 5805 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 5806 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 5807 " add: %-10"PRIu64" remove: %"PRIu64"\n" 5808 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 5809 fdir_stat.collision, fdir_stat.free, 5810 fdir_stat.maxhash, fdir_stat.maxlen, 5811 fdir_stat.add, fdir_stat.remove, 5812 fdir_stat.f_add, fdir_stat.f_remove); 5813 printf(" %s############################%s\n", 5814 fdir_stats_border, fdir_stats_border); 5815 } 5816 5817 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 5818 5819 void 5820 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 5821 { 5822 struct rte_port *port; 5823 struct rte_eth_fdir_flex_conf *flex_conf; 5824 int i, idx = 0; 5825 5826 port = &ports[port_id]; 5827 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5828 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 5829 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 5830 idx = i; 5831 break; 5832 } 5833 } 5834 if (i >= RTE_ETH_FLOW_MAX) { 5835 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 5836 idx = flex_conf->nb_flexmasks; 5837 flex_conf->nb_flexmasks++; 5838 } else { 5839 fprintf(stderr, 5840 "The flex mask table is full. Can not set flex mask for flow_type(%u).", 5841 cfg->flow_type); 5842 return; 5843 } 5844 } 5845 rte_memcpy(&flex_conf->flex_mask[idx], 5846 cfg, 5847 sizeof(struct rte_eth_fdir_flex_mask)); 5848 } 5849 5850 void 5851 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 5852 { 5853 struct rte_port *port; 5854 struct rte_eth_fdir_flex_conf *flex_conf; 5855 int i, idx = 0; 5856 5857 port = &ports[port_id]; 5858 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5859 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 5860 if (cfg->type == flex_conf->flex_set[i].type) { 5861 idx = i; 5862 break; 5863 } 5864 } 5865 if (i >= RTE_ETH_PAYLOAD_MAX) { 5866 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 5867 idx = flex_conf->nb_payloads; 5868 flex_conf->nb_payloads++; 5869 } else { 5870 fprintf(stderr, 5871 "The flex payload table is full. Can not set flex payload for type(%u).", 5872 cfg->type); 5873 return; 5874 } 5875 } 5876 rte_memcpy(&flex_conf->flex_set[idx], 5877 cfg, 5878 sizeof(struct rte_eth_flex_payload_cfg)); 5879 5880 } 5881 5882 void 5883 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 5884 { 5885 #ifdef RTE_NET_IXGBE 5886 int diag; 5887 5888 if (is_rx) 5889 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 5890 else 5891 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 5892 5893 if (diag == 0) 5894 return; 5895 fprintf(stderr, 5896 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 5897 is_rx ? "rx" : "tx", port_id, diag); 5898 return; 5899 #endif 5900 fprintf(stderr, "VF %s setting not supported for port %d\n", 5901 is_rx ? "Rx" : "Tx", port_id); 5902 RTE_SET_USED(vf); 5903 RTE_SET_USED(on); 5904 } 5905 5906 int 5907 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 5908 { 5909 int diag; 5910 struct rte_eth_link link; 5911 int ret; 5912 5913 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5914 return 1; 5915 ret = eth_link_get_nowait_print_err(port_id, &link); 5916 if (ret < 0) 5917 return 1; 5918 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 5919 rate > link.link_speed) { 5920 fprintf(stderr, 5921 "Invalid rate value:%u bigger than link speed: %u\n", 5922 rate, link.link_speed); 5923 return 1; 5924 } 5925 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 5926 if (diag == 0) 5927 return diag; 5928 fprintf(stderr, 5929 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 5930 port_id, diag); 5931 return diag; 5932 } 5933 5934 int 5935 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 5936 { 5937 int diag = -ENOTSUP; 5938 5939 RTE_SET_USED(vf); 5940 RTE_SET_USED(rate); 5941 RTE_SET_USED(q_msk); 5942 5943 #ifdef RTE_NET_IXGBE 5944 if (diag == -ENOTSUP) 5945 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 5946 q_msk); 5947 #endif 5948 #ifdef RTE_NET_BNXT 5949 if (diag == -ENOTSUP) 5950 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 5951 #endif 5952 if (diag == 0) 5953 return diag; 5954 5955 fprintf(stderr, 5956 "%s for port_id=%d failed diag=%d\n", 5957 __func__, port_id, diag); 5958 return diag; 5959 } 5960 5961 /* 5962 * Functions to manage the set of filtered Multicast MAC addresses. 5963 * 5964 * A pool of filtered multicast MAC addresses is associated with each port. 5965 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 5966 * The address of the pool and the number of valid multicast MAC addresses 5967 * recorded in the pool are stored in the fields "mc_addr_pool" and 5968 * "mc_addr_nb" of the "rte_port" data structure. 5969 * 5970 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 5971 * to be supplied a contiguous array of multicast MAC addresses. 5972 * To comply with this constraint, the set of multicast addresses recorded 5973 * into the pool are systematically compacted at the beginning of the pool. 5974 * Hence, when a multicast address is removed from the pool, all following 5975 * addresses, if any, are copied back to keep the set contiguous. 5976 */ 5977 #define MCAST_POOL_INC 32 5978 5979 static int 5980 mcast_addr_pool_extend(struct rte_port *port) 5981 { 5982 struct rte_ether_addr *mc_pool; 5983 size_t mc_pool_size; 5984 5985 /* 5986 * If a free entry is available at the end of the pool, just 5987 * increment the number of recorded multicast addresses. 5988 */ 5989 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5990 port->mc_addr_nb++; 5991 return 0; 5992 } 5993 5994 /* 5995 * [re]allocate a pool with MCAST_POOL_INC more entries. 5996 * The previous test guarantees that port->mc_addr_nb is a multiple 5997 * of MCAST_POOL_INC. 5998 */ 5999 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 6000 MCAST_POOL_INC); 6001 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 6002 mc_pool_size); 6003 if (mc_pool == NULL) { 6004 fprintf(stderr, 6005 "allocation of pool of %u multicast addresses failed\n", 6006 port->mc_addr_nb + MCAST_POOL_INC); 6007 return -ENOMEM; 6008 } 6009 6010 port->mc_addr_pool = mc_pool; 6011 port->mc_addr_nb++; 6012 return 0; 6013 6014 } 6015 6016 static void 6017 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 6018 { 6019 if (mcast_addr_pool_extend(port) != 0) 6020 return; 6021 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 6022 } 6023 6024 static void 6025 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 6026 { 6027 port->mc_addr_nb--; 6028 if (addr_idx == port->mc_addr_nb) { 6029 /* No need to recompact the set of multicast addresses. */ 6030 if (port->mc_addr_nb == 0) { 6031 /* free the pool of multicast addresses. */ 6032 free(port->mc_addr_pool); 6033 port->mc_addr_pool = NULL; 6034 } 6035 return; 6036 } 6037 memmove(&port->mc_addr_pool[addr_idx], 6038 &port->mc_addr_pool[addr_idx + 1], 6039 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 6040 } 6041 6042 static int 6043 eth_port_multicast_addr_list_set(portid_t port_id) 6044 { 6045 struct rte_port *port; 6046 int diag; 6047 6048 port = &ports[port_id]; 6049 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 6050 port->mc_addr_nb); 6051 if (diag < 0) 6052 fprintf(stderr, 6053 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 6054 port_id, port->mc_addr_nb, diag); 6055 6056 return diag; 6057 } 6058 6059 void 6060 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6061 { 6062 struct rte_port *port; 6063 uint32_t i; 6064 6065 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6066 return; 6067 6068 port = &ports[port_id]; 6069 6070 /* 6071 * Check that the added multicast MAC address is not already recorded 6072 * in the pool of multicast addresses. 6073 */ 6074 for (i = 0; i < port->mc_addr_nb; i++) { 6075 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6076 fprintf(stderr, 6077 "multicast address already filtered by port\n"); 6078 return; 6079 } 6080 } 6081 6082 mcast_addr_pool_append(port, mc_addr); 6083 if (eth_port_multicast_addr_list_set(port_id) < 0) 6084 /* Rollback on failure, remove the address from the pool */ 6085 mcast_addr_pool_remove(port, i); 6086 } 6087 6088 void 6089 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6090 { 6091 struct rte_port *port; 6092 uint32_t i; 6093 6094 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6095 return; 6096 6097 port = &ports[port_id]; 6098 6099 /* 6100 * Search the pool of multicast MAC addresses for the removed address. 6101 */ 6102 for (i = 0; i < port->mc_addr_nb; i++) { 6103 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6104 break; 6105 } 6106 if (i == port->mc_addr_nb) { 6107 fprintf(stderr, "multicast address not filtered by port %d\n", 6108 port_id); 6109 return; 6110 } 6111 6112 mcast_addr_pool_remove(port, i); 6113 if (eth_port_multicast_addr_list_set(port_id) < 0) 6114 /* Rollback on failure, add the address back into the pool */ 6115 mcast_addr_pool_append(port, mc_addr); 6116 } 6117 6118 void 6119 port_dcb_info_display(portid_t port_id) 6120 { 6121 struct rte_eth_dcb_info dcb_info; 6122 uint16_t i; 6123 int ret; 6124 static const char *border = "================"; 6125 6126 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6127 return; 6128 6129 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6130 if (ret) { 6131 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6132 port_id); 6133 return; 6134 } 6135 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6136 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6137 printf("\n TC : "); 6138 for (i = 0; i < dcb_info.nb_tcs; i++) 6139 printf("\t%4d", i); 6140 printf("\n Priority : "); 6141 for (i = 0; i < dcb_info.nb_tcs; i++) 6142 printf("\t%4d", dcb_info.prio_tc[i]); 6143 printf("\n BW percent :"); 6144 for (i = 0; i < dcb_info.nb_tcs; i++) 6145 printf("\t%4d%%", dcb_info.tc_bws[i]); 6146 printf("\n RXQ base : "); 6147 for (i = 0; i < dcb_info.nb_tcs; i++) 6148 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6149 printf("\n RXQ number :"); 6150 for (i = 0; i < dcb_info.nb_tcs; i++) 6151 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6152 printf("\n TXQ base : "); 6153 for (i = 0; i < dcb_info.nb_tcs; i++) 6154 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6155 printf("\n TXQ number :"); 6156 for (i = 0; i < dcb_info.nb_tcs; i++) 6157 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6158 printf("\n"); 6159 } 6160 6161 uint8_t * 6162 open_file(const char *file_path, uint32_t *size) 6163 { 6164 int fd = open(file_path, O_RDONLY); 6165 off_t pkg_size; 6166 uint8_t *buf = NULL; 6167 int ret = 0; 6168 struct stat st_buf; 6169 6170 if (size) 6171 *size = 0; 6172 6173 if (fd == -1) { 6174 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6175 return buf; 6176 } 6177 6178 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6179 close(fd); 6180 fprintf(stderr, "%s: File operations failed\n", __func__); 6181 return buf; 6182 } 6183 6184 pkg_size = st_buf.st_size; 6185 if (pkg_size < 0) { 6186 close(fd); 6187 fprintf(stderr, "%s: File operations failed\n", __func__); 6188 return buf; 6189 } 6190 6191 buf = (uint8_t *)malloc(pkg_size); 6192 if (!buf) { 6193 close(fd); 6194 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6195 return buf; 6196 } 6197 6198 ret = read(fd, buf, pkg_size); 6199 if (ret < 0) { 6200 close(fd); 6201 fprintf(stderr, "%s: File read operation failed\n", __func__); 6202 close_file(buf); 6203 return NULL; 6204 } 6205 6206 if (size) 6207 *size = pkg_size; 6208 6209 close(fd); 6210 6211 return buf; 6212 } 6213 6214 int 6215 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6216 { 6217 FILE *fh = fopen(file_path, "wb"); 6218 6219 if (fh == NULL) { 6220 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6221 return -1; 6222 } 6223 6224 if (fwrite(buf, 1, size, fh) != size) { 6225 fclose(fh); 6226 fprintf(stderr, "%s: File write operation failed\n", __func__); 6227 return -1; 6228 } 6229 6230 fclose(fh); 6231 6232 return 0; 6233 } 6234 6235 int 6236 close_file(uint8_t *buf) 6237 { 6238 if (buf) { 6239 free((void *)buf); 6240 return 0; 6241 } 6242 6243 return -1; 6244 } 6245 6246 void 6247 port_queue_region_info_display(portid_t port_id, void *buf) 6248 { 6249 #ifdef RTE_NET_I40E 6250 uint16_t i, j; 6251 struct rte_pmd_i40e_queue_regions *info = 6252 (struct rte_pmd_i40e_queue_regions *)buf; 6253 static const char *queue_region_info_stats_border = "-------"; 6254 6255 if (!info->queue_region_number) 6256 printf("there is no region has been set before"); 6257 6258 printf("\n %s All queue region info for port=%2d %s", 6259 queue_region_info_stats_border, port_id, 6260 queue_region_info_stats_border); 6261 printf("\n queue_region_number: %-14u \n", 6262 info->queue_region_number); 6263 6264 for (i = 0; i < info->queue_region_number; i++) { 6265 printf("\n region_id: %-14u queue_number: %-14u " 6266 "queue_start_index: %-14u \n", 6267 info->region[i].region_id, 6268 info->region[i].queue_num, 6269 info->region[i].queue_start_index); 6270 6271 printf(" user_priority_num is %-14u :", 6272 info->region[i].user_priority_num); 6273 for (j = 0; j < info->region[i].user_priority_num; j++) 6274 printf(" %-14u ", info->region[i].user_priority[j]); 6275 6276 printf("\n flowtype_num is %-14u :", 6277 info->region[i].flowtype_num); 6278 for (j = 0; j < info->region[i].flowtype_num; j++) 6279 printf(" %-14u ", info->region[i].hw_flowtype[j]); 6280 } 6281 #else 6282 RTE_SET_USED(port_id); 6283 RTE_SET_USED(buf); 6284 #endif 6285 6286 printf("\n\n"); 6287 } 6288 6289 void 6290 show_macs(portid_t port_id) 6291 { 6292 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6293 struct rte_eth_dev_info dev_info; 6294 int32_t i, rc, num_macs = 0; 6295 6296 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6297 return; 6298 6299 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6300 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6301 if (rc < 0) 6302 return; 6303 6304 for (i = 0; i < rc; i++) { 6305 6306 /* skip zero address */ 6307 if (rte_is_zero_ether_addr(&addr[i])) 6308 continue; 6309 6310 num_macs++; 6311 } 6312 6313 printf("Number of MAC address added: %d\n", num_macs); 6314 6315 for (i = 0; i < rc; i++) { 6316 6317 /* skip zero address */ 6318 if (rte_is_zero_ether_addr(&addr[i])) 6319 continue; 6320 6321 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6322 printf(" %s\n", buf); 6323 } 6324 } 6325 6326 void 6327 show_mcast_macs(portid_t port_id) 6328 { 6329 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6330 struct rte_ether_addr *addr; 6331 struct rte_port *port; 6332 uint32_t i; 6333 6334 port = &ports[port_id]; 6335 6336 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6337 6338 for (i = 0; i < port->mc_addr_nb; i++) { 6339 addr = &port->mc_addr_pool[i]; 6340 6341 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6342 printf(" %s\n", buf); 6343 } 6344 } 6345