1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <ctype.h> 7 #include <stdarg.h> 8 #include <errno.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <stdint.h> 13 #include <inttypes.h> 14 15 #include <sys/queue.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <fcntl.h> 19 #include <unistd.h> 20 21 #include <rte_common.h> 22 #include <rte_byteorder.h> 23 #include <rte_debug.h> 24 #include <rte_log.h> 25 #include <rte_memory.h> 26 #include <rte_memcpy.h> 27 #include <rte_memzone.h> 28 #include <rte_launch.h> 29 #include <rte_bus.h> 30 #include <rte_eal.h> 31 #include <rte_per_lcore.h> 32 #include <rte_lcore.h> 33 #include <rte_branch_prediction.h> 34 #include <rte_mempool.h> 35 #include <rte_mbuf.h> 36 #include <rte_interrupts.h> 37 #include <rte_ether.h> 38 #include <rte_ethdev.h> 39 #include <rte_string_fns.h> 40 #include <rte_cycles.h> 41 #include <rte_flow.h> 42 #include <rte_mtr.h> 43 #include <rte_errno.h> 44 #ifdef RTE_NET_IXGBE 45 #include <rte_pmd_ixgbe.h> 46 #endif 47 #ifdef RTE_NET_I40E 48 #include <rte_pmd_i40e.h> 49 #endif 50 #ifdef RTE_NET_BNXT 51 #include <rte_pmd_bnxt.h> 52 #endif 53 #ifdef RTE_LIB_GRO 54 #include <rte_gro.h> 55 #endif 56 #include <rte_hexdump.h> 57 58 #include "testpmd.h" 59 #include "cmdline_mtr.h" 60 61 #define ETHDEV_FWVERS_LEN 32 62 63 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 65 #else 66 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 67 #endif 68 69 #define NS_PER_SEC 1E9 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 /* Group types */ 91 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 92 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 93 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 94 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 95 { "none", 0 }, 96 { "ip", RTE_ETH_RSS_IP }, 97 { "udp", RTE_ETH_RSS_UDP }, 98 { "tcp", RTE_ETH_RSS_TCP }, 99 { "sctp", RTE_ETH_RSS_SCTP }, 100 { "tunnel", RTE_ETH_RSS_TUNNEL }, 101 { "vlan", RTE_ETH_RSS_VLAN }, 102 103 /* Individual type */ 104 { "ipv4", RTE_ETH_RSS_IPV4 }, 105 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 106 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 107 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 108 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 109 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 110 { "ipv6", RTE_ETH_RSS_IPV6 }, 111 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 112 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 113 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 114 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 115 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 116 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 117 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 118 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 119 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 120 { "port", RTE_ETH_RSS_PORT }, 121 { "vxlan", RTE_ETH_RSS_VXLAN }, 122 { "geneve", RTE_ETH_RSS_GENEVE }, 123 { "nvgre", RTE_ETH_RSS_NVGRE }, 124 { "gtpu", RTE_ETH_RSS_GTPU }, 125 { "eth", RTE_ETH_RSS_ETH }, 126 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 127 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 128 { "esp", RTE_ETH_RSS_ESP }, 129 { "ah", RTE_ETH_RSS_AH }, 130 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 131 { "pfcp", RTE_ETH_RSS_PFCP }, 132 { "pppoe", RTE_ETH_RSS_PPPOE }, 133 { "ecpri", RTE_ETH_RSS_ECPRI }, 134 { "mpls", RTE_ETH_RSS_MPLS }, 135 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 136 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 137 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 138 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 139 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 140 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 141 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 142 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 143 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 144 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 145 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 146 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 147 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 148 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 149 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 150 { NULL, 0}, 151 }; 152 153 static const struct { 154 enum rte_eth_fec_mode mode; 155 const char *name; 156 } fec_mode_name[] = { 157 { 158 .mode = RTE_ETH_FEC_NOFEC, 159 .name = "off", 160 }, 161 { 162 .mode = RTE_ETH_FEC_AUTO, 163 .name = "auto", 164 }, 165 { 166 .mode = RTE_ETH_FEC_BASER, 167 .name = "baser", 168 }, 169 { 170 .mode = RTE_ETH_FEC_RS, 171 .name = "rs", 172 }, 173 { 174 .mode = RTE_ETH_FEC_LLRS, 175 .name = "llrs", 176 }, 177 }; 178 179 static const struct { 180 char str[32]; 181 uint16_t ftype; 182 } flowtype_str_table[] = { 183 {"raw", RTE_ETH_FLOW_RAW}, 184 {"ipv4", RTE_ETH_FLOW_IPV4}, 185 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 186 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 187 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 188 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 189 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 190 {"ipv6", RTE_ETH_FLOW_IPV6}, 191 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 192 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 193 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 194 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 195 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 196 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 197 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 198 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 199 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 200 {"port", RTE_ETH_FLOW_PORT}, 201 {"vxlan", RTE_ETH_FLOW_VXLAN}, 202 {"geneve", RTE_ETH_FLOW_GENEVE}, 203 {"nvgre", RTE_ETH_FLOW_NVGRE}, 204 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 205 {"gtpu", RTE_ETH_FLOW_GTPU}, 206 }; 207 208 static void 209 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 210 { 211 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 212 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 213 printf("%s%s", name, buf); 214 } 215 216 static void 217 nic_xstats_display_periodic(portid_t port_id) 218 { 219 struct xstat_display_info *xstats_info; 220 uint64_t *prev_values, *curr_values; 221 uint64_t diff_value, value_rate; 222 struct timespec cur_time; 223 uint64_t *ids_supp; 224 size_t ids_supp_sz; 225 uint64_t diff_ns; 226 unsigned int i; 227 int rc; 228 229 xstats_info = &ports[port_id].xstats_info; 230 231 ids_supp_sz = xstats_info->ids_supp_sz; 232 if (ids_supp_sz == 0) 233 return; 234 235 printf("\n"); 236 237 ids_supp = xstats_info->ids_supp; 238 prev_values = xstats_info->prev_values; 239 curr_values = xstats_info->curr_values; 240 241 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 242 ids_supp_sz); 243 if (rc != (int)ids_supp_sz) { 244 fprintf(stderr, 245 "Failed to get values of %zu xstats for port %u - return code %d\n", 246 ids_supp_sz, port_id, rc); 247 return; 248 } 249 250 diff_ns = 0; 251 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 252 uint64_t ns; 253 254 ns = cur_time.tv_sec * NS_PER_SEC; 255 ns += cur_time.tv_nsec; 256 257 if (xstats_info->prev_ns != 0) 258 diff_ns = ns - xstats_info->prev_ns; 259 xstats_info->prev_ns = ns; 260 } 261 262 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 263 for (i = 0; i < ids_supp_sz; i++) { 264 diff_value = (curr_values[i] > prev_values[i]) ? 265 (curr_values[i] - prev_values[i]) : 0; 266 prev_values[i] = curr_values[i]; 267 value_rate = diff_ns > 0 ? 268 (double)diff_value / diff_ns * NS_PER_SEC : 0; 269 270 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 271 xstats_display[i].name, curr_values[i], value_rate); 272 } 273 } 274 275 void 276 nic_stats_display(portid_t port_id) 277 { 278 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 279 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 280 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 281 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 282 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 283 struct timespec cur_time; 284 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 285 diff_ns; 286 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 287 struct rte_eth_stats stats; 288 static const char *nic_stats_border = "########################"; 289 int ret; 290 291 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 292 print_valid_ports(); 293 return; 294 } 295 ret = rte_eth_stats_get(port_id, &stats); 296 if (ret != 0) { 297 fprintf(stderr, 298 "%s: Error: failed to get stats (port %u): %d", 299 __func__, port_id, ret); 300 return; 301 } 302 printf("\n %s NIC statistics for port %-2d %s\n", 303 nic_stats_border, port_id, nic_stats_border); 304 305 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 306 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 307 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 308 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 309 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 310 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 311 312 diff_ns = 0; 313 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 314 uint64_t ns; 315 316 ns = cur_time.tv_sec * NS_PER_SEC; 317 ns += cur_time.tv_nsec; 318 319 if (prev_ns[port_id] != 0) 320 diff_ns = ns - prev_ns[port_id]; 321 prev_ns[port_id] = ns; 322 } 323 324 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 325 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 326 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 327 (stats.opackets - prev_pkts_tx[port_id]) : 0; 328 prev_pkts_rx[port_id] = stats.ipackets; 329 prev_pkts_tx[port_id] = stats.opackets; 330 mpps_rx = diff_ns > 0 ? 331 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 332 mpps_tx = diff_ns > 0 ? 333 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 334 335 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 336 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 337 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 338 (stats.obytes - prev_bytes_tx[port_id]) : 0; 339 prev_bytes_rx[port_id] = stats.ibytes; 340 prev_bytes_tx[port_id] = stats.obytes; 341 mbps_rx = diff_ns > 0 ? 342 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 343 mbps_tx = diff_ns > 0 ? 344 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 345 346 printf("\n Throughput (since last show)\n"); 347 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 348 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 349 mpps_tx, mbps_tx * 8); 350 351 if (xstats_display_num > 0) 352 nic_xstats_display_periodic(port_id); 353 354 printf(" %s############################%s\n", 355 nic_stats_border, nic_stats_border); 356 } 357 358 void 359 nic_stats_clear(portid_t port_id) 360 { 361 int ret; 362 363 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 364 print_valid_ports(); 365 return; 366 } 367 368 ret = rte_eth_stats_reset(port_id); 369 if (ret != 0) { 370 fprintf(stderr, 371 "%s: Error: failed to reset stats (port %u): %s", 372 __func__, port_id, strerror(-ret)); 373 return; 374 } 375 376 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 377 if (ret != 0) { 378 if (ret < 0) 379 ret = -ret; 380 fprintf(stderr, 381 "%s: Error: failed to get stats (port %u): %s", 382 __func__, port_id, strerror(ret)); 383 return; 384 } 385 printf("\n NIC statistics for port %d cleared\n", port_id); 386 } 387 388 void 389 nic_xstats_display(portid_t port_id) 390 { 391 struct rte_eth_xstat *xstats; 392 int cnt_xstats, idx_xstat; 393 struct rte_eth_xstat_name *xstats_names; 394 395 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 396 print_valid_ports(); 397 return; 398 } 399 printf("###### NIC extended statistics for port %-2d\n", port_id); 400 if (!rte_eth_dev_is_valid_port(port_id)) { 401 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 402 return; 403 } 404 405 /* Get count */ 406 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 407 if (cnt_xstats < 0) { 408 fprintf(stderr, "Error: Cannot get count of xstats\n"); 409 return; 410 } 411 412 /* Get id-name lookup table */ 413 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 414 if (xstats_names == NULL) { 415 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 416 return; 417 } 418 if (cnt_xstats != rte_eth_xstats_get_names( 419 port_id, xstats_names, cnt_xstats)) { 420 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 421 free(xstats_names); 422 return; 423 } 424 425 /* Get stats themselves */ 426 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 427 if (xstats == NULL) { 428 fprintf(stderr, "Cannot allocate memory for xstats\n"); 429 free(xstats_names); 430 return; 431 } 432 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 433 fprintf(stderr, "Error: Unable to get xstats\n"); 434 free(xstats_names); 435 free(xstats); 436 return; 437 } 438 439 /* Display xstats */ 440 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 441 if (xstats_hide_zero && !xstats[idx_xstat].value) 442 continue; 443 printf("%s: %"PRIu64"\n", 444 xstats_names[idx_xstat].name, 445 xstats[idx_xstat].value); 446 } 447 free(xstats_names); 448 free(xstats); 449 } 450 451 void 452 nic_xstats_clear(portid_t port_id) 453 { 454 int ret; 455 456 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 457 print_valid_ports(); 458 return; 459 } 460 461 ret = rte_eth_xstats_reset(port_id); 462 if (ret != 0) { 463 fprintf(stderr, 464 "%s: Error: failed to reset xstats (port %u): %s\n", 465 __func__, port_id, strerror(-ret)); 466 return; 467 } 468 469 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 470 if (ret != 0) { 471 if (ret < 0) 472 ret = -ret; 473 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 474 __func__, port_id, strerror(ret)); 475 return; 476 } 477 } 478 479 static const char * 480 get_queue_state_name(uint8_t queue_state) 481 { 482 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 483 return "stopped"; 484 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 485 return "started"; 486 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 487 return "hairpin"; 488 else 489 return "unknown"; 490 } 491 492 void 493 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 494 { 495 struct rte_eth_burst_mode mode; 496 struct rte_eth_rxq_info qinfo; 497 int32_t rc; 498 static const char *info_border = "*********************"; 499 500 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 501 if (rc != 0) { 502 fprintf(stderr, 503 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 504 port_id, queue_id, strerror(-rc), rc); 505 return; 506 } 507 508 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 509 info_border, port_id, queue_id, info_border); 510 511 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 512 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 513 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 514 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 515 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 516 printf("\nRX drop packets: %s", 517 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 518 printf("\nRX deferred start: %s", 519 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 520 printf("\nRX scattered packets: %s", 521 (qinfo.scattered_rx != 0) ? "on" : "off"); 522 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 523 if (qinfo.rx_buf_size != 0) 524 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 525 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 526 527 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 528 printf("\nBurst mode: %s%s", 529 mode.info, 530 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 531 " (per queue)" : ""); 532 533 printf("\n"); 534 } 535 536 void 537 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 538 { 539 struct rte_eth_burst_mode mode; 540 struct rte_eth_txq_info qinfo; 541 int32_t rc; 542 static const char *info_border = "*********************"; 543 544 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 545 if (rc != 0) { 546 fprintf(stderr, 547 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 548 port_id, queue_id, strerror(-rc), rc); 549 return; 550 } 551 552 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 553 info_border, port_id, queue_id, info_border); 554 555 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 556 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 557 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 558 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 559 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 560 printf("\nTX deferred start: %s", 561 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 562 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 563 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 564 565 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 566 printf("\nBurst mode: %s%s", 567 mode.info, 568 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 569 " (per queue)" : ""); 570 571 printf("\n"); 572 } 573 574 static int bus_match_all(const struct rte_bus *bus, const void *data) 575 { 576 RTE_SET_USED(bus); 577 RTE_SET_USED(data); 578 return 0; 579 } 580 581 static void 582 device_infos_display_speeds(uint32_t speed_capa) 583 { 584 printf("\n\tDevice speed capability:"); 585 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 586 printf(" Autonegotiate (all speeds)"); 587 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 588 printf(" Disable autonegotiate (fixed speed) "); 589 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 590 printf(" 10 Mbps half-duplex "); 591 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 592 printf(" 10 Mbps full-duplex "); 593 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 594 printf(" 100 Mbps half-duplex "); 595 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 596 printf(" 100 Mbps full-duplex "); 597 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 598 printf(" 1 Gbps "); 599 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 600 printf(" 2.5 Gbps "); 601 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 602 printf(" 5 Gbps "); 603 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 604 printf(" 10 Gbps "); 605 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 606 printf(" 20 Gbps "); 607 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 608 printf(" 25 Gbps "); 609 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 610 printf(" 40 Gbps "); 611 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 612 printf(" 50 Gbps "); 613 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 614 printf(" 56 Gbps "); 615 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 616 printf(" 100 Gbps "); 617 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 618 printf(" 200 Gbps "); 619 if (speed_capa & RTE_ETH_LINK_SPEED_400G) 620 printf(" 400 Gbps "); 621 } 622 623 void 624 device_infos_display(const char *identifier) 625 { 626 static const char *info_border = "*********************"; 627 struct rte_bus *start = NULL, *next; 628 struct rte_dev_iterator dev_iter; 629 char name[RTE_ETH_NAME_MAX_LEN]; 630 struct rte_ether_addr mac_addr; 631 struct rte_device *dev; 632 struct rte_devargs da; 633 portid_t port_id; 634 struct rte_eth_dev_info dev_info; 635 char devstr[128]; 636 637 memset(&da, 0, sizeof(da)); 638 if (!identifier) 639 goto skip_parse; 640 641 if (rte_devargs_parsef(&da, "%s", identifier)) { 642 fprintf(stderr, "cannot parse identifier\n"); 643 return; 644 } 645 646 skip_parse: 647 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 648 649 start = next; 650 if (identifier && da.bus != next) 651 continue; 652 653 snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next)); 654 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 655 656 if (rte_dev_driver(dev) == NULL) 657 continue; 658 /* Check for matching device if identifier is present */ 659 if (identifier && 660 strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev)))) 661 continue; 662 printf("\n%s Infos for device %s %s\n", 663 info_border, rte_dev_name(dev), info_border); 664 printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev))); 665 printf("\nBus information: %s", 666 rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : ""); 667 printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev))); 668 printf("\nDevargs: %s", 669 rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : ""); 670 printf("\nConnect to socket: %d", rte_dev_numa_node(dev)); 671 printf("\n"); 672 673 /* List ports with matching device name */ 674 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 675 printf("\n\tPort id: %-2d", port_id); 676 if (eth_macaddr_get_print_err(port_id, 677 &mac_addr) == 0) 678 print_ethaddr("\n\tMAC address: ", 679 &mac_addr); 680 rte_eth_dev_get_name_by_port(port_id, name); 681 printf("\n\tDevice name: %s", name); 682 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 683 device_infos_display_speeds(dev_info.speed_capa); 684 printf("\n"); 685 } 686 } 687 }; 688 rte_devargs_reset(&da); 689 } 690 691 static void 692 print_dev_capabilities(uint64_t capabilities) 693 { 694 uint64_t single_capa; 695 int begin; 696 int end; 697 int bit; 698 699 if (capabilities == 0) 700 return; 701 702 begin = rte_ctz64(capabilities); 703 end = sizeof(capabilities) * CHAR_BIT - rte_clz64(capabilities); 704 705 single_capa = 1ULL << begin; 706 for (bit = begin; bit < end; bit++) { 707 if (capabilities & single_capa) 708 printf(" %s", 709 rte_eth_dev_capability_name(single_capa)); 710 single_capa <<= 1; 711 } 712 } 713 714 uint64_t 715 str_to_rsstypes(const char *str) 716 { 717 uint16_t i; 718 719 for (i = 0; rss_type_table[i].str != NULL; i++) { 720 if (strcmp(rss_type_table[i].str, str) == 0) 721 return rss_type_table[i].rss_type; 722 } 723 724 return 0; 725 } 726 727 const char * 728 rsstypes_to_str(uint64_t rss_type) 729 { 730 uint16_t i; 731 732 for (i = 0; rss_type_table[i].str != NULL; i++) { 733 if (rss_type_table[i].rss_type == rss_type) 734 return rss_type_table[i].str; 735 } 736 737 return NULL; 738 } 739 740 static void 741 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line) 742 { 743 uint16_t user_defined_str_len; 744 uint16_t total_len = 0; 745 uint16_t str_len = 0; 746 uint64_t rss_offload; 747 uint16_t i; 748 749 for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) { 750 rss_offload = RTE_BIT64(i); 751 if ((offload_types & rss_offload) != 0) { 752 const char *p = rsstypes_to_str(rss_offload); 753 754 user_defined_str_len = 755 strlen("user-defined-") + (i / 10 + 1); 756 str_len = p ? strlen(p) : user_defined_str_len; 757 str_len += 2; /* add two spaces */ 758 if (total_len + str_len >= char_num_per_line) { 759 total_len = 0; 760 printf("\n"); 761 } 762 763 if (p) 764 printf(" %s", p); 765 else 766 printf(" user-defined-%u", i); 767 total_len += str_len; 768 } 769 } 770 printf("\n"); 771 } 772 773 void 774 port_infos_display(portid_t port_id) 775 { 776 struct rte_port *port; 777 struct rte_ether_addr mac_addr; 778 struct rte_eth_link link; 779 struct rte_eth_dev_info dev_info; 780 int vlan_offload; 781 struct rte_mempool * mp; 782 static const char *info_border = "*********************"; 783 uint16_t mtu; 784 char name[RTE_ETH_NAME_MAX_LEN]; 785 int ret; 786 char fw_version[ETHDEV_FWVERS_LEN]; 787 788 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 789 print_valid_ports(); 790 return; 791 } 792 port = &ports[port_id]; 793 ret = eth_link_get_nowait_print_err(port_id, &link); 794 if (ret < 0) 795 return; 796 797 ret = eth_dev_info_get_print_err(port_id, &dev_info); 798 if (ret != 0) 799 return; 800 801 printf("\n%s Infos for port %-2d %s\n", 802 info_border, port_id, info_border); 803 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 804 print_ethaddr("MAC address: ", &mac_addr); 805 rte_eth_dev_get_name_by_port(port_id, name); 806 printf("\nDevice name: %s", name); 807 printf("\nDriver name: %s", dev_info.driver_name); 808 809 if (rte_eth_dev_fw_version_get(port_id, fw_version, 810 ETHDEV_FWVERS_LEN) == 0) 811 printf("\nFirmware-version: %s", fw_version); 812 else 813 printf("\nFirmware-version: %s", "not available"); 814 815 if (rte_dev_devargs(dev_info.device) && rte_dev_devargs(dev_info.device)->args) 816 printf("\nDevargs: %s", rte_dev_devargs(dev_info.device)->args); 817 printf("\nConnect to socket: %u", port->socket_id); 818 819 if (port_numa[port_id] != NUMA_NO_CONFIG) { 820 mp = mbuf_pool_find(port_numa[port_id], 0); 821 if (mp) 822 printf("\nmemory allocation on the socket: %d", 823 port_numa[port_id]); 824 } else 825 printf("\nmemory allocation on the socket: %u",port->socket_id); 826 827 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 828 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 829 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 830 ("full-duplex") : ("half-duplex")); 831 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 832 ("On") : ("Off")); 833 834 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 835 printf("MTU: %u\n", mtu); 836 837 printf("Promiscuous mode: %s\n", 838 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 839 printf("Allmulticast mode: %s\n", 840 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 841 printf("Maximum number of MAC addresses: %u\n", 842 (unsigned int)(port->dev_info.max_mac_addrs)); 843 printf("Maximum number of MAC addresses of hash filtering: %u\n", 844 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 845 846 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 847 if (vlan_offload >= 0){ 848 printf("VLAN offload: \n"); 849 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 850 printf(" strip on, "); 851 else 852 printf(" strip off, "); 853 854 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 855 printf("filter on, "); 856 else 857 printf("filter off, "); 858 859 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 860 printf("extend on, "); 861 else 862 printf("extend off, "); 863 864 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 865 printf("qinq strip on\n"); 866 else 867 printf("qinq strip off\n"); 868 } 869 870 if (dev_info.hash_key_size > 0) 871 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 872 if (dev_info.reta_size > 0) 873 printf("Redirection table size: %u\n", dev_info.reta_size); 874 if (!dev_info.flow_type_rss_offloads) 875 printf("No RSS offload flow type is supported.\n"); 876 else { 877 printf("Supported RSS offload flow types:\n"); 878 rss_offload_types_display(dev_info.flow_type_rss_offloads, 879 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 880 } 881 882 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 883 printf("Maximum configurable length of RX packet: %u\n", 884 dev_info.max_rx_pktlen); 885 printf("Maximum configurable size of LRO aggregated packet: %u\n", 886 dev_info.max_lro_pkt_size); 887 if (dev_info.max_vfs) 888 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 889 if (dev_info.max_vmdq_pools) 890 printf("Maximum number of VMDq pools: %u\n", 891 dev_info.max_vmdq_pools); 892 893 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 894 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 895 printf("Max possible number of RXDs per queue: %hu\n", 896 dev_info.rx_desc_lim.nb_max); 897 printf("Min possible number of RXDs per queue: %hu\n", 898 dev_info.rx_desc_lim.nb_min); 899 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 900 901 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 902 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 903 printf("Max possible number of TXDs per queue: %hu\n", 904 dev_info.tx_desc_lim.nb_max); 905 printf("Min possible number of TXDs per queue: %hu\n", 906 dev_info.tx_desc_lim.nb_min); 907 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 908 printf("Max segment number per packet: %hu\n", 909 dev_info.tx_desc_lim.nb_seg_max); 910 printf("Max segment number per MTU/TSO: %hu\n", 911 dev_info.tx_desc_lim.nb_mtu_seg_max); 912 913 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 914 print_dev_capabilities(dev_info.dev_capa); 915 printf(" )\n"); 916 /* Show switch info only if valid switch domain and port id is set */ 917 if (dev_info.switch_info.domain_id != 918 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 919 if (dev_info.switch_info.name) 920 printf("Switch name: %s\n", dev_info.switch_info.name); 921 922 printf("Switch domain Id: %u\n", 923 dev_info.switch_info.domain_id); 924 printf("Switch Port Id: %u\n", 925 dev_info.switch_info.port_id); 926 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 927 printf("Switch Rx domain: %u\n", 928 dev_info.switch_info.rx_domain); 929 } 930 printf("Device error handling mode: "); 931 switch (dev_info.err_handle_mode) { 932 case RTE_ETH_ERROR_HANDLE_MODE_NONE: 933 printf("none\n"); 934 break; 935 case RTE_ETH_ERROR_HANDLE_MODE_PASSIVE: 936 printf("passive\n"); 937 break; 938 case RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE: 939 printf("proactive\n"); 940 break; 941 default: 942 printf("unknown\n"); 943 break; 944 } 945 printf("Device private info:\n"); 946 ret = rte_eth_dev_priv_dump(port_id, stdout); 947 if (ret == -ENOTSUP) 948 printf(" none\n"); 949 else if (ret < 0) 950 fprintf(stderr, " Failed to dump private info with error (%d): %s\n", 951 ret, strerror(-ret)); 952 } 953 954 void 955 port_summary_header_display(void) 956 { 957 uint16_t port_number; 958 959 port_number = rte_eth_dev_count_avail(); 960 printf("Number of available ports: %i\n", port_number); 961 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 962 "Driver", "Status", "Link"); 963 } 964 965 void 966 port_summary_display(portid_t port_id) 967 { 968 struct rte_ether_addr mac_addr; 969 struct rte_eth_link link; 970 struct rte_eth_dev_info dev_info; 971 char name[RTE_ETH_NAME_MAX_LEN]; 972 int ret; 973 974 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 975 print_valid_ports(); 976 return; 977 } 978 979 ret = eth_link_get_nowait_print_err(port_id, &link); 980 if (ret < 0) 981 return; 982 983 ret = eth_dev_info_get_print_err(port_id, &dev_info); 984 if (ret != 0) 985 return; 986 987 rte_eth_dev_get_name_by_port(port_id, name); 988 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 989 if (ret != 0) 990 return; 991 992 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 993 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 994 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 995 rte_eth_link_speed_to_str(link.link_speed)); 996 } 997 998 void 999 port_eeprom_display(portid_t port_id) 1000 { 1001 struct rte_dev_eeprom_info einfo; 1002 int ret; 1003 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1004 print_valid_ports(); 1005 return; 1006 } 1007 1008 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 1009 if (len_eeprom < 0) { 1010 switch (len_eeprom) { 1011 case -ENODEV: 1012 fprintf(stderr, "port index %d invalid\n", port_id); 1013 break; 1014 case -ENOTSUP: 1015 fprintf(stderr, "operation not supported by device\n"); 1016 break; 1017 case -EIO: 1018 fprintf(stderr, "device is removed\n"); 1019 break; 1020 default: 1021 fprintf(stderr, "Unable to get EEPROM: %d\n", 1022 len_eeprom); 1023 break; 1024 } 1025 return; 1026 } 1027 1028 einfo.offset = 0; 1029 einfo.length = len_eeprom; 1030 einfo.data = calloc(1, len_eeprom); 1031 if (!einfo.data) { 1032 fprintf(stderr, 1033 "Allocation of port %u eeprom data failed\n", 1034 port_id); 1035 return; 1036 } 1037 1038 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1039 if (ret != 0) { 1040 switch (ret) { 1041 case -ENODEV: 1042 fprintf(stderr, "port index %d invalid\n", port_id); 1043 break; 1044 case -ENOTSUP: 1045 fprintf(stderr, "operation not supported by device\n"); 1046 break; 1047 case -EIO: 1048 fprintf(stderr, "device is removed\n"); 1049 break; 1050 default: 1051 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1052 break; 1053 } 1054 free(einfo.data); 1055 return; 1056 } 1057 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1058 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1059 free(einfo.data); 1060 } 1061 1062 void 1063 port_module_eeprom_display(portid_t port_id) 1064 { 1065 struct rte_eth_dev_module_info minfo; 1066 struct rte_dev_eeprom_info einfo; 1067 int ret; 1068 1069 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1070 print_valid_ports(); 1071 return; 1072 } 1073 1074 1075 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1076 if (ret != 0) { 1077 switch (ret) { 1078 case -ENODEV: 1079 fprintf(stderr, "port index %d invalid\n", port_id); 1080 break; 1081 case -ENOTSUP: 1082 fprintf(stderr, "operation not supported by device\n"); 1083 break; 1084 case -EIO: 1085 fprintf(stderr, "device is removed\n"); 1086 break; 1087 default: 1088 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1089 ret); 1090 break; 1091 } 1092 return; 1093 } 1094 1095 einfo.offset = 0; 1096 einfo.length = minfo.eeprom_len; 1097 einfo.data = calloc(1, minfo.eeprom_len); 1098 if (!einfo.data) { 1099 fprintf(stderr, 1100 "Allocation of port %u eeprom data failed\n", 1101 port_id); 1102 return; 1103 } 1104 1105 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1106 if (ret != 0) { 1107 switch (ret) { 1108 case -ENODEV: 1109 fprintf(stderr, "port index %d invalid\n", port_id); 1110 break; 1111 case -ENOTSUP: 1112 fprintf(stderr, "operation not supported by device\n"); 1113 break; 1114 case -EIO: 1115 fprintf(stderr, "device is removed\n"); 1116 break; 1117 default: 1118 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1119 ret); 1120 break; 1121 } 1122 free(einfo.data); 1123 return; 1124 } 1125 1126 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1127 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1128 free(einfo.data); 1129 } 1130 1131 int 1132 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1133 { 1134 uint16_t pid; 1135 1136 if (port_id == (portid_t)RTE_PORT_ALL) 1137 return 0; 1138 1139 RTE_ETH_FOREACH_DEV(pid) 1140 if (port_id == pid) 1141 return 0; 1142 1143 if (warning == ENABLED_WARN) 1144 fprintf(stderr, "Invalid port %d\n", port_id); 1145 1146 return 1; 1147 } 1148 1149 void print_valid_ports(void) 1150 { 1151 portid_t pid; 1152 1153 printf("The valid ports array is ["); 1154 RTE_ETH_FOREACH_DEV(pid) { 1155 printf(" %d", pid); 1156 } 1157 printf(" ]\n"); 1158 } 1159 1160 static int 1161 vlan_id_is_invalid(uint16_t vlan_id) 1162 { 1163 if (vlan_id < 4096) 1164 return 0; 1165 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1166 return 1; 1167 } 1168 1169 static uint32_t 1170 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1171 { 1172 uint32_t overhead_len; 1173 1174 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1175 overhead_len = max_rx_pktlen - max_mtu; 1176 else 1177 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1178 1179 return overhead_len; 1180 } 1181 1182 static int 1183 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1184 { 1185 struct rte_eth_dev_info dev_info; 1186 uint32_t overhead_len; 1187 uint32_t frame_size; 1188 int ret; 1189 1190 ret = rte_eth_dev_info_get(port_id, &dev_info); 1191 if (ret != 0) 1192 return ret; 1193 1194 if (mtu < dev_info.min_mtu) { 1195 fprintf(stderr, 1196 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1197 mtu, dev_info.min_mtu, port_id); 1198 return -EINVAL; 1199 } 1200 if (mtu > dev_info.max_mtu) { 1201 fprintf(stderr, 1202 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1203 mtu, dev_info.max_mtu, port_id); 1204 return -EINVAL; 1205 } 1206 1207 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1208 dev_info.max_mtu); 1209 frame_size = mtu + overhead_len; 1210 if (frame_size > dev_info.max_rx_pktlen) { 1211 fprintf(stderr, 1212 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1213 frame_size, dev_info.max_rx_pktlen, port_id); 1214 return -EINVAL; 1215 } 1216 1217 return 0; 1218 } 1219 1220 void 1221 port_mtu_set(portid_t port_id, uint16_t mtu) 1222 { 1223 struct rte_port *port = &ports[port_id]; 1224 int diag; 1225 1226 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1227 return; 1228 1229 diag = eth_dev_validate_mtu(port_id, mtu); 1230 if (diag != 0) 1231 return; 1232 1233 if (port->need_reconfig == 0) { 1234 diag = rte_eth_dev_set_mtu(port_id, mtu); 1235 if (diag != 0) { 1236 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1237 return; 1238 } 1239 } 1240 1241 port->dev_conf.rxmode.mtu = mtu; 1242 } 1243 1244 /* Generic flow management functions. */ 1245 1246 static struct port_flow_tunnel * 1247 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1248 { 1249 struct port_flow_tunnel *flow_tunnel; 1250 1251 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1252 if (flow_tunnel->id == port_tunnel_id) 1253 goto out; 1254 } 1255 flow_tunnel = NULL; 1256 1257 out: 1258 return flow_tunnel; 1259 } 1260 1261 const char * 1262 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1263 { 1264 const char *type; 1265 switch (tunnel->type) { 1266 default: 1267 type = "unknown"; 1268 break; 1269 case RTE_FLOW_ITEM_TYPE_VXLAN: 1270 type = "vxlan"; 1271 break; 1272 case RTE_FLOW_ITEM_TYPE_GRE: 1273 type = "gre"; 1274 break; 1275 case RTE_FLOW_ITEM_TYPE_NVGRE: 1276 type = "nvgre"; 1277 break; 1278 case RTE_FLOW_ITEM_TYPE_GENEVE: 1279 type = "geneve"; 1280 break; 1281 } 1282 1283 return type; 1284 } 1285 1286 struct port_flow_tunnel * 1287 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1288 { 1289 struct rte_port *port = &ports[port_id]; 1290 struct port_flow_tunnel *flow_tunnel; 1291 1292 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1293 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1294 goto out; 1295 } 1296 flow_tunnel = NULL; 1297 1298 out: 1299 return flow_tunnel; 1300 } 1301 1302 void port_flow_tunnel_list(portid_t port_id) 1303 { 1304 struct rte_port *port = &ports[port_id]; 1305 struct port_flow_tunnel *flt; 1306 1307 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1308 printf("port %u tunnel #%u type=%s", 1309 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1310 if (flt->tunnel.tun_id) 1311 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1312 printf("\n"); 1313 } 1314 } 1315 1316 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1317 { 1318 struct rte_port *port = &ports[port_id]; 1319 struct port_flow_tunnel *flt; 1320 1321 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1322 if (flt->id == tunnel_id) 1323 break; 1324 } 1325 if (flt) { 1326 LIST_REMOVE(flt, chain); 1327 free(flt); 1328 printf("port %u: flow tunnel #%u destroyed\n", 1329 port_id, tunnel_id); 1330 } 1331 } 1332 1333 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1334 { 1335 struct rte_port *port = &ports[port_id]; 1336 enum rte_flow_item_type type; 1337 struct port_flow_tunnel *flt; 1338 1339 if (!strcmp(ops->type, "vxlan")) 1340 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1341 else if (!strcmp(ops->type, "gre")) 1342 type = RTE_FLOW_ITEM_TYPE_GRE; 1343 else if (!strcmp(ops->type, "nvgre")) 1344 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1345 else if (!strcmp(ops->type, "geneve")) 1346 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1347 else { 1348 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1349 ops->type); 1350 return; 1351 } 1352 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1353 if (flt->tunnel.type == type) 1354 break; 1355 } 1356 if (!flt) { 1357 flt = calloc(1, sizeof(*flt)); 1358 if (!flt) { 1359 fprintf(stderr, "failed to allocate port flt object\n"); 1360 return; 1361 } 1362 flt->tunnel.type = type; 1363 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1364 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1365 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1366 } 1367 printf("port %d: flow tunnel #%u type %s\n", 1368 port_id, flt->id, ops->type); 1369 } 1370 1371 /** Generate a port_flow entry from attributes/pattern/actions. */ 1372 static struct port_flow * 1373 port_flow_new(const struct rte_flow_attr *attr, 1374 const struct rte_flow_item *pattern, 1375 const struct rte_flow_action *actions, 1376 struct rte_flow_error *error) 1377 { 1378 const struct rte_flow_conv_rule rule = { 1379 .attr_ro = attr, 1380 .pattern_ro = pattern, 1381 .actions_ro = actions, 1382 }; 1383 struct port_flow *pf; 1384 int ret; 1385 1386 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1387 if (ret < 0) 1388 return NULL; 1389 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1390 if (!pf) { 1391 rte_flow_error_set 1392 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1393 "calloc() failed"); 1394 return NULL; 1395 } 1396 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1397 error) >= 0) 1398 return pf; 1399 free(pf); 1400 return NULL; 1401 } 1402 1403 /** Print a message out of a flow error. */ 1404 static int 1405 port_flow_complain(struct rte_flow_error *error) 1406 { 1407 static const char *const errstrlist[] = { 1408 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1409 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1410 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1411 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1412 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1413 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1414 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1415 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1416 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1417 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1418 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1419 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1420 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1421 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1422 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1423 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1424 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1425 }; 1426 const char *errstr; 1427 char buf[32]; 1428 int err = rte_errno; 1429 1430 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1431 !errstrlist[error->type]) 1432 errstr = "unknown type"; 1433 else 1434 errstr = errstrlist[error->type]; 1435 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1436 __func__, error->type, errstr, 1437 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1438 error->cause), buf) : "", 1439 error->message ? error->message : "(no stated reason)", 1440 rte_strerror(err)); 1441 1442 switch (error->type) { 1443 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1444 fprintf(stderr, "The status suggests the use of \"transfer\" " 1445 "as the possible cause of the failure. Make " 1446 "sure that the flow in question and its " 1447 "indirect components (if any) are managed " 1448 "via \"transfer\" proxy port. Use command " 1449 "\"show port (port_id) flow transfer proxy\" " 1450 "to figure out the proxy port ID\n"); 1451 break; 1452 default: 1453 break; 1454 } 1455 1456 return -err; 1457 } 1458 1459 static void 1460 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line) 1461 { 1462 uint16_t total_len = 0; 1463 uint16_t str_len; 1464 uint16_t i; 1465 1466 if (rss_types == 0) 1467 return; 1468 1469 for (i = 0; rss_type_table[i].str; i++) { 1470 if (rss_type_table[i].rss_type == 0) 1471 continue; 1472 1473 if ((rss_types & rss_type_table[i].rss_type) == 1474 rss_type_table[i].rss_type) { 1475 /* Contain two spaces */ 1476 str_len = strlen(rss_type_table[i].str) + 2; 1477 if (total_len + str_len > char_num_per_line) { 1478 printf("\n"); 1479 total_len = 0; 1480 } 1481 printf(" %s", rss_type_table[i].str); 1482 total_len += str_len; 1483 } 1484 } 1485 printf("\n"); 1486 } 1487 1488 static void 1489 rss_config_display(struct rte_flow_action_rss *rss_conf) 1490 { 1491 uint8_t i; 1492 1493 if (rss_conf == NULL) { 1494 fprintf(stderr, "Invalid rule\n"); 1495 return; 1496 } 1497 1498 printf("RSS:\n" 1499 " queues:"); 1500 if (rss_conf->queue_num == 0) 1501 printf(" none"); 1502 for (i = 0; i < rss_conf->queue_num; i++) 1503 printf(" %d", rss_conf->queue[i]); 1504 printf("\n"); 1505 1506 printf(" function: "); 1507 switch (rss_conf->func) { 1508 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1509 printf("default\n"); 1510 break; 1511 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1512 printf("toeplitz\n"); 1513 break; 1514 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1515 printf("simple_xor\n"); 1516 break; 1517 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1518 printf("symmetric_toeplitz\n"); 1519 break; 1520 default: 1521 printf("Unknown function\n"); 1522 return; 1523 } 1524 1525 printf(" RSS key:\n"); 1526 if (rss_conf->key_len == 0) { 1527 printf(" none"); 1528 } else { 1529 printf(" key_len: %u\n", rss_conf->key_len); 1530 printf(" key: "); 1531 if (rss_conf->key == NULL) { 1532 printf("none"); 1533 } else { 1534 for (i = 0; i < rss_conf->key_len; i++) 1535 printf("%02X", rss_conf->key[i]); 1536 } 1537 } 1538 printf("\n"); 1539 1540 printf(" types:\n"); 1541 if (rss_conf->types == 0) { 1542 printf(" none\n"); 1543 return; 1544 } 1545 rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 1546 } 1547 1548 static struct port_indirect_action * 1549 action_get_by_id(portid_t port_id, uint32_t id) 1550 { 1551 struct rte_port *port; 1552 struct port_indirect_action **ppia; 1553 struct port_indirect_action *pia = NULL; 1554 1555 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1556 port_id == (portid_t)RTE_PORT_ALL) 1557 return NULL; 1558 port = &ports[port_id]; 1559 ppia = &port->actions_list; 1560 while (*ppia) { 1561 if ((*ppia)->id == id) { 1562 pia = *ppia; 1563 break; 1564 } 1565 ppia = &(*ppia)->next; 1566 } 1567 if (!pia) 1568 fprintf(stderr, 1569 "Failed to find indirect action #%u on port %u\n", 1570 id, port_id); 1571 return pia; 1572 } 1573 1574 static int 1575 action_alloc(portid_t port_id, uint32_t id, 1576 struct port_indirect_action **action) 1577 { 1578 struct rte_port *port; 1579 struct port_indirect_action **ppia; 1580 struct port_indirect_action *pia = NULL; 1581 1582 *action = NULL; 1583 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1584 port_id == (portid_t)RTE_PORT_ALL) 1585 return -EINVAL; 1586 port = &ports[port_id]; 1587 if (id == UINT32_MAX) { 1588 /* taking first available ID */ 1589 if (port->actions_list) { 1590 if (port->actions_list->id == UINT32_MAX - 1) { 1591 fprintf(stderr, 1592 "Highest indirect action ID is already assigned, delete it first\n"); 1593 return -ENOMEM; 1594 } 1595 id = port->actions_list->id + 1; 1596 } else { 1597 id = 0; 1598 } 1599 } 1600 pia = calloc(1, sizeof(*pia)); 1601 if (!pia) { 1602 fprintf(stderr, 1603 "Allocation of port %u indirect action failed\n", 1604 port_id); 1605 return -ENOMEM; 1606 } 1607 ppia = &port->actions_list; 1608 while (*ppia && (*ppia)->id > id) 1609 ppia = &(*ppia)->next; 1610 if (*ppia && (*ppia)->id == id) { 1611 fprintf(stderr, 1612 "Indirect action #%u is already assigned, delete it first\n", 1613 id); 1614 free(pia); 1615 return -EINVAL; 1616 } 1617 pia->next = *ppia; 1618 pia->id = id; 1619 *ppia = pia; 1620 *action = pia; 1621 return 0; 1622 } 1623 1624 static int 1625 template_alloc(uint32_t id, struct port_template **template, 1626 struct port_template **list) 1627 { 1628 struct port_template *lst = *list; 1629 struct port_template **ppt; 1630 struct port_template *pt = NULL; 1631 1632 *template = NULL; 1633 if (id == UINT32_MAX) { 1634 /* taking first available ID */ 1635 if (lst) { 1636 if (lst->id == UINT32_MAX - 1) { 1637 printf("Highest template ID is already" 1638 " assigned, delete it first\n"); 1639 return -ENOMEM; 1640 } 1641 id = lst->id + 1; 1642 } else { 1643 id = 0; 1644 } 1645 } 1646 pt = calloc(1, sizeof(*pt)); 1647 if (!pt) { 1648 printf("Allocation of port template failed\n"); 1649 return -ENOMEM; 1650 } 1651 ppt = list; 1652 while (*ppt && (*ppt)->id > id) 1653 ppt = &(*ppt)->next; 1654 if (*ppt && (*ppt)->id == id) { 1655 printf("Template #%u is already assigned," 1656 " delete it first\n", id); 1657 free(pt); 1658 return -EINVAL; 1659 } 1660 pt->next = *ppt; 1661 pt->id = id; 1662 *ppt = pt; 1663 *template = pt; 1664 return 0; 1665 } 1666 1667 static int 1668 table_alloc(uint32_t id, struct port_table **table, 1669 struct port_table **list) 1670 { 1671 struct port_table *lst = *list; 1672 struct port_table **ppt; 1673 struct port_table *pt = NULL; 1674 1675 *table = NULL; 1676 if (id == UINT32_MAX) { 1677 /* taking first available ID */ 1678 if (lst) { 1679 if (lst->id == UINT32_MAX - 1) { 1680 printf("Highest table ID is already" 1681 " assigned, delete it first\n"); 1682 return -ENOMEM; 1683 } 1684 id = lst->id + 1; 1685 } else { 1686 id = 0; 1687 } 1688 } 1689 pt = calloc(1, sizeof(*pt)); 1690 if (!pt) { 1691 printf("Allocation of table failed\n"); 1692 return -ENOMEM; 1693 } 1694 ppt = list; 1695 while (*ppt && (*ppt)->id > id) 1696 ppt = &(*ppt)->next; 1697 if (*ppt && (*ppt)->id == id) { 1698 printf("Table #%u is already assigned," 1699 " delete it first\n", id); 1700 free(pt); 1701 return -EINVAL; 1702 } 1703 pt->next = *ppt; 1704 pt->id = id; 1705 *ppt = pt; 1706 *table = pt; 1707 return 0; 1708 } 1709 1710 /** Get info about flow management resources. */ 1711 int 1712 port_flow_get_info(portid_t port_id) 1713 { 1714 struct rte_flow_port_info port_info; 1715 struct rte_flow_queue_info queue_info; 1716 struct rte_flow_error error; 1717 1718 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1719 port_id == (portid_t)RTE_PORT_ALL) 1720 return -EINVAL; 1721 /* Poisoning to make sure PMDs update it in case of error. */ 1722 memset(&error, 0x99, sizeof(error)); 1723 memset(&port_info, 0, sizeof(port_info)); 1724 memset(&queue_info, 0, sizeof(queue_info)); 1725 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1726 return port_flow_complain(&error); 1727 printf("Flow engine resources on port %u:\n" 1728 "Number of queues: %d\n" 1729 "Size of queues: %d\n" 1730 "Number of counters: %d\n" 1731 "Number of aging objects: %d\n" 1732 "Number of meter actions: %d\n", 1733 port_id, port_info.max_nb_queues, 1734 queue_info.max_size, 1735 port_info.max_nb_counters, 1736 port_info.max_nb_aging_objects, 1737 port_info.max_nb_meters); 1738 return 0; 1739 } 1740 1741 /** Configure flow management resources. */ 1742 int 1743 port_flow_configure(portid_t port_id, 1744 const struct rte_flow_port_attr *port_attr, 1745 uint16_t nb_queue, 1746 const struct rte_flow_queue_attr *queue_attr) 1747 { 1748 struct rte_port *port; 1749 struct rte_flow_error error; 1750 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1751 int std_queue; 1752 1753 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1754 port_id == (portid_t)RTE_PORT_ALL) 1755 return -EINVAL; 1756 port = &ports[port_id]; 1757 port->queue_nb = nb_queue; 1758 port->queue_sz = queue_attr->size; 1759 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1760 attr_list[std_queue] = queue_attr; 1761 /* Poisoning to make sure PMDs update it in case of error. */ 1762 memset(&error, 0x66, sizeof(error)); 1763 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1764 return port_flow_complain(&error); 1765 printf("Configure flows on port %u: " 1766 "number of queues %d with %d elements\n", 1767 port_id, nb_queue, queue_attr->size); 1768 return 0; 1769 } 1770 1771 static int 1772 action_handle_create(portid_t port_id, 1773 struct port_indirect_action *pia, 1774 const struct rte_flow_indir_action_conf *conf, 1775 const struct rte_flow_action *action, 1776 struct rte_flow_error *error) 1777 { 1778 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1779 struct rte_flow_action_age *age = 1780 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1781 1782 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1783 age->context = &pia->age_type; 1784 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1785 struct rte_flow_action_conntrack *ct = 1786 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1787 1788 memcpy(ct, &conntrack_context, sizeof(*ct)); 1789 } 1790 pia->type = action->type; 1791 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1792 error); 1793 return pia->handle ? 0 : -1; 1794 } 1795 1796 static int 1797 action_list_handle_create(portid_t port_id, 1798 struct port_indirect_action *pia, 1799 const struct rte_flow_indir_action_conf *conf, 1800 const struct rte_flow_action *actions, 1801 struct rte_flow_error *error) 1802 { 1803 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 1804 pia->list_handle = 1805 rte_flow_action_list_handle_create(port_id, conf, 1806 actions, error); 1807 return pia->list_handle ? 0 : -1; 1808 } 1809 /** Create indirect action */ 1810 int 1811 port_action_handle_create(portid_t port_id, uint32_t id, bool indirect_list, 1812 const struct rte_flow_indir_action_conf *conf, 1813 const struct rte_flow_action *action) 1814 { 1815 struct port_indirect_action *pia; 1816 int ret; 1817 struct rte_flow_error error; 1818 1819 ret = action_alloc(port_id, id, &pia); 1820 if (ret) 1821 return ret; 1822 /* Poisoning to make sure PMDs update it in case of error. */ 1823 memset(&error, 0x22, sizeof(error)); 1824 ret = indirect_list ? 1825 action_list_handle_create(port_id, pia, conf, action, &error) : 1826 action_handle_create(port_id, pia, conf, action, &error); 1827 if (ret) { 1828 uint32_t destroy_id = pia->id; 1829 port_action_handle_destroy(port_id, 1, &destroy_id); 1830 return port_flow_complain(&error); 1831 } 1832 printf("Indirect action #%u created\n", pia->id); 1833 return 0; 1834 } 1835 1836 /** Destroy indirect action */ 1837 int 1838 port_action_handle_destroy(portid_t port_id, 1839 uint32_t n, 1840 const uint32_t *actions) 1841 { 1842 struct rte_port *port; 1843 struct port_indirect_action **tmp; 1844 int ret = 0; 1845 1846 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1847 port_id == (portid_t)RTE_PORT_ALL) 1848 return -EINVAL; 1849 port = &ports[port_id]; 1850 tmp = &port->actions_list; 1851 while (*tmp) { 1852 uint32_t i; 1853 1854 for (i = 0; i != n; ++i) { 1855 struct rte_flow_error error; 1856 struct port_indirect_action *pia = *tmp; 1857 1858 if (actions[i] != pia->id) 1859 continue; 1860 /* 1861 * Poisoning to make sure PMDs update it in case 1862 * of error. 1863 */ 1864 memset(&error, 0x33, sizeof(error)); 1865 1866 if (pia->handle) { 1867 ret = pia->type == 1868 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1869 rte_flow_action_list_handle_destroy 1870 (port_id, pia->list_handle, &error) : 1871 rte_flow_action_handle_destroy 1872 (port_id, pia->handle, &error); 1873 if (ret) { 1874 ret = port_flow_complain(&error); 1875 continue; 1876 } 1877 } 1878 *tmp = pia->next; 1879 printf("Indirect action #%u destroyed\n", pia->id); 1880 free(pia); 1881 break; 1882 } 1883 if (i == n) 1884 tmp = &(*tmp)->next; 1885 } 1886 return ret; 1887 } 1888 1889 int 1890 port_action_handle_flush(portid_t port_id) 1891 { 1892 struct rte_port *port; 1893 struct port_indirect_action **tmp; 1894 int ret = 0; 1895 1896 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1897 port_id == (portid_t)RTE_PORT_ALL) 1898 return -EINVAL; 1899 port = &ports[port_id]; 1900 tmp = &port->actions_list; 1901 while (*tmp != NULL) { 1902 struct rte_flow_error error; 1903 struct port_indirect_action *pia = *tmp; 1904 1905 /* Poisoning to make sure PMDs update it in case of error. */ 1906 memset(&error, 0x44, sizeof(error)); 1907 if (pia->handle != NULL) { 1908 ret = pia->type == 1909 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1910 rte_flow_action_list_handle_destroy 1911 (port_id, pia->list_handle, &error) : 1912 rte_flow_action_handle_destroy 1913 (port_id, pia->handle, &error); 1914 if (ret) { 1915 printf("Indirect action #%u not destroyed\n", 1916 pia->id); 1917 ret = port_flow_complain(&error); 1918 } 1919 tmp = &pia->next; 1920 } else { 1921 *tmp = pia->next; 1922 free(pia); 1923 } 1924 } 1925 return ret; 1926 } 1927 1928 /** Get indirect action by port + id */ 1929 struct rte_flow_action_handle * 1930 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1931 { 1932 1933 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1934 1935 return (pia) ? pia->handle : NULL; 1936 } 1937 1938 /** Update indirect action */ 1939 int 1940 port_action_handle_update(portid_t port_id, uint32_t id, 1941 const struct rte_flow_action *action) 1942 { 1943 struct rte_flow_error error; 1944 struct rte_flow_action_handle *action_handle; 1945 struct port_indirect_action *pia; 1946 struct rte_flow_update_meter_mark mtr_update; 1947 const void *update; 1948 1949 action_handle = port_action_handle_get_by_id(port_id, id); 1950 if (!action_handle) 1951 return -EINVAL; 1952 pia = action_get_by_id(port_id, id); 1953 if (!pia) 1954 return -EINVAL; 1955 switch (pia->type) { 1956 case RTE_FLOW_ACTION_TYPE_AGE: 1957 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1958 update = action->conf; 1959 break; 1960 case RTE_FLOW_ACTION_TYPE_METER_MARK: 1961 memcpy(&mtr_update.meter_mark, action->conf, 1962 sizeof(struct rte_flow_action_meter_mark)); 1963 if (mtr_update.meter_mark.profile) 1964 mtr_update.profile_valid = 1; 1965 if (mtr_update.meter_mark.policy) 1966 mtr_update.policy_valid = 1; 1967 mtr_update.color_mode_valid = 1; 1968 mtr_update.state_valid = 1; 1969 update = &mtr_update; 1970 break; 1971 default: 1972 update = action; 1973 break; 1974 } 1975 if (rte_flow_action_handle_update(port_id, action_handle, update, 1976 &error)) { 1977 return port_flow_complain(&error); 1978 } 1979 printf("Indirect action #%u updated\n", id); 1980 return 0; 1981 } 1982 1983 static void 1984 port_action_handle_query_dump(portid_t port_id, 1985 const struct port_indirect_action *pia, 1986 union port_action_query *query) 1987 { 1988 if (!pia || !query) 1989 return; 1990 switch (pia->type) { 1991 case RTE_FLOW_ACTION_TYPE_AGE: 1992 printf("Indirect AGE action:\n" 1993 " aged: %u\n" 1994 " sec_since_last_hit_valid: %u\n" 1995 " sec_since_last_hit: %" PRIu32 "\n", 1996 query->age.aged, 1997 query->age.sec_since_last_hit_valid, 1998 query->age.sec_since_last_hit); 1999 break; 2000 case RTE_FLOW_ACTION_TYPE_COUNT: 2001 printf("Indirect COUNT action:\n" 2002 " hits_set: %u\n" 2003 " bytes_set: %u\n" 2004 " hits: %" PRIu64 "\n" 2005 " bytes: %" PRIu64 "\n", 2006 query->count.hits_set, 2007 query->count.bytes_set, 2008 query->count.hits, 2009 query->count.bytes); 2010 break; 2011 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 2012 printf("Conntrack Context:\n" 2013 " Peer: %u, Flow dir: %s, Enable: %u\n" 2014 " Live: %u, SACK: %u, CACK: %u\n" 2015 " Packet dir: %s, Liberal: %u, State: %u\n" 2016 " Factor: %u, Retrans: %u, TCP flags: %u\n" 2017 " Last Seq: %u, Last ACK: %u\n" 2018 " Last Win: %u, Last End: %u\n", 2019 query->ct.peer_port, 2020 query->ct.is_original_dir ? "Original" : "Reply", 2021 query->ct.enable, query->ct.live_connection, 2022 query->ct.selective_ack, query->ct.challenge_ack_passed, 2023 query->ct.last_direction ? "Original" : "Reply", 2024 query->ct.liberal_mode, query->ct.state, 2025 query->ct.max_ack_window, query->ct.retransmission_limit, 2026 query->ct.last_index, query->ct.last_seq, 2027 query->ct.last_ack, query->ct.last_window, 2028 query->ct.last_end); 2029 printf(" Original Dir:\n" 2030 " scale: %u, fin: %u, ack seen: %u\n" 2031 " unacked data: %u\n Sent end: %u," 2032 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2033 query->ct.original_dir.scale, 2034 query->ct.original_dir.close_initiated, 2035 query->ct.original_dir.last_ack_seen, 2036 query->ct.original_dir.data_unacked, 2037 query->ct.original_dir.sent_end, 2038 query->ct.original_dir.reply_end, 2039 query->ct.original_dir.max_win, 2040 query->ct.original_dir.max_ack); 2041 printf(" Reply Dir:\n" 2042 " scale: %u, fin: %u, ack seen: %u\n" 2043 " unacked data: %u\n Sent end: %u," 2044 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2045 query->ct.reply_dir.scale, 2046 query->ct.reply_dir.close_initiated, 2047 query->ct.reply_dir.last_ack_seen, 2048 query->ct.reply_dir.data_unacked, 2049 query->ct.reply_dir.sent_end, 2050 query->ct.reply_dir.reply_end, 2051 query->ct.reply_dir.max_win, 2052 query->ct.reply_dir.max_ack); 2053 break; 2054 case RTE_FLOW_ACTION_TYPE_QUOTA: 2055 printf("Indirect QUOTA action %u\n" 2056 " unused quota: %" PRId64 "\n", 2057 pia->id, query->quota.quota); 2058 break; 2059 default: 2060 printf("port-%u: indirect action %u (type: %d) doesn't support query\n", 2061 pia->type, pia->id, port_id); 2062 break; 2063 } 2064 2065 } 2066 2067 void 2068 port_action_handle_query_update(portid_t port_id, uint32_t id, 2069 enum rte_flow_query_update_mode qu_mode, 2070 const struct rte_flow_action *action) 2071 { 2072 int ret; 2073 struct rte_flow_error error; 2074 struct port_indirect_action *pia; 2075 union port_action_query query; 2076 2077 pia = action_get_by_id(port_id, id); 2078 if (!pia || !pia->handle) 2079 return; 2080 ret = rte_flow_action_handle_query_update(port_id, pia->handle, action, 2081 &query, qu_mode, &error); 2082 if (ret) 2083 port_flow_complain(&error); 2084 else 2085 port_action_handle_query_dump(port_id, pia, &query); 2086 2087 } 2088 2089 int 2090 port_action_handle_query(portid_t port_id, uint32_t id) 2091 { 2092 struct rte_flow_error error; 2093 struct port_indirect_action *pia; 2094 union port_action_query query; 2095 2096 pia = action_get_by_id(port_id, id); 2097 if (!pia) 2098 return -EINVAL; 2099 switch (pia->type) { 2100 case RTE_FLOW_ACTION_TYPE_AGE: 2101 case RTE_FLOW_ACTION_TYPE_COUNT: 2102 case RTE_FLOW_ACTION_TYPE_QUOTA: 2103 break; 2104 default: 2105 fprintf(stderr, 2106 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2107 id, pia->type, port_id); 2108 return -ENOTSUP; 2109 } 2110 /* Poisoning to make sure PMDs update it in case of error. */ 2111 memset(&error, 0x55, sizeof(error)); 2112 memset(&query, 0, sizeof(query)); 2113 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 2114 return port_flow_complain(&error); 2115 port_action_handle_query_dump(port_id, pia, &query); 2116 return 0; 2117 } 2118 2119 static struct port_flow_tunnel * 2120 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 2121 const struct rte_flow_item *pattern, 2122 const struct rte_flow_action *actions, 2123 const struct tunnel_ops *tunnel_ops) 2124 { 2125 int ret; 2126 struct rte_port *port; 2127 struct port_flow_tunnel *pft; 2128 struct rte_flow_error error; 2129 2130 port = &ports[port_id]; 2131 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2132 if (!pft) { 2133 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2134 tunnel_ops->id); 2135 return NULL; 2136 } 2137 if (tunnel_ops->actions) { 2138 uint32_t num_actions; 2139 const struct rte_flow_action *aptr; 2140 2141 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2142 &pft->pmd_actions, 2143 &pft->num_pmd_actions, 2144 &error); 2145 if (ret) { 2146 port_flow_complain(&error); 2147 return NULL; 2148 } 2149 for (aptr = actions, num_actions = 1; 2150 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2151 aptr++, num_actions++); 2152 pft->actions = malloc( 2153 (num_actions + pft->num_pmd_actions) * 2154 sizeof(actions[0])); 2155 if (!pft->actions) { 2156 rte_flow_tunnel_action_decap_release( 2157 port_id, pft->actions, 2158 pft->num_pmd_actions, &error); 2159 return NULL; 2160 } 2161 rte_memcpy(pft->actions, pft->pmd_actions, 2162 pft->num_pmd_actions * sizeof(actions[0])); 2163 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2164 num_actions * sizeof(actions[0])); 2165 } 2166 if (tunnel_ops->items) { 2167 uint32_t num_items; 2168 const struct rte_flow_item *iptr; 2169 2170 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2171 &pft->pmd_items, 2172 &pft->num_pmd_items, 2173 &error); 2174 if (ret) { 2175 port_flow_complain(&error); 2176 return NULL; 2177 } 2178 for (iptr = pattern, num_items = 1; 2179 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2180 iptr++, num_items++); 2181 pft->items = malloc((num_items + pft->num_pmd_items) * 2182 sizeof(pattern[0])); 2183 if (!pft->items) { 2184 rte_flow_tunnel_item_release( 2185 port_id, pft->pmd_items, 2186 pft->num_pmd_items, &error); 2187 return NULL; 2188 } 2189 rte_memcpy(pft->items, pft->pmd_items, 2190 pft->num_pmd_items * sizeof(pattern[0])); 2191 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2192 num_items * sizeof(pattern[0])); 2193 } 2194 2195 return pft; 2196 } 2197 2198 static void 2199 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2200 const struct tunnel_ops *tunnel_ops, 2201 struct port_flow_tunnel *pft) 2202 { 2203 struct rte_flow_error error; 2204 2205 if (tunnel_ops->actions) { 2206 free(pft->actions); 2207 rte_flow_tunnel_action_decap_release( 2208 port_id, pft->pmd_actions, 2209 pft->num_pmd_actions, &error); 2210 pft->actions = NULL; 2211 pft->pmd_actions = NULL; 2212 } 2213 if (tunnel_ops->items) { 2214 free(pft->items); 2215 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2216 pft->num_pmd_items, 2217 &error); 2218 pft->items = NULL; 2219 pft->pmd_items = NULL; 2220 } 2221 } 2222 2223 /** Add port meter policy */ 2224 int 2225 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2226 const struct rte_flow_action *actions) 2227 { 2228 struct rte_mtr_error error; 2229 const struct rte_flow_action *act = actions; 2230 const struct rte_flow_action *start; 2231 struct rte_mtr_meter_policy_params policy; 2232 uint32_t i = 0, act_n; 2233 int ret; 2234 2235 for (i = 0; i < RTE_COLORS; i++) { 2236 for (act_n = 0, start = act; 2237 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2238 act_n++; 2239 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2240 policy.actions[i] = start; 2241 else 2242 policy.actions[i] = NULL; 2243 act++; 2244 } 2245 ret = rte_mtr_meter_policy_add(port_id, 2246 policy_id, 2247 &policy, &error); 2248 if (ret) 2249 print_mtr_err_msg(&error); 2250 return ret; 2251 } 2252 2253 struct rte_flow_meter_profile * 2254 port_meter_profile_get_by_id(portid_t port_id, uint32_t id) 2255 { 2256 struct rte_mtr_error error; 2257 struct rte_flow_meter_profile *profile; 2258 2259 profile = rte_mtr_meter_profile_get(port_id, id, &error); 2260 if (!profile) 2261 print_mtr_err_msg(&error); 2262 return profile; 2263 } 2264 struct rte_flow_meter_policy * 2265 port_meter_policy_get_by_id(portid_t port_id, uint32_t id) 2266 { 2267 struct rte_mtr_error error; 2268 struct rte_flow_meter_policy *policy; 2269 2270 policy = rte_mtr_meter_policy_get(port_id, id, &error); 2271 if (!policy) 2272 print_mtr_err_msg(&error); 2273 return policy; 2274 } 2275 2276 /** Validate flow rule. */ 2277 int 2278 port_flow_validate(portid_t port_id, 2279 const struct rte_flow_attr *attr, 2280 const struct rte_flow_item *pattern, 2281 const struct rte_flow_action *actions, 2282 const struct tunnel_ops *tunnel_ops) 2283 { 2284 struct rte_flow_error error; 2285 struct port_flow_tunnel *pft = NULL; 2286 int ret; 2287 2288 /* Poisoning to make sure PMDs update it in case of error. */ 2289 memset(&error, 0x11, sizeof(error)); 2290 if (tunnel_ops->enabled) { 2291 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2292 actions, tunnel_ops); 2293 if (!pft) 2294 return -ENOENT; 2295 if (pft->items) 2296 pattern = pft->items; 2297 if (pft->actions) 2298 actions = pft->actions; 2299 } 2300 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2301 if (tunnel_ops->enabled) 2302 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2303 if (ret) 2304 return port_flow_complain(&error); 2305 printf("Flow rule validated\n"); 2306 return 0; 2307 } 2308 2309 /** Return age action structure if exists, otherwise NULL. */ 2310 static struct rte_flow_action_age * 2311 age_action_get(const struct rte_flow_action *actions) 2312 { 2313 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2314 switch (actions->type) { 2315 case RTE_FLOW_ACTION_TYPE_AGE: 2316 return (struct rte_flow_action_age *) 2317 (uintptr_t)actions->conf; 2318 default: 2319 break; 2320 } 2321 } 2322 return NULL; 2323 } 2324 2325 /** Create pattern template */ 2326 int 2327 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2328 const struct rte_flow_pattern_template_attr *attr, 2329 const struct rte_flow_item *pattern) 2330 { 2331 struct rte_port *port; 2332 struct port_template *pit; 2333 int ret; 2334 struct rte_flow_error error; 2335 2336 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2337 port_id == (portid_t)RTE_PORT_ALL) 2338 return -EINVAL; 2339 port = &ports[port_id]; 2340 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2341 if (ret) 2342 return ret; 2343 /* Poisoning to make sure PMDs update it in case of error. */ 2344 memset(&error, 0x22, sizeof(error)); 2345 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2346 attr, pattern, &error); 2347 if (!pit->template.pattern_template) { 2348 uint32_t destroy_id = pit->id; 2349 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2350 return port_flow_complain(&error); 2351 } 2352 printf("Pattern template #%u created\n", pit->id); 2353 return 0; 2354 } 2355 2356 /** Destroy pattern template */ 2357 int 2358 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2359 const uint32_t *template) 2360 { 2361 struct rte_port *port; 2362 struct port_template **tmp; 2363 int ret = 0; 2364 2365 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2366 port_id == (portid_t)RTE_PORT_ALL) 2367 return -EINVAL; 2368 port = &ports[port_id]; 2369 tmp = &port->pattern_templ_list; 2370 while (*tmp) { 2371 uint32_t i; 2372 2373 for (i = 0; i != n; ++i) { 2374 struct rte_flow_error error; 2375 struct port_template *pit = *tmp; 2376 2377 if (template[i] != pit->id) 2378 continue; 2379 /* 2380 * Poisoning to make sure PMDs update it in case 2381 * of error. 2382 */ 2383 memset(&error, 0x33, sizeof(error)); 2384 2385 if (pit->template.pattern_template && 2386 rte_flow_pattern_template_destroy(port_id, 2387 pit->template.pattern_template, 2388 &error)) { 2389 ret = port_flow_complain(&error); 2390 continue; 2391 } 2392 *tmp = pit->next; 2393 printf("Pattern template #%u destroyed\n", pit->id); 2394 free(pit); 2395 break; 2396 } 2397 if (i == n) 2398 tmp = &(*tmp)->next; 2399 } 2400 return ret; 2401 } 2402 2403 /** Flush pattern template */ 2404 int 2405 port_flow_pattern_template_flush(portid_t port_id) 2406 { 2407 struct rte_port *port; 2408 struct port_template **tmp; 2409 int ret = 0; 2410 2411 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2412 port_id == (portid_t)RTE_PORT_ALL) 2413 return -EINVAL; 2414 port = &ports[port_id]; 2415 tmp = &port->pattern_templ_list; 2416 while (*tmp) { 2417 struct rte_flow_error error; 2418 struct port_template *pit = *tmp; 2419 2420 /* 2421 * Poisoning to make sure PMDs update it in case 2422 * of error. 2423 */ 2424 memset(&error, 0x33, sizeof(error)); 2425 if (pit->template.pattern_template && 2426 rte_flow_pattern_template_destroy(port_id, 2427 pit->template.pattern_template, &error)) { 2428 printf("Pattern template #%u not destroyed\n", pit->id); 2429 ret = port_flow_complain(&error); 2430 tmp = &pit->next; 2431 } else { 2432 *tmp = pit->next; 2433 free(pit); 2434 } 2435 } 2436 return ret; 2437 } 2438 2439 /** Create actions template */ 2440 int 2441 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2442 const struct rte_flow_actions_template_attr *attr, 2443 const struct rte_flow_action *actions, 2444 const struct rte_flow_action *masks) 2445 { 2446 struct rte_port *port; 2447 struct port_template *pat; 2448 int ret; 2449 struct rte_flow_error error; 2450 2451 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2452 port_id == (portid_t)RTE_PORT_ALL) 2453 return -EINVAL; 2454 port = &ports[port_id]; 2455 ret = template_alloc(id, &pat, &port->actions_templ_list); 2456 if (ret) 2457 return ret; 2458 /* Poisoning to make sure PMDs update it in case of error. */ 2459 memset(&error, 0x22, sizeof(error)); 2460 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2461 attr, actions, masks, &error); 2462 if (!pat->template.actions_template) { 2463 uint32_t destroy_id = pat->id; 2464 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2465 return port_flow_complain(&error); 2466 } 2467 printf("Actions template #%u created\n", pat->id); 2468 return 0; 2469 } 2470 2471 /** Destroy actions template */ 2472 int 2473 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2474 const uint32_t *template) 2475 { 2476 struct rte_port *port; 2477 struct port_template **tmp; 2478 int ret = 0; 2479 2480 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2481 port_id == (portid_t)RTE_PORT_ALL) 2482 return -EINVAL; 2483 port = &ports[port_id]; 2484 tmp = &port->actions_templ_list; 2485 while (*tmp) { 2486 uint32_t i; 2487 2488 for (i = 0; i != n; ++i) { 2489 struct rte_flow_error error; 2490 struct port_template *pat = *tmp; 2491 2492 if (template[i] != pat->id) 2493 continue; 2494 /* 2495 * Poisoning to make sure PMDs update it in case 2496 * of error. 2497 */ 2498 memset(&error, 0x33, sizeof(error)); 2499 2500 if (pat->template.actions_template && 2501 rte_flow_actions_template_destroy(port_id, 2502 pat->template.actions_template, &error)) { 2503 ret = port_flow_complain(&error); 2504 continue; 2505 } 2506 *tmp = pat->next; 2507 printf("Actions template #%u destroyed\n", pat->id); 2508 free(pat); 2509 break; 2510 } 2511 if (i == n) 2512 tmp = &(*tmp)->next; 2513 } 2514 return ret; 2515 } 2516 2517 /** Flush actions template */ 2518 int 2519 port_flow_actions_template_flush(portid_t port_id) 2520 { 2521 struct rte_port *port; 2522 struct port_template **tmp; 2523 int ret = 0; 2524 2525 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2526 port_id == (portid_t)RTE_PORT_ALL) 2527 return -EINVAL; 2528 port = &ports[port_id]; 2529 tmp = &port->actions_templ_list; 2530 while (*tmp) { 2531 struct rte_flow_error error; 2532 struct port_template *pat = *tmp; 2533 2534 /* 2535 * Poisoning to make sure PMDs update it in case 2536 * of error. 2537 */ 2538 memset(&error, 0x33, sizeof(error)); 2539 2540 if (pat->template.actions_template && 2541 rte_flow_actions_template_destroy(port_id, 2542 pat->template.actions_template, &error)) { 2543 ret = port_flow_complain(&error); 2544 printf("Actions template #%u not destroyed\n", pat->id); 2545 tmp = &pat->next; 2546 } else { 2547 *tmp = pat->next; 2548 free(pat); 2549 } 2550 } 2551 return ret; 2552 } 2553 2554 /** Create table */ 2555 int 2556 port_flow_template_table_create(portid_t port_id, uint32_t id, 2557 const struct rte_flow_template_table_attr *table_attr, 2558 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2559 uint32_t nb_actions_templates, uint32_t *actions_templates) 2560 { 2561 struct rte_port *port; 2562 struct port_table *pt; 2563 struct port_template *temp = NULL; 2564 int ret; 2565 uint32_t i; 2566 struct rte_flow_error error; 2567 struct rte_flow_pattern_template 2568 *flow_pattern_templates[nb_pattern_templates]; 2569 struct rte_flow_actions_template 2570 *flow_actions_templates[nb_actions_templates]; 2571 2572 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2573 port_id == (portid_t)RTE_PORT_ALL) 2574 return -EINVAL; 2575 port = &ports[port_id]; 2576 for (i = 0; i < nb_pattern_templates; ++i) { 2577 bool found = false; 2578 temp = port->pattern_templ_list; 2579 while (temp) { 2580 if (pattern_templates[i] == temp->id) { 2581 flow_pattern_templates[i] = 2582 temp->template.pattern_template; 2583 found = true; 2584 break; 2585 } 2586 temp = temp->next; 2587 } 2588 if (!found) { 2589 printf("Pattern template #%u is invalid\n", 2590 pattern_templates[i]); 2591 return -EINVAL; 2592 } 2593 } 2594 for (i = 0; i < nb_actions_templates; ++i) { 2595 bool found = false; 2596 temp = port->actions_templ_list; 2597 while (temp) { 2598 if (actions_templates[i] == temp->id) { 2599 flow_actions_templates[i] = 2600 temp->template.actions_template; 2601 found = true; 2602 break; 2603 } 2604 temp = temp->next; 2605 } 2606 if (!found) { 2607 printf("Actions template #%u is invalid\n", 2608 actions_templates[i]); 2609 return -EINVAL; 2610 } 2611 } 2612 ret = table_alloc(id, &pt, &port->table_list); 2613 if (ret) 2614 return ret; 2615 /* Poisoning to make sure PMDs update it in case of error. */ 2616 memset(&error, 0x22, sizeof(error)); 2617 pt->table = rte_flow_template_table_create(port_id, table_attr, 2618 flow_pattern_templates, nb_pattern_templates, 2619 flow_actions_templates, nb_actions_templates, 2620 &error); 2621 2622 if (!pt->table) { 2623 uint32_t destroy_id = pt->id; 2624 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2625 return port_flow_complain(&error); 2626 } 2627 pt->nb_pattern_templates = nb_pattern_templates; 2628 pt->nb_actions_templates = nb_actions_templates; 2629 rte_memcpy(&pt->flow_attr, &table_attr->flow_attr, 2630 sizeof(struct rte_flow_attr)); 2631 printf("Template table #%u created\n", pt->id); 2632 return 0; 2633 } 2634 2635 /** Destroy table */ 2636 int 2637 port_flow_template_table_destroy(portid_t port_id, 2638 uint32_t n, const uint32_t *table) 2639 { 2640 struct rte_port *port; 2641 struct port_table **tmp; 2642 int ret = 0; 2643 2644 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2645 port_id == (portid_t)RTE_PORT_ALL) 2646 return -EINVAL; 2647 port = &ports[port_id]; 2648 tmp = &port->table_list; 2649 while (*tmp) { 2650 uint32_t i; 2651 2652 for (i = 0; i != n; ++i) { 2653 struct rte_flow_error error; 2654 struct port_table *pt = *tmp; 2655 2656 if (table[i] != pt->id) 2657 continue; 2658 /* 2659 * Poisoning to make sure PMDs update it in case 2660 * of error. 2661 */ 2662 memset(&error, 0x33, sizeof(error)); 2663 2664 if (pt->table && 2665 rte_flow_template_table_destroy(port_id, 2666 pt->table, 2667 &error)) { 2668 ret = port_flow_complain(&error); 2669 continue; 2670 } 2671 *tmp = pt->next; 2672 printf("Template table #%u destroyed\n", pt->id); 2673 free(pt); 2674 break; 2675 } 2676 if (i == n) 2677 tmp = &(*tmp)->next; 2678 } 2679 return ret; 2680 } 2681 2682 /** Flush table */ 2683 int 2684 port_flow_template_table_flush(portid_t port_id) 2685 { 2686 struct rte_port *port; 2687 struct port_table **tmp; 2688 int ret = 0; 2689 2690 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2691 port_id == (portid_t)RTE_PORT_ALL) 2692 return -EINVAL; 2693 port = &ports[port_id]; 2694 tmp = &port->table_list; 2695 while (*tmp) { 2696 struct rte_flow_error error; 2697 struct port_table *pt = *tmp; 2698 2699 /* 2700 * Poisoning to make sure PMDs update it in case 2701 * of error. 2702 */ 2703 memset(&error, 0x33, sizeof(error)); 2704 2705 if (pt->table && 2706 rte_flow_template_table_destroy(port_id, 2707 pt->table, 2708 &error)) { 2709 ret = port_flow_complain(&error); 2710 printf("Template table #%u not destroyed\n", pt->id); 2711 tmp = &pt->next; 2712 } else { 2713 *tmp = pt->next; 2714 free(pt); 2715 } 2716 } 2717 return ret; 2718 } 2719 2720 /** Enqueue create flow rule operation. */ 2721 int 2722 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2723 bool postpone, uint32_t table_id, uint32_t rule_idx, 2724 uint32_t pattern_idx, uint32_t actions_idx, 2725 const struct rte_flow_item *pattern, 2726 const struct rte_flow_action *actions) 2727 { 2728 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2729 struct rte_flow *flow; 2730 struct rte_port *port; 2731 struct port_flow *pf; 2732 struct port_table *pt; 2733 uint32_t id = 0; 2734 bool found; 2735 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2736 struct rte_flow_action_age *age = age_action_get(actions); 2737 struct queue_job *job; 2738 2739 port = &ports[port_id]; 2740 if (port->flow_list) { 2741 if (port->flow_list->id == UINT32_MAX) { 2742 printf("Highest rule ID is already assigned," 2743 " delete it first"); 2744 return -ENOMEM; 2745 } 2746 id = port->flow_list->id + 1; 2747 } 2748 2749 if (queue_id >= port->queue_nb) { 2750 printf("Queue #%u is invalid\n", queue_id); 2751 return -EINVAL; 2752 } 2753 2754 found = false; 2755 pt = port->table_list; 2756 while (pt) { 2757 if (table_id == pt->id) { 2758 found = true; 2759 break; 2760 } 2761 pt = pt->next; 2762 } 2763 if (!found) { 2764 printf("Table #%u is invalid\n", table_id); 2765 return -EINVAL; 2766 } 2767 2768 if (pattern_idx >= pt->nb_pattern_templates) { 2769 printf("Pattern template index #%u is invalid," 2770 " %u templates present in the table\n", 2771 pattern_idx, pt->nb_pattern_templates); 2772 return -EINVAL; 2773 } 2774 if (actions_idx >= pt->nb_actions_templates) { 2775 printf("Actions template index #%u is invalid," 2776 " %u templates present in the table\n", 2777 actions_idx, pt->nb_actions_templates); 2778 return -EINVAL; 2779 } 2780 2781 job = calloc(1, sizeof(*job)); 2782 if (!job) { 2783 printf("Queue flow create job allocate failed\n"); 2784 return -ENOMEM; 2785 } 2786 job->type = QUEUE_JOB_TYPE_FLOW_CREATE; 2787 2788 pf = port_flow_new(&pt->flow_attr, pattern, actions, &error); 2789 if (!pf) { 2790 free(job); 2791 return port_flow_complain(&error); 2792 } 2793 if (age) { 2794 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2795 age->context = &pf->age_type; 2796 } 2797 /* Poisoning to make sure PMDs update it in case of error. */ 2798 memset(&error, 0x11, sizeof(error)); 2799 if (rule_idx == UINT32_MAX) 2800 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2801 pattern, pattern_idx, actions, actions_idx, job, &error); 2802 else 2803 flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table, 2804 rule_idx, actions, actions_idx, job, &error); 2805 if (!flow) { 2806 uint64_t flow_id = pf->id; 2807 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2808 free(job); 2809 return port_flow_complain(&error); 2810 } 2811 2812 pf->next = port->flow_list; 2813 pf->id = id; 2814 pf->table = pt; 2815 pf->flow = flow; 2816 job->pf = pf; 2817 port->flow_list = pf; 2818 printf("Flow rule #%"PRIu64" creation enqueued\n", pf->id); 2819 return 0; 2820 } 2821 2822 /** Enqueue number of destroy flow rules operations. */ 2823 int 2824 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2825 bool postpone, uint32_t n, const uint64_t *rule) 2826 { 2827 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2828 struct rte_port *port; 2829 struct port_flow **tmp; 2830 int ret = 0; 2831 struct queue_job *job; 2832 2833 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2834 port_id == (portid_t)RTE_PORT_ALL) 2835 return -EINVAL; 2836 port = &ports[port_id]; 2837 2838 if (queue_id >= port->queue_nb) { 2839 printf("Queue #%u is invalid\n", queue_id); 2840 return -EINVAL; 2841 } 2842 2843 tmp = &port->flow_list; 2844 while (*tmp) { 2845 uint32_t i; 2846 2847 for (i = 0; i != n; ++i) { 2848 struct rte_flow_error error; 2849 struct port_flow *pf = *tmp; 2850 2851 if (rule[i] != pf->id) 2852 continue; 2853 /* 2854 * Poisoning to make sure PMD 2855 * update it in case of error. 2856 */ 2857 memset(&error, 0x33, sizeof(error)); 2858 job = calloc(1, sizeof(*job)); 2859 if (!job) { 2860 printf("Queue flow destroy job allocate failed\n"); 2861 return -ENOMEM; 2862 } 2863 job->type = QUEUE_JOB_TYPE_FLOW_DESTROY; 2864 job->pf = pf; 2865 2866 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2867 pf->flow, job, &error)) { 2868 free(job); 2869 ret = port_flow_complain(&error); 2870 continue; 2871 } 2872 printf("Flow rule #%"PRIu64" destruction enqueued\n", 2873 pf->id); 2874 *tmp = pf->next; 2875 break; 2876 } 2877 if (i == n) 2878 tmp = &(*tmp)->next; 2879 } 2880 return ret; 2881 } 2882 2883 static void 2884 queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2885 struct port_indirect_action *pia, 2886 struct queue_job *job, 2887 const struct rte_flow_op_attr *attr, 2888 const struct rte_flow_indir_action_conf *conf, 2889 const struct rte_flow_action *action, 2890 struct rte_flow_error *error) 2891 { 2892 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2893 struct rte_flow_action_age *age = 2894 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2895 2896 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2897 age->context = &pia->age_type; 2898 } 2899 /* Poisoning to make sure PMDs update it in case of error. */ 2900 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2901 attr, conf, action, 2902 job, error); 2903 pia->type = action->type; 2904 } 2905 2906 static void 2907 queue_action_list_handle_create(portid_t port_id, uint32_t queue_id, 2908 struct port_indirect_action *pia, 2909 struct queue_job *job, 2910 const struct rte_flow_op_attr *attr, 2911 const struct rte_flow_indir_action_conf *conf, 2912 const struct rte_flow_action *action, 2913 struct rte_flow_error *error) 2914 { 2915 /* Poisoning to make sure PMDs update it in case of error. */ 2916 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 2917 pia->list_handle = rte_flow_async_action_list_handle_create 2918 (port_id, queue_id, attr, conf, action, 2919 job, error); 2920 } 2921 2922 /** Enqueue update flow rule operation. */ 2923 int 2924 port_queue_flow_update(portid_t port_id, queueid_t queue_id, 2925 bool postpone, uint32_t rule_idx, uint32_t actions_idx, 2926 const struct rte_flow_action *actions) 2927 { 2928 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2929 struct rte_port *port; 2930 struct port_flow *pf, *uf; 2931 struct port_flow **tmp; 2932 struct port_table *pt; 2933 bool found; 2934 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2935 struct rte_flow_action_age *age = age_action_get(actions); 2936 struct queue_job *job; 2937 2938 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2939 port_id == (portid_t)RTE_PORT_ALL) 2940 return -EINVAL; 2941 port = &ports[port_id]; 2942 2943 if (queue_id >= port->queue_nb) { 2944 printf("Queue #%u is invalid\n", queue_id); 2945 return -EINVAL; 2946 } 2947 2948 found = false; 2949 tmp = &port->flow_list; 2950 while (*tmp) { 2951 pf = *tmp; 2952 if (rule_idx == pf->id) { 2953 found = true; 2954 break; 2955 } 2956 tmp = &(*tmp)->next; 2957 } 2958 if (!found) { 2959 printf("Flow rule #%u is invalid\n", rule_idx); 2960 return -EINVAL; 2961 } 2962 2963 pt = pf->table; 2964 if (actions_idx >= pt->nb_actions_templates) { 2965 printf("Actions template index #%u is invalid," 2966 " %u templates present in the table\n", 2967 actions_idx, pt->nb_actions_templates); 2968 return -EINVAL; 2969 } 2970 2971 job = calloc(1, sizeof(*job)); 2972 if (!job) { 2973 printf("Queue flow create job allocate failed\n"); 2974 return -ENOMEM; 2975 } 2976 job->type = QUEUE_JOB_TYPE_FLOW_UPDATE; 2977 2978 uf = port_flow_new(&pt->flow_attr, pf->rule.pattern_ro, actions, &error); 2979 if (!uf) { 2980 free(job); 2981 return port_flow_complain(&error); 2982 } 2983 2984 if (age) { 2985 uf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2986 age->context = &uf->age_type; 2987 } 2988 2989 /* 2990 * Poisoning to make sure PMD update it in case of error. 2991 */ 2992 memset(&error, 0x44, sizeof(error)); 2993 if (rte_flow_async_actions_update(port_id, queue_id, &op_attr, pf->flow, 2994 actions, actions_idx, job, &error)) { 2995 free(uf); 2996 free(job); 2997 return port_flow_complain(&error); 2998 } 2999 uf->next = pf->next; 3000 uf->id = pf->id; 3001 uf->table = pt; 3002 uf->flow = pf->flow; 3003 *tmp = uf; 3004 job->pf = pf; 3005 3006 printf("Flow rule #%"PRIu64" update enqueued\n", pf->id); 3007 return 0; 3008 } 3009 3010 /** Enqueue indirect action create operation. */ 3011 int 3012 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 3013 bool postpone, uint32_t id, 3014 const struct rte_flow_indir_action_conf *conf, 3015 const struct rte_flow_action *action) 3016 { 3017 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3018 struct rte_port *port; 3019 struct port_indirect_action *pia; 3020 int ret; 3021 struct rte_flow_error error; 3022 struct queue_job *job; 3023 bool is_indirect_list = action[1].type != RTE_FLOW_ACTION_TYPE_END; 3024 3025 3026 ret = action_alloc(port_id, id, &pia); 3027 if (ret) 3028 return ret; 3029 3030 port = &ports[port_id]; 3031 if (queue_id >= port->queue_nb) { 3032 printf("Queue #%u is invalid\n", queue_id); 3033 return -EINVAL; 3034 } 3035 job = calloc(1, sizeof(*job)); 3036 if (!job) { 3037 printf("Queue action create job allocate failed\n"); 3038 return -ENOMEM; 3039 } 3040 job->type = QUEUE_JOB_TYPE_ACTION_CREATE; 3041 job->pia = pia; 3042 3043 /* Poisoning to make sure PMDs update it in case of error. */ 3044 memset(&error, 0x88, sizeof(error)); 3045 3046 if (is_indirect_list) 3047 queue_action_list_handle_create(port_id, queue_id, pia, job, 3048 &attr, conf, action, &error); 3049 else 3050 queue_action_handle_create(port_id, queue_id, pia, job, &attr, 3051 conf, action, &error); 3052 3053 if (!pia->handle) { 3054 uint32_t destroy_id = pia->id; 3055 port_queue_action_handle_destroy(port_id, queue_id, 3056 postpone, 1, &destroy_id); 3057 free(job); 3058 return port_flow_complain(&error); 3059 } 3060 printf("Indirect action #%u creation queued\n", pia->id); 3061 return 0; 3062 } 3063 3064 /** Enqueue indirect action destroy operation. */ 3065 int 3066 port_queue_action_handle_destroy(portid_t port_id, 3067 uint32_t queue_id, bool postpone, 3068 uint32_t n, const uint32_t *actions) 3069 { 3070 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3071 struct rte_port *port; 3072 struct port_indirect_action **tmp; 3073 int ret = 0; 3074 struct queue_job *job; 3075 3076 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3077 port_id == (portid_t)RTE_PORT_ALL) 3078 return -EINVAL; 3079 port = &ports[port_id]; 3080 3081 if (queue_id >= port->queue_nb) { 3082 printf("Queue #%u is invalid\n", queue_id); 3083 return -EINVAL; 3084 } 3085 3086 tmp = &port->actions_list; 3087 while (*tmp) { 3088 uint32_t i; 3089 3090 for (i = 0; i != n; ++i) { 3091 struct rte_flow_error error; 3092 struct port_indirect_action *pia = *tmp; 3093 3094 if (actions[i] != pia->id) 3095 continue; 3096 /* 3097 * Poisoning to make sure PMDs update it in case 3098 * of error. 3099 */ 3100 memset(&error, 0x99, sizeof(error)); 3101 job = calloc(1, sizeof(*job)); 3102 if (!job) { 3103 printf("Queue action destroy job allocate failed\n"); 3104 return -ENOMEM; 3105 } 3106 job->type = QUEUE_JOB_TYPE_ACTION_DESTROY; 3107 job->pia = pia; 3108 ret = pia->type == RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 3109 rte_flow_async_action_list_handle_destroy 3110 (port_id, queue_id, 3111 &attr, pia->list_handle, 3112 job, &error) : 3113 rte_flow_async_action_handle_destroy 3114 (port_id, queue_id, &attr, pia->handle, 3115 job, &error); 3116 if (ret) { 3117 free(job); 3118 ret = port_flow_complain(&error); 3119 continue; 3120 } 3121 *tmp = pia->next; 3122 printf("Indirect action #%u destruction queued\n", 3123 pia->id); 3124 break; 3125 } 3126 if (i == n) 3127 tmp = &(*tmp)->next; 3128 } 3129 return ret; 3130 } 3131 3132 /** Enqueue indirect action update operation. */ 3133 int 3134 port_queue_action_handle_update(portid_t port_id, 3135 uint32_t queue_id, bool postpone, uint32_t id, 3136 const struct rte_flow_action *action) 3137 { 3138 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3139 struct rte_port *port; 3140 struct rte_flow_error error; 3141 struct rte_flow_action_handle *action_handle; 3142 struct queue_job *job; 3143 struct port_indirect_action *pia; 3144 struct rte_flow_update_meter_mark mtr_update; 3145 const void *update; 3146 3147 action_handle = port_action_handle_get_by_id(port_id, id); 3148 if (!action_handle) 3149 return -EINVAL; 3150 3151 port = &ports[port_id]; 3152 if (queue_id >= port->queue_nb) { 3153 printf("Queue #%u is invalid\n", queue_id); 3154 return -EINVAL; 3155 } 3156 3157 job = calloc(1, sizeof(*job)); 3158 if (!job) { 3159 printf("Queue action update job allocate failed\n"); 3160 return -ENOMEM; 3161 } 3162 job->type = QUEUE_JOB_TYPE_ACTION_UPDATE; 3163 3164 pia = action_get_by_id(port_id, id); 3165 if (!pia) { 3166 free(job); 3167 return -EINVAL; 3168 } 3169 3170 switch (pia->type) { 3171 case RTE_FLOW_ACTION_TYPE_AGE: 3172 update = action->conf; 3173 break; 3174 case RTE_FLOW_ACTION_TYPE_METER_MARK: 3175 rte_memcpy(&mtr_update.meter_mark, action->conf, 3176 sizeof(struct rte_flow_action_meter_mark)); 3177 if (mtr_update.meter_mark.profile) 3178 mtr_update.profile_valid = 1; 3179 if (mtr_update.meter_mark.policy) 3180 mtr_update.policy_valid = 1; 3181 mtr_update.color_mode_valid = 1; 3182 mtr_update.init_color_valid = 1; 3183 mtr_update.state_valid = 1; 3184 update = &mtr_update; 3185 break; 3186 default: 3187 update = action; 3188 break; 3189 } 3190 3191 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 3192 action_handle, update, job, &error)) { 3193 free(job); 3194 return port_flow_complain(&error); 3195 } 3196 printf("Indirect action #%u update queued\n", id); 3197 return 0; 3198 } 3199 3200 void 3201 port_queue_action_handle_query_update(portid_t port_id, 3202 uint32_t queue_id, bool postpone, 3203 uint32_t id, 3204 enum rte_flow_query_update_mode qu_mode, 3205 const struct rte_flow_action *action) 3206 { 3207 int ret; 3208 struct rte_flow_error error; 3209 struct port_indirect_action *pia = action_get_by_id(port_id, id); 3210 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3211 struct queue_job *job; 3212 3213 if (!pia || !pia->handle) 3214 return; 3215 job = calloc(1, sizeof(*job)); 3216 if (!job) 3217 return; 3218 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3219 job->pia = pia; 3220 3221 ret = rte_flow_async_action_handle_query_update(port_id, queue_id, 3222 &attr, pia->handle, 3223 action, 3224 &job->query, 3225 qu_mode, job, 3226 &error); 3227 if (ret) { 3228 port_flow_complain(&error); 3229 free(job); 3230 } else { 3231 printf("port-%u: indirect action #%u update-and-query queued\n", 3232 port_id, id); 3233 } 3234 } 3235 3236 /** Enqueue indirect action query operation. */ 3237 int 3238 port_queue_action_handle_query(portid_t port_id, 3239 uint32_t queue_id, bool postpone, uint32_t id) 3240 { 3241 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3242 struct rte_port *port; 3243 struct rte_flow_error error; 3244 struct rte_flow_action_handle *action_handle; 3245 struct port_indirect_action *pia; 3246 struct queue_job *job; 3247 3248 pia = action_get_by_id(port_id, id); 3249 action_handle = pia ? pia->handle : NULL; 3250 if (!action_handle) 3251 return -EINVAL; 3252 3253 port = &ports[port_id]; 3254 if (queue_id >= port->queue_nb) { 3255 printf("Queue #%u is invalid\n", queue_id); 3256 return -EINVAL; 3257 } 3258 3259 job = calloc(1, sizeof(*job)); 3260 if (!job) { 3261 printf("Queue action update job allocate failed\n"); 3262 return -ENOMEM; 3263 } 3264 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3265 job->pia = pia; 3266 3267 if (rte_flow_async_action_handle_query(port_id, queue_id, &attr, 3268 action_handle, &job->query, job, &error)) { 3269 free(job); 3270 return port_flow_complain(&error); 3271 } 3272 printf("Indirect action #%u update queued\n", id); 3273 return 0; 3274 } 3275 3276 /** Push all the queue operations in the queue to the NIC. */ 3277 int 3278 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 3279 { 3280 struct rte_port *port; 3281 struct rte_flow_error error; 3282 int ret = 0; 3283 3284 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3285 port_id == (portid_t)RTE_PORT_ALL) 3286 return -EINVAL; 3287 port = &ports[port_id]; 3288 3289 if (queue_id >= port->queue_nb) { 3290 printf("Queue #%u is invalid\n", queue_id); 3291 return -EINVAL; 3292 } 3293 3294 memset(&error, 0x55, sizeof(error)); 3295 ret = rte_flow_push(port_id, queue_id, &error); 3296 if (ret < 0) { 3297 printf("Failed to push operations in the queue\n"); 3298 return -EINVAL; 3299 } 3300 printf("Queue #%u operations pushed\n", queue_id); 3301 return ret; 3302 } 3303 3304 /** Pull queue operation results from the queue. */ 3305 static int 3306 port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id, 3307 const uint64_t *rule, int nb_flows) 3308 { 3309 struct rte_port *port = &ports[port_id]; 3310 struct rte_flow_op_result *res; 3311 struct rte_flow_error error; 3312 uint32_t n = nb_flows; 3313 int ret = 0; 3314 int i; 3315 3316 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3317 if (!res) { 3318 printf("Failed to allocate memory for pulled results\n"); 3319 return -ENOMEM; 3320 } 3321 3322 memset(&error, 0x66, sizeof(error)); 3323 while (nb_flows > 0) { 3324 int success = 0; 3325 3326 if (n > port->queue_sz) 3327 n = port->queue_sz; 3328 ret = port_queue_flow_destroy(port_id, queue_id, true, n, rule); 3329 if (ret < 0) { 3330 free(res); 3331 return ret; 3332 } 3333 ret = rte_flow_push(port_id, queue_id, &error); 3334 if (ret < 0) { 3335 printf("Failed to push operations in the queue: %s\n", 3336 strerror(-ret)); 3337 free(res); 3338 return ret; 3339 } 3340 while (success < nb_flows) { 3341 ret = rte_flow_pull(port_id, queue_id, res, 3342 port->queue_sz, &error); 3343 if (ret < 0) { 3344 printf("Failed to pull a operation results: %s\n", 3345 strerror(-ret)); 3346 free(res); 3347 return ret; 3348 } 3349 3350 for (i = 0; i < ret; i++) { 3351 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3352 success++; 3353 } 3354 } 3355 rule += n; 3356 nb_flows -= n; 3357 n = nb_flows; 3358 } 3359 3360 free(res); 3361 return ret; 3362 } 3363 3364 /** List simply and destroy all aged flows per queue. */ 3365 void 3366 port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy) 3367 { 3368 void **contexts; 3369 int nb_context, total = 0, idx; 3370 uint64_t *rules = NULL; 3371 struct rte_port *port; 3372 struct rte_flow_error error; 3373 enum age_action_context_type *type; 3374 union { 3375 struct port_flow *pf; 3376 struct port_indirect_action *pia; 3377 } ctx; 3378 3379 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3380 port_id == (portid_t)RTE_PORT_ALL) 3381 return; 3382 port = &ports[port_id]; 3383 if (queue_id >= port->queue_nb) { 3384 printf("Error: queue #%u is invalid\n", queue_id); 3385 return; 3386 } 3387 total = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error); 3388 if (total < 0) { 3389 port_flow_complain(&error); 3390 return; 3391 } 3392 printf("Port %u queue %u total aged flows: %d\n", 3393 port_id, queue_id, total); 3394 if (total == 0) 3395 return; 3396 contexts = calloc(total, sizeof(void *)); 3397 if (contexts == NULL) { 3398 printf("Cannot allocate contexts for aged flow\n"); 3399 return; 3400 } 3401 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3402 nb_context = rte_flow_get_q_aged_flows(port_id, queue_id, contexts, 3403 total, &error); 3404 if (nb_context > total) { 3405 printf("Port %u queue %u get aged flows count(%d) > total(%d)\n", 3406 port_id, queue_id, nb_context, total); 3407 free(contexts); 3408 return; 3409 } 3410 if (destroy) { 3411 rules = malloc(sizeof(uint32_t) * nb_context); 3412 if (rules == NULL) 3413 printf("Cannot allocate memory for destroy aged flow\n"); 3414 } 3415 total = 0; 3416 for (idx = 0; idx < nb_context; idx++) { 3417 if (!contexts[idx]) { 3418 printf("Error: get Null context in port %u queue %u\n", 3419 port_id, queue_id); 3420 continue; 3421 } 3422 type = (enum age_action_context_type *)contexts[idx]; 3423 switch (*type) { 3424 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3425 ctx.pf = container_of(type, struct port_flow, age_type); 3426 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3427 "\t%c%c%c\t\n", 3428 "Flow", 3429 ctx.pf->id, 3430 ctx.pf->rule.attr->group, 3431 ctx.pf->rule.attr->priority, 3432 ctx.pf->rule.attr->ingress ? 'i' : '-', 3433 ctx.pf->rule.attr->egress ? 'e' : '-', 3434 ctx.pf->rule.attr->transfer ? 't' : '-'); 3435 if (rules != NULL) { 3436 rules[total] = ctx.pf->id; 3437 total++; 3438 } 3439 break; 3440 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3441 ctx.pia = container_of(type, 3442 struct port_indirect_action, 3443 age_type); 3444 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3445 ctx.pia->id); 3446 break; 3447 default: 3448 printf("Error: invalid context type %u\n", port_id); 3449 break; 3450 } 3451 } 3452 if (rules != NULL) { 3453 port_queue_aged_flow_destroy(port_id, queue_id, rules, total); 3454 free(rules); 3455 } 3456 printf("\n%d flows destroyed\n", total); 3457 free(contexts); 3458 } 3459 3460 /** Pull queue operation results from the queue. */ 3461 int 3462 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 3463 { 3464 struct rte_port *port; 3465 struct rte_flow_op_result *res; 3466 struct rte_flow_error error; 3467 int ret = 0; 3468 int success = 0; 3469 int i; 3470 struct queue_job *job; 3471 3472 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3473 port_id == (portid_t)RTE_PORT_ALL) 3474 return -EINVAL; 3475 port = &ports[port_id]; 3476 3477 if (queue_id >= port->queue_nb) { 3478 printf("Queue #%u is invalid\n", queue_id); 3479 return -EINVAL; 3480 } 3481 3482 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3483 if (!res) { 3484 printf("Failed to allocate memory for pulled results\n"); 3485 return -ENOMEM; 3486 } 3487 3488 memset(&error, 0x66, sizeof(error)); 3489 ret = rte_flow_pull(port_id, queue_id, res, 3490 port->queue_sz, &error); 3491 if (ret < 0) { 3492 printf("Failed to pull a operation results\n"); 3493 free(res); 3494 return -EINVAL; 3495 } 3496 3497 for (i = 0; i < ret; i++) { 3498 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3499 success++; 3500 job = (struct queue_job *)res[i].user_data; 3501 if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY || 3502 job->type == QUEUE_JOB_TYPE_FLOW_UPDATE) 3503 free(job->pf); 3504 else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY) 3505 free(job->pia); 3506 else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY) 3507 port_action_handle_query_dump(port_id, job->pia, 3508 &job->query); 3509 free(job); 3510 } 3511 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 3512 queue_id, ret, ret - success, success); 3513 free(res); 3514 return ret; 3515 } 3516 3517 /* Set group miss actions */ 3518 int 3519 port_queue_group_set_miss_actions(portid_t port_id, const struct rte_flow_attr *attr, 3520 const struct rte_flow_action *actions) 3521 { 3522 struct rte_flow_group_attr gattr = { 3523 .ingress = attr->ingress, 3524 .egress = attr->egress, 3525 .transfer = attr->transfer, 3526 }; 3527 struct rte_flow_error error; 3528 int ret = 0; 3529 3530 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3531 port_id == (portid_t)RTE_PORT_ALL) 3532 return -EINVAL; 3533 3534 memset(&error, 0x66, sizeof(error)); 3535 ret = rte_flow_group_set_miss_actions(port_id, attr->group, &gattr, actions, &error); 3536 3537 if (ret < 0) 3538 return port_flow_complain(&error); 3539 3540 printf("Group #%u set miss actions succeeded\n", attr->group); 3541 return ret; 3542 } 3543 3544 /** Create flow rule. */ 3545 int 3546 port_flow_create(portid_t port_id, 3547 const struct rte_flow_attr *attr, 3548 const struct rte_flow_item *pattern, 3549 const struct rte_flow_action *actions, 3550 const struct tunnel_ops *tunnel_ops, 3551 uintptr_t user_id) 3552 { 3553 struct rte_flow *flow; 3554 struct rte_port *port; 3555 struct port_flow *pf; 3556 uint32_t id = 0; 3557 struct rte_flow_error error; 3558 struct port_flow_tunnel *pft = NULL; 3559 struct rte_flow_action_age *age = age_action_get(actions); 3560 3561 port = &ports[port_id]; 3562 if (port->flow_list) { 3563 if (port->flow_list->id == UINT32_MAX) { 3564 fprintf(stderr, 3565 "Highest rule ID is already assigned, delete it first"); 3566 return -ENOMEM; 3567 } 3568 id = port->flow_list->id + 1; 3569 } 3570 if (tunnel_ops->enabled) { 3571 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 3572 actions, tunnel_ops); 3573 if (!pft) 3574 return -ENOENT; 3575 if (pft->items) 3576 pattern = pft->items; 3577 if (pft->actions) 3578 actions = pft->actions; 3579 } 3580 pf = port_flow_new(attr, pattern, actions, &error); 3581 if (!pf) 3582 return port_flow_complain(&error); 3583 if (age) { 3584 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 3585 age->context = &pf->age_type; 3586 } 3587 /* Poisoning to make sure PMDs update it in case of error. */ 3588 memset(&error, 0x22, sizeof(error)); 3589 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 3590 if (!flow) { 3591 if (tunnel_ops->enabled) 3592 port_flow_tunnel_offload_cmd_release(port_id, 3593 tunnel_ops, pft); 3594 free(pf); 3595 return port_flow_complain(&error); 3596 } 3597 pf->next = port->flow_list; 3598 pf->id = id; 3599 pf->user_id = user_id; 3600 pf->flow = flow; 3601 port->flow_list = pf; 3602 if (tunnel_ops->enabled) 3603 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 3604 if (user_id) 3605 printf("Flow rule #%"PRIu64" created, user-id 0x%"PRIx64"\n", 3606 pf->id, pf->user_id); 3607 else 3608 printf("Flow rule #%"PRIu64" created\n", pf->id); 3609 return 0; 3610 } 3611 3612 /** Destroy a number of flow rules. */ 3613 int 3614 port_flow_destroy(portid_t port_id, uint32_t n, const uint64_t *rule, 3615 bool is_user_id) 3616 { 3617 struct rte_port *port; 3618 struct port_flow **tmp; 3619 int ret = 0; 3620 3621 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3622 port_id == (portid_t)RTE_PORT_ALL) 3623 return -EINVAL; 3624 port = &ports[port_id]; 3625 tmp = &port->flow_list; 3626 while (*tmp) { 3627 uint32_t i; 3628 3629 for (i = 0; i != n; ++i) { 3630 struct rte_flow_error error; 3631 struct port_flow *pf = *tmp; 3632 3633 if (rule[i] != (is_user_id ? pf->user_id : pf->id)) 3634 continue; 3635 /* 3636 * Poisoning to make sure PMDs update it in case 3637 * of error. 3638 */ 3639 memset(&error, 0x33, sizeof(error)); 3640 if (rte_flow_destroy(port_id, pf->flow, &error)) { 3641 ret = port_flow_complain(&error); 3642 continue; 3643 } 3644 if (is_user_id) 3645 printf("Flow rule #%"PRIu64" destroyed, " 3646 "user-id 0x%"PRIx64"\n", 3647 pf->id, pf->user_id); 3648 else 3649 printf("Flow rule #%"PRIu64" destroyed\n", 3650 pf->id); 3651 *tmp = pf->next; 3652 free(pf); 3653 break; 3654 } 3655 if (i == n) 3656 tmp = &(*tmp)->next; 3657 } 3658 return ret; 3659 } 3660 3661 /** Remove all flow rules. */ 3662 int 3663 port_flow_flush(portid_t port_id) 3664 { 3665 struct rte_flow_error error; 3666 struct rte_port *port; 3667 int ret = 0; 3668 3669 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3670 port_id == (portid_t)RTE_PORT_ALL) 3671 return -EINVAL; 3672 3673 port = &ports[port_id]; 3674 3675 if (port->flow_list == NULL) 3676 return ret; 3677 3678 /* Poisoning to make sure PMDs update it in case of error. */ 3679 memset(&error, 0x44, sizeof(error)); 3680 if (rte_flow_flush(port_id, &error)) { 3681 port_flow_complain(&error); 3682 } 3683 3684 while (port->flow_list) { 3685 struct port_flow *pf = port->flow_list->next; 3686 3687 free(port->flow_list); 3688 port->flow_list = pf; 3689 } 3690 return ret; 3691 } 3692 3693 /** Dump flow rules. */ 3694 int 3695 port_flow_dump(portid_t port_id, bool dump_all, uint64_t rule_id, 3696 const char *file_name, bool is_user_id) 3697 { 3698 int ret = 0; 3699 FILE *file = stdout; 3700 struct rte_flow_error error; 3701 struct rte_port *port; 3702 struct port_flow *pflow; 3703 struct rte_flow *tmpFlow = NULL; 3704 bool found = false; 3705 3706 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3707 port_id == (portid_t)RTE_PORT_ALL) 3708 return -EINVAL; 3709 3710 if (!dump_all) { 3711 port = &ports[port_id]; 3712 pflow = port->flow_list; 3713 while (pflow) { 3714 if (rule_id != 3715 (is_user_id ? pflow->user_id : pflow->id)) { 3716 pflow = pflow->next; 3717 } else { 3718 tmpFlow = pflow->flow; 3719 if (tmpFlow) 3720 found = true; 3721 break; 3722 } 3723 } 3724 if (found == false) { 3725 fprintf(stderr, "Failed to dump to flow %"PRIu64"\n", 3726 rule_id); 3727 return -EINVAL; 3728 } 3729 } 3730 3731 if (file_name && strlen(file_name)) { 3732 file = fopen(file_name, "w"); 3733 if (!file) { 3734 fprintf(stderr, "Failed to create file %s: %s\n", 3735 file_name, strerror(errno)); 3736 return -errno; 3737 } 3738 } 3739 3740 if (!dump_all) 3741 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3742 else 3743 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3744 if (ret) { 3745 port_flow_complain(&error); 3746 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3747 } else 3748 printf("Flow dump finished\n"); 3749 if (file_name && strlen(file_name)) 3750 fclose(file); 3751 return ret; 3752 } 3753 3754 /** Query a flow rule. */ 3755 int 3756 port_flow_query(portid_t port_id, uint64_t rule, 3757 const struct rte_flow_action *action, bool is_user_id) 3758 { 3759 struct rte_flow_error error; 3760 struct rte_port *port; 3761 struct port_flow *pf; 3762 const char *name; 3763 union { 3764 struct rte_flow_query_count count; 3765 struct rte_flow_action_rss rss_conf; 3766 struct rte_flow_query_age age; 3767 } query; 3768 int ret; 3769 3770 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3771 port_id == (portid_t)RTE_PORT_ALL) 3772 return -EINVAL; 3773 port = &ports[port_id]; 3774 for (pf = port->flow_list; pf; pf = pf->next) 3775 if ((is_user_id ? pf->user_id : pf->id) == rule) 3776 break; 3777 if (!pf) { 3778 fprintf(stderr, "Flow rule #%"PRIu64" not found\n", rule); 3779 return -ENOENT; 3780 } 3781 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3782 &name, sizeof(name), 3783 (void *)(uintptr_t)action->type, &error); 3784 if (ret < 0) 3785 return port_flow_complain(&error); 3786 switch (action->type) { 3787 case RTE_FLOW_ACTION_TYPE_COUNT: 3788 case RTE_FLOW_ACTION_TYPE_RSS: 3789 case RTE_FLOW_ACTION_TYPE_AGE: 3790 break; 3791 default: 3792 fprintf(stderr, "Cannot query action type %d (%s)\n", 3793 action->type, name); 3794 return -ENOTSUP; 3795 } 3796 /* Poisoning to make sure PMDs update it in case of error. */ 3797 memset(&error, 0x55, sizeof(error)); 3798 memset(&query, 0, sizeof(query)); 3799 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3800 return port_flow_complain(&error); 3801 switch (action->type) { 3802 case RTE_FLOW_ACTION_TYPE_COUNT: 3803 printf("%s:\n" 3804 " hits_set: %u\n" 3805 " bytes_set: %u\n" 3806 " hits: %" PRIu64 "\n" 3807 " bytes: %" PRIu64 "\n", 3808 name, 3809 query.count.hits_set, 3810 query.count.bytes_set, 3811 query.count.hits, 3812 query.count.bytes); 3813 break; 3814 case RTE_FLOW_ACTION_TYPE_RSS: 3815 rss_config_display(&query.rss_conf); 3816 break; 3817 case RTE_FLOW_ACTION_TYPE_AGE: 3818 printf("%s:\n" 3819 " aged: %u\n" 3820 " sec_since_last_hit_valid: %u\n" 3821 " sec_since_last_hit: %" PRIu32 "\n", 3822 name, 3823 query.age.aged, 3824 query.age.sec_since_last_hit_valid, 3825 query.age.sec_since_last_hit); 3826 break; 3827 default: 3828 fprintf(stderr, 3829 "Cannot display result for action type %d (%s)\n", 3830 action->type, name); 3831 break; 3832 } 3833 return 0; 3834 } 3835 3836 /** List simply and destroy all aged flows. */ 3837 void 3838 port_flow_aged(portid_t port_id, uint8_t destroy) 3839 { 3840 void **contexts; 3841 int nb_context, total = 0, idx; 3842 struct rte_flow_error error; 3843 enum age_action_context_type *type; 3844 union { 3845 struct port_flow *pf; 3846 struct port_indirect_action *pia; 3847 } ctx; 3848 3849 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3850 port_id == (portid_t)RTE_PORT_ALL) 3851 return; 3852 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3853 printf("Port %u total aged flows: %d\n", port_id, total); 3854 if (total < 0) { 3855 port_flow_complain(&error); 3856 return; 3857 } 3858 if (total == 0) 3859 return; 3860 contexts = malloc(sizeof(void *) * total); 3861 if (contexts == NULL) { 3862 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3863 return; 3864 } 3865 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3866 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3867 if (nb_context != total) { 3868 fprintf(stderr, 3869 "Port:%d get aged flows count(%d) != total(%d)\n", 3870 port_id, nb_context, total); 3871 free(contexts); 3872 return; 3873 } 3874 total = 0; 3875 for (idx = 0; idx < nb_context; idx++) { 3876 if (!contexts[idx]) { 3877 fprintf(stderr, "Error: get Null context in port %u\n", 3878 port_id); 3879 continue; 3880 } 3881 type = (enum age_action_context_type *)contexts[idx]; 3882 switch (*type) { 3883 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3884 ctx.pf = container_of(type, struct port_flow, age_type); 3885 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3886 "\t%c%c%c\t\n", 3887 "Flow", 3888 ctx.pf->id, 3889 ctx.pf->rule.attr->group, 3890 ctx.pf->rule.attr->priority, 3891 ctx.pf->rule.attr->ingress ? 'i' : '-', 3892 ctx.pf->rule.attr->egress ? 'e' : '-', 3893 ctx.pf->rule.attr->transfer ? 't' : '-'); 3894 if (destroy && !port_flow_destroy(port_id, 1, 3895 &ctx.pf->id, false)) 3896 total++; 3897 break; 3898 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3899 ctx.pia = container_of(type, 3900 struct port_indirect_action, age_type); 3901 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3902 ctx.pia->id); 3903 break; 3904 default: 3905 fprintf(stderr, "Error: invalid context type %u\n", 3906 port_id); 3907 break; 3908 } 3909 } 3910 printf("\n%d flows destroyed\n", total); 3911 free(contexts); 3912 } 3913 3914 /** List flow rules. */ 3915 void 3916 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3917 { 3918 struct rte_port *port; 3919 struct port_flow *pf; 3920 struct port_flow *list = NULL; 3921 uint32_t i; 3922 3923 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3924 port_id == (portid_t)RTE_PORT_ALL) 3925 return; 3926 port = &ports[port_id]; 3927 if (!port->flow_list) 3928 return; 3929 /* Sort flows by group, priority and ID. */ 3930 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3931 struct port_flow **tmp; 3932 const struct rte_flow_attr *curr = pf->rule.attr; 3933 3934 if (n) { 3935 /* Filter out unwanted groups. */ 3936 for (i = 0; i != n; ++i) 3937 if (curr->group == group[i]) 3938 break; 3939 if (i == n) 3940 continue; 3941 } 3942 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3943 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3944 3945 if (curr->group > comp->group || 3946 (curr->group == comp->group && 3947 curr->priority > comp->priority) || 3948 (curr->group == comp->group && 3949 curr->priority == comp->priority && 3950 pf->id > (*tmp)->id)) 3951 continue; 3952 break; 3953 } 3954 pf->tmp = *tmp; 3955 *tmp = pf; 3956 } 3957 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3958 for (pf = list; pf != NULL; pf = pf->tmp) { 3959 const struct rte_flow_item *item = pf->rule.pattern; 3960 const struct rte_flow_action *action = pf->rule.actions; 3961 const char *name; 3962 3963 printf("%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3964 pf->id, 3965 pf->rule.attr->group, 3966 pf->rule.attr->priority, 3967 pf->rule.attr->ingress ? 'i' : '-', 3968 pf->rule.attr->egress ? 'e' : '-', 3969 pf->rule.attr->transfer ? 't' : '-'); 3970 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3971 if ((uint32_t)item->type > INT_MAX) 3972 name = "PMD_INTERNAL"; 3973 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3974 &name, sizeof(name), 3975 (void *)(uintptr_t)item->type, 3976 NULL) <= 0) 3977 name = "[UNKNOWN]"; 3978 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3979 printf("%s ", name); 3980 ++item; 3981 } 3982 printf("=>"); 3983 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3984 if ((uint32_t)action->type > INT_MAX) 3985 name = "PMD_INTERNAL"; 3986 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3987 &name, sizeof(name), 3988 (void *)(uintptr_t)action->type, 3989 NULL) <= 0) 3990 name = "[UNKNOWN]"; 3991 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3992 printf(" %s", name); 3993 ++action; 3994 } 3995 printf("\n"); 3996 } 3997 } 3998 3999 /** Restrict ingress traffic to the defined flow rules. */ 4000 int 4001 port_flow_isolate(portid_t port_id, int set) 4002 { 4003 struct rte_flow_error error; 4004 4005 /* Poisoning to make sure PMDs update it in case of error. */ 4006 memset(&error, 0x66, sizeof(error)); 4007 if (rte_flow_isolate(port_id, set, &error)) 4008 return port_flow_complain(&error); 4009 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 4010 port_id, 4011 set ? "now restricted" : "not restricted anymore"); 4012 return 0; 4013 } 4014 4015 /* 4016 * RX/TX ring descriptors display functions. 4017 */ 4018 int 4019 rx_queue_id_is_invalid(queueid_t rxq_id) 4020 { 4021 if (rxq_id < nb_rxq) 4022 return 0; 4023 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 4024 rxq_id, nb_rxq); 4025 return 1; 4026 } 4027 4028 int 4029 tx_queue_id_is_invalid(queueid_t txq_id) 4030 { 4031 if (txq_id < nb_txq) 4032 return 0; 4033 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 4034 txq_id, nb_txq); 4035 return 1; 4036 } 4037 4038 static int 4039 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 4040 { 4041 struct rte_port *port = &ports[port_id]; 4042 struct rte_eth_rxq_info rx_qinfo; 4043 int ret; 4044 4045 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 4046 if (ret == 0) { 4047 *ring_size = rx_qinfo.nb_desc; 4048 return ret; 4049 } 4050 4051 if (ret != -ENOTSUP) 4052 return ret; 4053 /* 4054 * If the rte_eth_rx_queue_info_get is not support for this PMD, 4055 * ring_size stored in testpmd will be used for validity verification. 4056 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 4057 * being 0, it will use a default value provided by PMDs to setup this 4058 * rxq. If the default value is 0, it will use the 4059 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 4060 */ 4061 if (port->nb_rx_desc[rxq_id]) 4062 *ring_size = port->nb_rx_desc[rxq_id]; 4063 else if (port->dev_info.default_rxportconf.ring_size) 4064 *ring_size = port->dev_info.default_rxportconf.ring_size; 4065 else 4066 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 4067 return 0; 4068 } 4069 4070 static int 4071 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 4072 { 4073 struct rte_port *port = &ports[port_id]; 4074 struct rte_eth_txq_info tx_qinfo; 4075 int ret; 4076 4077 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 4078 if (ret == 0) { 4079 *ring_size = tx_qinfo.nb_desc; 4080 return ret; 4081 } 4082 4083 if (ret != -ENOTSUP) 4084 return ret; 4085 /* 4086 * If the rte_eth_tx_queue_info_get is not support for this PMD, 4087 * ring_size stored in testpmd will be used for validity verification. 4088 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 4089 * being 0, it will use a default value provided by PMDs to setup this 4090 * txq. If the default value is 0, it will use the 4091 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 4092 */ 4093 if (port->nb_tx_desc[txq_id]) 4094 *ring_size = port->nb_tx_desc[txq_id]; 4095 else if (port->dev_info.default_txportconf.ring_size) 4096 *ring_size = port->dev_info.default_txportconf.ring_size; 4097 else 4098 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 4099 return 0; 4100 } 4101 4102 static int 4103 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 4104 { 4105 uint16_t ring_size; 4106 int ret; 4107 4108 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 4109 if (ret) 4110 return 1; 4111 4112 if (rxdesc_id < ring_size) 4113 return 0; 4114 4115 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 4116 rxdesc_id, ring_size); 4117 return 1; 4118 } 4119 4120 static int 4121 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 4122 { 4123 uint16_t ring_size; 4124 int ret; 4125 4126 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 4127 if (ret) 4128 return 1; 4129 4130 if (txdesc_id < ring_size) 4131 return 0; 4132 4133 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 4134 txdesc_id, ring_size); 4135 return 1; 4136 } 4137 4138 static const struct rte_memzone * 4139 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 4140 { 4141 char mz_name[RTE_MEMZONE_NAMESIZE]; 4142 const struct rte_memzone *mz; 4143 4144 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 4145 port_id, q_id, ring_name); 4146 mz = rte_memzone_lookup(mz_name); 4147 if (mz == NULL) 4148 fprintf(stderr, 4149 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 4150 ring_name, port_id, q_id, mz_name); 4151 return mz; 4152 } 4153 4154 union igb_ring_dword { 4155 uint64_t dword; 4156 struct { 4157 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 4158 uint32_t lo; 4159 uint32_t hi; 4160 #else 4161 uint32_t hi; 4162 uint32_t lo; 4163 #endif 4164 } words; 4165 }; 4166 4167 struct igb_ring_desc_32_bytes { 4168 union igb_ring_dword lo_dword; 4169 union igb_ring_dword hi_dword; 4170 union igb_ring_dword resv1; 4171 union igb_ring_dword resv2; 4172 }; 4173 4174 struct igb_ring_desc_16_bytes { 4175 union igb_ring_dword lo_dword; 4176 union igb_ring_dword hi_dword; 4177 }; 4178 4179 static void 4180 ring_rxd_display_dword(union igb_ring_dword dword) 4181 { 4182 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 4183 (unsigned)dword.words.hi); 4184 } 4185 4186 static void 4187 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 4188 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4189 portid_t port_id, 4190 #else 4191 __rte_unused portid_t port_id, 4192 #endif 4193 uint16_t desc_id) 4194 { 4195 struct igb_ring_desc_16_bytes *ring = 4196 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4197 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4198 int ret; 4199 struct rte_eth_dev_info dev_info; 4200 4201 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4202 if (ret != 0) 4203 return; 4204 4205 if (strstr(dev_info.driver_name, "i40e") != NULL) { 4206 /* 32 bytes RX descriptor, i40e only */ 4207 struct igb_ring_desc_32_bytes *ring = 4208 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 4209 ring[desc_id].lo_dword.dword = 4210 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4211 ring_rxd_display_dword(ring[desc_id].lo_dword); 4212 ring[desc_id].hi_dword.dword = 4213 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4214 ring_rxd_display_dword(ring[desc_id].hi_dword); 4215 ring[desc_id].resv1.dword = 4216 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 4217 ring_rxd_display_dword(ring[desc_id].resv1); 4218 ring[desc_id].resv2.dword = 4219 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 4220 ring_rxd_display_dword(ring[desc_id].resv2); 4221 4222 return; 4223 } 4224 #endif 4225 /* 16 bytes RX descriptor */ 4226 ring[desc_id].lo_dword.dword = 4227 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4228 ring_rxd_display_dword(ring[desc_id].lo_dword); 4229 ring[desc_id].hi_dword.dword = 4230 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4231 ring_rxd_display_dword(ring[desc_id].hi_dword); 4232 } 4233 4234 static void 4235 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 4236 { 4237 struct igb_ring_desc_16_bytes *ring; 4238 struct igb_ring_desc_16_bytes txd; 4239 4240 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4241 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4242 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4243 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 4244 (unsigned)txd.lo_dword.words.lo, 4245 (unsigned)txd.lo_dword.words.hi, 4246 (unsigned)txd.hi_dword.words.lo, 4247 (unsigned)txd.hi_dword.words.hi); 4248 } 4249 4250 void 4251 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 4252 { 4253 const struct rte_memzone *rx_mz; 4254 4255 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 4256 return; 4257 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 4258 if (rx_mz == NULL) 4259 return; 4260 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 4261 } 4262 4263 void 4264 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 4265 { 4266 const struct rte_memzone *tx_mz; 4267 4268 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 4269 return; 4270 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 4271 if (tx_mz == NULL) 4272 return; 4273 ring_tx_descriptor_display(tx_mz, txd_id); 4274 } 4275 4276 void 4277 fwd_lcores_config_display(void) 4278 { 4279 lcoreid_t lc_id; 4280 4281 printf("List of forwarding lcores:"); 4282 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 4283 printf(" %2u", fwd_lcores_cpuids[lc_id]); 4284 printf("\n"); 4285 } 4286 void 4287 rxtx_config_display(void) 4288 { 4289 portid_t pid; 4290 queueid_t qid; 4291 4292 printf(" %s%s%s packet forwarding%s packets/burst=%d\n", 4293 cur_fwd_eng->fwd_mode_name, 4294 cur_fwd_eng->status ? "-" : "", 4295 cur_fwd_eng->status ? cur_fwd_eng->status : "", 4296 retry_enabled == 0 ? "" : " with retry", 4297 nb_pkt_per_burst); 4298 4299 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 4300 printf(" packet len=%u - nb packet segments=%d\n", 4301 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 4302 4303 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 4304 nb_fwd_lcores, nb_fwd_ports); 4305 4306 RTE_ETH_FOREACH_DEV(pid) { 4307 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 4308 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 4309 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 4310 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 4311 struct rte_eth_rxq_info rx_qinfo; 4312 struct rte_eth_txq_info tx_qinfo; 4313 uint16_t rx_free_thresh_tmp; 4314 uint16_t tx_free_thresh_tmp; 4315 uint16_t tx_rs_thresh_tmp; 4316 uint16_t nb_rx_desc_tmp; 4317 uint16_t nb_tx_desc_tmp; 4318 uint64_t offloads_tmp; 4319 uint8_t pthresh_tmp; 4320 uint8_t hthresh_tmp; 4321 uint8_t wthresh_tmp; 4322 int32_t rc; 4323 4324 /* per port config */ 4325 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 4326 (unsigned int)pid, nb_rxq, nb_txq); 4327 4328 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 4329 ports[pid].dev_conf.rxmode.offloads, 4330 ports[pid].dev_conf.txmode.offloads); 4331 4332 /* per rx queue config only for first queue to be less verbose */ 4333 for (qid = 0; qid < 1; qid++) { 4334 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 4335 if (rc) { 4336 nb_rx_desc_tmp = nb_rx_desc[qid]; 4337 rx_free_thresh_tmp = 4338 rx_conf[qid].rx_free_thresh; 4339 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 4340 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 4341 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 4342 offloads_tmp = rx_conf[qid].offloads; 4343 } else { 4344 nb_rx_desc_tmp = rx_qinfo.nb_desc; 4345 rx_free_thresh_tmp = 4346 rx_qinfo.conf.rx_free_thresh; 4347 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 4348 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 4349 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 4350 offloads_tmp = rx_qinfo.conf.offloads; 4351 } 4352 4353 printf(" RX queue: %d\n", qid); 4354 printf(" RX desc=%d - RX free threshold=%d\n", 4355 nb_rx_desc_tmp, rx_free_thresh_tmp); 4356 printf(" RX threshold registers: pthresh=%d hthresh=%d " 4357 " wthresh=%d\n", 4358 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4359 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 4360 if (rx_conf->share_group > 0) 4361 printf(" share_group=%u share_qid=%u", 4362 rx_conf->share_group, 4363 rx_conf->share_qid); 4364 printf("\n"); 4365 } 4366 4367 /* per tx queue config only for first queue to be less verbose */ 4368 for (qid = 0; qid < 1; qid++) { 4369 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 4370 if (rc) { 4371 nb_tx_desc_tmp = nb_tx_desc[qid]; 4372 tx_free_thresh_tmp = 4373 tx_conf[qid].tx_free_thresh; 4374 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 4375 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 4376 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 4377 offloads_tmp = tx_conf[qid].offloads; 4378 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 4379 } else { 4380 nb_tx_desc_tmp = tx_qinfo.nb_desc; 4381 tx_free_thresh_tmp = 4382 tx_qinfo.conf.tx_free_thresh; 4383 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 4384 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 4385 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 4386 offloads_tmp = tx_qinfo.conf.offloads; 4387 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 4388 } 4389 4390 printf(" TX queue: %d\n", qid); 4391 printf(" TX desc=%d - TX free threshold=%d\n", 4392 nb_tx_desc_tmp, tx_free_thresh_tmp); 4393 printf(" TX threshold registers: pthresh=%d hthresh=%d " 4394 " wthresh=%d\n", 4395 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4396 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 4397 offloads_tmp, tx_rs_thresh_tmp); 4398 } 4399 } 4400 } 4401 4402 void 4403 port_rss_reta_info(portid_t port_id, 4404 struct rte_eth_rss_reta_entry64 *reta_conf, 4405 uint16_t nb_entries) 4406 { 4407 uint16_t i, idx, shift; 4408 int ret; 4409 4410 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4411 return; 4412 4413 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 4414 if (ret != 0) { 4415 fprintf(stderr, 4416 "Failed to get RSS RETA info, return code = %d\n", 4417 ret); 4418 return; 4419 } 4420 4421 for (i = 0; i < nb_entries; i++) { 4422 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4423 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4424 if (!(reta_conf[idx].mask & (1ULL << shift))) 4425 continue; 4426 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 4427 i, reta_conf[idx].reta[shift]); 4428 } 4429 } 4430 4431 /* 4432 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 4433 * key of the port. 4434 */ 4435 void 4436 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 4437 { 4438 struct rte_eth_rss_conf rss_conf = {0}; 4439 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 4440 uint64_t rss_hf; 4441 uint8_t i; 4442 int diag; 4443 struct rte_eth_dev_info dev_info; 4444 uint8_t hash_key_size; 4445 int ret; 4446 4447 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4448 return; 4449 4450 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4451 if (ret != 0) 4452 return; 4453 4454 if (dev_info.hash_key_size > 0 && 4455 dev_info.hash_key_size <= sizeof(rss_key)) 4456 hash_key_size = dev_info.hash_key_size; 4457 else { 4458 fprintf(stderr, 4459 "dev_info did not provide a valid hash key size\n"); 4460 return; 4461 } 4462 4463 /* Get RSS hash key if asked to display it */ 4464 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 4465 rss_conf.rss_key_len = hash_key_size; 4466 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4467 if (diag != 0) { 4468 switch (diag) { 4469 case -ENODEV: 4470 fprintf(stderr, "port index %d invalid\n", port_id); 4471 break; 4472 case -ENOTSUP: 4473 fprintf(stderr, "operation not supported by device\n"); 4474 break; 4475 default: 4476 fprintf(stderr, "operation failed - diag=%d\n", diag); 4477 break; 4478 } 4479 return; 4480 } 4481 rss_hf = rss_conf.rss_hf; 4482 if (rss_hf == 0) { 4483 printf("RSS disabled\n"); 4484 return; 4485 } 4486 printf("RSS functions:\n"); 4487 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 4488 if (!show_rss_key) 4489 return; 4490 printf("RSS key:\n"); 4491 for (i = 0; i < hash_key_size; i++) 4492 printf("%02X", rss_key[i]); 4493 printf("\n"); 4494 } 4495 4496 void 4497 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 4498 uint8_t hash_key_len) 4499 { 4500 struct rte_eth_rss_conf rss_conf; 4501 int diag; 4502 4503 rss_conf.rss_key = NULL; 4504 rss_conf.rss_key_len = 0; 4505 rss_conf.rss_hf = str_to_rsstypes(rss_type); 4506 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4507 if (diag == 0) { 4508 rss_conf.rss_key = hash_key; 4509 rss_conf.rss_key_len = hash_key_len; 4510 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 4511 } 4512 if (diag == 0) 4513 return; 4514 4515 switch (diag) { 4516 case -ENODEV: 4517 fprintf(stderr, "port index %d invalid\n", port_id); 4518 break; 4519 case -ENOTSUP: 4520 fprintf(stderr, "operation not supported by device\n"); 4521 break; 4522 default: 4523 fprintf(stderr, "operation failed - diag=%d\n", diag); 4524 break; 4525 } 4526 } 4527 4528 /* 4529 * Check whether a shared rxq scheduled on other lcores. 4530 */ 4531 static bool 4532 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 4533 portid_t src_port, queueid_t src_rxq, 4534 uint32_t share_group, queueid_t share_rxq) 4535 { 4536 streamid_t sm_id; 4537 streamid_t nb_fs_per_lcore; 4538 lcoreid_t nb_fc; 4539 lcoreid_t lc_id; 4540 struct fwd_stream *fs; 4541 struct rte_port *port; 4542 struct rte_eth_dev_info *dev_info; 4543 struct rte_eth_rxconf *rxq_conf; 4544 4545 nb_fc = cur_fwd_config.nb_fwd_lcores; 4546 /* Check remaining cores. */ 4547 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 4548 sm_id = fwd_lcores[lc_id]->stream_idx; 4549 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4550 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4551 sm_id++) { 4552 fs = fwd_streams[sm_id]; 4553 port = &ports[fs->rx_port]; 4554 dev_info = &port->dev_info; 4555 rxq_conf = &port->rxq[fs->rx_queue].conf; 4556 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4557 == 0 || rxq_conf->share_group == 0) 4558 /* Not shared rxq. */ 4559 continue; 4560 if (domain_id != port->dev_info.switch_info.domain_id) 4561 continue; 4562 if (rxq_conf->share_group != share_group) 4563 continue; 4564 if (rxq_conf->share_qid != share_rxq) 4565 continue; 4566 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 4567 share_group, share_rxq); 4568 printf(" lcore %hhu Port %hu queue %hu\n", 4569 src_lc, src_port, src_rxq); 4570 printf(" lcore %hhu Port %hu queue %hu\n", 4571 lc_id, fs->rx_port, fs->rx_queue); 4572 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 4573 nb_rxq); 4574 return true; 4575 } 4576 } 4577 return false; 4578 } 4579 4580 /* 4581 * Check shared rxq configuration. 4582 * 4583 * Shared group must not being scheduled on different core. 4584 */ 4585 bool 4586 pkt_fwd_shared_rxq_check(void) 4587 { 4588 streamid_t sm_id; 4589 streamid_t nb_fs_per_lcore; 4590 lcoreid_t nb_fc; 4591 lcoreid_t lc_id; 4592 struct fwd_stream *fs; 4593 uint16_t domain_id; 4594 struct rte_port *port; 4595 struct rte_eth_dev_info *dev_info; 4596 struct rte_eth_rxconf *rxq_conf; 4597 4598 if (rxq_share == 0) 4599 return true; 4600 nb_fc = cur_fwd_config.nb_fwd_lcores; 4601 /* 4602 * Check streams on each core, make sure the same switch domain + 4603 * group + queue doesn't get scheduled on other cores. 4604 */ 4605 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 4606 sm_id = fwd_lcores[lc_id]->stream_idx; 4607 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4608 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4609 sm_id++) { 4610 fs = fwd_streams[sm_id]; 4611 /* Update lcore info stream being scheduled. */ 4612 fs->lcore = fwd_lcores[lc_id]; 4613 port = &ports[fs->rx_port]; 4614 dev_info = &port->dev_info; 4615 rxq_conf = &port->rxq[fs->rx_queue].conf; 4616 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4617 == 0 || rxq_conf->share_group == 0) 4618 /* Not shared rxq. */ 4619 continue; 4620 /* Check shared rxq not scheduled on remaining cores. */ 4621 domain_id = port->dev_info.switch_info.domain_id; 4622 if (fwd_stream_on_other_lcores(domain_id, lc_id, 4623 fs->rx_port, 4624 fs->rx_queue, 4625 rxq_conf->share_group, 4626 rxq_conf->share_qid)) 4627 return false; 4628 } 4629 } 4630 return true; 4631 } 4632 4633 /* 4634 * Setup forwarding configuration for each logical core. 4635 */ 4636 static void 4637 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 4638 { 4639 streamid_t nb_fs_per_lcore; 4640 streamid_t nb_fs; 4641 streamid_t sm_id; 4642 lcoreid_t nb_extra; 4643 lcoreid_t nb_fc; 4644 lcoreid_t nb_lc; 4645 lcoreid_t lc_id; 4646 4647 nb_fs = cfg->nb_fwd_streams; 4648 nb_fc = cfg->nb_fwd_lcores; 4649 if (nb_fs <= nb_fc) { 4650 nb_fs_per_lcore = 1; 4651 nb_extra = 0; 4652 } else { 4653 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 4654 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 4655 } 4656 4657 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 4658 sm_id = 0; 4659 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 4660 fwd_lcores[lc_id]->stream_idx = sm_id; 4661 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 4662 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4663 } 4664 4665 /* 4666 * Assign extra remaining streams, if any. 4667 */ 4668 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 4669 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 4670 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 4671 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 4672 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4673 } 4674 } 4675 4676 static portid_t 4677 fwd_topology_tx_port_get(portid_t rxp) 4678 { 4679 static int warning_once = 1; 4680 4681 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 4682 4683 switch (port_topology) { 4684 default: 4685 case PORT_TOPOLOGY_PAIRED: 4686 if ((rxp & 0x1) == 0) { 4687 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 4688 return rxp + 1; 4689 if (warning_once) { 4690 fprintf(stderr, 4691 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 4692 warning_once = 0; 4693 } 4694 return rxp; 4695 } 4696 return rxp - 1; 4697 case PORT_TOPOLOGY_CHAINED: 4698 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 4699 case PORT_TOPOLOGY_LOOP: 4700 return rxp; 4701 } 4702 } 4703 4704 static void 4705 simple_fwd_config_setup(void) 4706 { 4707 portid_t i; 4708 4709 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 4710 cur_fwd_config.nb_fwd_streams = 4711 (streamid_t) cur_fwd_config.nb_fwd_ports; 4712 4713 /* reinitialize forwarding streams */ 4714 init_fwd_streams(); 4715 4716 /* 4717 * In the simple forwarding test, the number of forwarding cores 4718 * must be lower or equal to the number of forwarding ports. 4719 */ 4720 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4721 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 4722 cur_fwd_config.nb_fwd_lcores = 4723 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 4724 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4725 4726 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 4727 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 4728 fwd_streams[i]->rx_queue = 0; 4729 fwd_streams[i]->tx_port = 4730 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 4731 fwd_streams[i]->tx_queue = 0; 4732 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 4733 fwd_streams[i]->retry_enabled = retry_enabled; 4734 } 4735 } 4736 4737 /** 4738 * For the RSS forwarding test all streams distributed over lcores. Each stream 4739 * being composed of a RX queue to poll on a RX port for input messages, 4740 * associated with a TX queue of a TX port where to send forwarded packets. 4741 */ 4742 static void 4743 rss_fwd_config_setup(void) 4744 { 4745 portid_t rxp; 4746 portid_t txp; 4747 queueid_t rxq; 4748 queueid_t nb_q; 4749 streamid_t sm_id; 4750 int start; 4751 int end; 4752 4753 nb_q = nb_rxq; 4754 if (nb_q > nb_txq) 4755 nb_q = nb_txq; 4756 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4757 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4758 cur_fwd_config.nb_fwd_streams = 4759 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4760 4761 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4762 cur_fwd_config.nb_fwd_lcores = 4763 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4764 4765 /* reinitialize forwarding streams */ 4766 init_fwd_streams(); 4767 4768 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4769 4770 if (proc_id > 0 && nb_q % num_procs != 0) 4771 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4772 4773 /** 4774 * In multi-process, All queues are allocated to different 4775 * processes based on num_procs and proc_id. For example: 4776 * if supports 4 queues(nb_q), 2 processes(num_procs), 4777 * the 0~1 queue for primary process. 4778 * the 2~3 queue for secondary process. 4779 */ 4780 start = proc_id * nb_q / num_procs; 4781 end = start + nb_q / num_procs; 4782 rxp = 0; 4783 rxq = start; 4784 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4785 struct fwd_stream *fs; 4786 4787 fs = fwd_streams[sm_id]; 4788 txp = fwd_topology_tx_port_get(rxp); 4789 fs->rx_port = fwd_ports_ids[rxp]; 4790 fs->rx_queue = rxq; 4791 fs->tx_port = fwd_ports_ids[txp]; 4792 fs->tx_queue = rxq; 4793 fs->peer_addr = fs->tx_port; 4794 fs->retry_enabled = retry_enabled; 4795 rxp++; 4796 if (rxp < nb_fwd_ports) 4797 continue; 4798 rxp = 0; 4799 rxq++; 4800 if (rxq >= end) 4801 rxq = start; 4802 } 4803 } 4804 4805 static uint16_t 4806 get_fwd_port_total_tc_num(void) 4807 { 4808 struct rte_eth_dcb_info dcb_info; 4809 uint16_t total_tc_num = 0; 4810 unsigned int i; 4811 4812 for (i = 0; i < nb_fwd_ports; i++) { 4813 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4814 total_tc_num += dcb_info.nb_tcs; 4815 } 4816 4817 return total_tc_num; 4818 } 4819 4820 /** 4821 * For the DCB forwarding test, each core is assigned on each traffic class. 4822 * 4823 * Each core is assigned a multi-stream, each stream being composed of 4824 * a RX queue to poll on a RX port for input messages, associated with 4825 * a TX queue of a TX port where to send forwarded packets. All RX and 4826 * TX queues are mapping to the same traffic class. 4827 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4828 * the same core 4829 */ 4830 static void 4831 dcb_fwd_config_setup(void) 4832 { 4833 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4834 portid_t txp, rxp = 0; 4835 queueid_t txq, rxq = 0; 4836 lcoreid_t lc_id; 4837 uint16_t nb_rx_queue, nb_tx_queue; 4838 uint16_t i, j, k, sm_id = 0; 4839 uint16_t total_tc_num; 4840 struct rte_port *port; 4841 uint8_t tc = 0; 4842 portid_t pid; 4843 int ret; 4844 4845 /* 4846 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4847 * or RTE_PORT_STOPPED. 4848 * 4849 * Re-configure ports to get updated mapping between tc and queue in 4850 * case the queue number of the port is changed. Skip for started ports 4851 * since modifying queue number and calling dev_configure need to stop 4852 * ports first. 4853 */ 4854 for (pid = 0; pid < nb_fwd_ports; pid++) { 4855 if (port_is_started(pid) == 1) 4856 continue; 4857 4858 port = &ports[pid]; 4859 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4860 &port->dev_conf); 4861 if (ret < 0) { 4862 fprintf(stderr, 4863 "Failed to re-configure port %d, ret = %d.\n", 4864 pid, ret); 4865 return; 4866 } 4867 } 4868 4869 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4870 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4871 cur_fwd_config.nb_fwd_streams = 4872 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4873 total_tc_num = get_fwd_port_total_tc_num(); 4874 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4875 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4876 4877 /* reinitialize forwarding streams */ 4878 init_fwd_streams(); 4879 sm_id = 0; 4880 txp = 1; 4881 /* get the dcb info on the first RX and TX ports */ 4882 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4883 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4884 4885 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4886 fwd_lcores[lc_id]->stream_nb = 0; 4887 fwd_lcores[lc_id]->stream_idx = sm_id; 4888 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4889 /* if the nb_queue is zero, means this tc is 4890 * not enabled on the POOL 4891 */ 4892 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4893 break; 4894 k = fwd_lcores[lc_id]->stream_nb + 4895 fwd_lcores[lc_id]->stream_idx; 4896 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4897 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4898 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4899 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4900 for (j = 0; j < nb_rx_queue; j++) { 4901 struct fwd_stream *fs; 4902 4903 fs = fwd_streams[k + j]; 4904 fs->rx_port = fwd_ports_ids[rxp]; 4905 fs->rx_queue = rxq + j; 4906 fs->tx_port = fwd_ports_ids[txp]; 4907 fs->tx_queue = txq + j % nb_tx_queue; 4908 fs->peer_addr = fs->tx_port; 4909 fs->retry_enabled = retry_enabled; 4910 } 4911 fwd_lcores[lc_id]->stream_nb += 4912 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4913 } 4914 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4915 4916 tc++; 4917 if (tc < rxp_dcb_info.nb_tcs) 4918 continue; 4919 /* Restart from TC 0 on next RX port */ 4920 tc = 0; 4921 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4922 rxp = (portid_t) 4923 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4924 else 4925 rxp++; 4926 if (rxp >= nb_fwd_ports) 4927 return; 4928 /* get the dcb information on next RX and TX ports */ 4929 if ((rxp & 0x1) == 0) 4930 txp = (portid_t) (rxp + 1); 4931 else 4932 txp = (portid_t) (rxp - 1); 4933 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4934 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4935 } 4936 } 4937 4938 static void 4939 icmp_echo_config_setup(void) 4940 { 4941 portid_t rxp; 4942 queueid_t rxq; 4943 lcoreid_t lc_id; 4944 uint16_t sm_id; 4945 4946 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4947 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4948 (nb_txq * nb_fwd_ports); 4949 else 4950 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4951 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4952 cur_fwd_config.nb_fwd_streams = 4953 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4954 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4955 cur_fwd_config.nb_fwd_lcores = 4956 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4957 if (verbose_level > 0) { 4958 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4959 __FUNCTION__, 4960 cur_fwd_config.nb_fwd_lcores, 4961 cur_fwd_config.nb_fwd_ports, 4962 cur_fwd_config.nb_fwd_streams); 4963 } 4964 4965 /* reinitialize forwarding streams */ 4966 init_fwd_streams(); 4967 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4968 rxp = 0; rxq = 0; 4969 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4970 if (verbose_level > 0) 4971 printf(" core=%d: \n", lc_id); 4972 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4973 struct fwd_stream *fs; 4974 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4975 fs->rx_port = fwd_ports_ids[rxp]; 4976 fs->rx_queue = rxq; 4977 fs->tx_port = fs->rx_port; 4978 fs->tx_queue = rxq; 4979 fs->peer_addr = fs->tx_port; 4980 fs->retry_enabled = retry_enabled; 4981 if (verbose_level > 0) 4982 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4983 sm_id, fs->rx_port, fs->rx_queue, 4984 fs->tx_queue); 4985 rxq = (queueid_t) (rxq + 1); 4986 if (rxq == nb_rxq) { 4987 rxq = 0; 4988 rxp = (portid_t) (rxp + 1); 4989 } 4990 } 4991 } 4992 } 4993 4994 void 4995 fwd_config_setup(void) 4996 { 4997 struct rte_port *port; 4998 portid_t pt_id; 4999 unsigned int i; 5000 5001 cur_fwd_config.fwd_eng = cur_fwd_eng; 5002 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 5003 icmp_echo_config_setup(); 5004 return; 5005 } 5006 5007 if ((nb_rxq > 1) && (nb_txq > 1)){ 5008 if (dcb_config) { 5009 for (i = 0; i < nb_fwd_ports; i++) { 5010 pt_id = fwd_ports_ids[i]; 5011 port = &ports[pt_id]; 5012 if (!port->dcb_flag) { 5013 fprintf(stderr, 5014 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 5015 return; 5016 } 5017 } 5018 if (nb_fwd_lcores == 1) { 5019 fprintf(stderr, 5020 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 5021 return; 5022 } 5023 5024 dcb_fwd_config_setup(); 5025 } else 5026 rss_fwd_config_setup(); 5027 } 5028 else 5029 simple_fwd_config_setup(); 5030 } 5031 5032 static const char * 5033 mp_alloc_to_str(uint8_t mode) 5034 { 5035 switch (mode) { 5036 case MP_ALLOC_NATIVE: 5037 return "native"; 5038 case MP_ALLOC_ANON: 5039 return "anon"; 5040 case MP_ALLOC_XMEM: 5041 return "xmem"; 5042 case MP_ALLOC_XMEM_HUGE: 5043 return "xmemhuge"; 5044 case MP_ALLOC_XBUF: 5045 return "xbuf"; 5046 default: 5047 return "invalid"; 5048 } 5049 } 5050 5051 void 5052 pkt_fwd_config_display(struct fwd_config *cfg) 5053 { 5054 struct fwd_stream *fs; 5055 lcoreid_t lc_id; 5056 streamid_t sm_id; 5057 5058 printf("%s%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 5059 "NUMA support %s, MP allocation mode: %s\n", 5060 cfg->fwd_eng->fwd_mode_name, 5061 cfg->fwd_eng->status ? "-" : "", 5062 cfg->fwd_eng->status ? cfg->fwd_eng->status : "", 5063 retry_enabled == 0 ? "" : " with retry", 5064 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 5065 numa_support == 1 ? "enabled" : "disabled", 5066 mp_alloc_to_str(mp_alloc_type)); 5067 5068 if (retry_enabled) 5069 printf("TX retry num: %u, delay between TX retries: %uus\n", 5070 burst_tx_retry_num, burst_tx_delay_time); 5071 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 5072 printf("Logical Core %u (socket %u) forwards packets on " 5073 "%d streams:", 5074 fwd_lcores_cpuids[lc_id], 5075 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 5076 fwd_lcores[lc_id]->stream_nb); 5077 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 5078 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 5079 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 5080 "P=%d/Q=%d (socket %u) ", 5081 fs->rx_port, fs->rx_queue, 5082 ports[fs->rx_port].socket_id, 5083 fs->tx_port, fs->tx_queue, 5084 ports[fs->tx_port].socket_id); 5085 print_ethaddr("peer=", 5086 &peer_eth_addrs[fs->peer_addr]); 5087 } 5088 printf("\n"); 5089 } 5090 printf("\n"); 5091 } 5092 5093 void 5094 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 5095 { 5096 struct rte_ether_addr new_peer_addr; 5097 if (!rte_eth_dev_is_valid_port(port_id)) { 5098 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 5099 return; 5100 } 5101 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 5102 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 5103 peer_addr); 5104 return; 5105 } 5106 peer_eth_addrs[port_id] = new_peer_addr; 5107 } 5108 5109 int 5110 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 5111 { 5112 unsigned int i; 5113 unsigned int lcore_cpuid; 5114 int record_now; 5115 5116 record_now = 0; 5117 again: 5118 for (i = 0; i < nb_lc; i++) { 5119 lcore_cpuid = lcorelist[i]; 5120 if (! rte_lcore_is_enabled(lcore_cpuid)) { 5121 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 5122 return -1; 5123 } 5124 if (lcore_cpuid == rte_get_main_lcore()) { 5125 fprintf(stderr, 5126 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 5127 lcore_cpuid); 5128 return -1; 5129 } 5130 if (record_now) 5131 fwd_lcores_cpuids[i] = lcore_cpuid; 5132 } 5133 if (record_now == 0) { 5134 record_now = 1; 5135 goto again; 5136 } 5137 nb_cfg_lcores = (lcoreid_t) nb_lc; 5138 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 5139 printf("previous number of forwarding cores %u - changed to " 5140 "number of configured cores %u\n", 5141 (unsigned int) nb_fwd_lcores, nb_lc); 5142 nb_fwd_lcores = (lcoreid_t) nb_lc; 5143 } 5144 5145 return 0; 5146 } 5147 5148 int 5149 set_fwd_lcores_mask(uint64_t lcoremask) 5150 { 5151 unsigned int lcorelist[64]; 5152 unsigned int nb_lc; 5153 unsigned int i; 5154 5155 if (lcoremask == 0) { 5156 fprintf(stderr, "Invalid NULL mask of cores\n"); 5157 return -1; 5158 } 5159 nb_lc = 0; 5160 for (i = 0; i < 64; i++) { 5161 if (! ((uint64_t)(1ULL << i) & lcoremask)) 5162 continue; 5163 lcorelist[nb_lc++] = i; 5164 } 5165 return set_fwd_lcores_list(lcorelist, nb_lc); 5166 } 5167 5168 void 5169 set_fwd_lcores_number(uint16_t nb_lc) 5170 { 5171 if (test_done == 0) { 5172 fprintf(stderr, "Please stop forwarding first\n"); 5173 return; 5174 } 5175 if (nb_lc > nb_cfg_lcores) { 5176 fprintf(stderr, 5177 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 5178 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 5179 return; 5180 } 5181 nb_fwd_lcores = (lcoreid_t) nb_lc; 5182 printf("Number of forwarding cores set to %u\n", 5183 (unsigned int) nb_fwd_lcores); 5184 } 5185 5186 void 5187 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 5188 { 5189 unsigned int i; 5190 portid_t port_id; 5191 int record_now; 5192 5193 record_now = 0; 5194 again: 5195 for (i = 0; i < nb_pt; i++) { 5196 port_id = (portid_t) portlist[i]; 5197 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5198 return; 5199 if (record_now) 5200 fwd_ports_ids[i] = port_id; 5201 } 5202 if (record_now == 0) { 5203 record_now = 1; 5204 goto again; 5205 } 5206 nb_cfg_ports = (portid_t) nb_pt; 5207 if (nb_fwd_ports != (portid_t) nb_pt) { 5208 printf("previous number of forwarding ports %u - changed to " 5209 "number of configured ports %u\n", 5210 (unsigned int) nb_fwd_ports, nb_pt); 5211 nb_fwd_ports = (portid_t) nb_pt; 5212 } 5213 } 5214 5215 /** 5216 * Parse the user input and obtain the list of forwarding ports 5217 * 5218 * @param[in] list 5219 * String containing the user input. User can specify 5220 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 5221 * For example, if the user wants to use all the available 5222 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 5223 * If the user wants to use only the ports 1,2 then the input 5224 * is 1,2. 5225 * valid characters are '-' and ',' 5226 * @param[out] values 5227 * This array will be filled with a list of port IDs 5228 * based on the user input 5229 * Note that duplicate entries are discarded and only the first 5230 * count entries in this array are port IDs and all the rest 5231 * will contain default values 5232 * @param[in] maxsize 5233 * This parameter denotes 2 things 5234 * 1) Number of elements in the values array 5235 * 2) Maximum value of each element in the values array 5236 * @return 5237 * On success, returns total count of parsed port IDs 5238 * On failure, returns 0 5239 */ 5240 static unsigned int 5241 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 5242 { 5243 unsigned int count = 0; 5244 char *end = NULL; 5245 int min, max; 5246 int value, i; 5247 unsigned int marked[maxsize]; 5248 5249 if (list == NULL || values == NULL) 5250 return 0; 5251 5252 for (i = 0; i < (int)maxsize; i++) 5253 marked[i] = 0; 5254 5255 min = INT_MAX; 5256 5257 do { 5258 /*Remove the blank spaces if any*/ 5259 while (isblank(*list)) 5260 list++; 5261 if (*list == '\0') 5262 break; 5263 errno = 0; 5264 value = strtol(list, &end, 10); 5265 if (errno || end == NULL) 5266 return 0; 5267 if (value < 0 || value >= (int)maxsize) 5268 return 0; 5269 while (isblank(*end)) 5270 end++; 5271 if (*end == '-' && min == INT_MAX) { 5272 min = value; 5273 } else if ((*end == ',') || (*end == '\0')) { 5274 max = value; 5275 if (min == INT_MAX) 5276 min = value; 5277 for (i = min; i <= max; i++) { 5278 if (count < maxsize) { 5279 if (marked[i]) 5280 continue; 5281 values[count] = i; 5282 marked[i] = 1; 5283 count++; 5284 } 5285 } 5286 min = INT_MAX; 5287 } else 5288 return 0; 5289 list = end + 1; 5290 } while (*end != '\0'); 5291 5292 return count; 5293 } 5294 5295 void 5296 parse_fwd_portlist(const char *portlist) 5297 { 5298 unsigned int portcount; 5299 unsigned int portindex[RTE_MAX_ETHPORTS]; 5300 unsigned int i, valid_port_count = 0; 5301 5302 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 5303 if (!portcount) 5304 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 5305 5306 /* 5307 * Here we verify the validity of the ports 5308 * and thereby calculate the total number of 5309 * valid ports 5310 */ 5311 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 5312 if (rte_eth_dev_is_valid_port(portindex[i])) { 5313 portindex[valid_port_count] = portindex[i]; 5314 valid_port_count++; 5315 } 5316 } 5317 5318 set_fwd_ports_list(portindex, valid_port_count); 5319 } 5320 5321 void 5322 set_fwd_ports_mask(uint64_t portmask) 5323 { 5324 unsigned int portlist[64]; 5325 unsigned int nb_pt; 5326 unsigned int i; 5327 5328 if (portmask == 0) { 5329 fprintf(stderr, "Invalid NULL mask of ports\n"); 5330 return; 5331 } 5332 nb_pt = 0; 5333 RTE_ETH_FOREACH_DEV(i) { 5334 if (! ((uint64_t)(1ULL << i) & portmask)) 5335 continue; 5336 portlist[nb_pt++] = i; 5337 } 5338 set_fwd_ports_list(portlist, nb_pt); 5339 } 5340 5341 void 5342 set_fwd_ports_number(uint16_t nb_pt) 5343 { 5344 if (nb_pt > nb_cfg_ports) { 5345 fprintf(stderr, 5346 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 5347 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 5348 return; 5349 } 5350 nb_fwd_ports = (portid_t) nb_pt; 5351 printf("Number of forwarding ports set to %u\n", 5352 (unsigned int) nb_fwd_ports); 5353 } 5354 5355 int 5356 port_is_forwarding(portid_t port_id) 5357 { 5358 unsigned int i; 5359 5360 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5361 return -1; 5362 5363 for (i = 0; i < nb_fwd_ports; i++) { 5364 if (fwd_ports_ids[i] == port_id) 5365 return 1; 5366 } 5367 5368 return 0; 5369 } 5370 5371 void 5372 set_nb_pkt_per_burst(uint16_t nb) 5373 { 5374 if (nb > MAX_PKT_BURST) { 5375 fprintf(stderr, 5376 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 5377 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 5378 return; 5379 } 5380 nb_pkt_per_burst = nb; 5381 printf("Number of packets per burst set to %u\n", 5382 (unsigned int) nb_pkt_per_burst); 5383 } 5384 5385 static const char * 5386 tx_split_get_name(enum tx_pkt_split split) 5387 { 5388 uint32_t i; 5389 5390 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5391 if (tx_split_name[i].split == split) 5392 return tx_split_name[i].name; 5393 } 5394 return NULL; 5395 } 5396 5397 void 5398 set_tx_pkt_split(const char *name) 5399 { 5400 uint32_t i; 5401 5402 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5403 if (strcmp(tx_split_name[i].name, name) == 0) { 5404 tx_pkt_split = tx_split_name[i].split; 5405 return; 5406 } 5407 } 5408 fprintf(stderr, "unknown value: \"%s\"\n", name); 5409 } 5410 5411 int 5412 parse_fec_mode(const char *name, uint32_t *fec_capa) 5413 { 5414 uint8_t i; 5415 5416 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 5417 if (strcmp(fec_mode_name[i].name, name) == 0) { 5418 *fec_capa = 5419 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 5420 return 0; 5421 } 5422 } 5423 return -1; 5424 } 5425 5426 void 5427 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 5428 { 5429 unsigned int i, j; 5430 5431 printf("FEC capabilities:\n"); 5432 5433 for (i = 0; i < num; i++) { 5434 printf("%s : ", 5435 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 5436 5437 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 5438 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 5439 speed_fec_capa[i].capa) 5440 printf("%s ", fec_mode_name[j].name); 5441 } 5442 printf("\n"); 5443 } 5444 } 5445 5446 void 5447 show_rx_pkt_offsets(void) 5448 { 5449 uint32_t i, n; 5450 5451 n = rx_pkt_nb_offs; 5452 printf("Number of offsets: %u\n", n); 5453 if (n) { 5454 printf("Segment offsets: "); 5455 for (i = 0; i != n - 1; i++) 5456 printf("%hu,", rx_pkt_seg_offsets[i]); 5457 printf("%hu\n", rx_pkt_seg_lengths[i]); 5458 } 5459 } 5460 5461 void 5462 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 5463 { 5464 unsigned int i; 5465 5466 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 5467 printf("nb segments per RX packets=%u >= " 5468 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 5469 return; 5470 } 5471 5472 /* 5473 * No extra check here, the segment length will be checked by PMD 5474 * in the extended queue setup. 5475 */ 5476 for (i = 0; i < nb_offs; i++) { 5477 if (seg_offsets[i] >= UINT16_MAX) { 5478 printf("offset[%u]=%u > UINT16_MAX - give up\n", 5479 i, seg_offsets[i]); 5480 return; 5481 } 5482 } 5483 5484 for (i = 0; i < nb_offs; i++) 5485 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 5486 5487 rx_pkt_nb_offs = (uint8_t) nb_offs; 5488 } 5489 5490 void 5491 show_rx_pkt_segments(void) 5492 { 5493 uint32_t i, n; 5494 5495 n = rx_pkt_nb_segs; 5496 printf("Number of segments: %u\n", n); 5497 if (n) { 5498 printf("Segment sizes: "); 5499 for (i = 0; i != n - 1; i++) 5500 printf("%hu,", rx_pkt_seg_lengths[i]); 5501 printf("%hu\n", rx_pkt_seg_lengths[i]); 5502 } 5503 } 5504 5505 static const char *get_ptype_str(uint32_t ptype) 5506 { 5507 const char *str; 5508 5509 switch (ptype) { 5510 case RTE_PTYPE_L2_ETHER: 5511 str = "eth"; 5512 break; 5513 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 5514 str = "ipv4"; 5515 break; 5516 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 5517 str = "ipv6"; 5518 break; 5519 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5520 str = "ipv4-tcp"; 5521 break; 5522 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5523 str = "ipv4-udp"; 5524 break; 5525 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5526 str = "ipv4-sctp"; 5527 break; 5528 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5529 str = "ipv6-tcp"; 5530 break; 5531 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5532 str = "ipv6-udp"; 5533 break; 5534 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5535 str = "ipv6-sctp"; 5536 break; 5537 case RTE_PTYPE_TUNNEL_GRENAT: 5538 str = "grenat"; 5539 break; 5540 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER: 5541 str = "inner-eth"; 5542 break; 5543 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5544 | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 5545 str = "inner-ipv4"; 5546 break; 5547 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5548 | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 5549 str = "inner-ipv6"; 5550 break; 5551 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5552 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5553 str = "inner-ipv4-tcp"; 5554 break; 5555 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5556 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5557 str = "inner-ipv4-udp"; 5558 break; 5559 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5560 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5561 str = "inner-ipv4-sctp"; 5562 break; 5563 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5564 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5565 str = "inner-ipv6-tcp"; 5566 break; 5567 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5568 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5569 str = "inner-ipv6-udp"; 5570 break; 5571 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5572 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5573 str = "inner-ipv6-sctp"; 5574 break; 5575 default: 5576 str = "unsupported"; 5577 } 5578 5579 return str; 5580 } 5581 5582 void 5583 show_rx_pkt_hdrs(void) 5584 { 5585 uint32_t i, n; 5586 5587 n = rx_pkt_nb_segs; 5588 printf("Number of segments: %u\n", n); 5589 if (n) { 5590 printf("Packet segs: "); 5591 for (i = 0; i < n - 1; i++) 5592 printf("%s, ", get_ptype_str(rx_pkt_hdr_protos[i])); 5593 printf("payload\n"); 5594 } 5595 } 5596 5597 void 5598 set_rx_pkt_hdrs(unsigned int *seg_hdrs, unsigned int nb_segs) 5599 { 5600 unsigned int i; 5601 5602 if (nb_segs + 1 > MAX_SEGS_BUFFER_SPLIT) { 5603 printf("nb segments per RX packets=%u > " 5604 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs + 1); 5605 return; 5606 } 5607 5608 memset(rx_pkt_hdr_protos, 0, sizeof(rx_pkt_hdr_protos)); 5609 5610 for (i = 0; i < nb_segs; i++) 5611 rx_pkt_hdr_protos[i] = (uint32_t)seg_hdrs[i]; 5612 /* 5613 * We calculate the number of hdrs, but payload is not included, 5614 * so rx_pkt_nb_segs would increase 1. 5615 */ 5616 rx_pkt_nb_segs = nb_segs + 1; 5617 } 5618 5619 void 5620 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5621 { 5622 unsigned int i; 5623 5624 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 5625 printf("nb segments per RX packets=%u >= " 5626 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 5627 return; 5628 } 5629 5630 /* 5631 * No extra check here, the segment length will be checked by PMD 5632 * in the extended queue setup. 5633 */ 5634 for (i = 0; i < nb_segs; i++) { 5635 if (seg_lengths[i] >= UINT16_MAX) { 5636 printf("length[%u]=%u > UINT16_MAX - give up\n", 5637 i, seg_lengths[i]); 5638 return; 5639 } 5640 } 5641 5642 for (i = 0; i < nb_segs; i++) 5643 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5644 5645 rx_pkt_nb_segs = (uint8_t) nb_segs; 5646 } 5647 5648 void 5649 show_tx_pkt_segments(void) 5650 { 5651 uint32_t i, n; 5652 const char *split; 5653 5654 n = tx_pkt_nb_segs; 5655 split = tx_split_get_name(tx_pkt_split); 5656 5657 printf("Number of segments: %u\n", n); 5658 printf("Segment sizes: "); 5659 for (i = 0; i != n - 1; i++) 5660 printf("%hu,", tx_pkt_seg_lengths[i]); 5661 printf("%hu\n", tx_pkt_seg_lengths[i]); 5662 printf("Split packet: %s\n", split); 5663 } 5664 5665 static bool 5666 nb_segs_is_invalid(unsigned int nb_segs) 5667 { 5668 uint16_t ring_size; 5669 uint16_t queue_id; 5670 uint16_t port_id; 5671 int ret; 5672 5673 RTE_ETH_FOREACH_DEV(port_id) { 5674 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 5675 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 5676 if (ret) { 5677 /* Port may not be initialized yet, can't say 5678 * the port is invalid in this stage. 5679 */ 5680 continue; 5681 } 5682 if (ring_size < nb_segs) { 5683 printf("nb segments per TX packets=%u >= TX " 5684 "queue(%u) ring_size=%u - txpkts ignored\n", 5685 nb_segs, queue_id, ring_size); 5686 return true; 5687 } 5688 } 5689 } 5690 5691 return false; 5692 } 5693 5694 void 5695 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5696 { 5697 uint16_t tx_pkt_len; 5698 unsigned int i; 5699 5700 /* 5701 * For single segment settings failed check is ignored. 5702 * It is a very basic capability to send the single segment 5703 * packets, suppose it is always supported. 5704 */ 5705 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 5706 fprintf(stderr, 5707 "Tx segment size(%u) is not supported - txpkts ignored\n", 5708 nb_segs); 5709 return; 5710 } 5711 5712 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 5713 fprintf(stderr, 5714 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 5715 nb_segs, RTE_MAX_SEGS_PER_PKT); 5716 return; 5717 } 5718 5719 /* 5720 * Check that each segment length is greater or equal than 5721 * the mbuf data size. 5722 * Check also that the total packet length is greater or equal than the 5723 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 5724 * 20 + 8). 5725 */ 5726 tx_pkt_len = 0; 5727 for (i = 0; i < nb_segs; i++) { 5728 if (seg_lengths[i] > mbuf_data_size[0]) { 5729 fprintf(stderr, 5730 "length[%u]=%u > mbuf_data_size=%u - give up\n", 5731 i, seg_lengths[i], mbuf_data_size[0]); 5732 return; 5733 } 5734 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 5735 } 5736 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 5737 fprintf(stderr, "total packet length=%u < %d - give up\n", 5738 (unsigned) tx_pkt_len, 5739 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 5740 return; 5741 } 5742 5743 for (i = 0; i < nb_segs; i++) 5744 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5745 5746 tx_pkt_length = tx_pkt_len; 5747 tx_pkt_nb_segs = (uint8_t) nb_segs; 5748 } 5749 5750 void 5751 show_tx_pkt_times(void) 5752 { 5753 printf("Interburst gap: %u\n", tx_pkt_times_inter); 5754 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 5755 } 5756 5757 void 5758 set_tx_pkt_times(unsigned int *tx_times) 5759 { 5760 tx_pkt_times_inter = tx_times[0]; 5761 tx_pkt_times_intra = tx_times[1]; 5762 } 5763 5764 #ifdef RTE_LIB_GRO 5765 void 5766 setup_gro(const char *onoff, portid_t port_id) 5767 { 5768 if (!rte_eth_dev_is_valid_port(port_id)) { 5769 fprintf(stderr, "invalid port id %u\n", port_id); 5770 return; 5771 } 5772 if (test_done == 0) { 5773 fprintf(stderr, 5774 "Before enable/disable GRO, please stop forwarding first\n"); 5775 return; 5776 } 5777 if (strcmp(onoff, "on") == 0) { 5778 if (gro_ports[port_id].enable != 0) { 5779 fprintf(stderr, 5780 "Port %u has enabled GRO. Please disable GRO first\n", 5781 port_id); 5782 return; 5783 } 5784 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5785 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 5786 gro_ports[port_id].param.max_flow_num = 5787 GRO_DEFAULT_FLOW_NUM; 5788 gro_ports[port_id].param.max_item_per_flow = 5789 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 5790 } 5791 gro_ports[port_id].enable = 1; 5792 } else { 5793 if (gro_ports[port_id].enable == 0) { 5794 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 5795 return; 5796 } 5797 gro_ports[port_id].enable = 0; 5798 } 5799 } 5800 5801 void 5802 setup_gro_flush_cycles(uint8_t cycles) 5803 { 5804 if (test_done == 0) { 5805 fprintf(stderr, 5806 "Before change flush interval for GRO, please stop forwarding first.\n"); 5807 return; 5808 } 5809 5810 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 5811 GRO_DEFAULT_FLUSH_CYCLES) { 5812 fprintf(stderr, 5813 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 5814 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 5815 cycles = GRO_DEFAULT_FLUSH_CYCLES; 5816 } 5817 5818 gro_flush_cycles = cycles; 5819 } 5820 5821 void 5822 show_gro(portid_t port_id) 5823 { 5824 struct rte_gro_param *param; 5825 uint32_t max_pkts_num; 5826 5827 param = &gro_ports[port_id].param; 5828 5829 if (!rte_eth_dev_is_valid_port(port_id)) { 5830 fprintf(stderr, "Invalid port id %u.\n", port_id); 5831 return; 5832 } 5833 if (gro_ports[port_id].enable) { 5834 printf("GRO type: TCP/IPv4\n"); 5835 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5836 max_pkts_num = param->max_flow_num * 5837 param->max_item_per_flow; 5838 } else 5839 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 5840 printf("Max number of packets to perform GRO: %u\n", 5841 max_pkts_num); 5842 printf("Flushing cycles: %u\n", gro_flush_cycles); 5843 } else 5844 printf("Port %u doesn't enable GRO.\n", port_id); 5845 } 5846 #endif /* RTE_LIB_GRO */ 5847 5848 #ifdef RTE_LIB_GSO 5849 void 5850 setup_gso(const char *mode, portid_t port_id) 5851 { 5852 if (!rte_eth_dev_is_valid_port(port_id)) { 5853 fprintf(stderr, "invalid port id %u\n", port_id); 5854 return; 5855 } 5856 if (strcmp(mode, "on") == 0) { 5857 if (test_done == 0) { 5858 fprintf(stderr, 5859 "before enabling GSO, please stop forwarding first\n"); 5860 return; 5861 } 5862 gso_ports[port_id].enable = 1; 5863 } else if (strcmp(mode, "off") == 0) { 5864 if (test_done == 0) { 5865 fprintf(stderr, 5866 "before disabling GSO, please stop forwarding first\n"); 5867 return; 5868 } 5869 gso_ports[port_id].enable = 0; 5870 } 5871 } 5872 #endif /* RTE_LIB_GSO */ 5873 5874 char* 5875 list_pkt_forwarding_modes(void) 5876 { 5877 static char fwd_modes[128] = ""; 5878 const char *separator = "|"; 5879 struct fwd_engine *fwd_eng; 5880 unsigned i = 0; 5881 5882 if (strlen (fwd_modes) == 0) { 5883 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5884 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5885 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5886 strncat(fwd_modes, separator, 5887 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5888 } 5889 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5890 } 5891 5892 return fwd_modes; 5893 } 5894 5895 char* 5896 list_pkt_forwarding_retry_modes(void) 5897 { 5898 static char fwd_modes[128] = ""; 5899 const char *separator = "|"; 5900 struct fwd_engine *fwd_eng; 5901 unsigned i = 0; 5902 5903 if (strlen(fwd_modes) == 0) { 5904 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5905 if (fwd_eng == &rx_only_engine) 5906 continue; 5907 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5908 sizeof(fwd_modes) - 5909 strlen(fwd_modes) - 1); 5910 strncat(fwd_modes, separator, 5911 sizeof(fwd_modes) - 5912 strlen(fwd_modes) - 1); 5913 } 5914 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5915 } 5916 5917 return fwd_modes; 5918 } 5919 5920 void 5921 set_pkt_forwarding_mode(const char *fwd_mode_name) 5922 { 5923 struct fwd_engine *fwd_eng; 5924 unsigned i; 5925 5926 i = 0; 5927 while ((fwd_eng = fwd_engines[i]) != NULL) { 5928 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5929 printf("Set %s packet forwarding mode%s\n", 5930 fwd_mode_name, 5931 retry_enabled == 0 ? "" : " with retry"); 5932 cur_fwd_eng = fwd_eng; 5933 return; 5934 } 5935 i++; 5936 } 5937 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5938 } 5939 5940 void 5941 add_rx_dump_callbacks(portid_t portid) 5942 { 5943 struct rte_eth_dev_info dev_info; 5944 uint16_t queue; 5945 int ret; 5946 5947 if (port_id_is_invalid(portid, ENABLED_WARN)) 5948 return; 5949 5950 ret = eth_dev_info_get_print_err(portid, &dev_info); 5951 if (ret != 0) 5952 return; 5953 5954 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5955 if (!ports[portid].rx_dump_cb[queue]) 5956 ports[portid].rx_dump_cb[queue] = 5957 rte_eth_add_rx_callback(portid, queue, 5958 dump_rx_pkts, NULL); 5959 } 5960 5961 void 5962 add_tx_dump_callbacks(portid_t portid) 5963 { 5964 struct rte_eth_dev_info dev_info; 5965 uint16_t queue; 5966 int ret; 5967 5968 if (port_id_is_invalid(portid, ENABLED_WARN)) 5969 return; 5970 5971 ret = eth_dev_info_get_print_err(portid, &dev_info); 5972 if (ret != 0) 5973 return; 5974 5975 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5976 if (!ports[portid].tx_dump_cb[queue]) 5977 ports[portid].tx_dump_cb[queue] = 5978 rte_eth_add_tx_callback(portid, queue, 5979 dump_tx_pkts, NULL); 5980 } 5981 5982 void 5983 remove_rx_dump_callbacks(portid_t portid) 5984 { 5985 struct rte_eth_dev_info dev_info; 5986 uint16_t queue; 5987 int ret; 5988 5989 if (port_id_is_invalid(portid, ENABLED_WARN)) 5990 return; 5991 5992 ret = eth_dev_info_get_print_err(portid, &dev_info); 5993 if (ret != 0) 5994 return; 5995 5996 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5997 if (ports[portid].rx_dump_cb[queue]) { 5998 rte_eth_remove_rx_callback(portid, queue, 5999 ports[portid].rx_dump_cb[queue]); 6000 ports[portid].rx_dump_cb[queue] = NULL; 6001 } 6002 } 6003 6004 void 6005 remove_tx_dump_callbacks(portid_t portid) 6006 { 6007 struct rte_eth_dev_info dev_info; 6008 uint16_t queue; 6009 int ret; 6010 6011 if (port_id_is_invalid(portid, ENABLED_WARN)) 6012 return; 6013 6014 ret = eth_dev_info_get_print_err(portid, &dev_info); 6015 if (ret != 0) 6016 return; 6017 6018 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 6019 if (ports[portid].tx_dump_cb[queue]) { 6020 rte_eth_remove_tx_callback(portid, queue, 6021 ports[portid].tx_dump_cb[queue]); 6022 ports[portid].tx_dump_cb[queue] = NULL; 6023 } 6024 } 6025 6026 void 6027 configure_rxtx_dump_callbacks(uint16_t verbose) 6028 { 6029 portid_t portid; 6030 6031 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 6032 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 6033 return; 6034 #endif 6035 6036 RTE_ETH_FOREACH_DEV(portid) 6037 { 6038 if (verbose == 1 || verbose > 2) 6039 add_rx_dump_callbacks(portid); 6040 else 6041 remove_rx_dump_callbacks(portid); 6042 if (verbose >= 2) 6043 add_tx_dump_callbacks(portid); 6044 else 6045 remove_tx_dump_callbacks(portid); 6046 } 6047 } 6048 6049 void 6050 set_verbose_level(uint16_t vb_level) 6051 { 6052 printf("Change verbose level from %u to %u\n", 6053 (unsigned int) verbose_level, (unsigned int) vb_level); 6054 verbose_level = vb_level; 6055 configure_rxtx_dump_callbacks(verbose_level); 6056 } 6057 6058 void 6059 vlan_extend_set(portid_t port_id, int on) 6060 { 6061 int diag; 6062 int vlan_offload; 6063 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6064 6065 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6066 return; 6067 6068 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6069 6070 if (on) { 6071 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 6072 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6073 } else { 6074 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 6075 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6076 } 6077 6078 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6079 if (diag < 0) { 6080 fprintf(stderr, 6081 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 6082 port_id, on, diag); 6083 return; 6084 } 6085 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6086 } 6087 6088 void 6089 rx_vlan_strip_set(portid_t port_id, int on) 6090 { 6091 int diag; 6092 int vlan_offload; 6093 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6094 6095 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6096 return; 6097 6098 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6099 6100 if (on) { 6101 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 6102 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6103 } else { 6104 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 6105 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6106 } 6107 6108 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6109 if (diag < 0) { 6110 fprintf(stderr, 6111 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6112 __func__, port_id, on, diag); 6113 return; 6114 } 6115 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6116 } 6117 6118 void 6119 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 6120 { 6121 int diag; 6122 6123 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6124 return; 6125 6126 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 6127 if (diag < 0) 6128 fprintf(stderr, 6129 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 6130 __func__, port_id, queue_id, on, diag); 6131 } 6132 6133 void 6134 rx_vlan_filter_set(portid_t port_id, int on) 6135 { 6136 int diag; 6137 int vlan_offload; 6138 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6139 6140 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6141 return; 6142 6143 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6144 6145 if (on) { 6146 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 6147 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6148 } else { 6149 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 6150 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6151 } 6152 6153 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6154 if (diag < 0) { 6155 fprintf(stderr, 6156 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6157 __func__, port_id, on, diag); 6158 return; 6159 } 6160 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6161 } 6162 6163 void 6164 rx_vlan_qinq_strip_set(portid_t port_id, int on) 6165 { 6166 int diag; 6167 int vlan_offload; 6168 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6169 6170 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6171 return; 6172 6173 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6174 6175 if (on) { 6176 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 6177 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6178 } else { 6179 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 6180 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6181 } 6182 6183 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6184 if (diag < 0) { 6185 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 6186 __func__, port_id, on, diag); 6187 return; 6188 } 6189 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6190 } 6191 6192 int 6193 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 6194 { 6195 int diag; 6196 6197 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6198 return 1; 6199 if (vlan_id_is_invalid(vlan_id)) 6200 return 1; 6201 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 6202 if (diag == 0) 6203 return 0; 6204 fprintf(stderr, 6205 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 6206 port_id, vlan_id, on, diag); 6207 return -1; 6208 } 6209 6210 void 6211 rx_vlan_all_filter_set(portid_t port_id, int on) 6212 { 6213 uint16_t vlan_id; 6214 6215 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6216 return; 6217 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 6218 if (rx_vft_set(port_id, vlan_id, on)) 6219 break; 6220 } 6221 } 6222 6223 void 6224 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 6225 { 6226 int diag; 6227 6228 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6229 return; 6230 6231 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 6232 if (diag == 0) 6233 return; 6234 6235 fprintf(stderr, 6236 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 6237 port_id, vlan_type, tp_id, diag); 6238 } 6239 6240 void 6241 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 6242 { 6243 struct rte_eth_dev_info dev_info; 6244 int ret; 6245 6246 if (vlan_id_is_invalid(vlan_id)) 6247 return; 6248 6249 if (ports[port_id].dev_conf.txmode.offloads & 6250 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 6251 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 6252 return; 6253 } 6254 6255 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6256 if (ret != 0) 6257 return; 6258 6259 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 6260 fprintf(stderr, 6261 "Error: vlan insert is not supported by port %d\n", 6262 port_id); 6263 return; 6264 } 6265 6266 tx_vlan_reset(port_id); 6267 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 6268 ports[port_id].tx_vlan_id = vlan_id; 6269 } 6270 6271 void 6272 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 6273 { 6274 struct rte_eth_dev_info dev_info; 6275 int ret; 6276 6277 if (vlan_id_is_invalid(vlan_id)) 6278 return; 6279 if (vlan_id_is_invalid(vlan_id_outer)) 6280 return; 6281 6282 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6283 if (ret != 0) 6284 return; 6285 6286 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 6287 fprintf(stderr, 6288 "Error: qinq insert not supported by port %d\n", 6289 port_id); 6290 return; 6291 } 6292 6293 tx_vlan_reset(port_id); 6294 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6295 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6296 ports[port_id].tx_vlan_id = vlan_id; 6297 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 6298 } 6299 6300 void 6301 tx_vlan_reset(portid_t port_id) 6302 { 6303 ports[port_id].dev_conf.txmode.offloads &= 6304 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6305 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6306 ports[port_id].tx_vlan_id = 0; 6307 ports[port_id].tx_vlan_id_outer = 0; 6308 } 6309 6310 void 6311 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 6312 { 6313 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6314 return; 6315 6316 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 6317 } 6318 6319 void 6320 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 6321 { 6322 int ret; 6323 6324 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6325 return; 6326 6327 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 6328 return; 6329 6330 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 6331 fprintf(stderr, "map_value not in required range 0..%d\n", 6332 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 6333 return; 6334 } 6335 6336 if (!is_rx) { /* tx */ 6337 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 6338 map_value); 6339 if (ret) { 6340 fprintf(stderr, 6341 "failed to set tx queue stats mapping.\n"); 6342 return; 6343 } 6344 } else { /* rx */ 6345 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 6346 map_value); 6347 if (ret) { 6348 fprintf(stderr, 6349 "failed to set rx queue stats mapping.\n"); 6350 return; 6351 } 6352 } 6353 } 6354 6355 void 6356 set_xstats_hide_zero(uint8_t on_off) 6357 { 6358 xstats_hide_zero = on_off; 6359 } 6360 6361 void 6362 set_record_core_cycles(uint8_t on_off) 6363 { 6364 record_core_cycles = on_off; 6365 } 6366 6367 void 6368 set_record_burst_stats(uint8_t on_off) 6369 { 6370 record_burst_stats = on_off; 6371 } 6372 6373 uint16_t 6374 str_to_flowtype(const char *string) 6375 { 6376 uint8_t i; 6377 6378 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6379 if (!strcmp(flowtype_str_table[i].str, string)) 6380 return flowtype_str_table[i].ftype; 6381 } 6382 6383 if (isdigit(string[0])) { 6384 int val = atoi(string); 6385 if (val > 0 && val < 64) 6386 return (uint16_t)val; 6387 } 6388 6389 return RTE_ETH_FLOW_UNKNOWN; 6390 } 6391 6392 const char* 6393 flowtype_to_str(uint16_t flow_type) 6394 { 6395 uint8_t i; 6396 6397 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6398 if (flowtype_str_table[i].ftype == flow_type) 6399 return flowtype_str_table[i].str; 6400 } 6401 6402 return NULL; 6403 } 6404 6405 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 6406 6407 static inline void 6408 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6409 { 6410 struct rte_eth_flex_payload_cfg *cfg; 6411 uint32_t i, j; 6412 6413 for (i = 0; i < flex_conf->nb_payloads; i++) { 6414 cfg = &flex_conf->flex_set[i]; 6415 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 6416 printf("\n RAW: "); 6417 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 6418 printf("\n L2_PAYLOAD: "); 6419 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 6420 printf("\n L3_PAYLOAD: "); 6421 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 6422 printf("\n L4_PAYLOAD: "); 6423 else 6424 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 6425 for (j = 0; j < num; j++) 6426 printf(" %-5u", cfg->src_offset[j]); 6427 } 6428 printf("\n"); 6429 } 6430 6431 static inline void 6432 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6433 { 6434 struct rte_eth_fdir_flex_mask *mask; 6435 uint32_t i, j; 6436 const char *p; 6437 6438 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 6439 mask = &flex_conf->flex_mask[i]; 6440 p = flowtype_to_str(mask->flow_type); 6441 printf("\n %s:\t", p ? p : "unknown"); 6442 for (j = 0; j < num; j++) 6443 printf(" %02x", mask->mask[j]); 6444 } 6445 printf("\n"); 6446 } 6447 6448 static inline void 6449 print_fdir_flow_type(uint32_t flow_types_mask) 6450 { 6451 int i; 6452 const char *p; 6453 6454 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 6455 if (!(flow_types_mask & (1 << i))) 6456 continue; 6457 p = flowtype_to_str(i); 6458 if (p) 6459 printf(" %s", p); 6460 else 6461 printf(" unknown"); 6462 } 6463 printf("\n"); 6464 } 6465 6466 static int 6467 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 6468 struct rte_eth_fdir_stats *fdir_stat) 6469 { 6470 int ret = -ENOTSUP; 6471 6472 #ifdef RTE_NET_I40E 6473 if (ret == -ENOTSUP) { 6474 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 6475 if (!ret) 6476 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 6477 } 6478 #endif 6479 #ifdef RTE_NET_IXGBE 6480 if (ret == -ENOTSUP) { 6481 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 6482 if (!ret) 6483 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 6484 } 6485 #endif 6486 switch (ret) { 6487 case 0: 6488 break; 6489 case -ENOTSUP: 6490 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 6491 port_id); 6492 break; 6493 default: 6494 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 6495 break; 6496 } 6497 return ret; 6498 } 6499 6500 void 6501 fdir_get_infos(portid_t port_id) 6502 { 6503 struct rte_eth_fdir_stats fdir_stat; 6504 struct rte_eth_fdir_info fdir_info; 6505 6506 static const char *fdir_stats_border = "########################"; 6507 6508 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6509 return; 6510 6511 memset(&fdir_info, 0, sizeof(fdir_info)); 6512 memset(&fdir_stat, 0, sizeof(fdir_stat)); 6513 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 6514 return; 6515 6516 printf("\n %s FDIR infos for port %-2d %s\n", 6517 fdir_stats_border, port_id, fdir_stats_border); 6518 printf(" MODE: "); 6519 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 6520 printf(" PERFECT\n"); 6521 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 6522 printf(" PERFECT-MAC-VLAN\n"); 6523 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 6524 printf(" PERFECT-TUNNEL\n"); 6525 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 6526 printf(" SIGNATURE\n"); 6527 else 6528 printf(" DISABLE\n"); 6529 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 6530 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 6531 printf(" SUPPORTED FLOW TYPE: "); 6532 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 6533 } 6534 printf(" FLEX PAYLOAD INFO:\n"); 6535 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 6536 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 6537 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 6538 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 6539 fdir_info.flex_payload_unit, 6540 fdir_info.max_flex_payload_segment_num, 6541 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 6542 if (fdir_info.flex_conf.nb_payloads > 0) { 6543 printf(" FLEX PAYLOAD SRC OFFSET:"); 6544 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6545 } 6546 if (fdir_info.flex_conf.nb_flexmasks > 0) { 6547 printf(" FLEX MASK CFG:"); 6548 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6549 } 6550 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 6551 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 6552 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 6553 fdir_info.guarant_spc, fdir_info.best_spc); 6554 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 6555 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 6556 " add: %-10"PRIu64" remove: %"PRIu64"\n" 6557 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 6558 fdir_stat.collision, fdir_stat.free, 6559 fdir_stat.maxhash, fdir_stat.maxlen, 6560 fdir_stat.add, fdir_stat.remove, 6561 fdir_stat.f_add, fdir_stat.f_remove); 6562 printf(" %s############################%s\n", 6563 fdir_stats_border, fdir_stats_border); 6564 } 6565 6566 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 6567 6568 void 6569 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 6570 { 6571 #ifdef RTE_NET_IXGBE 6572 int diag; 6573 6574 if (is_rx) 6575 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 6576 else 6577 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 6578 6579 if (diag == 0) 6580 return; 6581 fprintf(stderr, 6582 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 6583 is_rx ? "rx" : "tx", port_id, diag); 6584 return; 6585 #endif 6586 fprintf(stderr, "VF %s setting not supported for port %d\n", 6587 is_rx ? "Rx" : "Tx", port_id); 6588 RTE_SET_USED(vf); 6589 RTE_SET_USED(on); 6590 } 6591 6592 int 6593 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate) 6594 { 6595 int diag; 6596 struct rte_eth_link link; 6597 int ret; 6598 6599 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6600 return 1; 6601 ret = eth_link_get_nowait_print_err(port_id, &link); 6602 if (ret < 0) 6603 return 1; 6604 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 6605 rate > link.link_speed) { 6606 fprintf(stderr, 6607 "Invalid rate value:%u bigger than link speed: %u\n", 6608 rate, link.link_speed); 6609 return 1; 6610 } 6611 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 6612 if (diag == 0) 6613 return diag; 6614 fprintf(stderr, 6615 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 6616 port_id, diag); 6617 return diag; 6618 } 6619 6620 int 6621 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk) 6622 { 6623 int diag = -ENOTSUP; 6624 6625 RTE_SET_USED(vf); 6626 RTE_SET_USED(rate); 6627 RTE_SET_USED(q_msk); 6628 6629 #ifdef RTE_NET_IXGBE 6630 if (diag == -ENOTSUP) 6631 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 6632 q_msk); 6633 #endif 6634 #ifdef RTE_NET_BNXT 6635 if (diag == -ENOTSUP) 6636 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 6637 #endif 6638 if (diag == 0) 6639 return diag; 6640 6641 fprintf(stderr, 6642 "%s for port_id=%d failed diag=%d\n", 6643 __func__, port_id, diag); 6644 return diag; 6645 } 6646 6647 int 6648 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh) 6649 { 6650 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6651 return -EINVAL; 6652 6653 return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh); 6654 } 6655 6656 /* 6657 * Functions to manage the set of filtered Multicast MAC addresses. 6658 * 6659 * A pool of filtered multicast MAC addresses is associated with each port. 6660 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 6661 * The address of the pool and the number of valid multicast MAC addresses 6662 * recorded in the pool are stored in the fields "mc_addr_pool" and 6663 * "mc_addr_nb" of the "rte_port" data structure. 6664 * 6665 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 6666 * to be supplied a contiguous array of multicast MAC addresses. 6667 * To comply with this constraint, the set of multicast addresses recorded 6668 * into the pool are systematically compacted at the beginning of the pool. 6669 * Hence, when a multicast address is removed from the pool, all following 6670 * addresses, if any, are copied back to keep the set contiguous. 6671 */ 6672 #define MCAST_POOL_INC 32 6673 6674 static int 6675 mcast_addr_pool_extend(struct rte_port *port) 6676 { 6677 struct rte_ether_addr *mc_pool; 6678 size_t mc_pool_size; 6679 6680 /* 6681 * If a free entry is available at the end of the pool, just 6682 * increment the number of recorded multicast addresses. 6683 */ 6684 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 6685 port->mc_addr_nb++; 6686 return 0; 6687 } 6688 6689 /* 6690 * [re]allocate a pool with MCAST_POOL_INC more entries. 6691 * The previous test guarantees that port->mc_addr_nb is a multiple 6692 * of MCAST_POOL_INC. 6693 */ 6694 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 6695 MCAST_POOL_INC); 6696 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 6697 mc_pool_size); 6698 if (mc_pool == NULL) { 6699 fprintf(stderr, 6700 "allocation of pool of %u multicast addresses failed\n", 6701 port->mc_addr_nb + MCAST_POOL_INC); 6702 return -ENOMEM; 6703 } 6704 6705 port->mc_addr_pool = mc_pool; 6706 port->mc_addr_nb++; 6707 return 0; 6708 6709 } 6710 6711 static void 6712 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 6713 { 6714 if (mcast_addr_pool_extend(port) != 0) 6715 return; 6716 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 6717 } 6718 6719 static void 6720 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 6721 { 6722 port->mc_addr_nb--; 6723 if (addr_idx == port->mc_addr_nb) { 6724 /* No need to recompact the set of multicast addresses. */ 6725 if (port->mc_addr_nb == 0) { 6726 /* free the pool of multicast addresses. */ 6727 free(port->mc_addr_pool); 6728 port->mc_addr_pool = NULL; 6729 } 6730 return; 6731 } 6732 memmove(&port->mc_addr_pool[addr_idx], 6733 &port->mc_addr_pool[addr_idx + 1], 6734 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 6735 } 6736 6737 int 6738 mcast_addr_pool_destroy(portid_t port_id) 6739 { 6740 struct rte_port *port; 6741 6742 if (port_id_is_invalid(port_id, ENABLED_WARN) || 6743 port_id == (portid_t)RTE_PORT_ALL) 6744 return -EINVAL; 6745 port = &ports[port_id]; 6746 6747 if (port->mc_addr_nb != 0) { 6748 /* free the pool of multicast addresses. */ 6749 free(port->mc_addr_pool); 6750 port->mc_addr_pool = NULL; 6751 port->mc_addr_nb = 0; 6752 } 6753 return 0; 6754 } 6755 6756 static int 6757 eth_port_multicast_addr_list_set(portid_t port_id) 6758 { 6759 struct rte_port *port; 6760 int diag; 6761 6762 port = &ports[port_id]; 6763 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 6764 port->mc_addr_nb); 6765 if (diag < 0) 6766 fprintf(stderr, 6767 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 6768 port_id, port->mc_addr_nb, diag); 6769 6770 return diag; 6771 } 6772 6773 void 6774 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6775 { 6776 struct rte_port *port; 6777 uint32_t i; 6778 6779 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6780 return; 6781 6782 port = &ports[port_id]; 6783 6784 /* 6785 * Check that the added multicast MAC address is not already recorded 6786 * in the pool of multicast addresses. 6787 */ 6788 for (i = 0; i < port->mc_addr_nb; i++) { 6789 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6790 fprintf(stderr, 6791 "multicast address already filtered by port\n"); 6792 return; 6793 } 6794 } 6795 6796 mcast_addr_pool_append(port, mc_addr); 6797 if (eth_port_multicast_addr_list_set(port_id) < 0) 6798 /* Rollback on failure, remove the address from the pool */ 6799 mcast_addr_pool_remove(port, i); 6800 } 6801 6802 void 6803 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6804 { 6805 struct rte_port *port; 6806 uint32_t i; 6807 6808 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6809 return; 6810 6811 port = &ports[port_id]; 6812 6813 /* 6814 * Search the pool of multicast MAC addresses for the removed address. 6815 */ 6816 for (i = 0; i < port->mc_addr_nb; i++) { 6817 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6818 break; 6819 } 6820 if (i == port->mc_addr_nb) { 6821 fprintf(stderr, "multicast address not filtered by port %d\n", 6822 port_id); 6823 return; 6824 } 6825 6826 mcast_addr_pool_remove(port, i); 6827 if (eth_port_multicast_addr_list_set(port_id) < 0) 6828 /* Rollback on failure, add the address back into the pool */ 6829 mcast_addr_pool_append(port, mc_addr); 6830 } 6831 6832 void 6833 port_dcb_info_display(portid_t port_id) 6834 { 6835 struct rte_eth_dcb_info dcb_info; 6836 uint16_t i; 6837 int ret; 6838 static const char *border = "================"; 6839 6840 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6841 return; 6842 6843 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6844 if (ret) { 6845 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6846 port_id); 6847 return; 6848 } 6849 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6850 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6851 printf("\n TC : "); 6852 for (i = 0; i < dcb_info.nb_tcs; i++) 6853 printf("\t%4d", i); 6854 printf("\n Priority : "); 6855 for (i = 0; i < dcb_info.nb_tcs; i++) 6856 printf("\t%4d", dcb_info.prio_tc[i]); 6857 printf("\n BW percent :"); 6858 for (i = 0; i < dcb_info.nb_tcs; i++) 6859 printf("\t%4d%%", dcb_info.tc_bws[i]); 6860 printf("\n RXQ base : "); 6861 for (i = 0; i < dcb_info.nb_tcs; i++) 6862 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6863 printf("\n RXQ number :"); 6864 for (i = 0; i < dcb_info.nb_tcs; i++) 6865 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6866 printf("\n TXQ base : "); 6867 for (i = 0; i < dcb_info.nb_tcs; i++) 6868 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6869 printf("\n TXQ number :"); 6870 for (i = 0; i < dcb_info.nb_tcs; i++) 6871 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6872 printf("\n"); 6873 } 6874 6875 uint8_t * 6876 open_file(const char *file_path, uint32_t *size) 6877 { 6878 int fd = open(file_path, O_RDONLY); 6879 off_t pkg_size; 6880 uint8_t *buf = NULL; 6881 int ret = 0; 6882 struct stat st_buf; 6883 6884 if (size) 6885 *size = 0; 6886 6887 if (fd == -1) { 6888 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6889 return buf; 6890 } 6891 6892 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6893 close(fd); 6894 fprintf(stderr, "%s: File operations failed\n", __func__); 6895 return buf; 6896 } 6897 6898 pkg_size = st_buf.st_size; 6899 if (pkg_size < 0) { 6900 close(fd); 6901 fprintf(stderr, "%s: File operations failed\n", __func__); 6902 return buf; 6903 } 6904 6905 buf = (uint8_t *)malloc(pkg_size); 6906 if (!buf) { 6907 close(fd); 6908 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6909 return buf; 6910 } 6911 6912 ret = read(fd, buf, pkg_size); 6913 if (ret < 0) { 6914 close(fd); 6915 fprintf(stderr, "%s: File read operation failed\n", __func__); 6916 close_file(buf); 6917 return NULL; 6918 } 6919 6920 if (size) 6921 *size = pkg_size; 6922 6923 close(fd); 6924 6925 return buf; 6926 } 6927 6928 int 6929 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6930 { 6931 FILE *fh = fopen(file_path, "wb"); 6932 6933 if (fh == NULL) { 6934 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6935 return -1; 6936 } 6937 6938 if (fwrite(buf, 1, size, fh) != size) { 6939 fclose(fh); 6940 fprintf(stderr, "%s: File write operation failed\n", __func__); 6941 return -1; 6942 } 6943 6944 fclose(fh); 6945 6946 return 0; 6947 } 6948 6949 int 6950 close_file(uint8_t *buf) 6951 { 6952 if (buf) { 6953 free((void *)buf); 6954 return 0; 6955 } 6956 6957 return -1; 6958 } 6959 6960 void 6961 show_macs(portid_t port_id) 6962 { 6963 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6964 struct rte_eth_dev_info dev_info; 6965 int32_t i, rc, num_macs = 0; 6966 6967 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6968 return; 6969 6970 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6971 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6972 if (rc < 0) 6973 return; 6974 6975 for (i = 0; i < rc; i++) { 6976 6977 /* skip zero address */ 6978 if (rte_is_zero_ether_addr(&addr[i])) 6979 continue; 6980 6981 num_macs++; 6982 } 6983 6984 printf("Number of MAC address added: %d\n", num_macs); 6985 6986 for (i = 0; i < rc; i++) { 6987 6988 /* skip zero address */ 6989 if (rte_is_zero_ether_addr(&addr[i])) 6990 continue; 6991 6992 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6993 printf(" %s\n", buf); 6994 } 6995 } 6996 6997 void 6998 show_mcast_macs(portid_t port_id) 6999 { 7000 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 7001 struct rte_ether_addr *addr; 7002 struct rte_port *port; 7003 uint32_t i; 7004 7005 port = &ports[port_id]; 7006 7007 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 7008 7009 for (i = 0; i < port->mc_addr_nb; i++) { 7010 addr = &port->mc_addr_pool[i]; 7011 7012 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 7013 printf(" %s\n", buf); 7014 } 7015 } 7016