/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <ctype.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_mtr.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#ifdef RTE_LIB_GRO
#include <rte_gro.h>
#endif
#include <rte_hexdump.h>

#include "testpmd.h"
#include "cmdline_mtr.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	/* Group types */
	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2 },
	{ "none", 0 },
	{ "ip", RTE_ETH_RSS_IP },
	{ "udp", RTE_ETH_RSS_UDP },
	{ "tcp", RTE_ETH_RSS_TCP },
	{ "sctp", RTE_ETH_RSS_SCTP },
	{ "tunnel", RTE_ETH_RSS_TUNNEL },
	{ "vlan", RTE_ETH_RSS_VLAN },

	/* Individual type */
	{ "ipv4", RTE_ETH_RSS_IPV4 },
	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", RTE_ETH_RSS_IPV6 },
	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
	{ "port", RTE_ETH_RSS_PORT },
	{ "vxlan", RTE_ETH_RSS_VXLAN },
	{ "geneve", RTE_ETH_RSS_GENEVE },
	{ "nvgre", RTE_ETH_RSS_NVGRE },
	{ "gtpu", RTE_ETH_RSS_GTPU },
	{ "eth", RTE_ETH_RSS_ETH },
	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
	{ "esp", RTE_ETH_RSS_ESP },
	{ "ah", RTE_ETH_RSS_AH },
	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
	{ "pfcp", RTE_ETH_RSS_PFCP },
	{ "pppoe", RTE_ETH_RSS_PPPOE },
	{ "ecpri", RTE_ETH_RSS_ECPRI },
	{ "mpls", RTE_ETH_RSS_MPLS },
	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
	{ "l2tpv2", RTE_ETH_RSS_L2TPV2 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
	{ NULL, 0 },
};

static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{
		.mode = RTE_ETH_FEC_NOFEC,
		.name = "off",
	},
	{
		.mode = RTE_ETH_FEC_AUTO,
		.name = "auto",
	},
	{
		.mode = RTE_ETH_FEC_BASER,
		.name = "baser",
	},
	{
		.mode = RTE_ETH_FEC_RS,
		.name = "rs",
	},
	{
		.mode = RTE_ETH_FEC_LLRS,
		.name = "llrs",
	},
};

static const struct {
	char str[32];
	uint16_t ftype;
} flowtype_str_table[] = {
	{"raw", RTE_ETH_FLOW_RAW},
	{"ipv4", RTE_ETH_FLOW_IPV4},
	{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
	{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
	{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
	{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
	{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
	{"ipv6", RTE_ETH_FLOW_IPV6},
	{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
	{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
	{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
	{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
	{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
	{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
	{"ipv6-ex", RTE_ETH_FLOW_IPV6_EX},
	{"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX},
	{"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX},
	{"port", RTE_ETH_FLOW_PORT},
	{"vxlan", RTE_ETH_FLOW_VXLAN},
	{"geneve", RTE_ETH_FLOW_GENEVE},
	{"nvgre", RTE_ETH_FLOW_NVGRE},
	{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
	{"gtpu", RTE_ETH_FLOW_GTPU},
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
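
/*
 * Usage sketch (editor's illustration, not called anywhere in this
 * file): printing a port's MAC address with the helper above could
 * look like:
 *
 *	struct rte_ether_addr addr;
 *
 *	if (rte_eth_macaddr_get(port_id, &addr) == 0)
 *		print_ethaddr("MAC address: ", &addr);
 */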
static void
nic_xstats_display_periodic(portid_t port_id)
{
	struct xstat_display_info *xstats_info;
	uint64_t *prev_values, *curr_values;
	uint64_t diff_value, value_rate;
	struct timespec cur_time;
	uint64_t *ids_supp;
	size_t ids_supp_sz;
	uint64_t diff_ns;
	unsigned int i;
	int rc;

	xstats_info = &ports[port_id].xstats_info;

	ids_supp_sz = xstats_info->ids_supp_sz;
	if (ids_supp_sz == 0)
		return;

	printf("\n");

	ids_supp = xstats_info->ids_supp;
	prev_values = xstats_info->prev_values;
	curr_values = xstats_info->curr_values;

	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
				      ids_supp_sz);
	if (rc != (int)ids_supp_sz) {
		fprintf(stderr,
			"Failed to get values of %zu xstats for port %u - return code %d\n",
			ids_supp_sz, port_id, rc);
		return;
	}

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (xstats_info->prev_ns != 0)
			diff_ns = ns - xstats_info->prev_ns;
		xstats_info->prev_ns = ns;
	}

	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
	for (i = 0; i < ids_supp_sz; i++) {
		diff_value = (curr_values[i] > prev_values[i]) ?
			     (curr_values[i] - prev_values[i]) : 0;
		prev_values[i] = curr_values[i];
		value_rate = diff_ns > 0 ?
				(double)diff_value / diff_ns * NS_PER_SEC : 0;

		printf(" %-25s%12"PRIu64" %15"PRIu64"\n",
		       xstats_display[i].name, curr_values[i], value_rate);
	}
}
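
/*
 * Editor's note on the rate math above (illustrative): with diff_ns in
 * nanoseconds, a counter delta is scaled to a per-second rate as
 * diff_value / diff_ns * NS_PER_SEC. For example, 500 packets counted
 * over 250000000 ns (0.25 s) yields a rate of 2000 pps.
 */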
void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
								diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;
	static const char *nic_stats_border = "########################";
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	ret = rte_eth_stats_get(port_id, &stats);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to get stats (port %u): %d\n",
			__func__, port_id, ret);
		return;
	}
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
	printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
	       PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	if (xstats_display_num > 0)
		nic_xstats_display_periodic(port_id);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset stats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr,
			"%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
	printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		fprintf(stderr, "Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		fprintf(stderr, "Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		fprintf(stderr, "Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset xstats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr, "%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
}
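
/*
 * Sketch (editor's illustration): resetting both basic and extended
 * statistics on every valid port, using the same iterator the rest of
 * this file relies on:
 *
 *	portid_t pid;
 *
 *	RTE_ETH_FOREACH_DEV(pid) {
 *		nic_stats_clear(pid);
 *		nic_xstats_clear(pid);
 *	}
 */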
static const char *
get_queue_state_name(uint8_t queue_state)
{
	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
		return "stopped";
	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
		return "started";
	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return "hairpin";
	else
		return "unknown";
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
	if (qinfo.rx_buf_size != 0)
		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}
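
/*
 * Sketch (editor's illustration, assuming testpmd's global nb_rxq
 * holds the configured per-port Rx queue count): dumping every Rx
 * queue of one port:
 *
 *	uint16_t q;
 *
 *	for (q = 0; q < nb_rxq; q++)
 *		rx_queue_infos_display(port_id, q);
 */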
570 " (per queue)" : ""); 571 572 printf("\n"); 573 } 574 575 static int bus_match_all(const struct rte_bus *bus, const void *data) 576 { 577 RTE_SET_USED(bus); 578 RTE_SET_USED(data); 579 return 0; 580 } 581 582 static void 583 device_infos_display_speeds(uint32_t speed_capa) 584 { 585 printf("\n\tDevice speed capability:"); 586 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 587 printf(" Autonegotiate (all speeds)"); 588 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 589 printf(" Disable autonegotiate (fixed speed) "); 590 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 591 printf(" 10 Mbps half-duplex "); 592 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 593 printf(" 10 Mbps full-duplex "); 594 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 595 printf(" 100 Mbps half-duplex "); 596 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 597 printf(" 100 Mbps full-duplex "); 598 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 599 printf(" 1 Gbps "); 600 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 601 printf(" 2.5 Gbps "); 602 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 603 printf(" 5 Gbps "); 604 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 605 printf(" 10 Gbps "); 606 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 607 printf(" 20 Gbps "); 608 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 609 printf(" 25 Gbps "); 610 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 611 printf(" 40 Gbps "); 612 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 613 printf(" 50 Gbps "); 614 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 615 printf(" 56 Gbps "); 616 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 617 printf(" 100 Gbps "); 618 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 619 printf(" 200 Gbps "); 620 if (speed_capa & RTE_ETH_LINK_SPEED_400G) 621 printf(" 400 Gbps "); 622 } 623 624 void 625 device_infos_display(const char *identifier) 626 { 627 static const char *info_border = "*********************"; 628 struct rte_bus *start = NULL, *next; 629 struct rte_dev_iterator dev_iter; 630 char name[RTE_ETH_NAME_MAX_LEN]; 631 struct rte_ether_addr mac_addr; 632 struct rte_device *dev; 633 struct rte_devargs da; 634 portid_t port_id; 635 struct rte_eth_dev_info dev_info; 636 char devstr[128]; 637 638 memset(&da, 0, sizeof(da)); 639 if (!identifier) 640 goto skip_parse; 641 642 if (rte_devargs_parsef(&da, "%s", identifier)) { 643 fprintf(stderr, "cannot parse identifier\n"); 644 return; 645 } 646 647 skip_parse: 648 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 649 650 start = next; 651 if (identifier && da.bus != next) 652 continue; 653 654 snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next)); 655 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 656 657 if (rte_dev_driver(dev) == NULL) 658 continue; 659 /* Check for matching device if identifier is present */ 660 if (identifier && 661 strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev)))) 662 continue; 663 printf("\n%s Infos for device %s %s\n", 664 info_border, rte_dev_name(dev), info_border); 665 printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev))); 666 printf("\nBus information: %s", 667 rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : ""); 668 printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev))); 669 printf("\nDevargs: %s", 670 rte_dev_devargs(dev) ? 
void
device_infos_display(const char *identifier)
{
	static const char *info_border = "*********************";
	struct rte_bus *start = NULL, *next;
	struct rte_dev_iterator dev_iter;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_ether_addr mac_addr;
	struct rte_device *dev;
	struct rte_devargs da;
	portid_t port_id;
	struct rte_eth_dev_info dev_info;
	char devstr[128];

	memset(&da, 0, sizeof(da));
	if (!identifier)
		goto skip_parse;

	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "cannot parse identifier\n");
		return;
	}

skip_parse:
	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

		start = next;
		if (identifier && da.bus != next)
			continue;

		snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next));
		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

			if (rte_dev_driver(dev) == NULL)
				continue;
			/* Check for matching device if identifier is present */
			if (identifier &&
			    strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev))))
				continue;
			printf("\n%s Infos for device %s %s\n",
			       info_border, rte_dev_name(dev), info_border);
			printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev)));
			printf("\nBus information: %s",
			       rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : "");
			printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev)));
			printf("\nDevargs: %s",
			       rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : "");
			printf("\nConnect to socket: %d", rte_dev_numa_node(dev));
			printf("\n");

			/* List ports with matching device name */
			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
				printf("\n\tPort id: %-2d", port_id);
				if (eth_macaddr_get_print_err(port_id,
							      &mac_addr) == 0)
					print_ethaddr("\n\tMAC address: ",
						      &mac_addr);
				rte_eth_dev_get_name_by_port(port_id, name);
				printf("\n\tDevice name: %s", name);
				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
					device_infos_display_speeds(dev_info.speed_capa);
				printf("\n");
			}
		}
	}
	rte_devargs_reset(&da);
}

static void
print_dev_capabilities(uint64_t capabilities)
{
	uint64_t single_capa;
	int begin;
	int end;
	int bit;

	if (capabilities == 0)
		return;

	begin = rte_ctz64(capabilities);
	end = sizeof(capabilities) * CHAR_BIT - rte_clz64(capabilities);

	single_capa = 1ULL << begin;
	for (bit = begin; bit < end; bit++) {
		if (capabilities & single_capa)
			printf(" %s",
			       rte_eth_dev_capability_name(single_capa));
		single_capa <<= 1;
	}
}

uint64_t
str_to_rsstypes(const char *str)
{
	uint16_t i;

	for (i = 0; rss_type_table[i].str != NULL; i++) {
		if (strcmp(rss_type_table[i].str, str) == 0)
			return rss_type_table[i].rss_type;
	}

	return 0;
}

const char *
rsstypes_to_str(uint64_t rss_type)
{
	uint16_t i;

	for (i = 0; rss_type_table[i].str != NULL; i++) {
		if (rss_type_table[i].rss_type == rss_type)
			return rss_type_table[i].str;
	}

	return NULL;
}
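
/*
 * Example (editor's illustration): for names present in rss_type_table
 * the two helpers above are inverses of each other:
 *
 *	uint64_t types = str_to_rsstypes("ipv4-udp");
 *	const char *name = rsstypes_to_str(types);
 *
 * Here name is "ipv4-udp"; unknown strings map to 0 and bits without a
 * table entry map to NULL.
 */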
static void
rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line)
{
	uint16_t user_defined_str_len;
	uint16_t total_len = 0;
	uint16_t str_len = 0;
	uint64_t rss_offload;
	uint16_t i;

	for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) {
		rss_offload = RTE_BIT64(i);
		if ((offload_types & rss_offload) != 0) {
			const char *p = rsstypes_to_str(rss_offload);

			user_defined_str_len =
				strlen("user-defined-") + (i / 10 + 1);
			str_len = p ? strlen(p) : user_defined_str_len;
			str_len += 2; /* add two spaces */
			if (total_len + str_len >= char_num_per_line) {
				total_len = 0;
				printf("\n");
			}

			if (p)
				printf(" %s", p);
			else
				printf(" user-defined-%u", i);
			total_len += str_len;
		}
	}
	printf("\n");
}
"enabled" : "disabled"); 842 printf("Maximum number of MAC addresses: %u\n", 843 (unsigned int)(port->dev_info.max_mac_addrs)); 844 printf("Maximum number of MAC addresses of hash filtering: %u\n", 845 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 846 847 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 848 if (vlan_offload >= 0){ 849 printf("VLAN offload: \n"); 850 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 851 printf(" strip on, "); 852 else 853 printf(" strip off, "); 854 855 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 856 printf("filter on, "); 857 else 858 printf("filter off, "); 859 860 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 861 printf("extend on, "); 862 else 863 printf("extend off, "); 864 865 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 866 printf("qinq strip on\n"); 867 else 868 printf("qinq strip off\n"); 869 } 870 871 if (dev_info.hash_key_size > 0) 872 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 873 if (dev_info.reta_size > 0) 874 printf("Redirection table size: %u\n", dev_info.reta_size); 875 if (!dev_info.flow_type_rss_offloads) 876 printf("No RSS offload flow type is supported.\n"); 877 else { 878 printf("Supported RSS offload flow types:\n"); 879 rss_offload_types_display(dev_info.flow_type_rss_offloads, 880 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 881 } 882 883 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 884 if (dev_info.max_rx_bufsize != UINT32_MAX) 885 printf("Maximum size of RX buffer: %u\n", dev_info.max_rx_bufsize); 886 printf("Maximum configurable length of RX packet: %u\n", 887 dev_info.max_rx_pktlen); 888 printf("Maximum configurable size of LRO aggregated packet: %u\n", 889 dev_info.max_lro_pkt_size); 890 if (dev_info.max_vfs) 891 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 892 if (dev_info.max_vmdq_pools) 893 printf("Maximum number of VMDq pools: %u\n", 894 dev_info.max_vmdq_pools); 895 896 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 897 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 898 printf("Max possible number of RXDs per queue: %hu\n", 899 dev_info.rx_desc_lim.nb_max); 900 printf("Min possible number of RXDs per queue: %hu\n", 901 dev_info.rx_desc_lim.nb_min); 902 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 903 904 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 905 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 906 printf("Max possible number of TXDs per queue: %hu\n", 907 dev_info.tx_desc_lim.nb_max); 908 printf("Min possible number of TXDs per queue: %hu\n", 909 dev_info.tx_desc_lim.nb_min); 910 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 911 printf("Max segment number per packet: %hu\n", 912 dev_info.tx_desc_lim.nb_seg_max); 913 printf("Max segment number per MTU/TSO: %hu\n", 914 dev_info.tx_desc_lim.nb_mtu_seg_max); 915 916 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 917 print_dev_capabilities(dev_info.dev_capa); 918 printf(" )\n"); 919 /* Show switch info only if valid switch domain and port id is set */ 920 if (dev_info.switch_info.domain_id != 921 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 922 if (dev_info.switch_info.name) 923 printf("Switch name: %s\n", dev_info.switch_info.name); 924 925 printf("Switch domain Id: %u\n", 926 dev_info.switch_info.domain_id); 927 printf("Switch Port Id: %u\n", 928 dev_info.switch_info.port_id); 929 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 930 printf("Switch Rx domain: 
%u\n", 931 dev_info.switch_info.rx_domain); 932 } 933 printf("Device error handling mode: "); 934 switch (dev_info.err_handle_mode) { 935 case RTE_ETH_ERROR_HANDLE_MODE_NONE: 936 printf("none\n"); 937 break; 938 case RTE_ETH_ERROR_HANDLE_MODE_PASSIVE: 939 printf("passive\n"); 940 break; 941 case RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE: 942 printf("proactive\n"); 943 break; 944 default: 945 printf("unknown\n"); 946 break; 947 } 948 printf("Device private info:\n"); 949 ret = rte_eth_dev_priv_dump(port_id, stdout); 950 if (ret == -ENOTSUP) 951 printf(" none\n"); 952 else if (ret < 0) 953 fprintf(stderr, " Failed to dump private info with error (%d): %s\n", 954 ret, strerror(-ret)); 955 } 956 957 void 958 port_summary_header_display(void) 959 { 960 uint16_t port_number; 961 962 port_number = rte_eth_dev_count_avail(); 963 printf("Number of available ports: %i\n", port_number); 964 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 965 "Driver", "Status", "Link"); 966 } 967 968 void 969 port_summary_display(portid_t port_id) 970 { 971 struct rte_ether_addr mac_addr; 972 struct rte_eth_link link; 973 struct rte_eth_dev_info dev_info; 974 char name[RTE_ETH_NAME_MAX_LEN]; 975 int ret; 976 977 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 978 print_valid_ports(); 979 return; 980 } 981 982 ret = eth_link_get_nowait_print_err(port_id, &link); 983 if (ret < 0) 984 return; 985 986 ret = eth_dev_info_get_print_err(port_id, &dev_info); 987 if (ret != 0) 988 return; 989 990 rte_eth_dev_get_name_by_port(port_id, name); 991 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 992 if (ret != 0) 993 return; 994 995 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 996 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 997 dev_info.driver_name, (link.link_status) ? 
("up") : ("down"), 998 rte_eth_link_speed_to_str(link.link_speed)); 999 } 1000 1001 void 1002 port_eeprom_display(portid_t port_id) 1003 { 1004 struct rte_dev_eeprom_info einfo; 1005 int ret; 1006 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1007 print_valid_ports(); 1008 return; 1009 } 1010 1011 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 1012 if (len_eeprom < 0) { 1013 switch (len_eeprom) { 1014 case -ENODEV: 1015 fprintf(stderr, "port index %d invalid\n", port_id); 1016 break; 1017 case -ENOTSUP: 1018 fprintf(stderr, "operation not supported by device\n"); 1019 break; 1020 case -EIO: 1021 fprintf(stderr, "device is removed\n"); 1022 break; 1023 default: 1024 fprintf(stderr, "Unable to get EEPROM: %d\n", 1025 len_eeprom); 1026 break; 1027 } 1028 return; 1029 } 1030 1031 einfo.offset = 0; 1032 einfo.length = len_eeprom; 1033 einfo.data = calloc(1, len_eeprom); 1034 if (!einfo.data) { 1035 fprintf(stderr, 1036 "Allocation of port %u eeprom data failed\n", 1037 port_id); 1038 return; 1039 } 1040 1041 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1042 if (ret != 0) { 1043 switch (ret) { 1044 case -ENODEV: 1045 fprintf(stderr, "port index %d invalid\n", port_id); 1046 break; 1047 case -ENOTSUP: 1048 fprintf(stderr, "operation not supported by device\n"); 1049 break; 1050 case -EIO: 1051 fprintf(stderr, "device is removed\n"); 1052 break; 1053 default: 1054 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1055 break; 1056 } 1057 free(einfo.data); 1058 return; 1059 } 1060 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1061 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1062 free(einfo.data); 1063 } 1064 1065 void 1066 port_module_eeprom_display(portid_t port_id) 1067 { 1068 struct rte_eth_dev_module_info minfo; 1069 struct rte_dev_eeprom_info einfo; 1070 int ret; 1071 1072 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1073 print_valid_ports(); 1074 return; 1075 } 1076 1077 1078 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1079 if (ret != 0) { 1080 switch (ret) { 1081 case -ENODEV: 1082 fprintf(stderr, "port index %d invalid\n", port_id); 1083 break; 1084 case -ENOTSUP: 1085 fprintf(stderr, "operation not supported by device\n"); 1086 break; 1087 case -EIO: 1088 fprintf(stderr, "device is removed\n"); 1089 break; 1090 default: 1091 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1092 ret); 1093 break; 1094 } 1095 return; 1096 } 1097 1098 einfo.offset = 0; 1099 einfo.length = minfo.eeprom_len; 1100 einfo.data = calloc(1, minfo.eeprom_len); 1101 if (!einfo.data) { 1102 fprintf(stderr, 1103 "Allocation of port %u eeprom data failed\n", 1104 port_id); 1105 return; 1106 } 1107 1108 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1109 if (ret != 0) { 1110 switch (ret) { 1111 case -ENODEV: 1112 fprintf(stderr, "port index %d invalid\n", port_id); 1113 break; 1114 case -ENOTSUP: 1115 fprintf(stderr, "operation not supported by device\n"); 1116 break; 1117 case -EIO: 1118 fprintf(stderr, "device is removed\n"); 1119 break; 1120 default: 1121 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1122 ret); 1123 break; 1124 } 1125 free(einfo.data); 1126 return; 1127 } 1128 1129 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1130 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1131 free(einfo.data); 1132 } 1133 1134 int 1135 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1136 { 1137 uint16_t pid; 1138 1139 if (port_id == (portid_t)RTE_PORT_ALL) 1140 return 0; 
int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		fprintf(stderr, "Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

static int
eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	uint32_t overhead_len;
	uint32_t frame_size;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < dev_info.min_mtu) {
		fprintf(stderr,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info.min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info.max_mtu) {
		fprintf(stderr,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info.max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
			dev_info.max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size > dev_info.max_rx_pktlen) {
		fprintf(stderr,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info.max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	struct rte_port *port = &ports[port_id];
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = eth_dev_validate_mtu(port_id, mtu);
	if (diag != 0)
		return;

	if (port->need_reconfig == 0) {
		diag = rte_eth_dev_set_mtu(port_id, mtu);
		if (diag != 0) {
			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
			return;
		}
	}

	port->dev_conf.rxmode.mtu = mtu;
}
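
/*
 * Worked example (editor's note): with the default Ethernet overhead
 * of RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, an MTU
 * of 1500 gives frame_size = 1500 + 18 = 1518, which
 * eth_dev_validate_mtu() accepts only if it does not exceed the
 * device's max_rx_pktlen.
 */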
/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type;

	switch (tunnel->type) {
	default:
		type = "unknown";
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		type = "gre";
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		type = "nvgre";
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		type = "geneve";
		break;
	}

	return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
		       port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
		       port_id, tunnel_id);
	}
}

void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else if (!strcmp(ops->type, "gre"))
		type = RTE_FLOW_ITEM_TYPE_GRE;
	else if (!strcmp(ops->type, "nvgre"))
		type = RTE_FLOW_ITEM_TYPE_NVGRE;
	else if (!strcmp(ops->type, "geneve"))
		type = RTE_FLOW_ITEM_TYPE_GENEVE;
	else {
		fprintf(stderr, "cannot offload \"%s\" tunnel type\n",
			ops->type);
		return;
	}
	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->tunnel.type == type)
			break;
	}
	if (!flt) {
		flt = calloc(1, sizeof(*flt));
		if (!flt) {
			fprintf(stderr, "failed to allocate port flt object\n");
			return;
		}
		flt->tunnel.type = type;
		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
			  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
	}
	printf("port %d: flow tunnel #%u type %s\n",
	       port_id, flt->id, ops->type);
}
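
/*
 * Usage sketch (editor's illustration): creating and then listing a
 * VXLAN tunnel-offload context with the helpers above:
 *
 *	struct tunnel_ops ops = { .type = "vxlan" };
 *
 *	port_flow_tunnel_create(port_id, &ops);
 *	port_flow_tunnel_list(port_id);
 */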
/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}
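
/*
 * Editor's note (illustrative): port_flow_new() above uses the
 * standard two-call rte_flow_conv() idiom, shown here in isolation
 * with the port_flow wrapping omitted:
 *
 *	int size = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0,
 *				 &rule, &error);	// sizing pass
 *	void *buf = calloc(1, size);			// allocate
 *	rte_flow_conv(RTE_FLOW_CONV_OP_RULE, buf, size,
 *		      &rule, &error);			// copy pass
 */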
/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n",
		__func__, error->type, errstr,
		error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					 error->cause), buf) : "",
		error->message ? error->message : "(no stated reason)",
		rte_strerror(err));

	switch (error->type) {
	case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER:
		fprintf(stderr, "The status suggests the use of \"transfer\" "
				"as the possible cause of the failure. Make "
				"sure that the flow in question and its "
				"indirect components (if any) are managed "
				"via \"transfer\" proxy port. Use command "
				"\"show port (port_id) flow transfer proxy\" "
				"to figure out the proxy port ID\n");
		break;
	default:
		break;
	}

	return -err;
}

static void
rss_types_display(uint64_t rss_types, uint16_t char_num_per_line)
{
	uint16_t total_len = 0;
	uint16_t str_len;
	uint16_t i;

	if (rss_types == 0)
		return;

	for (i = 0; rss_type_table[i].str; i++) {
		if (rss_type_table[i].rss_type == 0)
			continue;

		if ((rss_types & rss_type_table[i].rss_type) ==
		    rss_type_table[i].rss_type) {
			/* Contain two spaces */
			str_len = strlen(rss_type_table[i].str) + 2;
			if (total_len + str_len > char_num_per_line) {
				printf("\n");
				total_len = 0;
			}
			printf("  %s", rss_type_table[i].str);
			total_len += str_len;
		}
	}
	printf("\n");
}

static void
rss_config_display(struct rte_flow_action_rss *rss_conf)
{
	uint8_t i;

	if (rss_conf == NULL) {
		fprintf(stderr, "Invalid rule\n");
		return;
	}

	printf("RSS:\n"
	       " queues:");
	if (rss_conf->queue_num == 0)
		printf(" none");
	for (i = 0; i < rss_conf->queue_num; i++)
		printf(" %d", rss_conf->queue[i]);
	printf("\n");

	printf(" function: %s\n", rte_eth_dev_rss_algo_name(rss_conf->func));

	printf(" RSS key:\n");
	if (rss_conf->key_len == 0) {
		printf(" none");
	} else {
		printf(" key_len: %u\n", rss_conf->key_len);
		printf(" key: ");
		if (rss_conf->key == NULL) {
			printf("none");
		} else {
			for (i = 0; i < rss_conf->key_len; i++)
				printf("%02X", rss_conf->key[i]);
		}
	}
	printf("\n");

	printf(" types:\n");
	if (rss_conf->types == 0) {
		printf(" none\n");
		return;
	}
	rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
}

static struct port_indirect_action *
action_get_by_id(portid_t port_id, uint32_t id)
{
	struct rte_port *port;
	struct port_indirect_action **ppia;
	struct port_indirect_action *pia = NULL;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return NULL;
	port = &ports[port_id];
	ppia = &port->actions_list;
	while (*ppia) {
		if ((*ppia)->id == id) {
			pia = *ppia;
			break;
		}
		ppia = &(*ppia)->next;
	}
	if (!pia)
		fprintf(stderr,
			"Failed to find indirect action #%u on port %u\n",
			id, port_id);
	return pia;
}
static int
action_alloc(portid_t port_id, uint32_t id,
	     struct port_indirect_action **action)
{
	struct rte_port *port;
	struct port_indirect_action **ppia;
	struct port_indirect_action *pia = NULL;

	*action = NULL;
	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (port->actions_list) {
			if (port->actions_list->id == UINT32_MAX - 1) {
				fprintf(stderr,
					"Highest indirect action ID is already assigned, delete it first\n");
				return -ENOMEM;
			}
			id = port->actions_list->id + 1;
		} else {
			id = 0;
		}
	}
	pia = calloc(1, sizeof(*pia));
	if (!pia) {
		fprintf(stderr,
			"Allocation of port %u indirect action failed\n",
			port_id);
		return -ENOMEM;
	}
	ppia = &port->actions_list;
	while (*ppia && (*ppia)->id > id)
		ppia = &(*ppia)->next;
	if (*ppia && (*ppia)->id == id) {
		fprintf(stderr,
			"Indirect action #%u is already assigned, delete it first\n",
			id);
		free(pia);
		return -EINVAL;
	}
	pia->next = *ppia;
	pia->id = id;
	*ppia = pia;
	*action = pia;
	return 0;
}

static int
template_alloc(uint32_t id, struct port_template **template,
	       struct port_template **list)
{
	struct port_template *lst = *list;
	struct port_template **ppt;
	struct port_template *pt = NULL;

	*template = NULL;
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (lst) {
			if (lst->id == UINT32_MAX - 1) {
				printf("Highest template ID is already assigned, delete it first\n");
				return -ENOMEM;
			}
			id = lst->id + 1;
		} else {
			id = 0;
		}
	}
	pt = calloc(1, sizeof(*pt));
	if (!pt) {
		printf("Allocation of port template failed\n");
		return -ENOMEM;
	}
	ppt = list;
	while (*ppt && (*ppt)->id > id)
		ppt = &(*ppt)->next;
	if (*ppt && (*ppt)->id == id) {
		printf("Template #%u is already assigned, delete it first\n",
		       id);
		free(pt);
		return -EINVAL;
	}
	pt->next = *ppt;
	pt->id = id;
	*ppt = pt;
	*template = pt;
	return 0;
}

static int
table_alloc(uint32_t id, struct port_table **table,
	    struct port_table **list)
{
	struct port_table *lst = *list;
	struct port_table **ppt;
	struct port_table *pt = NULL;

	*table = NULL;
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (lst) {
			if (lst->id == UINT32_MAX - 1) {
				printf("Highest table ID is already assigned, delete it first\n");
				return -ENOMEM;
			}
			id = lst->id + 1;
		} else {
			id = 0;
		}
	}
	pt = calloc(1, sizeof(*pt));
	if (!pt) {
		printf("Allocation of table failed\n");
		return -ENOMEM;
	}
	ppt = list;
	while (*ppt && (*ppt)->id > id)
		ppt = &(*ppt)->next;
	if (*ppt && (*ppt)->id == id) {
		printf("Table #%u is already assigned, delete it first\n",
		       id);
		free(pt);
		return -EINVAL;
	}
	pt->next = *ppt;
	pt->id = id;
	*ppt = pt;
	*table = pt;
	return 0;
}

/** Get info about flow management resources. */
int
port_flow_get_info(portid_t port_id)
{
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	struct rte_flow_error error;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x99, sizeof(error));
	memset(&port_info, 0, sizeof(port_info));
	memset(&queue_info, 0, sizeof(queue_info));
	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error))
		return port_flow_complain(&error);
	printf("Flow engine resources on port %u:\n"
	       "Number of queues: %d\n"
	       "Size of queues: %d\n"
	       "Number of counters: %d\n"
	       "Number of aging objects: %d\n"
	       "Number of meter actions: %d\n",
	       port_id, port_info.max_nb_queues,
	       queue_info.max_size,
	       port_info.max_nb_counters,
	       port_info.max_nb_aging_objects,
	       port_info.max_nb_meters);
	return 0;
}
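
/*
 * Editor's note: action_alloc(), template_alloc() and table_alloc()
 * above share one ID scheme. Each list is kept sorted by descending
 * ID, so "first available ID" is simply the head's ID + 1 (or 0 for an
 * empty list), and insertion scans until (*ppt)->id <= id to keep the
 * ordering.
 */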
/** Configure flow management resources. */
int
port_flow_configure(portid_t port_id,
	const struct rte_flow_port_attr *port_attr,
	uint16_t nb_queue,
	const struct rte_flow_queue_attr *queue_attr)
{
	struct rte_port *port;
	struct rte_flow_error error;
	const struct rte_flow_queue_attr *attr_list[nb_queue];
	int std_queue;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	port->queue_nb = nb_queue;
	port->queue_sz = queue_attr->size;
	for (std_queue = 0; std_queue < nb_queue; std_queue++)
		attr_list[std_queue] = queue_attr;
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error))
		return port_flow_complain(&error);
	printf("Configure flows on port %u: "
	       "number of queues %d with %d elements\n",
	       port_id, nb_queue, queue_attr->size);
	return 0;
}

static int
action_handle_create(portid_t port_id,
		     struct port_indirect_action *pia,
		     const struct rte_flow_indir_action_conf *conf,
		     const struct rte_flow_action *action,
		     struct rte_flow_error *error)
{
	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
		struct rte_flow_action_age *age =
			(struct rte_flow_action_age *)(uintptr_t)(action->conf);

		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
		age->context = &pia->age_type;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
		struct rte_flow_action_conntrack *ct =
			(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);

		memcpy(ct, &conntrack_context, sizeof(*ct));
	}
	pia->type = action->type;
	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
						    error);
	return pia->handle ? 0 : -1;
}

static int
action_list_handle_create(portid_t port_id,
			  struct port_indirect_action *pia,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *actions,
			  struct rte_flow_error *error)
{
	pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST;
	pia->list_handle =
		rte_flow_action_list_handle_create(port_id, conf,
						   actions, error);
	return pia->list_handle ? 0 : -1;
}
/** Create indirect action */
int
port_action_handle_create(portid_t port_id, uint32_t id, bool indirect_list,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action)
{
	struct port_indirect_action *pia;
	int ret;
	struct rte_flow_error error;

	ret = action_alloc(port_id, id, &pia);
	if (ret)
		return ret;
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	ret = indirect_list ?
	      action_list_handle_create(port_id, pia, conf, action, &error) :
	      action_handle_create(port_id, pia, conf, action, &error);
	if (ret) {
		uint32_t destroy_id = pia->id;

		port_action_handle_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	printf("Indirect action #%u created\n", pia->id);
	return 0;
}

/** Destroy indirect action */
int
port_action_handle_destroy(portid_t port_id,
			   uint32_t n,
			   const uint32_t *actions)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_indirect_action *pia = *tmp;

			if (actions[i] != pia->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));

			if (pia->handle) {
				ret = pia->type ==
				      RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
				      rte_flow_action_list_handle_destroy
				      (port_id, pia->list_handle, &error) :
				      rte_flow_action_handle_destroy
				      (port_id, pia->handle, &error);
				if (ret) {
					ret = port_flow_complain(&error);
					continue;
				}
			}
			*tmp = pia->next;
			printf("Indirect action #%u destroyed\n", pia->id);
			free(pia);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
	}
	return ret;
}

int
port_action_handle_flush(portid_t port_id)
{
	struct rte_port *port;
	struct port_indirect_action **tmp;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp != NULL) {
		struct rte_flow_error error;
		struct port_indirect_action *pia = *tmp;

		/* Poisoning to make sure PMDs update it in case of error. */
		memset(&error, 0x44, sizeof(error));
		if (pia->handle != NULL) {
			ret = pia->type ==
			      RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
			      rte_flow_action_list_handle_destroy
			      (port_id, pia->list_handle, &error) :
			      rte_flow_action_handle_destroy
			      (port_id, pia->handle, &error);
			if (ret) {
				printf("Indirect action #%u not destroyed\n",
				       pia->id);
				ret = port_flow_complain(&error);
			}
			tmp = &pia->next;
		} else {
			*tmp = pia->next;
			free(pia);
		}
	}
	return ret;
}
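
/*
 * Usage sketch (editor's illustration, assuming a COUNT action and a
 * PMD that supports indirect actions): the helpers above and
 * port_action_handle_query() below combine into a
 * create/query/destroy lifecycle such as:
 *
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	uint32_t id = 7;
 *
 *	port_action_handle_create(port_id, id, false, &conf, &count);
 *	port_action_handle_query(port_id, id);
 *	port_action_handle_destroy(port_id, 1, &id);
 */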
/** Get indirect action by port + id */
struct rte_flow_action_handle *
port_action_handle_get_by_id(portid_t port_id, uint32_t id)
{
	struct port_indirect_action *pia = action_get_by_id(port_id, id);

	return (pia) ? pia->handle : NULL;
}

/** Update indirect action */
int
port_action_handle_update(portid_t port_id, uint32_t id,
			  const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_flow_action_handle *action_handle;
	struct port_indirect_action *pia;
	struct rte_flow_update_meter_mark mtr_update;
	const void *update;

	action_handle = port_action_handle_get_by_id(port_id, id);
	if (!action_handle)
		return -EINVAL;
	pia = action_get_by_id(port_id, id);
	if (!pia)
		return -EINVAL;
	switch (pia->type) {
	case RTE_FLOW_ACTION_TYPE_AGE:
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		update = action->conf;
		break;
	case RTE_FLOW_ACTION_TYPE_METER_MARK:
		memcpy(&mtr_update.meter_mark, action->conf,
		       sizeof(struct rte_flow_action_meter_mark));
		if (mtr_update.meter_mark.profile)
			mtr_update.profile_valid = 1;
		if (mtr_update.meter_mark.policy)
			mtr_update.policy_valid = 1;
		mtr_update.color_mode_valid = 1;
		mtr_update.state_valid = 1;
		update = &mtr_update;
		break;
	default:
		update = action;
		break;
	}
	if (rte_flow_action_handle_update(port_id, action_handle, update,
					  &error))
		return port_flow_complain(&error);
	printf("Indirect action #%u updated\n", id);
	return 0;
}
"Original" : "Reply", 2010 query->ct.liberal_mode, query->ct.state, 2011 query->ct.max_ack_window, query->ct.retransmission_limit, 2012 query->ct.last_index, query->ct.last_seq, 2013 query->ct.last_ack, query->ct.last_window, 2014 query->ct.last_end); 2015 printf(" Original Dir:\n" 2016 " scale: %u, fin: %u, ack seen: %u\n" 2017 " unacked data: %u\n Sent end: %u," 2018 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2019 query->ct.original_dir.scale, 2020 query->ct.original_dir.close_initiated, 2021 query->ct.original_dir.last_ack_seen, 2022 query->ct.original_dir.data_unacked, 2023 query->ct.original_dir.sent_end, 2024 query->ct.original_dir.reply_end, 2025 query->ct.original_dir.max_win, 2026 query->ct.original_dir.max_ack); 2027 printf(" Reply Dir:\n" 2028 " scale: %u, fin: %u, ack seen: %u\n" 2029 " unacked data: %u\n Sent end: %u," 2030 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2031 query->ct.reply_dir.scale, 2032 query->ct.reply_dir.close_initiated, 2033 query->ct.reply_dir.last_ack_seen, 2034 query->ct.reply_dir.data_unacked, 2035 query->ct.reply_dir.sent_end, 2036 query->ct.reply_dir.reply_end, 2037 query->ct.reply_dir.max_win, 2038 query->ct.reply_dir.max_ack); 2039 break; 2040 case RTE_FLOW_ACTION_TYPE_QUOTA: 2041 printf("Indirect QUOTA action %u\n" 2042 " unused quota: %" PRId64 "\n", 2043 pia->id, query->quota.quota); 2044 break; 2045 default: 2046 printf("port-%u: indirect action %u (type: %d) doesn't support query\n", 2047 pia->type, pia->id, port_id); 2048 break; 2049 } 2050 2051 } 2052 2053 void 2054 port_action_handle_query_update(portid_t port_id, uint32_t id, 2055 enum rte_flow_query_update_mode qu_mode, 2056 const struct rte_flow_action *action) 2057 { 2058 int ret; 2059 struct rte_flow_error error; 2060 struct port_indirect_action *pia; 2061 union port_action_query query; 2062 2063 pia = action_get_by_id(port_id, id); 2064 if (!pia || !pia->handle) 2065 return; 2066 ret = rte_flow_action_handle_query_update(port_id, pia->handle, action, 2067 &query, qu_mode, &error); 2068 if (ret) 2069 port_flow_complain(&error); 2070 else 2071 port_action_handle_query_dump(port_id, pia, &query); 2072 2073 } 2074 2075 int 2076 port_action_handle_query(portid_t port_id, uint32_t id) 2077 { 2078 struct rte_flow_error error; 2079 struct port_indirect_action *pia; 2080 union port_action_query query; 2081 2082 pia = action_get_by_id(port_id, id); 2083 if (!pia) 2084 return -EINVAL; 2085 switch (pia->type) { 2086 case RTE_FLOW_ACTION_TYPE_AGE: 2087 case RTE_FLOW_ACTION_TYPE_COUNT: 2088 case RTE_FLOW_ACTION_TYPE_QUOTA: 2089 break; 2090 default: 2091 fprintf(stderr, 2092 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2093 id, pia->type, port_id); 2094 return -ENOTSUP; 2095 } 2096 /* Poisoning to make sure PMDs update it in case of error. 
*/ 2097 memset(&error, 0x55, sizeof(error)); 2098 memset(&query, 0, sizeof(query)); 2099 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 2100 return port_flow_complain(&error); 2101 port_action_handle_query_dump(port_id, pia, &query); 2102 return 0; 2103 } 2104 2105 static struct port_flow_tunnel * 2106 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 2107 const struct rte_flow_item *pattern, 2108 const struct rte_flow_action *actions, 2109 const struct tunnel_ops *tunnel_ops) 2110 { 2111 int ret; 2112 struct rte_port *port; 2113 struct port_flow_tunnel *pft; 2114 struct rte_flow_error error; 2115 2116 port = &ports[port_id]; 2117 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2118 if (!pft) { 2119 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2120 tunnel_ops->id); 2121 return NULL; 2122 } 2123 if (tunnel_ops->actions) { 2124 uint32_t num_actions; 2125 const struct rte_flow_action *aptr; 2126 2127 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2128 &pft->pmd_actions, 2129 &pft->num_pmd_actions, 2130 &error); 2131 if (ret) { 2132 port_flow_complain(&error); 2133 return NULL; 2134 } 2135 for (aptr = actions, num_actions = 1; 2136 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2137 aptr++, num_actions++); 2138 pft->actions = malloc( 2139 (num_actions + pft->num_pmd_actions) * 2140 sizeof(actions[0])); 2141 if (!pft->actions) { 2142 rte_flow_tunnel_action_decap_release( 2143 port_id, pft->actions, 2144 pft->num_pmd_actions, &error); 2145 return NULL; 2146 } 2147 rte_memcpy(pft->actions, pft->pmd_actions, 2148 pft->num_pmd_actions * sizeof(actions[0])); 2149 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2150 num_actions * sizeof(actions[0])); 2151 } 2152 if (tunnel_ops->items) { 2153 uint32_t num_items; 2154 const struct rte_flow_item *iptr; 2155 2156 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2157 &pft->pmd_items, 2158 &pft->num_pmd_items, 2159 &error); 2160 if (ret) { 2161 port_flow_complain(&error); 2162 return NULL; 2163 } 2164 for (iptr = pattern, num_items = 1; 2165 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2166 iptr++, num_items++); 2167 pft->items = malloc((num_items + pft->num_pmd_items) * 2168 sizeof(pattern[0])); 2169 if (!pft->items) { 2170 rte_flow_tunnel_item_release( 2171 port_id, pft->pmd_items, 2172 pft->num_pmd_items, &error); 2173 return NULL; 2174 } 2175 rte_memcpy(pft->items, pft->pmd_items, 2176 pft->num_pmd_items * sizeof(pattern[0])); 2177 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2178 num_items * sizeof(pattern[0])); 2179 } 2180 2181 return pft; 2182 } 2183 2184 static void 2185 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2186 const struct tunnel_ops *tunnel_ops, 2187 struct port_flow_tunnel *pft) 2188 { 2189 struct rte_flow_error error; 2190 2191 if (tunnel_ops->actions) { 2192 free(pft->actions); 2193 rte_flow_tunnel_action_decap_release( 2194 port_id, pft->pmd_actions, 2195 pft->num_pmd_actions, &error); 2196 pft->actions = NULL; 2197 pft->pmd_actions = NULL; 2198 } 2199 if (tunnel_ops->items) { 2200 free(pft->items); 2201 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2202 pft->num_pmd_items, 2203 &error); 2204 pft->items = NULL; 2205 pft->pmd_items = NULL; 2206 } 2207 } 2208 2209 /** Add port meter policy */ 2210 int 2211 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2212 const struct rte_flow_action *actions) 2213 { 2214 struct rte_mtr_error error; 2215 const struct rte_flow_action *act = actions; 2216 const struct rte_flow_action *start; 2217 struct 
rte_mtr_meter_policy_params policy; 2218 uint32_t i = 0, act_n; 2219 int ret; 2220 2221 for (i = 0; i < RTE_COLORS; i++) { 2222 for (act_n = 0, start = act; 2223 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2224 act_n++; 2225 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2226 policy.actions[i] = start; 2227 else 2228 policy.actions[i] = NULL; 2229 act++; 2230 } 2231 ret = rte_mtr_meter_policy_add(port_id, 2232 policy_id, 2233 &policy, &error); 2234 if (ret) 2235 print_mtr_err_msg(&error); 2236 return ret; 2237 } 2238 2239 struct rte_flow_meter_profile * 2240 port_meter_profile_get_by_id(portid_t port_id, uint32_t id) 2241 { 2242 struct rte_mtr_error error; 2243 struct rte_flow_meter_profile *profile; 2244 2245 profile = rte_mtr_meter_profile_get(port_id, id, &error); 2246 if (!profile) 2247 print_mtr_err_msg(&error); 2248 return profile; 2249 } 2250 struct rte_flow_meter_policy * 2251 port_meter_policy_get_by_id(portid_t port_id, uint32_t id) 2252 { 2253 struct rte_mtr_error error; 2254 struct rte_flow_meter_policy *policy; 2255 2256 policy = rte_mtr_meter_policy_get(port_id, id, &error); 2257 if (!policy) 2258 print_mtr_err_msg(&error); 2259 return policy; 2260 } 2261 2262 /** Validate flow rule. */ 2263 int 2264 port_flow_validate(portid_t port_id, 2265 const struct rte_flow_attr *attr, 2266 const struct rte_flow_item *pattern, 2267 const struct rte_flow_action *actions, 2268 const struct tunnel_ops *tunnel_ops) 2269 { 2270 struct rte_flow_error error; 2271 struct port_flow_tunnel *pft = NULL; 2272 int ret; 2273 2274 /* Poisoning to make sure PMDs update it in case of error. */ 2275 memset(&error, 0x11, sizeof(error)); 2276 if (tunnel_ops->enabled) { 2277 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2278 actions, tunnel_ops); 2279 if (!pft) 2280 return -ENOENT; 2281 if (pft->items) 2282 pattern = pft->items; 2283 if (pft->actions) 2284 actions = pft->actions; 2285 } 2286 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2287 if (tunnel_ops->enabled) 2288 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2289 if (ret) 2290 return port_flow_complain(&error); 2291 printf("Flow rule validated\n"); 2292 return 0; 2293 } 2294 2295 /** Return age action structure if exists, otherwise NULL. */ 2296 static struct rte_flow_action_age * 2297 age_action_get(const struct rte_flow_action *actions) 2298 { 2299 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2300 switch (actions->type) { 2301 case RTE_FLOW_ACTION_TYPE_AGE: 2302 return (struct rte_flow_action_age *) 2303 (uintptr_t)actions->conf; 2304 default: 2305 break; 2306 } 2307 } 2308 return NULL; 2309 } 2310 2311 /** Create pattern template */ 2312 int 2313 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2314 const struct rte_flow_pattern_template_attr *attr, 2315 const struct rte_flow_item *pattern) 2316 { 2317 struct rte_port *port; 2318 struct port_template *pit; 2319 int ret; 2320 struct rte_flow_error error; 2321 2322 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2323 port_id == (portid_t)RTE_PORT_ALL) 2324 return -EINVAL; 2325 port = &ports[port_id]; 2326 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2327 if (ret) 2328 return ret; 2329 /* Poisoning to make sure PMDs update it in case of error. 
*/ 2330 memset(&error, 0x22, sizeof(error)); 2331 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2332 attr, pattern, &error); 2333 if (!pit->template.pattern_template) { 2334 uint32_t destroy_id = pit->id; 2335 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2336 return port_flow_complain(&error); 2337 } 2338 printf("Pattern template #%u created\n", pit->id); 2339 return 0; 2340 } 2341 2342 /** Destroy pattern template */ 2343 int 2344 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2345 const uint32_t *template) 2346 { 2347 struct rte_port *port; 2348 struct port_template **tmp; 2349 int ret = 0; 2350 2351 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2352 port_id == (portid_t)RTE_PORT_ALL) 2353 return -EINVAL; 2354 port = &ports[port_id]; 2355 tmp = &port->pattern_templ_list; 2356 while (*tmp) { 2357 uint32_t i; 2358 2359 for (i = 0; i != n; ++i) { 2360 struct rte_flow_error error; 2361 struct port_template *pit = *tmp; 2362 2363 if (template[i] != pit->id) 2364 continue; 2365 /* 2366 * Poisoning to make sure PMDs update it in case 2367 * of error. 2368 */ 2369 memset(&error, 0x33, sizeof(error)); 2370 2371 if (pit->template.pattern_template && 2372 rte_flow_pattern_template_destroy(port_id, 2373 pit->template.pattern_template, 2374 &error)) { 2375 ret = port_flow_complain(&error); 2376 continue; 2377 } 2378 *tmp = pit->next; 2379 printf("Pattern template #%u destroyed\n", pit->id); 2380 free(pit); 2381 break; 2382 } 2383 if (i == n) 2384 tmp = &(*tmp)->next; 2385 } 2386 return ret; 2387 } 2388 2389 /** Flush pattern template */ 2390 int 2391 port_flow_pattern_template_flush(portid_t port_id) 2392 { 2393 struct rte_port *port; 2394 struct port_template **tmp; 2395 int ret = 0; 2396 2397 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2398 port_id == (portid_t)RTE_PORT_ALL) 2399 return -EINVAL; 2400 port = &ports[port_id]; 2401 tmp = &port->pattern_templ_list; 2402 while (*tmp) { 2403 struct rte_flow_error error; 2404 struct port_template *pit = *tmp; 2405 2406 /* 2407 * Poisoning to make sure PMDs update it in case 2408 * of error. 2409 */ 2410 memset(&error, 0x33, sizeof(error)); 2411 if (pit->template.pattern_template && 2412 rte_flow_pattern_template_destroy(port_id, 2413 pit->template.pattern_template, &error)) { 2414 printf("Pattern template #%u not destroyed\n", pit->id); 2415 ret = port_flow_complain(&error); 2416 tmp = &pit->next; 2417 } else { 2418 *tmp = pit->next; 2419 free(pit); 2420 } 2421 } 2422 return ret; 2423 } 2424 2425 /** Create actions template */ 2426 int 2427 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2428 const struct rte_flow_actions_template_attr *attr, 2429 const struct rte_flow_action *actions, 2430 const struct rte_flow_action *masks) 2431 { 2432 struct rte_port *port; 2433 struct port_template *pat; 2434 int ret; 2435 struct rte_flow_error error; 2436 2437 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2438 port_id == (portid_t)RTE_PORT_ALL) 2439 return -EINVAL; 2440 port = &ports[port_id]; 2441 ret = template_alloc(id, &pat, &port->actions_templ_list); 2442 if (ret) 2443 return ret; 2444 /* Poisoning to make sure PMDs update it in case of error. 
*/ 2445 memset(&error, 0x22, sizeof(error)); 2446 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2447 attr, actions, masks, &error); 2448 if (!pat->template.actions_template) { 2449 uint32_t destroy_id = pat->id; 2450 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2451 return port_flow_complain(&error); 2452 } 2453 printf("Actions template #%u created\n", pat->id); 2454 return 0; 2455 } 2456 2457 /** Destroy actions template */ 2458 int 2459 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2460 const uint32_t *template) 2461 { 2462 struct rte_port *port; 2463 struct port_template **tmp; 2464 int ret = 0; 2465 2466 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2467 port_id == (portid_t)RTE_PORT_ALL) 2468 return -EINVAL; 2469 port = &ports[port_id]; 2470 tmp = &port->actions_templ_list; 2471 while (*tmp) { 2472 uint32_t i; 2473 2474 for (i = 0; i != n; ++i) { 2475 struct rte_flow_error error; 2476 struct port_template *pat = *tmp; 2477 2478 if (template[i] != pat->id) 2479 continue; 2480 /* 2481 * Poisoning to make sure PMDs update it in case 2482 * of error. 2483 */ 2484 memset(&error, 0x33, sizeof(error)); 2485 2486 if (pat->template.actions_template && 2487 rte_flow_actions_template_destroy(port_id, 2488 pat->template.actions_template, &error)) { 2489 ret = port_flow_complain(&error); 2490 continue; 2491 } 2492 *tmp = pat->next; 2493 printf("Actions template #%u destroyed\n", pat->id); 2494 free(pat); 2495 break; 2496 } 2497 if (i == n) 2498 tmp = &(*tmp)->next; 2499 } 2500 return ret; 2501 } 2502 2503 /** Flush actions template */ 2504 int 2505 port_flow_actions_template_flush(portid_t port_id) 2506 { 2507 struct rte_port *port; 2508 struct port_template **tmp; 2509 int ret = 0; 2510 2511 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2512 port_id == (portid_t)RTE_PORT_ALL) 2513 return -EINVAL; 2514 port = &ports[port_id]; 2515 tmp = &port->actions_templ_list; 2516 while (*tmp) { 2517 struct rte_flow_error error; 2518 struct port_template *pat = *tmp; 2519 2520 /* 2521 * Poisoning to make sure PMDs update it in case 2522 * of error. 
2523 */ 2524 memset(&error, 0x33, sizeof(error)); 2525 2526 if (pat->template.actions_template && 2527 rte_flow_actions_template_destroy(port_id, 2528 pat->template.actions_template, &error)) { 2529 ret = port_flow_complain(&error); 2530 printf("Actions template #%u not destroyed\n", pat->id); 2531 tmp = &pat->next; 2532 } else { 2533 *tmp = pat->next; 2534 free(pat); 2535 } 2536 } 2537 return ret; 2538 } 2539 2540 /** Create table */ 2541 int 2542 port_flow_template_table_create(portid_t port_id, uint32_t id, 2543 const struct rte_flow_template_table_attr *table_attr, 2544 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2545 uint32_t nb_actions_templates, uint32_t *actions_templates) 2546 { 2547 struct rte_port *port; 2548 struct port_table *pt; 2549 struct port_template *temp = NULL; 2550 int ret; 2551 uint32_t i; 2552 struct rte_flow_error error; 2553 struct rte_flow_pattern_template 2554 *flow_pattern_templates[nb_pattern_templates]; 2555 struct rte_flow_actions_template 2556 *flow_actions_templates[nb_actions_templates]; 2557 2558 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2559 port_id == (portid_t)RTE_PORT_ALL) 2560 return -EINVAL; 2561 port = &ports[port_id]; 2562 for (i = 0; i < nb_pattern_templates; ++i) { 2563 bool found = false; 2564 temp = port->pattern_templ_list; 2565 while (temp) { 2566 if (pattern_templates[i] == temp->id) { 2567 flow_pattern_templates[i] = 2568 temp->template.pattern_template; 2569 found = true; 2570 break; 2571 } 2572 temp = temp->next; 2573 } 2574 if (!found) { 2575 printf("Pattern template #%u is invalid\n", 2576 pattern_templates[i]); 2577 return -EINVAL; 2578 } 2579 } 2580 for (i = 0; i < nb_actions_templates; ++i) { 2581 bool found = false; 2582 temp = port->actions_templ_list; 2583 while (temp) { 2584 if (actions_templates[i] == temp->id) { 2585 flow_actions_templates[i] = 2586 temp->template.actions_template; 2587 found = true; 2588 break; 2589 } 2590 temp = temp->next; 2591 } 2592 if (!found) { 2593 printf("Actions template #%u is invalid\n", 2594 actions_templates[i]); 2595 return -EINVAL; 2596 } 2597 } 2598 ret = table_alloc(id, &pt, &port->table_list); 2599 if (ret) 2600 return ret; 2601 /* Poisoning to make sure PMDs update it in case of error. 
*/ 2602 memset(&error, 0x22, sizeof(error)); 2603 pt->table = rte_flow_template_table_create(port_id, table_attr, 2604 flow_pattern_templates, nb_pattern_templates, 2605 flow_actions_templates, nb_actions_templates, 2606 &error); 2607 2608 if (!pt->table) { 2609 uint32_t destroy_id = pt->id; 2610 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2611 return port_flow_complain(&error); 2612 } 2613 pt->nb_pattern_templates = nb_pattern_templates; 2614 pt->nb_actions_templates = nb_actions_templates; 2615 rte_memcpy(&pt->flow_attr, &table_attr->flow_attr, 2616 sizeof(struct rte_flow_attr)); 2617 printf("Template table #%u created\n", pt->id); 2618 return 0; 2619 } 2620 2621 /** Destroy table */ 2622 int 2623 port_flow_template_table_destroy(portid_t port_id, 2624 uint32_t n, const uint32_t *table) 2625 { 2626 struct rte_port *port; 2627 struct port_table **tmp; 2628 int ret = 0; 2629 2630 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2631 port_id == (portid_t)RTE_PORT_ALL) 2632 return -EINVAL; 2633 port = &ports[port_id]; 2634 tmp = &port->table_list; 2635 while (*tmp) { 2636 uint32_t i; 2637 2638 for (i = 0; i != n; ++i) { 2639 struct rte_flow_error error; 2640 struct port_table *pt = *tmp; 2641 2642 if (table[i] != pt->id) 2643 continue; 2644 /* 2645 * Poisoning to make sure PMDs update it in case 2646 * of error. 2647 */ 2648 memset(&error, 0x33, sizeof(error)); 2649 2650 if (pt->table && 2651 rte_flow_template_table_destroy(port_id, 2652 pt->table, 2653 &error)) { 2654 ret = port_flow_complain(&error); 2655 continue; 2656 } 2657 *tmp = pt->next; 2658 printf("Template table #%u destroyed\n", pt->id); 2659 free(pt); 2660 break; 2661 } 2662 if (i == n) 2663 tmp = &(*tmp)->next; 2664 } 2665 return ret; 2666 } 2667 2668 /** Flush table */ 2669 int 2670 port_flow_template_table_flush(portid_t port_id) 2671 { 2672 struct rte_port *port; 2673 struct port_table **tmp; 2674 int ret = 0; 2675 2676 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2677 port_id == (portid_t)RTE_PORT_ALL) 2678 return -EINVAL; 2679 port = &ports[port_id]; 2680 tmp = &port->table_list; 2681 while (*tmp) { 2682 struct rte_flow_error error; 2683 struct port_table *pt = *tmp; 2684 2685 /* 2686 * Poisoning to make sure PMDs update it in case 2687 * of error. 2688 */ 2689 memset(&error, 0x33, sizeof(error)); 2690 2691 if (pt->table && 2692 rte_flow_template_table_destroy(port_id, 2693 pt->table, 2694 &error)) { 2695 ret = port_flow_complain(&error); 2696 printf("Template table #%u not destroyed\n", pt->id); 2697 tmp = &pt->next; 2698 } else { 2699 *tmp = pt->next; 2700 free(pt); 2701 } 2702 } 2703 return ret; 2704 } 2705 2706 /** Enqueue create flow rule operation. 
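 *
 * Usage sketch (illustrative values; assumes port 0 was set up with at
 * least one flow queue and that template table #0 below exists with one
 * pattern template and one actions template):
 *
 *	static const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	static const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	port_queue_flow_create(0, 0, false, 0, UINT32_MAX, 0, 0,
 *			       pattern, actions);
 *
 * A rule_idx of UINT32_MAX goes through rte_flow_async_create(); any
 * other value inserts at that fixed index via
 * rte_flow_async_create_by_index().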
*/ 2707 int 2708 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2709 bool postpone, uint32_t table_id, uint32_t rule_idx, 2710 uint32_t pattern_idx, uint32_t actions_idx, 2711 const struct rte_flow_item *pattern, 2712 const struct rte_flow_action *actions) 2713 { 2714 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2715 struct rte_flow *flow; 2716 struct rte_port *port; 2717 struct port_flow *pf; 2718 struct port_table *pt; 2719 uint32_t id = 0; 2720 bool found; 2721 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2722 struct rte_flow_action_age *age = age_action_get(actions); 2723 struct queue_job *job; 2724 2725 port = &ports[port_id]; 2726 if (port->flow_list) { 2727 if (port->flow_list->id == UINT32_MAX) { 2728 printf("Highest rule ID is already assigned," 2729 " delete it first"); 2730 return -ENOMEM; 2731 } 2732 id = port->flow_list->id + 1; 2733 } 2734 2735 if (queue_id >= port->queue_nb) { 2736 printf("Queue #%u is invalid\n", queue_id); 2737 return -EINVAL; 2738 } 2739 2740 found = false; 2741 pt = port->table_list; 2742 while (pt) { 2743 if (table_id == pt->id) { 2744 found = true; 2745 break; 2746 } 2747 pt = pt->next; 2748 } 2749 if (!found) { 2750 printf("Table #%u is invalid\n", table_id); 2751 return -EINVAL; 2752 } 2753 2754 if (pattern_idx >= pt->nb_pattern_templates) { 2755 printf("Pattern template index #%u is invalid," 2756 " %u templates present in the table\n", 2757 pattern_idx, pt->nb_pattern_templates); 2758 return -EINVAL; 2759 } 2760 if (actions_idx >= pt->nb_actions_templates) { 2761 printf("Actions template index #%u is invalid," 2762 " %u templates present in the table\n", 2763 actions_idx, pt->nb_actions_templates); 2764 return -EINVAL; 2765 } 2766 2767 job = calloc(1, sizeof(*job)); 2768 if (!job) { 2769 printf("Queue flow create job allocate failed\n"); 2770 return -ENOMEM; 2771 } 2772 job->type = QUEUE_JOB_TYPE_FLOW_CREATE; 2773 2774 pf = port_flow_new(&pt->flow_attr, pattern, actions, &error); 2775 if (!pf) { 2776 free(job); 2777 return port_flow_complain(&error); 2778 } 2779 if (age) { 2780 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2781 age->context = &pf->age_type; 2782 } 2783 /* Poisoning to make sure PMDs update it in case of error. */ 2784 memset(&error, 0x11, sizeof(error)); 2785 if (rule_idx == UINT32_MAX) 2786 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2787 pattern, pattern_idx, actions, actions_idx, job, &error); 2788 else 2789 flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table, 2790 rule_idx, actions, actions_idx, job, &error); 2791 if (!flow) { 2792 uint64_t flow_id = pf->id; 2793 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2794 free(job); 2795 return port_flow_complain(&error); 2796 } 2797 2798 pf->next = port->flow_list; 2799 pf->id = id; 2800 pf->table = pt; 2801 pf->flow = flow; 2802 job->pf = pf; 2803 port->flow_list = pf; 2804 printf("Flow rule #%"PRIu64" creation enqueued\n", pf->id); 2805 return 0; 2806 } 2807 2808 /** Enqueue number of destroy flow rules operations. 
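 *
 * Sketch (illustrative rule IDs; destruction only takes effect on the
 * NIC once the queue is pushed and the results are pulled):
 *
 *	static const uint64_t rule_ids[] = { 0, 1 };
 *
 *	port_queue_flow_destroy(0, 0, true, 2, rule_ids);
 *	port_queue_flow_push(0, 0);
 *	port_queue_flow_pull(0, 0);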
*/ 2809 int 2810 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2811 bool postpone, uint32_t n, const uint64_t *rule) 2812 { 2813 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2814 struct rte_port *port; 2815 struct port_flow **tmp; 2816 int ret = 0; 2817 struct queue_job *job; 2818 2819 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2820 port_id == (portid_t)RTE_PORT_ALL) 2821 return -EINVAL; 2822 port = &ports[port_id]; 2823 2824 if (queue_id >= port->queue_nb) { 2825 printf("Queue #%u is invalid\n", queue_id); 2826 return -EINVAL; 2827 } 2828 2829 tmp = &port->flow_list; 2830 while (*tmp) { 2831 uint32_t i; 2832 2833 for (i = 0; i != n; ++i) { 2834 struct rte_flow_error error; 2835 struct port_flow *pf = *tmp; 2836 2837 if (rule[i] != pf->id) 2838 continue; 2839 /* 2840 * Poisoning to make sure PMD 2841 * update it in case of error. 2842 */ 2843 memset(&error, 0x33, sizeof(error)); 2844 job = calloc(1, sizeof(*job)); 2845 if (!job) { 2846 printf("Queue flow destroy job allocate failed\n"); 2847 return -ENOMEM; 2848 } 2849 job->type = QUEUE_JOB_TYPE_FLOW_DESTROY; 2850 job->pf = pf; 2851 2852 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2853 pf->flow, job, &error)) { 2854 free(job); 2855 ret = port_flow_complain(&error); 2856 continue; 2857 } 2858 printf("Flow rule #%"PRIu64" destruction enqueued\n", 2859 pf->id); 2860 *tmp = pf->next; 2861 break; 2862 } 2863 if (i == n) 2864 tmp = &(*tmp)->next; 2865 } 2866 return ret; 2867 } 2868 2869 static void 2870 queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2871 struct port_indirect_action *pia, 2872 struct queue_job *job, 2873 const struct rte_flow_op_attr *attr, 2874 const struct rte_flow_indir_action_conf *conf, 2875 const struct rte_flow_action *action, 2876 struct rte_flow_error *error) 2877 { 2878 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2879 struct rte_flow_action_age *age = 2880 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2881 2882 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2883 age->context = &pia->age_type; 2884 } 2885 /* Poisoning to make sure PMDs update it in case of error. */ 2886 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2887 attr, conf, action, 2888 job, error); 2889 pia->type = action->type; 2890 } 2891 2892 static void 2893 queue_action_list_handle_create(portid_t port_id, uint32_t queue_id, 2894 struct port_indirect_action *pia, 2895 struct queue_job *job, 2896 const struct rte_flow_op_attr *attr, 2897 const struct rte_flow_indir_action_conf *conf, 2898 const struct rte_flow_action *action, 2899 struct rte_flow_error *error) 2900 { 2901 /* Poisoning to make sure PMDs update it in case of error. */ 2902 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 2903 pia->list_handle = rte_flow_async_action_list_handle_create 2904 (port_id, queue_id, attr, conf, action, 2905 job, error); 2906 } 2907 2908 /** Enqueue update flow rule operation. 
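 *
 * Sketch (illustrative; swaps the actions of existing rule #0 for a new
 * set conforming to actions template #0 of its table):
 *
 *	static const struct rte_flow_action_queue qconf = { .index = 0 };
 *	static const struct rte_flow_action new_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &qconf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	port_queue_flow_update(0, 0, false, 0, 0, new_actions);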
*/ 2909 int 2910 port_queue_flow_update(portid_t port_id, queueid_t queue_id, 2911 bool postpone, uint32_t rule_idx, uint32_t actions_idx, 2912 const struct rte_flow_action *actions) 2913 { 2914 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2915 struct rte_port *port; 2916 struct port_flow *pf, *uf; 2917 struct port_flow **tmp; 2918 struct port_table *pt; 2919 bool found; 2920 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2921 struct rte_flow_action_age *age = age_action_get(actions); 2922 struct queue_job *job; 2923 2924 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2925 port_id == (portid_t)RTE_PORT_ALL) 2926 return -EINVAL; 2927 port = &ports[port_id]; 2928 2929 if (queue_id >= port->queue_nb) { 2930 printf("Queue #%u is invalid\n", queue_id); 2931 return -EINVAL; 2932 } 2933 2934 found = false; 2935 tmp = &port->flow_list; 2936 while (*tmp) { 2937 pf = *tmp; 2938 if (rule_idx == pf->id) { 2939 found = true; 2940 break; 2941 } 2942 tmp = &(*tmp)->next; 2943 } 2944 if (!found) { 2945 printf("Flow rule #%u is invalid\n", rule_idx); 2946 return -EINVAL; 2947 } 2948 2949 pt = pf->table; 2950 if (actions_idx >= pt->nb_actions_templates) { 2951 printf("Actions template index #%u is invalid," 2952 " %u templates present in the table\n", 2953 actions_idx, pt->nb_actions_templates); 2954 return -EINVAL; 2955 } 2956 2957 job = calloc(1, sizeof(*job)); 2958 if (!job) { 2959 printf("Queue flow update job allocate failed\n"); 2960 return -ENOMEM; 2961 } 2962 job->type = QUEUE_JOB_TYPE_FLOW_UPDATE; 2963 2964 uf = port_flow_new(&pt->flow_attr, pf->rule.pattern_ro, actions, &error); 2965 if (!uf) { 2966 free(job); 2967 return port_flow_complain(&error); 2968 } 2969 2970 if (age) { 2971 uf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2972 age->context = &uf->age_type; 2973 } 2974 2975 /* 2976 * Poisoning to make sure PMDs update it in case of error. 2977 */ 2978 memset(&error, 0x44, sizeof(error)); 2979 if (rte_flow_async_actions_update(port_id, queue_id, &op_attr, pf->flow, 2980 actions, actions_idx, job, &error)) { 2981 free(uf); 2982 free(job); 2983 return port_flow_complain(&error); 2984 } 2985 uf->next = pf->next; 2986 uf->id = pf->id; 2987 uf->table = pt; 2988 uf->flow = pf->flow; 2989 *tmp = uf; 2990 job->pf = pf; 2991 2992 printf("Flow rule #%"PRIu64" update enqueued\n", pf->id); 2993 return 0; 2994 } 2995 2996 /** Enqueue indirect action create operation. */ 2997 int 2998 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2999 bool postpone, uint32_t id, 3000 const struct rte_flow_indir_action_conf *conf, 3001 const struct rte_flow_action *action) 3002 { 3003 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3004 struct rte_port *port; 3005 struct port_indirect_action *pia; 3006 int ret; 3007 struct rte_flow_error error; 3008 struct queue_job *job; 3009 bool is_indirect_list = action[1].type != RTE_FLOW_ACTION_TYPE_END; 3010 3011 3012 ret = action_alloc(port_id, id, &pia); 3013 if (ret) 3014 return ret; 3015 3016 port = &ports[port_id]; 3017 if (queue_id >= port->queue_nb) { 3018 printf("Queue #%u is invalid\n", queue_id); 3019 return -EINVAL; 3020 } 3021 job = calloc(1, sizeof(*job)); 3022 if (!job) { 3023 printf("Queue action create job allocate failed\n"); 3024 return -ENOMEM; 3025 } 3026 job->type = QUEUE_JOB_TYPE_ACTION_CREATE; 3027 job->pia = pia; 3028 3029 /* Poisoning to make sure PMDs update it in case of error. 
*/ 3030 memset(&error, 0x88, sizeof(error)); 3031 3032 if (is_indirect_list) 3033 queue_action_list_handle_create(port_id, queue_id, pia, job, 3034 &attr, conf, action, &error); 3035 else 3036 queue_action_handle_create(port_id, queue_id, pia, job, &attr, 3037 conf, action, &error); 3038 3039 if (!pia->handle) { 3040 uint32_t destroy_id = pia->id; 3041 port_queue_action_handle_destroy(port_id, queue_id, 3042 postpone, 1, &destroy_id); 3043 free(job); 3044 return port_flow_complain(&error); 3045 } 3046 printf("Indirect action #%u creation queued\n", pia->id); 3047 return 0; 3048 } 3049 3050 /** Enqueue indirect action destroy operation. */ 3051 int 3052 port_queue_action_handle_destroy(portid_t port_id, 3053 uint32_t queue_id, bool postpone, 3054 uint32_t n, const uint32_t *actions) 3055 { 3056 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3057 struct rte_port *port; 3058 struct port_indirect_action **tmp; 3059 int ret = 0; 3060 struct queue_job *job; 3061 3062 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3063 port_id == (portid_t)RTE_PORT_ALL) 3064 return -EINVAL; 3065 port = &ports[port_id]; 3066 3067 if (queue_id >= port->queue_nb) { 3068 printf("Queue #%u is invalid\n", queue_id); 3069 return -EINVAL; 3070 } 3071 3072 tmp = &port->actions_list; 3073 while (*tmp) { 3074 uint32_t i; 3075 3076 for (i = 0; i != n; ++i) { 3077 struct rte_flow_error error; 3078 struct port_indirect_action *pia = *tmp; 3079 3080 if (actions[i] != pia->id) 3081 continue; 3082 /* 3083 * Poisoning to make sure PMDs update it in case 3084 * of error. 3085 */ 3086 memset(&error, 0x99, sizeof(error)); 3087 job = calloc(1, sizeof(*job)); 3088 if (!job) { 3089 printf("Queue action destroy job allocate failed\n"); 3090 return -ENOMEM; 3091 } 3092 job->type = QUEUE_JOB_TYPE_ACTION_DESTROY; 3093 job->pia = pia; 3094 ret = pia->type == RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 3095 rte_flow_async_action_list_handle_destroy 3096 (port_id, queue_id, 3097 &attr, pia->list_handle, 3098 job, &error) : 3099 rte_flow_async_action_handle_destroy 3100 (port_id, queue_id, &attr, pia->handle, 3101 job, &error); 3102 if (ret) { 3103 free(job); 3104 ret = port_flow_complain(&error); 3105 continue; 3106 } 3107 *tmp = pia->next; 3108 printf("Indirect action #%u destruction queued\n", 3109 pia->id); 3110 break; 3111 } 3112 if (i == n) 3113 tmp = &(*tmp)->next; 3114 } 3115 return ret; 3116 } 3117 3118 /** Enqueue indirect action update operation. 
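 *
 * Sketch (illustrative; refreshes indirect AGE action #5 with a new
 * 10-second timeout through flow queue 0 of port 0):
 *
 *	static const struct rte_flow_action_age age_conf = { .timeout = 10 };
 *	static const struct rte_flow_action age_update = {
 *		.type = RTE_FLOW_ACTION_TYPE_AGE,
 *		.conf = &age_conf,
 *	};
 *
 *	port_queue_action_handle_update(0, 0, false, 5, &age_update);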
*/ 3119 int 3120 port_queue_action_handle_update(portid_t port_id, 3121 uint32_t queue_id, bool postpone, uint32_t id, 3122 const struct rte_flow_action *action) 3123 { 3124 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3125 struct rte_port *port; 3126 struct rte_flow_error error; 3127 struct rte_flow_action_handle *action_handle; 3128 struct queue_job *job; 3129 struct port_indirect_action *pia; 3130 struct rte_flow_update_meter_mark mtr_update = { 0 }; /* keep unset *_valid flags at zero */ 3131 const void *update; 3132 3133 action_handle = port_action_handle_get_by_id(port_id, id); 3134 if (!action_handle) 3135 return -EINVAL; 3136 3137 port = &ports[port_id]; 3138 if (queue_id >= port->queue_nb) { 3139 printf("Queue #%u is invalid\n", queue_id); 3140 return -EINVAL; 3141 } 3142 3143 job = calloc(1, sizeof(*job)); 3144 if (!job) { 3145 printf("Queue action update job allocate failed\n"); 3146 return -ENOMEM; 3147 } 3148 job->type = QUEUE_JOB_TYPE_ACTION_UPDATE; 3149 3150 pia = action_get_by_id(port_id, id); 3151 if (!pia) { 3152 free(job); 3153 return -EINVAL; 3154 } 3155 3156 switch (pia->type) { 3157 case RTE_FLOW_ACTION_TYPE_AGE: 3158 update = action->conf; 3159 break; 3160 case RTE_FLOW_ACTION_TYPE_METER_MARK: 3161 rte_memcpy(&mtr_update.meter_mark, action->conf, 3162 sizeof(struct rte_flow_action_meter_mark)); 3163 if (mtr_update.meter_mark.profile) 3164 mtr_update.profile_valid = 1; 3165 if (mtr_update.meter_mark.policy) 3166 mtr_update.policy_valid = 1; 3167 mtr_update.color_mode_valid = 1; 3168 mtr_update.state_valid = 1; 3169 update = &mtr_update; 3170 break; 3171 default: 3172 update = action; 3173 break; 3174 } 3175 3176 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 3177 action_handle, update, job, &error)) { 3178 free(job); 3179 return port_flow_complain(&error); 3180 } 3181 printf("Indirect action #%u update queued\n", id); 3182 return 0; 3183 } 3184 3185 void 3186 port_queue_action_handle_query_update(portid_t port_id, 3187 uint32_t queue_id, bool postpone, 3188 uint32_t id, 3189 enum rte_flow_query_update_mode qu_mode, 3190 const struct rte_flow_action *action) 3191 { 3192 int ret; 3193 struct rte_flow_error error; 3194 struct port_indirect_action *pia = action_get_by_id(port_id, id); 3195 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3196 struct queue_job *job; 3197 3198 if (!pia || !pia->handle) 3199 return; 3200 job = calloc(1, sizeof(*job)); 3201 if (!job) 3202 return; 3203 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3204 job->pia = pia; 3205 3206 ret = rte_flow_async_action_handle_query_update(port_id, queue_id, 3207 &attr, pia->handle, 3208 action, 3209 &job->query, 3210 qu_mode, job, 3211 &error); 3212 if (ret) { 3213 port_flow_complain(&error); 3214 free(job); 3215 } else { 3216 printf("port-%u: indirect action #%u update-and-query queued\n", 3217 port_id, id); 3218 } 3219 } 3220 3221 /** Enqueue indirect action query operation. */ 3222 int 3223 port_queue_action_handle_query(portid_t port_id, 3224 uint32_t queue_id, bool postpone, uint32_t id) 3225 { 3226 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3227 struct rte_port *port; 3228 struct rte_flow_error error; 3229 struct rte_flow_action_handle *action_handle; 3230 struct port_indirect_action *pia; 3231 struct queue_job *job; 3232 3233 pia = action_get_by_id(port_id, id); 3234 action_handle = pia ? 
pia->handle : NULL; 3235 if (!action_handle) 3236 return -EINVAL; 3237 3238 port = &ports[port_id]; 3239 if (queue_id >= port->queue_nb) { 3240 printf("Queue #%u is invalid\n", queue_id); 3241 return -EINVAL; 3242 } 3243 3244 job = calloc(1, sizeof(*job)); 3245 if (!job) { 3246 printf("Queue action query job allocate failed\n"); 3247 return -ENOMEM; 3248 } 3249 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3250 job->pia = pia; 3251 3252 if (rte_flow_async_action_handle_query(port_id, queue_id, &attr, 3253 action_handle, &job->query, job, &error)) { 3254 free(job); 3255 return port_flow_complain(&error); 3256 } 3257 printf("Indirect action #%u query queued\n", id); 3258 return 0; 3259 } 3260 3261 /** Push all the queue operations in the queue to the NIC. */ 3262 int 3263 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 3264 { 3265 struct rte_port *port; 3266 struct rte_flow_error error; 3267 int ret = 0; 3268 3269 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3270 port_id == (portid_t)RTE_PORT_ALL) 3271 return -EINVAL; 3272 port = &ports[port_id]; 3273 3274 if (queue_id >= port->queue_nb) { 3275 printf("Queue #%u is invalid\n", queue_id); 3276 return -EINVAL; 3277 } 3278 3279 memset(&error, 0x55, sizeof(error)); 3280 ret = rte_flow_push(port_id, queue_id, &error); 3281 if (ret < 0) { 3282 printf("Failed to push operations in the queue\n"); 3283 return -EINVAL; 3284 } 3285 printf("Queue #%u operations pushed\n", queue_id); 3286 return ret; 3287 } 3288 3289 /** Calculate the hash result for a given pattern in a given table. */ 3290 int 3291 port_flow_hash_calc(portid_t port_id, uint32_t table_id, 3292 uint8_t pattern_template_index, const struct rte_flow_item pattern[]) 3293 { 3294 uint32_t hash; 3295 bool found; 3296 struct port_table *pt; 3297 struct rte_port *port; 3298 struct rte_flow_error error; 3299 int ret = 0; 3300 3301 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3302 port_id == (portid_t)RTE_PORT_ALL) 3303 return -EINVAL; 3304 port = &ports[port_id]; 3305 3306 found = false; 3307 pt = port->table_list; 3308 while (pt) { 3309 if (table_id == pt->id) { 3310 found = true; 3311 break; 3312 } 3313 pt = pt->next; 3314 } 3315 if (!found) { 3316 printf("Table #%u is invalid\n", table_id); 3317 return -EINVAL; 3318 } 3319 3320 memset(&error, 0x55, sizeof(error)); 3321 ret = rte_flow_calc_table_hash(port_id, pt->table, pattern, 3322 pattern_template_index, &hash, &error); 3323 if (ret < 0) { 3324 printf("Failed to calculate hash "); 3325 switch (abs(ret)) { 3326 case ENODEV: 3327 printf("no such device\n"); 3328 break; 3329 case ENOTSUP: 3330 printf("device doesn't support this operation\n"); 3331 break; 3332 default: 3333 printf("\n"); 3334 break; 3335 } 3336 return ret; 3337 } 3338 printf("Hash result 0x%x\n", hash); 3339 return 0; 3340 } 3341 3342 /** Destroy aged flows in batches bounded by the flow queue size. 
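 *
 * Each batch of at most port->queue_sz destroy operations is enqueued
 * postponed, flushed to the NIC with rte_flow_push(), and its results
 * drained with rte_flow_pull() before the next batch starts, so the
 * result array allocated up front never needs to grow.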
*/ 3343 static int 3344 port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id, 3345 const uint64_t *rule, int nb_flows) 3346 { 3347 struct rte_port *port = &ports[port_id]; 3348 struct rte_flow_op_result *res; 3349 struct rte_flow_error error; 3350 uint32_t n = nb_flows; 3351 int ret = 0; 3352 int i; 3353 3354 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3355 if (!res) { 3356 printf("Failed to allocate memory for pulled results\n"); 3357 return -ENOMEM; 3358 } 3359 3360 memset(&error, 0x66, sizeof(error)); 3361 while (nb_flows > 0) { 3362 int success = 0; 3363 3364 if (n > port->queue_sz) 3365 n = port->queue_sz; 3366 ret = port_queue_flow_destroy(port_id, queue_id, true, n, rule); 3367 if (ret < 0) { 3368 free(res); 3369 return ret; 3370 } 3371 ret = rte_flow_push(port_id, queue_id, &error); 3372 if (ret < 0) { 3373 printf("Failed to push operations in the queue: %s\n", 3374 strerror(-ret)); 3375 free(res); 3376 return ret; 3377 } 3378 while (success < (int)n) { /* wait only for this batch's results */ 3379 ret = rte_flow_pull(port_id, queue_id, res, 3380 port->queue_sz, &error); 3381 if (ret < 0) { 3382 printf("Failed to pull operation results: %s\n", 3383 strerror(-ret)); 3384 free(res); 3385 return ret; 3386 } 3387 3388 for (i = 0; i < ret; i++) { 3389 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3390 success++; 3391 } 3392 } 3393 rule += n; 3394 nb_flows -= n; 3395 n = nb_flows; 3396 } 3397 3398 free(res); 3399 return ret; 3400 } 3401 3402 /** List and optionally destroy all aged flows per queue. */ 3403 void 3404 port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy) 3405 { 3406 void **contexts; 3407 int nb_context, total = 0, idx; 3408 uint64_t *rules = NULL; 3409 struct rte_port *port; 3410 struct rte_flow_error error; 3411 enum age_action_context_type *type; 3412 union { 3413 struct port_flow *pf; 3414 struct port_indirect_action *pia; 3415 } ctx; 3416 3417 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3418 port_id == (portid_t)RTE_PORT_ALL) 3419 return; 3420 port = &ports[port_id]; 3421 if (queue_id >= port->queue_nb) { 3422 printf("Error: queue #%u is invalid\n", queue_id); 3423 return; 3424 } 3425 total = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error); 3426 if (total < 0) { 3427 port_flow_complain(&error); 3428 return; 3429 } 3430 printf("Port %u queue %u total aged flows: %d\n", 3431 port_id, queue_id, total); 3432 if (total == 0) 3433 return; 3434 contexts = calloc(total, sizeof(void *)); 3435 if (contexts == NULL) { 3436 printf("Cannot allocate contexts for aged flow\n"); 3437 return; 3438 } 3439 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3440 nb_context = rte_flow_get_q_aged_flows(port_id, queue_id, contexts, 3441 total, &error); 3442 if (nb_context > total) { 3443 printf("Port %u queue %u get aged flows count(%d) > total(%d)\n", 3444 port_id, queue_id, nb_context, total); 3445 free(contexts); 3446 return; 3447 } 3448 if (destroy) { 3449 rules = malloc(sizeof(*rules) * nb_context); /* flow IDs are 64-bit */ 3450 if (rules == NULL) 3451 printf("Cannot allocate memory for destroying aged flows\n"); 3452 } 3453 total = 0; 3454 for (idx = 0; idx < nb_context; idx++) { 3455 if (!contexts[idx]) { 3456 printf("Error: get Null context in port %u queue %u\n", 3457 port_id, queue_id); 3458 continue; 3459 } 3460 type = (enum age_action_context_type *)contexts[idx]; 3461 switch (*type) { 3462 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3463 ctx.pf = container_of(type, struct port_flow, age_type); 3464 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3465 "\t%c%c%c\t\n", 3466 "Flow", 3467 
ctx.pf->id, 3468 ctx.pf->rule.attr->group, 3469 ctx.pf->rule.attr->priority, 3470 ctx.pf->rule.attr->ingress ? 'i' : '-', 3471 ctx.pf->rule.attr->egress ? 'e' : '-', 3472 ctx.pf->rule.attr->transfer ? 't' : '-'); 3473 if (rules != NULL) { 3474 rules[total] = ctx.pf->id; 3475 total++; 3476 } 3477 break; 3478 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3479 ctx.pia = container_of(type, 3480 struct port_indirect_action, 3481 age_type); 3482 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3483 ctx.pia->id); 3484 break; 3485 default: 3486 printf("Error: invalid context type on port %u\n", port_id); 3487 break; 3488 } 3489 } 3490 if (rules != NULL) { 3491 port_queue_aged_flow_destroy(port_id, queue_id, rules, total); 3492 free(rules); 3493 } 3494 printf("\n%d flows destroyed\n", total); 3495 free(contexts); 3496 } 3497 3498 /** Pull queue operation results from the queue. */ 3499 int 3500 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 3501 { 3502 struct rte_port *port; 3503 struct rte_flow_op_result *res; 3504 struct rte_flow_error error; 3505 int ret = 0; 3506 int success = 0; 3507 int i; 3508 struct queue_job *job; 3509 3510 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3511 port_id == (portid_t)RTE_PORT_ALL) 3512 return -EINVAL; 3513 port = &ports[port_id]; 3514 3515 if (queue_id >= port->queue_nb) { 3516 printf("Queue #%u is invalid\n", queue_id); 3517 return -EINVAL; 3518 } 3519 3520 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3521 if (!res) { 3522 printf("Failed to allocate memory for pulled results\n"); 3523 return -ENOMEM; 3524 } 3525 3526 memset(&error, 0x66, sizeof(error)); 3527 ret = rte_flow_pull(port_id, queue_id, res, 3528 port->queue_sz, &error); 3529 if (ret < 0) { 3530 printf("Failed to pull operation results\n"); 3531 free(res); 3532 return -EINVAL; 3533 } 3534 3535 for (i = 0; i < ret; i++) { 3536 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3537 success++; 3538 job = (struct queue_job *)res[i].user_data; 3539 if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY || 3540 job->type == QUEUE_JOB_TYPE_FLOW_UPDATE) 3541 free(job->pf); 3542 else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY) 3543 free(job->pia); 3544 else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY) 3545 port_action_handle_query_dump(port_id, job->pia, 3546 &job->query); 3547 free(job); 3548 } 3549 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 3550 queue_id, ret, ret - success, success); 3551 free(res); 3552 return ret; 3553 } 3554 3555 /* Set group miss actions */ 3556 int 3557 port_queue_group_set_miss_actions(portid_t port_id, const struct rte_flow_attr *attr, 3558 const struct rte_flow_action *actions) 3559 { 3560 struct rte_flow_group_attr gattr = { 3561 .ingress = attr->ingress, 3562 .egress = attr->egress, 3563 .transfer = attr->transfer, 3564 }; 3565 struct rte_flow_error error; 3566 int ret = 0; 3567 3568 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3569 port_id == (portid_t)RTE_PORT_ALL) 3570 return -EINVAL; 3571 3572 memset(&error, 0x66, sizeof(error)); 3573 ret = rte_flow_group_set_miss_actions(port_id, attr->group, &gattr, actions, &error); 3574 3575 if (ret < 0) 3576 return port_flow_complain(&error); 3577 3578 printf("Group #%u set miss actions succeeded\n", attr->group); 3579 return ret; 3580 } 3581 3582 /** Create flow rule. 
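 *
 * Sketch (illustrative; roughly what the flow command parser passes in
 * for "flow create 0 ingress pattern eth / end actions drop / end"):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct tunnel_ops no_tunnel = { .enabled = 0 };
 *	static const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	static const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	port_flow_create(0, &attr, pattern, actions, &no_tunnel, 0);
 *
 * A zero user_id leaves the rule addressable only by its sequential id.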
*/ 3583 int 3584 port_flow_create(portid_t port_id, 3585 const struct rte_flow_attr *attr, 3586 const struct rte_flow_item *pattern, 3587 const struct rte_flow_action *actions, 3588 const struct tunnel_ops *tunnel_ops, 3589 uintptr_t user_id) 3590 { 3591 struct rte_flow *flow; 3592 struct rte_port *port; 3593 struct port_flow *pf; 3594 uint32_t id = 0; 3595 struct rte_flow_error error; 3596 struct port_flow_tunnel *pft = NULL; 3597 struct rte_flow_action_age *age = age_action_get(actions); 3598 3599 port = &ports[port_id]; 3600 if (port->flow_list) { 3601 if (port->flow_list->id == UINT32_MAX) { 3602 fprintf(stderr, 3603 "Highest rule ID is already assigned, delete it first"); 3604 return -ENOMEM; 3605 } 3606 id = port->flow_list->id + 1; 3607 } 3608 if (tunnel_ops->enabled) { 3609 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 3610 actions, tunnel_ops); 3611 if (!pft) 3612 return -ENOENT; 3613 if (pft->items) 3614 pattern = pft->items; 3615 if (pft->actions) 3616 actions = pft->actions; 3617 } 3618 pf = port_flow_new(attr, pattern, actions, &error); 3619 if (!pf) 3620 return port_flow_complain(&error); 3621 if (age) { 3622 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 3623 age->context = &pf->age_type; 3624 } 3625 /* Poisoning to make sure PMDs update it in case of error. */ 3626 memset(&error, 0x22, sizeof(error)); 3627 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 3628 if (!flow) { 3629 if (tunnel_ops->enabled) 3630 port_flow_tunnel_offload_cmd_release(port_id, 3631 tunnel_ops, pft); 3632 free(pf); 3633 return port_flow_complain(&error); 3634 } 3635 pf->next = port->flow_list; 3636 pf->id = id; 3637 pf->user_id = user_id; 3638 pf->flow = flow; 3639 port->flow_list = pf; 3640 if (tunnel_ops->enabled) 3641 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 3642 if (user_id) 3643 printf("Flow rule #%"PRIu64" created, user-id 0x%"PRIx64"\n", 3644 pf->id, pf->user_id); 3645 else 3646 printf("Flow rule #%"PRIu64" created\n", pf->id); 3647 return 0; 3648 } 3649 3650 /** Destroy a number of flow rules. */ 3651 int 3652 port_flow_destroy(portid_t port_id, uint32_t n, const uint64_t *rule, 3653 bool is_user_id) 3654 { 3655 struct rte_port *port; 3656 struct port_flow **tmp; 3657 int ret = 0; 3658 3659 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3660 port_id == (portid_t)RTE_PORT_ALL) 3661 return -EINVAL; 3662 port = &ports[port_id]; 3663 tmp = &port->flow_list; 3664 while (*tmp) { 3665 uint32_t i; 3666 3667 for (i = 0; i != n; ++i) { 3668 struct rte_flow_error error; 3669 struct port_flow *pf = *tmp; 3670 3671 if (rule[i] != (is_user_id ? pf->user_id : pf->id)) 3672 continue; 3673 /* 3674 * Poisoning to make sure PMDs update it in case 3675 * of error. 3676 */ 3677 memset(&error, 0x33, sizeof(error)); 3678 if (rte_flow_destroy(port_id, pf->flow, &error)) { 3679 ret = port_flow_complain(&error); 3680 continue; 3681 } 3682 if (is_user_id) 3683 printf("Flow rule #%"PRIu64" destroyed, " 3684 "user-id 0x%"PRIx64"\n", 3685 pf->id, pf->user_id); 3686 else 3687 printf("Flow rule #%"PRIu64" destroyed\n", 3688 pf->id); 3689 *tmp = pf->next; 3690 free(pf); 3691 break; 3692 } 3693 if (i == n) 3694 tmp = &(*tmp)->next; 3695 } 3696 return ret; 3697 } 3698 3699 /** Remove all flow rules. 
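 *
 * Backs the "flow flush <port>" command. The local flow_list bookkeeping
 * is released even when rte_flow_flush() reports an error, so testpmd's
 * view of the port is reset either way.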
*/ 3700 int 3701 port_flow_flush(portid_t port_id) 3702 { 3703 struct rte_flow_error error; 3704 struct rte_port *port; 3705 int ret = 0; 3706 3707 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3708 port_id == (portid_t)RTE_PORT_ALL) 3709 return -EINVAL; 3710 3711 port = &ports[port_id]; 3712 3713 if (port->flow_list == NULL) 3714 return ret; 3715 3716 /* Poisoning to make sure PMDs update it in case of error. */ 3717 memset(&error, 0x44, sizeof(error)); 3718 if (rte_flow_flush(port_id, &error)) { 3719 port_flow_complain(&error); 3720 } 3721 3722 while (port->flow_list) { 3723 struct port_flow *pf = port->flow_list->next; 3724 3725 free(port->flow_list); 3726 port->flow_list = pf; 3727 } 3728 return ret; 3729 } 3730 3731 /** Dump flow rules. */ 3732 int 3733 port_flow_dump(portid_t port_id, bool dump_all, uint64_t rule_id, 3734 const char *file_name, bool is_user_id) 3735 { 3736 int ret = 0; 3737 FILE *file = stdout; 3738 struct rte_flow_error error; 3739 struct rte_port *port; 3740 struct port_flow *pflow; 3741 struct rte_flow *tmpFlow = NULL; 3742 bool found = false; 3743 3744 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3745 port_id == (portid_t)RTE_PORT_ALL) 3746 return -EINVAL; 3747 3748 if (!dump_all) { 3749 port = &ports[port_id]; 3750 pflow = port->flow_list; 3751 while (pflow) { 3752 if (rule_id != 3753 (is_user_id ? pflow->user_id : pflow->id)) { 3754 pflow = pflow->next; 3755 } else { 3756 tmpFlow = pflow->flow; 3757 if (tmpFlow) 3758 found = true; 3759 break; 3760 } 3761 } 3762 if (found == false) { 3763 fprintf(stderr, "Failed to dump flow %"PRIu64"\n", 3764 rule_id); 3765 return -EINVAL; 3766 } 3767 } 3768 3769 if (file_name && strlen(file_name)) { 3770 file = fopen(file_name, "w"); 3771 if (!file) { 3772 fprintf(stderr, "Failed to create file %s: %s\n", 3773 file_name, strerror(errno)); 3774 return -errno; 3775 } 3776 } 3777 3778 if (!dump_all) 3779 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3780 else 3781 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3782 if (ret) { 3783 port_flow_complain(&error); 3784 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3785 } else 3786 printf("Flow dump finished\n"); 3787 if (file_name && strlen(file_name)) 3788 fclose(file); 3789 return ret; 3790 } 3791 3792 /** Query a flow rule. */ 3793 int 3794 port_flow_query(portid_t port_id, uint64_t rule, 3795 const struct rte_flow_action *action, bool is_user_id) 3796 { 3797 struct rte_flow_error error; 3798 struct rte_port *port; 3799 struct port_flow *pf; 3800 const char *name; 3801 union { 3802 struct rte_flow_query_count count; 3803 struct rte_flow_action_rss rss_conf; 3804 struct rte_flow_query_age age; 3805 } query; 3806 int ret; 3807 3808 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3809 port_id == (portid_t)RTE_PORT_ALL) 3810 return -EINVAL; 3811 port = &ports[port_id]; 3812 for (pf = port->flow_list; pf; pf = pf->next) 3813 if ((is_user_id ? 
pf->user_id : pf->id) == rule) 3814 break; 3815 if (!pf) { 3816 fprintf(stderr, "Flow rule #%"PRIu64" not found\n", rule); 3817 return -ENOENT; 3818 } 3819 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3820 &name, sizeof(name), 3821 (void *)(uintptr_t)action->type, &error); 3822 if (ret < 0) 3823 return port_flow_complain(&error); 3824 switch (action->type) { 3825 case RTE_FLOW_ACTION_TYPE_COUNT: 3826 case RTE_FLOW_ACTION_TYPE_RSS: 3827 case RTE_FLOW_ACTION_TYPE_AGE: 3828 break; 3829 default: 3830 fprintf(stderr, "Cannot query action type %d (%s)\n", 3831 action->type, name); 3832 return -ENOTSUP; 3833 } 3834 /* Poisoning to make sure PMDs update it in case of error. */ 3835 memset(&error, 0x55, sizeof(error)); 3836 memset(&query, 0, sizeof(query)); 3837 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3838 return port_flow_complain(&error); 3839 switch (action->type) { 3840 case RTE_FLOW_ACTION_TYPE_COUNT: 3841 printf("%s:\n" 3842 " hits_set: %u\n" 3843 " bytes_set: %u\n" 3844 " hits: %" PRIu64 "\n" 3845 " bytes: %" PRIu64 "\n", 3846 name, 3847 query.count.hits_set, 3848 query.count.bytes_set, 3849 query.count.hits, 3850 query.count.bytes); 3851 break; 3852 case RTE_FLOW_ACTION_TYPE_RSS: 3853 rss_config_display(&query.rss_conf); 3854 break; 3855 case RTE_FLOW_ACTION_TYPE_AGE: 3856 printf("%s:\n" 3857 " aged: %u\n" 3858 " sec_since_last_hit_valid: %u\n" 3859 " sec_since_last_hit: %" PRIu32 "\n", 3860 name, 3861 query.age.aged, 3862 query.age.sec_since_last_hit_valid, 3863 query.age.sec_since_last_hit); 3864 break; 3865 default: 3866 fprintf(stderr, 3867 "Cannot display result for action type %d (%s)\n", 3868 action->type, name); 3869 break; 3870 } 3871 return 0; 3872 } 3873 3874 /** List simply and destroy all aged flows. */ 3875 void 3876 port_flow_aged(portid_t port_id, uint8_t destroy) 3877 { 3878 void **contexts; 3879 int nb_context, total = 0, idx; 3880 struct rte_flow_error error; 3881 enum age_action_context_type *type; 3882 union { 3883 struct port_flow *pf; 3884 struct port_indirect_action *pia; 3885 } ctx; 3886 3887 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3888 port_id == (portid_t)RTE_PORT_ALL) 3889 return; 3890 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3891 printf("Port %u total aged flows: %d\n", port_id, total); 3892 if (total < 0) { 3893 port_flow_complain(&error); 3894 return; 3895 } 3896 if (total == 0) 3897 return; 3898 contexts = malloc(sizeof(void *) * total); 3899 if (contexts == NULL) { 3900 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3901 return; 3902 } 3903 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3904 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3905 if (nb_context != total) { 3906 fprintf(stderr, 3907 "Port:%d get aged flows count(%d) != total(%d)\n", 3908 port_id, nb_context, total); 3909 free(contexts); 3910 return; 3911 } 3912 total = 0; 3913 for (idx = 0; idx < nb_context; idx++) { 3914 if (!contexts[idx]) { 3915 fprintf(stderr, "Error: get Null context in port %u\n", 3916 port_id); 3917 continue; 3918 } 3919 type = (enum age_action_context_type *)contexts[idx]; 3920 switch (*type) { 3921 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3922 ctx.pf = container_of(type, struct port_flow, age_type); 3923 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3924 "\t%c%c%c\t\n", 3925 "Flow", 3926 ctx.pf->id, 3927 ctx.pf->rule.attr->group, 3928 ctx.pf->rule.attr->priority, 3929 ctx.pf->rule.attr->ingress ? 'i' : '-', 3930 ctx.pf->rule.attr->egress ? 
'e' : '-', 3931 ctx.pf->rule.attr->transfer ? 't' : '-'); 3932 if (destroy && !port_flow_destroy(port_id, 1, 3933 &ctx.pf->id, false)) 3934 total++; 3935 break; 3936 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3937 ctx.pia = container_of(type, 3938 struct port_indirect_action, age_type); 3939 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3940 ctx.pia->id); 3941 break; 3942 default: 3943 fprintf(stderr, "Error: invalid context type on port %u\n", 3944 port_id); 3945 break; 3946 } 3947 } 3948 printf("\n%d flows destroyed\n", total); 3949 free(contexts); 3950 } 3951 3952 /** List flow rules. */ 3953 void 3954 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3955 { 3956 struct rte_port *port; 3957 struct port_flow *pf; 3958 struct port_flow *list = NULL; 3959 uint32_t i; 3960 3961 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3962 port_id == (portid_t)RTE_PORT_ALL) 3963 return; 3964 port = &ports[port_id]; 3965 if (!port->flow_list) 3966 return; 3967 /* Sort flows by group, priority and ID. */ 3968 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3969 struct port_flow **tmp; 3970 const struct rte_flow_attr *curr = pf->rule.attr; 3971 3972 if (n) { 3973 /* Filter out unwanted groups. */ 3974 for (i = 0; i != n; ++i) 3975 if (curr->group == group[i]) 3976 break; 3977 if (i == n) 3978 continue; 3979 } 3980 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3981 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3982 3983 if (curr->group > comp->group || 3984 (curr->group == comp->group && 3985 curr->priority > comp->priority) || 3986 (curr->group == comp->group && 3987 curr->priority == comp->priority && 3988 pf->id > (*tmp)->id)) 3989 continue; 3990 break; 3991 } 3992 pf->tmp = *tmp; 3993 *tmp = pf; 3994 } 3995 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3996 for (pf = list; pf != NULL; pf = pf->tmp) { 3997 const struct rte_flow_item *item = pf->rule.pattern; 3998 const struct rte_flow_action *action = pf->rule.actions; 3999 const char *name; 4000 4001 printf("%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 4002 pf->id, 4003 pf->rule.attr->group, 4004 pf->rule.attr->priority, 4005 pf->rule.attr->ingress ? 'i' : '-', 4006 pf->rule.attr->egress ? 'e' : '-', 4007 pf->rule.attr->transfer ? 't' : '-'); 4008 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 4009 if ((uint32_t)item->type > INT_MAX) 4010 name = "PMD_INTERNAL"; 4011 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 4012 &name, sizeof(name), 4013 (void *)(uintptr_t)item->type, 4014 NULL) <= 0) 4015 name = "[UNKNOWN]"; 4016 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 4017 printf("%s ", name); 4018 ++item; 4019 } 4020 printf("=>"); 4021 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 4022 if ((uint32_t)action->type > INT_MAX) 4023 name = "PMD_INTERNAL"; 4024 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 4025 &name, sizeof(name), 4026 (void *)(uintptr_t)action->type, 4027 NULL) <= 0) 4028 name = "[UNKNOWN]"; 4029 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 4030 printf(" %s", name); 4031 ++action; 4032 } 4033 printf("\n"); 4034 } 4035 } 4036 4037 /** Restrict ingress traffic to the defined flow rules. */ 4038 int 4039 port_flow_isolate(portid_t port_id, int set) 4040 { 4041 struct rte_flow_error error; 4042 4043 /* Poisoning to make sure PMDs update it in case of error. */ 4044 memset(&error, 0x66, sizeof(error)); 4045 if (rte_flow_isolate(port_id, set, &error)) 4046 return port_flow_complain(&error); 4047 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 4048 port_id, 4049 set ? 
"now restricted" : "not restricted anymore"); 4050 return 0; 4051 } 4052 4053 /* 4054 * RX/TX ring descriptors display functions. 4055 */ 4056 int 4057 rx_queue_id_is_invalid(queueid_t rxq_id) 4058 { 4059 if (rxq_id < nb_rxq) 4060 return 0; 4061 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 4062 rxq_id, nb_rxq); 4063 return 1; 4064 } 4065 4066 int 4067 tx_queue_id_is_invalid(queueid_t txq_id) 4068 { 4069 if (txq_id < nb_txq) 4070 return 0; 4071 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 4072 txq_id, nb_txq); 4073 return 1; 4074 } 4075 4076 static int 4077 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 4078 { 4079 struct rte_port *port = &ports[port_id]; 4080 struct rte_eth_rxq_info rx_qinfo; 4081 int ret; 4082 4083 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 4084 if (ret == 0) { 4085 *ring_size = rx_qinfo.nb_desc; 4086 return ret; 4087 } 4088 4089 if (ret != -ENOTSUP) 4090 return ret; 4091 /* 4092 * If the rte_eth_rx_queue_info_get is not support for this PMD, 4093 * ring_size stored in testpmd will be used for validity verification. 4094 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 4095 * being 0, it will use a default value provided by PMDs to setup this 4096 * rxq. If the default value is 0, it will use the 4097 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 4098 */ 4099 if (port->nb_rx_desc[rxq_id]) 4100 *ring_size = port->nb_rx_desc[rxq_id]; 4101 else if (port->dev_info.default_rxportconf.ring_size) 4102 *ring_size = port->dev_info.default_rxportconf.ring_size; 4103 else 4104 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 4105 return 0; 4106 } 4107 4108 static int 4109 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 4110 { 4111 struct rte_port *port = &ports[port_id]; 4112 struct rte_eth_txq_info tx_qinfo; 4113 int ret; 4114 4115 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 4116 if (ret == 0) { 4117 *ring_size = tx_qinfo.nb_desc; 4118 return ret; 4119 } 4120 4121 if (ret != -ENOTSUP) 4122 return ret; 4123 /* 4124 * If the rte_eth_tx_queue_info_get is not support for this PMD, 4125 * ring_size stored in testpmd will be used for validity verification. 4126 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 4127 * being 0, it will use a default value provided by PMDs to setup this 4128 * txq. If the default value is 0, it will use the 4129 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 
	 */
	if (port->nb_tx_desc[txq_id])
		*ring_size = port->nb_tx_desc[txq_id];
	else if (port->dev_info.default_txportconf.ring_size)
		*ring_size = port->dev_info.default_txportconf.ring_size;
	else
		*ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	return 0;
}

static int
rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
{
	uint16_t ring_size;
	int ret;

	ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
	if (ret)
		return 1;

	if (rxdesc_id < ring_size)
		return 0;

	fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n",
		rxdesc_id, ring_size);
	return 1;
}

static int
tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
{
	uint16_t ring_size;
	int ret;

	ret = get_tx_ring_size(port_id, txq_id, &ring_size);
	if (ret)
		return 1;

	if (txdesc_id < ring_size)
		return 0;

	fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n",
		txdesc_id, ring_size);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
		 port_id, q_id, ring_name);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		fprintf(stderr,
			"%s ring memory zone (port %d, queue %d) not found (zone name = %s)\n",
			ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	int ret;
	struct rte_eth_dev_info dev_info;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
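		/* The fourth 8-byte word (resv2) of the 32-byte descriptor
		 * is byte-swapped and dumped the same way below.
		 */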
ring[desc_id].resv2.dword = 4257 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 4258 ring_rxd_display_dword(ring[desc_id].resv2); 4259 4260 return; 4261 } 4262 #endif 4263 /* 16 bytes RX descriptor */ 4264 ring[desc_id].lo_dword.dword = 4265 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4266 ring_rxd_display_dword(ring[desc_id].lo_dword); 4267 ring[desc_id].hi_dword.dword = 4268 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4269 ring_rxd_display_dword(ring[desc_id].hi_dword); 4270 } 4271 4272 static void 4273 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 4274 { 4275 struct igb_ring_desc_16_bytes *ring; 4276 struct igb_ring_desc_16_bytes txd; 4277 4278 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4279 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4280 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4281 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 4282 (unsigned)txd.lo_dword.words.lo, 4283 (unsigned)txd.lo_dword.words.hi, 4284 (unsigned)txd.hi_dword.words.lo, 4285 (unsigned)txd.hi_dword.words.hi); 4286 } 4287 4288 void 4289 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 4290 { 4291 const struct rte_memzone *rx_mz; 4292 4293 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 4294 return; 4295 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 4296 if (rx_mz == NULL) 4297 return; 4298 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 4299 } 4300 4301 void 4302 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 4303 { 4304 const struct rte_memzone *tx_mz; 4305 4306 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 4307 return; 4308 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 4309 if (tx_mz == NULL) 4310 return; 4311 ring_tx_descriptor_display(tx_mz, txd_id); 4312 } 4313 4314 void 4315 fwd_lcores_config_display(void) 4316 { 4317 lcoreid_t lc_id; 4318 4319 printf("List of forwarding lcores:"); 4320 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 4321 printf(" %2u", fwd_lcores_cpuids[lc_id]); 4322 printf("\n"); 4323 } 4324 void 4325 rxtx_config_display(void) 4326 { 4327 portid_t pid; 4328 queueid_t qid; 4329 4330 printf(" %s%s%s packet forwarding%s packets/burst=%d\n", 4331 cur_fwd_eng->fwd_mode_name, 4332 cur_fwd_eng->status ? "-" : "", 4333 cur_fwd_eng->status ? cur_fwd_eng->status : "", 4334 retry_enabled == 0 ? 
"" : " with retry", 4335 nb_pkt_per_burst); 4336 4337 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 4338 printf(" packet len=%u - nb packet segments=%d\n", 4339 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 4340 4341 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 4342 nb_fwd_lcores, nb_fwd_ports); 4343 4344 RTE_ETH_FOREACH_DEV(pid) { 4345 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 4346 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 4347 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 4348 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 4349 struct rte_eth_rxq_info rx_qinfo; 4350 struct rte_eth_txq_info tx_qinfo; 4351 uint16_t rx_free_thresh_tmp; 4352 uint16_t tx_free_thresh_tmp; 4353 uint16_t tx_rs_thresh_tmp; 4354 uint16_t nb_rx_desc_tmp; 4355 uint16_t nb_tx_desc_tmp; 4356 uint64_t offloads_tmp; 4357 uint8_t pthresh_tmp; 4358 uint8_t hthresh_tmp; 4359 uint8_t wthresh_tmp; 4360 int32_t rc; 4361 4362 /* per port config */ 4363 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 4364 (unsigned int)pid, nb_rxq, nb_txq); 4365 4366 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 4367 ports[pid].dev_conf.rxmode.offloads, 4368 ports[pid].dev_conf.txmode.offloads); 4369 4370 /* per rx queue config only for first queue to be less verbose */ 4371 for (qid = 0; qid < 1; qid++) { 4372 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 4373 if (rc) { 4374 nb_rx_desc_tmp = nb_rx_desc[qid]; 4375 rx_free_thresh_tmp = 4376 rx_conf[qid].rx_free_thresh; 4377 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 4378 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 4379 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 4380 offloads_tmp = rx_conf[qid].offloads; 4381 } else { 4382 nb_rx_desc_tmp = rx_qinfo.nb_desc; 4383 rx_free_thresh_tmp = 4384 rx_qinfo.conf.rx_free_thresh; 4385 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 4386 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 4387 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 4388 offloads_tmp = rx_qinfo.conf.offloads; 4389 } 4390 4391 printf(" RX queue: %d\n", qid); 4392 printf(" RX desc=%d - RX free threshold=%d\n", 4393 nb_rx_desc_tmp, rx_free_thresh_tmp); 4394 printf(" RX threshold registers: pthresh=%d hthresh=%d " 4395 " wthresh=%d\n", 4396 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4397 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 4398 if (rx_conf->share_group > 0) 4399 printf(" share_group=%u share_qid=%u", 4400 rx_conf->share_group, 4401 rx_conf->share_qid); 4402 printf("\n"); 4403 } 4404 4405 /* per tx queue config only for first queue to be less verbose */ 4406 for (qid = 0; qid < 1; qid++) { 4407 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 4408 if (rc) { 4409 nb_tx_desc_tmp = nb_tx_desc[qid]; 4410 tx_free_thresh_tmp = 4411 tx_conf[qid].tx_free_thresh; 4412 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 4413 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 4414 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 4415 offloads_tmp = tx_conf[qid].offloads; 4416 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 4417 } else { 4418 nb_tx_desc_tmp = tx_qinfo.nb_desc; 4419 tx_free_thresh_tmp = 4420 tx_qinfo.conf.tx_free_thresh; 4421 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 4422 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 4423 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 4424 offloads_tmp = tx_qinfo.conf.offloads; 4425 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 4426 } 4427 4428 printf(" TX queue: %d\n", qid); 4429 printf(" TX desc=%d - TX free threshold=%d\n", 4430 
nb_tx_desc_tmp, tx_free_thresh_tmp); 4431 printf(" TX threshold registers: pthresh=%d hthresh=%d " 4432 " wthresh=%d\n", 4433 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4434 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 4435 offloads_tmp, tx_rs_thresh_tmp); 4436 } 4437 } 4438 } 4439 4440 void 4441 port_rss_reta_info(portid_t port_id, 4442 struct rte_eth_rss_reta_entry64 *reta_conf, 4443 uint16_t nb_entries) 4444 { 4445 uint16_t i, idx, shift; 4446 int ret; 4447 4448 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4449 return; 4450 4451 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 4452 if (ret != 0) { 4453 fprintf(stderr, 4454 "Failed to get RSS RETA info, return code = %d\n", 4455 ret); 4456 return; 4457 } 4458 4459 for (i = 0; i < nb_entries; i++) { 4460 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4461 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4462 if (!(reta_conf[idx].mask & (1ULL << shift))) 4463 continue; 4464 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 4465 i, reta_conf[idx].reta[shift]); 4466 } 4467 } 4468 4469 /* 4470 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 4471 * key of the port. 4472 */ 4473 void 4474 port_rss_hash_conf_show(portid_t port_id, int show_rss_key, int show_rss_algo) 4475 { 4476 struct rte_eth_rss_conf rss_conf = {0}; 4477 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 4478 uint64_t rss_hf; 4479 uint8_t i; 4480 int diag; 4481 struct rte_eth_dev_info dev_info; 4482 uint8_t hash_key_size; 4483 int ret; 4484 4485 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4486 return; 4487 4488 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4489 if (ret != 0) 4490 return; 4491 4492 if (dev_info.hash_key_size > 0 && 4493 dev_info.hash_key_size <= sizeof(rss_key)) 4494 hash_key_size = dev_info.hash_key_size; 4495 else { 4496 fprintf(stderr, 4497 "dev_info did not provide a valid hash key size\n"); 4498 return; 4499 } 4500 4501 /* Get RSS hash key if asked to display it */ 4502 rss_conf.rss_key = (show_rss_key) ? 
rss_key : NULL; 4503 rss_conf.rss_key_len = hash_key_size; 4504 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4505 if (diag != 0) { 4506 switch (diag) { 4507 case -ENODEV: 4508 fprintf(stderr, "port index %d invalid\n", port_id); 4509 break; 4510 case -ENOTSUP: 4511 fprintf(stderr, "operation not supported by device\n"); 4512 break; 4513 default: 4514 fprintf(stderr, "operation failed - diag=%d\n", diag); 4515 break; 4516 } 4517 return; 4518 } 4519 rss_hf = rss_conf.rss_hf; 4520 if (rss_hf == 0) { 4521 printf("RSS disabled\n"); 4522 return; 4523 } 4524 4525 if (show_rss_algo) { 4526 printf("RSS algorithm:\n %s\n", 4527 rte_eth_dev_rss_algo_name(rss_conf.algorithm)); 4528 return; 4529 } 4530 4531 printf("RSS functions:\n"); 4532 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 4533 4534 if (!show_rss_key) 4535 return; 4536 printf("RSS key:\n"); 4537 for (i = 0; i < hash_key_size; i++) 4538 printf("%02X", rss_key[i]); 4539 printf("\n"); 4540 } 4541 4542 void 4543 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 4544 uint8_t hash_key_len) 4545 { 4546 struct rte_eth_rss_conf rss_conf; 4547 int diag; 4548 4549 rss_conf.rss_key = NULL; 4550 rss_conf.rss_key_len = 0; 4551 rss_conf.rss_hf = str_to_rsstypes(rss_type); 4552 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4553 if (diag == 0) { 4554 rss_conf.rss_key = hash_key; 4555 rss_conf.rss_key_len = hash_key_len; 4556 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 4557 } 4558 if (diag == 0) 4559 return; 4560 4561 switch (diag) { 4562 case -ENODEV: 4563 fprintf(stderr, "port index %d invalid\n", port_id); 4564 break; 4565 case -ENOTSUP: 4566 fprintf(stderr, "operation not supported by device\n"); 4567 break; 4568 default: 4569 fprintf(stderr, "operation failed - diag=%d\n", diag); 4570 break; 4571 } 4572 } 4573 4574 /* 4575 * Check whether a shared rxq scheduled on other lcores. 4576 */ 4577 static bool 4578 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 4579 portid_t src_port, queueid_t src_rxq, 4580 uint32_t share_group, queueid_t share_rxq) 4581 { 4582 streamid_t sm_id; 4583 streamid_t nb_fs_per_lcore; 4584 lcoreid_t nb_fc; 4585 lcoreid_t lc_id; 4586 struct fwd_stream *fs; 4587 struct rte_port *port; 4588 struct rte_eth_dev_info *dev_info; 4589 struct rte_eth_rxconf *rxq_conf; 4590 4591 nb_fc = cur_fwd_config.nb_fwd_lcores; 4592 /* Check remaining cores. */ 4593 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 4594 sm_id = fwd_lcores[lc_id]->stream_idx; 4595 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4596 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4597 sm_id++) { 4598 fs = fwd_streams[sm_id]; 4599 port = &ports[fs->rx_port]; 4600 dev_info = &port->dev_info; 4601 rxq_conf = &port->rxq[fs->rx_queue].conf; 4602 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4603 == 0 || rxq_conf->share_group == 0) 4604 /* Not shared rxq. 
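				 * (either the device lacks
				 * RTE_ETH_DEV_CAPA_RXQ_SHARE or the queue's
				 * share_group is 0)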
				 */
				continue;
			if (domain_id != port->dev_info.switch_info.domain_id)
				continue;
			if (rxq_conf->share_group != share_group)
				continue;
			if (rxq_conf->share_qid != share_rxq)
				continue;
			printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
			       share_group, share_rxq);
			printf("  lcore %hhu Port %hu queue %hu\n",
			       src_lc, src_port, src_rxq);
			printf("  lcore %hhu Port %hu queue %hu\n",
			       lc_id, fs->rx_port, fs->rx_queue);
			printf("Please use --nb-cores=%hu to limit the number of forwarding cores\n",
			       nb_rxq);
			return true;
		}
	}
	return false;
}

/*
 * Check the shared rxq configuration.
 *
 * A shared group must not be scheduled on different cores.
 */
bool
pkt_fwd_shared_rxq_check(void)
{
	streamid_t sm_id;
	streamid_t nb_fs_per_lcore;
	lcoreid_t nb_fc;
	lcoreid_t lc_id;
	struct fwd_stream *fs;
	uint16_t domain_id;
	struct rte_port *port;
	struct rte_eth_dev_info *dev_info;
	struct rte_eth_rxconf *rxq_conf;

	if (rxq_share == 0)
		return true;
	nb_fc = cur_fwd_config.nb_fwd_lcores;
	/*
	 * Check streams on each core, make sure the same switch domain +
	 * group + queue doesn't get scheduled on other cores.
	 */
	for (lc_id = 0; lc_id < nb_fc; lc_id++) {
		sm_id = fwd_lcores[lc_id]->stream_idx;
		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
		     sm_id++) {
			fs = fwd_streams[sm_id];
			/* Update lcore info for the stream being scheduled. */
			fs->lcore = fwd_lcores[lc_id];
			port = &ports[fs->rx_port];
			dev_info = &port->dev_info;
			rxq_conf = &port->rxq[fs->rx_queue].conf;
			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
			    == 0 || rxq_conf->share_group == 0)
				/* Not shared rxq. */
				continue;
			/* Check shared rxq not scheduled on remaining cores. */
			domain_id = port->dev_info.switch_info.domain_id;
			if (fwd_stream_on_other_lcores(domain_id, lc_id,
						       fs->rx_port,
						       fs->rx_queue,
						       rxq_conf->share_group,
						       rxq_conf->share_qid))
				return false;
		}
	}
	return true;
}

/*
 * Setup forwarding configuration for each logical core.
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t nb_extra;
	lcoreid_t nb_fc;
	lcoreid_t nb_lc;
	lcoreid_t lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
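	 * Illustrative example: with nb_fs = 10 streams and nb_fc = 4 lcores,
	 * nb_fs_per_lcore = 2 and nb_extra = 2, so lcores 0-1 each get
	 * 2 streams (stream_idx 0 and 2) and lcores 2-3 each get 3 streams
	 * (stream_idx 4 and 7).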
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}

static portid_t
fwd_topology_tx_port_get(portid_t rxp)
{
	static int warning_once = 1;

	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);

	switch (port_topology) {
	default:
	case PORT_TOPOLOGY_PAIRED:
		if ((rxp & 0x1) == 0) {
			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
				return rxp + 1;
			if (warning_once) {
				fprintf(stderr,
					"\nWarning! port-topology=paired with an odd number of forwarding ports: the last port will pair with itself.\n\n");
				warning_once = 0;
			}
			return rxp;
		}
		return rxp - 1;
	case PORT_TOPOLOGY_CHAINED:
		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
	case PORT_TOPOLOGY_LOOP:
		return rxp;
	}
}

static void
simple_fwd_config_setup(void)
{
	portid_t i;

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		fwd_streams[i]->rx_port = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue = 0;
		fwd_streams[i]->tx_port =
			fwd_ports_ids[fwd_topology_tx_port_get(i)];
		fwd_streams[i]->tx_queue = 0;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;
	}
}

/**
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of an RX queue to poll on an RX port for input
 * packets, associated with a TX queue of a TX port to which forwarded
 * packets are sent.
 */
static void
rss_fwd_config_setup(void)
{
	portid_t rxp;
	portid_t txp;
	queueid_t rxq;
	queueid_t nb_q;
	streamid_t sm_id;
	int start;
	int end;

	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	if (proc_id > 0 && nb_q % num_procs != 0)
		printf("Warning! the number of queues should be a multiple of the number of processes, or packet loss will happen.\n");

	/**
	 * In multi-process mode, all queues are allocated to the different
	 * processes based on num_procs and proc_id. For example, with
	 * 4 queues (nb_q) and 2 processes (num_procs):
	 *   queues 0~1 go to the primary process,
	 *   queues 2~3 go to the secondary process.
	 */
	start = proc_id * nb_q / num_procs;
	end = start + nb_q / num_procs;
	rxp = 0;
	rxq = start;
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[sm_id];
		txp = fwd_topology_tx_port_get(rxp);
		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		fs->retry_enabled = retry_enabled;
		rxp++;
		if (rxp < nb_fwd_ports)
			continue;
		rxp = 0;
		rxq++;
		if (rxq >= end)
			rxq = start;
	}
}

static uint16_t
get_fwd_port_total_tc_num(void)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t total_tc_num = 0;
	unsigned int i;

	for (i = 0; i < nb_fwd_ports; i++) {
		(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info);
		total_tc_num += dcb_info.nb_tcs;
	}

	return total_tc_num;
}

/**
 * For the DCB forwarding test, each core is assigned to one traffic class.
 *
 * Each core is assigned multiple streams; each stream is composed of an
 * RX queue to poll on an RX port for input packets, associated with a TX
 * queue of a TX port to which forwarded packets are sent. All RX and TX
 * queues map to the same traffic class.
 * If VMDQ and DCB co-exist, the traffic classes on the different pools
 * share the same core.
 */
static void
dcb_fwd_config_setup(void)
{
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;
	lcoreid_t lc_id;
	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;
	uint16_t total_tc_num;
	struct rte_port *port;
	uint8_t tc = 0;
	portid_t pid;
	int ret;

	/*
	 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED
	 * or RTE_PORT_STOPPED.
	 *
	 * Re-configure ports to get the updated mapping between tc and queue
	 * in case the queue number of the port is changed. Skip started
	 * ports, since modifying the queue number and calling dev_configure
	 * require the ports to be stopped first.
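	 * (Below, RX/TX ports are taken pairwise: an even rxp pairs with
	 * rxp + 1 and an odd rxp with rxp - 1, matching the paired topology
	 * used by fwd_topology_tx_port_get().)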
4899 */ 4900 for (pid = 0; pid < nb_fwd_ports; pid++) { 4901 if (port_is_started(pid) == 1) 4902 continue; 4903 4904 port = &ports[pid]; 4905 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4906 &port->dev_conf); 4907 if (ret < 0) { 4908 fprintf(stderr, 4909 "Failed to re-configure port %d, ret = %d.\n", 4910 pid, ret); 4911 return; 4912 } 4913 } 4914 4915 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4916 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4917 cur_fwd_config.nb_fwd_streams = 4918 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4919 total_tc_num = get_fwd_port_total_tc_num(); 4920 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4921 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4922 4923 /* reinitialize forwarding streams */ 4924 init_fwd_streams(); 4925 sm_id = 0; 4926 txp = 1; 4927 /* get the dcb info on the first RX and TX ports */ 4928 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4929 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4930 4931 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4932 fwd_lcores[lc_id]->stream_nb = 0; 4933 fwd_lcores[lc_id]->stream_idx = sm_id; 4934 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4935 /* if the nb_queue is zero, means this tc is 4936 * not enabled on the POOL 4937 */ 4938 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4939 break; 4940 k = fwd_lcores[lc_id]->stream_nb + 4941 fwd_lcores[lc_id]->stream_idx; 4942 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4943 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4944 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4945 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4946 for (j = 0; j < nb_rx_queue; j++) { 4947 struct fwd_stream *fs; 4948 4949 fs = fwd_streams[k + j]; 4950 fs->rx_port = fwd_ports_ids[rxp]; 4951 fs->rx_queue = rxq + j; 4952 fs->tx_port = fwd_ports_ids[txp]; 4953 fs->tx_queue = txq + j % nb_tx_queue; 4954 fs->peer_addr = fs->tx_port; 4955 fs->retry_enabled = retry_enabled; 4956 } 4957 fwd_lcores[lc_id]->stream_nb += 4958 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4959 } 4960 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4961 4962 tc++; 4963 if (tc < rxp_dcb_info.nb_tcs) 4964 continue; 4965 /* Restart from TC 0 on next RX port */ 4966 tc = 0; 4967 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4968 rxp = (portid_t) 4969 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4970 else 4971 rxp++; 4972 if (rxp >= nb_fwd_ports) 4973 return; 4974 /* get the dcb information on next RX and TX ports */ 4975 if ((rxp & 0x1) == 0) 4976 txp = (portid_t) (rxp + 1); 4977 else 4978 txp = (portid_t) (rxp - 1); 4979 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4980 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4981 } 4982 } 4983 4984 static void 4985 icmp_echo_config_setup(void) 4986 { 4987 portid_t rxp; 4988 queueid_t rxq; 4989 lcoreid_t lc_id; 4990 uint16_t sm_id; 4991 4992 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4993 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4994 (nb_txq * nb_fwd_ports); 4995 else 4996 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4997 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4998 cur_fwd_config.nb_fwd_streams = 4999 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 5000 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 5001 cur_fwd_config.nb_fwd_lcores = 5002 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 5003 if (verbose_level > 0) { 5004 printf("%s fwd_cores=%d fwd_ports=%d 
fwd_streams=%d\n", 5005 __FUNCTION__, 5006 cur_fwd_config.nb_fwd_lcores, 5007 cur_fwd_config.nb_fwd_ports, 5008 cur_fwd_config.nb_fwd_streams); 5009 } 5010 5011 /* reinitialize forwarding streams */ 5012 init_fwd_streams(); 5013 setup_fwd_config_of_each_lcore(&cur_fwd_config); 5014 rxp = 0; rxq = 0; 5015 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 5016 if (verbose_level > 0) 5017 printf(" core=%d: \n", lc_id); 5018 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 5019 struct fwd_stream *fs; 5020 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 5021 fs->rx_port = fwd_ports_ids[rxp]; 5022 fs->rx_queue = rxq; 5023 fs->tx_port = fs->rx_port; 5024 fs->tx_queue = rxq; 5025 fs->peer_addr = fs->tx_port; 5026 fs->retry_enabled = retry_enabled; 5027 if (verbose_level > 0) 5028 printf(" stream=%d port=%d rxq=%d txq=%d\n", 5029 sm_id, fs->rx_port, fs->rx_queue, 5030 fs->tx_queue); 5031 rxq = (queueid_t) (rxq + 1); 5032 if (rxq == nb_rxq) { 5033 rxq = 0; 5034 rxp = (portid_t) (rxp + 1); 5035 } 5036 } 5037 } 5038 } 5039 5040 void 5041 fwd_config_setup(void) 5042 { 5043 struct rte_port *port; 5044 portid_t pt_id; 5045 unsigned int i; 5046 5047 cur_fwd_config.fwd_eng = cur_fwd_eng; 5048 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 5049 icmp_echo_config_setup(); 5050 return; 5051 } 5052 5053 if ((nb_rxq > 1) && (nb_txq > 1)){ 5054 if (dcb_config) { 5055 for (i = 0; i < nb_fwd_ports; i++) { 5056 pt_id = fwd_ports_ids[i]; 5057 port = &ports[pt_id]; 5058 if (!port->dcb_flag) { 5059 fprintf(stderr, 5060 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 5061 return; 5062 } 5063 } 5064 if (nb_fwd_lcores == 1) { 5065 fprintf(stderr, 5066 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 5067 return; 5068 } 5069 5070 dcb_fwd_config_setup(); 5071 } else 5072 rss_fwd_config_setup(); 5073 } 5074 else 5075 simple_fwd_config_setup(); 5076 } 5077 5078 static const char * 5079 mp_alloc_to_str(uint8_t mode) 5080 { 5081 switch (mode) { 5082 case MP_ALLOC_NATIVE: 5083 return "native"; 5084 case MP_ALLOC_ANON: 5085 return "anon"; 5086 case MP_ALLOC_XMEM: 5087 return "xmem"; 5088 case MP_ALLOC_XMEM_HUGE: 5089 return "xmemhuge"; 5090 case MP_ALLOC_XBUF: 5091 return "xbuf"; 5092 default: 5093 return "invalid"; 5094 } 5095 } 5096 5097 void 5098 pkt_fwd_config_display(struct fwd_config *cfg) 5099 { 5100 struct fwd_stream *fs; 5101 lcoreid_t lc_id; 5102 streamid_t sm_id; 5103 5104 printf("%s%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 5105 "NUMA support %s, MP allocation mode: %s\n", 5106 cfg->fwd_eng->fwd_mode_name, 5107 cfg->fwd_eng->status ? "-" : "", 5108 cfg->fwd_eng->status ? cfg->fwd_eng->status : "", 5109 retry_enabled == 0 ? "" : " with retry", 5110 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 5111 numa_support == 1 ? 
"enabled" : "disabled", 5112 mp_alloc_to_str(mp_alloc_type)); 5113 5114 if (retry_enabled) 5115 printf("TX retry num: %u, delay between TX retries: %uus\n", 5116 burst_tx_retry_num, burst_tx_delay_time); 5117 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 5118 printf("Logical Core %u (socket %u) forwards packets on " 5119 "%d streams:", 5120 fwd_lcores_cpuids[lc_id], 5121 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 5122 fwd_lcores[lc_id]->stream_nb); 5123 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 5124 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 5125 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 5126 "P=%d/Q=%d (socket %u) ", 5127 fs->rx_port, fs->rx_queue, 5128 ports[fs->rx_port].socket_id, 5129 fs->tx_port, fs->tx_queue, 5130 ports[fs->tx_port].socket_id); 5131 print_ethaddr("peer=", 5132 &peer_eth_addrs[fs->peer_addr]); 5133 } 5134 printf("\n"); 5135 } 5136 printf("\n"); 5137 } 5138 5139 void 5140 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 5141 { 5142 struct rte_ether_addr new_peer_addr; 5143 if (!rte_eth_dev_is_valid_port(port_id)) { 5144 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 5145 return; 5146 } 5147 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 5148 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 5149 peer_addr); 5150 return; 5151 } 5152 peer_eth_addrs[port_id] = new_peer_addr; 5153 } 5154 5155 int 5156 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 5157 { 5158 unsigned int i; 5159 unsigned int lcore_cpuid; 5160 int record_now; 5161 5162 record_now = 0; 5163 again: 5164 for (i = 0; i < nb_lc; i++) { 5165 lcore_cpuid = lcorelist[i]; 5166 if (! rte_lcore_is_enabled(lcore_cpuid)) { 5167 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 5168 return -1; 5169 } 5170 if (lcore_cpuid == rte_get_main_lcore()) { 5171 fprintf(stderr, 5172 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 5173 lcore_cpuid); 5174 return -1; 5175 } 5176 if (record_now) 5177 fwd_lcores_cpuids[i] = lcore_cpuid; 5178 } 5179 if (record_now == 0) { 5180 record_now = 1; 5181 goto again; 5182 } 5183 nb_cfg_lcores = (lcoreid_t) nb_lc; 5184 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 5185 printf("previous number of forwarding cores %u - changed to " 5186 "number of configured cores %u\n", 5187 (unsigned int) nb_fwd_lcores, nb_lc); 5188 nb_fwd_lcores = (lcoreid_t) nb_lc; 5189 } 5190 5191 return 0; 5192 } 5193 5194 int 5195 set_fwd_lcores_mask(uint64_t lcoremask) 5196 { 5197 unsigned int lcorelist[64]; 5198 unsigned int nb_lc; 5199 unsigned int i; 5200 5201 if (lcoremask == 0) { 5202 fprintf(stderr, "Invalid NULL mask of cores\n"); 5203 return -1; 5204 } 5205 nb_lc = 0; 5206 for (i = 0; i < 64; i++) { 5207 if (! ((uint64_t)(1ULL << i) & lcoremask)) 5208 continue; 5209 lcorelist[nb_lc++] = i; 5210 } 5211 return set_fwd_lcores_list(lcorelist, nb_lc); 5212 } 5213 5214 void 5215 set_fwd_lcores_number(uint16_t nb_lc) 5216 { 5217 if (test_done == 0) { 5218 fprintf(stderr, "Please stop forwarding first\n"); 5219 return; 5220 } 5221 if (nb_lc > nb_cfg_lcores) { 5222 fprintf(stderr, 5223 "nb fwd cores %u > %u (max. 
number of configured lcores) - ignored\n", 5224 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 5225 return; 5226 } 5227 nb_fwd_lcores = (lcoreid_t) nb_lc; 5228 printf("Number of forwarding cores set to %u\n", 5229 (unsigned int) nb_fwd_lcores); 5230 } 5231 5232 void 5233 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 5234 { 5235 unsigned int i; 5236 portid_t port_id; 5237 int record_now; 5238 5239 record_now = 0; 5240 again: 5241 for (i = 0; i < nb_pt; i++) { 5242 port_id = (portid_t) portlist[i]; 5243 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5244 return; 5245 if (record_now) 5246 fwd_ports_ids[i] = port_id; 5247 } 5248 if (record_now == 0) { 5249 record_now = 1; 5250 goto again; 5251 } 5252 nb_cfg_ports = (portid_t) nb_pt; 5253 if (nb_fwd_ports != (portid_t) nb_pt) { 5254 printf("previous number of forwarding ports %u - changed to " 5255 "number of configured ports %u\n", 5256 (unsigned int) nb_fwd_ports, nb_pt); 5257 nb_fwd_ports = (portid_t) nb_pt; 5258 } 5259 } 5260 5261 /** 5262 * Parse the user input and obtain the list of forwarding ports 5263 * 5264 * @param[in] list 5265 * String containing the user input. User can specify 5266 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 5267 * For example, if the user wants to use all the available 5268 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 5269 * If the user wants to use only the ports 1,2 then the input 5270 * is 1,2. 5271 * valid characters are '-' and ',' 5272 * @param[out] values 5273 * This array will be filled with a list of port IDs 5274 * based on the user input 5275 * Note that duplicate entries are discarded and only the first 5276 * count entries in this array are port IDs and all the rest 5277 * will contain default values 5278 * @param[in] maxsize 5279 * This parameter denotes 2 things 5280 * 1) Number of elements in the values array 5281 * 2) Maximum value of each element in the values array 5282 * @return 5283 * On success, returns total count of parsed port IDs 5284 * On failure, returns 0 5285 */ 5286 static unsigned int 5287 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 5288 { 5289 unsigned int count = 0; 5290 char *end = NULL; 5291 int min, max; 5292 int value, i; 5293 unsigned int marked[maxsize]; 5294 5295 if (list == NULL || values == NULL) 5296 return 0; 5297 5298 for (i = 0; i < (int)maxsize; i++) 5299 marked[i] = 0; 5300 5301 min = INT_MAX; 5302 5303 do { 5304 /*Remove the blank spaces if any*/ 5305 while (isblank(*list)) 5306 list++; 5307 if (*list == '\0') 5308 break; 5309 errno = 0; 5310 value = strtol(list, &end, 10); 5311 if (errno || end == NULL) 5312 return 0; 5313 if (value < 0 || value >= (int)maxsize) 5314 return 0; 5315 while (isblank(*end)) 5316 end++; 5317 if (*end == '-' && min == INT_MAX) { 5318 min = value; 5319 } else if ((*end == ',') || (*end == '\0')) { 5320 max = value; 5321 if (min == INT_MAX) 5322 min = value; 5323 for (i = min; i <= max; i++) { 5324 if (count < maxsize) { 5325 if (marked[i]) 5326 continue; 5327 values[count] = i; 5328 marked[i] = 1; 5329 count++; 5330 } 5331 } 5332 min = INT_MAX; 5333 } else 5334 return 0; 5335 list = end + 1; 5336 } while (*end != '\0'); 5337 5338 return count; 5339 } 5340 5341 void 5342 parse_fwd_portlist(const char *portlist) 5343 { 5344 unsigned int portcount; 5345 unsigned int portindex[RTE_MAX_ETHPORTS]; 5346 unsigned int i, valid_port_count = 0; 5347 5348 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 5349 if (!portcount) 5350 
rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 5351 5352 /* 5353 * Here we verify the validity of the ports 5354 * and thereby calculate the total number of 5355 * valid ports 5356 */ 5357 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 5358 if (rte_eth_dev_is_valid_port(portindex[i])) { 5359 portindex[valid_port_count] = portindex[i]; 5360 valid_port_count++; 5361 } 5362 } 5363 5364 set_fwd_ports_list(portindex, valid_port_count); 5365 } 5366 5367 void 5368 set_fwd_ports_mask(uint64_t portmask) 5369 { 5370 unsigned int portlist[64]; 5371 unsigned int nb_pt; 5372 unsigned int i; 5373 5374 if (portmask == 0) { 5375 fprintf(stderr, "Invalid NULL mask of ports\n"); 5376 return; 5377 } 5378 nb_pt = 0; 5379 RTE_ETH_FOREACH_DEV(i) { 5380 if (! ((uint64_t)(1ULL << i) & portmask)) 5381 continue; 5382 portlist[nb_pt++] = i; 5383 } 5384 set_fwd_ports_list(portlist, nb_pt); 5385 } 5386 5387 void 5388 set_fwd_ports_number(uint16_t nb_pt) 5389 { 5390 if (nb_pt > nb_cfg_ports) { 5391 fprintf(stderr, 5392 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 5393 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 5394 return; 5395 } 5396 nb_fwd_ports = (portid_t) nb_pt; 5397 printf("Number of forwarding ports set to %u\n", 5398 (unsigned int) nb_fwd_ports); 5399 } 5400 5401 int 5402 port_is_forwarding(portid_t port_id) 5403 { 5404 unsigned int i; 5405 5406 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5407 return -1; 5408 5409 for (i = 0; i < nb_fwd_ports; i++) { 5410 if (fwd_ports_ids[i] == port_id) 5411 return 1; 5412 } 5413 5414 return 0; 5415 } 5416 5417 void 5418 set_nb_pkt_per_burst(uint16_t nb) 5419 { 5420 if (nb > MAX_PKT_BURST) { 5421 fprintf(stderr, 5422 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 5423 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 5424 return; 5425 } 5426 nb_pkt_per_burst = nb; 5427 printf("Number of packets per burst set to %u\n", 5428 (unsigned int) nb_pkt_per_burst); 5429 } 5430 5431 static const char * 5432 tx_split_get_name(enum tx_pkt_split split) 5433 { 5434 uint32_t i; 5435 5436 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5437 if (tx_split_name[i].split == split) 5438 return tx_split_name[i].name; 5439 } 5440 return NULL; 5441 } 5442 5443 void 5444 set_tx_pkt_split(const char *name) 5445 { 5446 uint32_t i; 5447 5448 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5449 if (strcmp(tx_split_name[i].name, name) == 0) { 5450 tx_pkt_split = tx_split_name[i].split; 5451 return; 5452 } 5453 } 5454 fprintf(stderr, "unknown value: \"%s\"\n", name); 5455 } 5456 5457 int 5458 parse_fec_mode(const char *name, uint32_t *fec_capa) 5459 { 5460 uint8_t i; 5461 5462 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 5463 if (strcmp(fec_mode_name[i].name, name) == 0) { 5464 *fec_capa = 5465 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 5466 return 0; 5467 } 5468 } 5469 return -1; 5470 } 5471 5472 void 5473 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 5474 { 5475 unsigned int i, j; 5476 5477 printf("FEC capabilities:\n"); 5478 5479 for (i = 0; i < num; i++) { 5480 printf("%s : ", 5481 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 5482 5483 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 5484 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 5485 speed_fec_capa[i].capa) 5486 printf("%s ", fec_mode_name[j].name); 5487 } 5488 printf("\n"); 5489 } 5490 } 5491 5492 void 5493 show_rx_pkt_offsets(void) 5494 { 5495 uint32_t i, n; 5496 5497 n = rx_pkt_nb_offs; 5498 printf("Number of offsets: %u\n", n); 5499 if (n) { 
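		/* Example (illustrative): after a "set rxoffs 0,64,128"
		 * command, this prints "Segment offsets: 0,64,128".
		 */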
5500 printf("Segment offsets: "); 5501 for (i = 0; i != n - 1; i++) 5502 printf("%hu,", rx_pkt_seg_offsets[i]); 5503 printf("%hu\n", rx_pkt_seg_lengths[i]); 5504 } 5505 } 5506 5507 void 5508 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 5509 { 5510 unsigned int i; 5511 5512 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 5513 printf("nb segments per RX packets=%u >= " 5514 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 5515 return; 5516 } 5517 5518 /* 5519 * No extra check here, the segment length will be checked by PMD 5520 * in the extended queue setup. 5521 */ 5522 for (i = 0; i < nb_offs; i++) { 5523 if (seg_offsets[i] >= UINT16_MAX) { 5524 printf("offset[%u]=%u > UINT16_MAX - give up\n", 5525 i, seg_offsets[i]); 5526 return; 5527 } 5528 } 5529 5530 for (i = 0; i < nb_offs; i++) 5531 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 5532 5533 rx_pkt_nb_offs = (uint8_t) nb_offs; 5534 } 5535 5536 void 5537 show_rx_pkt_segments(void) 5538 { 5539 uint32_t i, n; 5540 5541 n = rx_pkt_nb_segs; 5542 printf("Number of segments: %u\n", n); 5543 if (n) { 5544 printf("Segment sizes: "); 5545 for (i = 0; i != n - 1; i++) 5546 printf("%hu,", rx_pkt_seg_lengths[i]); 5547 printf("%hu\n", rx_pkt_seg_lengths[i]); 5548 } 5549 } 5550 5551 static const char *get_ptype_str(uint32_t ptype) 5552 { 5553 const char *str; 5554 5555 switch (ptype) { 5556 case RTE_PTYPE_L2_ETHER: 5557 str = "eth"; 5558 break; 5559 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 5560 str = "ipv4"; 5561 break; 5562 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 5563 str = "ipv6"; 5564 break; 5565 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5566 str = "ipv4-tcp"; 5567 break; 5568 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5569 str = "ipv4-udp"; 5570 break; 5571 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5572 str = "ipv4-sctp"; 5573 break; 5574 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5575 str = "ipv6-tcp"; 5576 break; 5577 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5578 str = "ipv6-udp"; 5579 break; 5580 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5581 str = "ipv6-sctp"; 5582 break; 5583 case RTE_PTYPE_TUNNEL_GRENAT: 5584 str = "grenat"; 5585 break; 5586 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER: 5587 str = "inner-eth"; 5588 break; 5589 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5590 | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 5591 str = "inner-ipv4"; 5592 break; 5593 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5594 | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 5595 str = "inner-ipv6"; 5596 break; 5597 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5598 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5599 str = "inner-ipv4-tcp"; 5600 break; 5601 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5602 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5603 str = "inner-ipv4-udp"; 5604 break; 5605 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5606 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5607 str = "inner-ipv4-sctp"; 5608 break; 5609 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5610 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5611 str = "inner-ipv6-tcp"; 5612 break; 5613 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5614 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 
			RTE_PTYPE_INNER_L4_UDP:
		str = "inner-ipv6-udp";
		break;
	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP:
		str = "inner-ipv6-sctp";
		break;
	default:
		str = "unsupported";
	}

	return str;
}

void
show_rx_pkt_hdrs(void)
{
	uint32_t i, n;

	n = rx_pkt_nb_segs;
	printf("Number of segments: %u\n", n);
	if (n) {
		printf("Packet segs: ");
		for (i = 0; i < n - 1; i++)
			printf("%s, ", get_ptype_str(rx_pkt_hdr_protos[i]));
		printf("payload\n");
	}
}

void
set_rx_pkt_hdrs(unsigned int *seg_hdrs, unsigned int nb_segs)
{
	unsigned int i;

	if (nb_segs + 1 > MAX_SEGS_BUFFER_SPLIT) {
		printf("nb segments per RX packet=%u > "
		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs + 1);
		return;
	}

	memset(rx_pkt_hdr_protos, 0, sizeof(rx_pkt_hdr_protos));

	for (i = 0; i < nb_segs; i++)
		rx_pkt_hdr_protos[i] = (uint32_t)seg_hdrs[i];
	/*
	 * Only the protocol headers are counted here; the payload segment
	 * is not, hence rx_pkt_nb_segs is one more than nb_segs.
	 */
	rx_pkt_nb_segs = nb_segs + 1;
}

void
set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
{
	unsigned int i;

	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
		printf("nb segments per RX packet=%u >= "
		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
		return;
	}

	/*
	 * No extra check here; the segment lengths will be checked by the
	 * PMD in the extended queue setup.
	 */
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] >= UINT16_MAX) {
			printf("length[%u]=%u > UINT16_MAX - give up\n",
			       i, seg_lengths[i]);
			return;
		}
	}

	for (i = 0; i < nb_segs; i++)
		rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	rx_pkt_nb_segs = (uint8_t) nb_segs;
}

void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}

static bool
nb_segs_is_invalid(unsigned int nb_segs)
{
	uint16_t ring_size;
	uint16_t queue_id;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
			if (ret) {
				/* Port may not be initialized yet, can't say
				 * the port is invalid at this stage.
				 */
				continue;
			}
			if (ring_size < nb_segs) {
				printf("nb segments per TX packet=%u > TX "
				       "queue(%u) ring_size=%u - txpkts ignored\n",
				       nb_segs, queue_id, ring_size);
				return true;
			}
		}
	}

	return false;
}

void
set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned int i;

	/*
	 * For a single-segment setting, a failed check is ignored.
	 * Sending single-segment packets is such a basic capability
	 * that it is assumed to always be supported.
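	 * In other words, a single-segment request is applied
	 * unconditionally, while a multi-segment request is validated
	 * against each TX ring size below (see nb_segs_is_invalid()).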
	 */
	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) {
		fprintf(stderr,
			"Tx segment count (%u) is not supported - txpkts ignored\n",
			nb_segs);
		return;
	}

	if (nb_segs > RTE_MAX_SEGS_PER_PKT) {
		fprintf(stderr,
			"Tx segment count (%u) is bigger than the max number of segments (%u)\n",
			nb_segs, RTE_MAX_SEGS_PER_PKT);
		return;
	}

	/*
	 * Check that each segment length is not greater than the mbuf data
	 * size.
	 * Check also that the total packet length is not less than the size
	 * of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
	 * 20 + 8 bytes).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > mbuf_data_size[0]) {
			fprintf(stderr,
				"length[%u]=%u > mbuf_data_size=%u - give up\n",
				i, seg_lengths[i], mbuf_data_size[0]);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
		fprintf(stderr, "total packet length=%u < %d - give up\n",
			(unsigned) tx_pkt_len,
			(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}

void
show_tx_pkt_times(void)
{
	printf("Interburst gap: %u\n", tx_pkt_times_inter);
	printf("Intraburst gap: %u\n", tx_pkt_times_intra);
}

void
set_tx_pkt_times(unsigned int *tx_times)
{
	tx_pkt_times_inter = tx_times[0];
	tx_pkt_times_intra = tx_times[1];
}

#ifdef RTE_LIB_GRO
void
setup_gro(const char *onoff, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "invalid port id %u\n", port_id);
		return;
	}
	if (test_done == 0) {
		fprintf(stderr,
			"Before enabling/disabling GRO, please stop forwarding first\n");
		return;
	}
	if (strcmp(onoff, "on") == 0) {
		if (gro_ports[port_id].enable != 0) {
			fprintf(stderr,
				"Port %u has already enabled GRO. Please disable GRO first\n",
				port_id);
			return;
		}
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
			gro_ports[port_id].param.max_flow_num =
				GRO_DEFAULT_FLOW_NUM;
			gro_ports[port_id].param.max_item_per_flow =
				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
		}
		gro_ports[port_id].enable = 1;
	} else {
		if (gro_ports[port_id].enable == 0) {
			fprintf(stderr, "Port %u has already disabled GRO\n",
				port_id);
			return;
		}
		gro_ports[port_id].enable = 0;
	}
}

void
setup_gro_flush_cycles(uint8_t cycles)
{
	if (test_done == 0) {
		fprintf(stderr,
			"Before changing the flush interval for GRO, please stop forwarding first.\n");
		return;
	}

	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
	    GRO_DEFAULT_FLUSH_CYCLES) {
		fprintf(stderr,
			"The flushing cycle must be in the range of 1 to %u. 
Revert to the default value %u.\n",
			GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES);
		cycles = GRO_DEFAULT_FLUSH_CYCLES;
	}

	gro_flush_cycles = cycles;
}

void
show_gro(portid_t port_id)
{
	struct rte_gro_param *param;
	uint32_t max_pkts_num;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Invalid port id %u.\n", port_id);
		return;
	}

	param = &gro_ports[port_id].param;

	if (gro_ports[port_id].enable) {
		printf("GRO type: TCP/IPv4\n");
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			max_pkts_num = param->max_flow_num *
				param->max_item_per_flow;
		} else
			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
		printf("Max number of packets to perform GRO: %u\n",
		       max_pkts_num);
		printf("Flushing cycles: %u\n", gro_flush_cycles);
	} else
		printf("GRO is not enabled on port %u.\n", port_id);
}
#endif /* RTE_LIB_GRO */

#ifdef RTE_LIB_GSO
void
setup_gso(const char *mode, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Invalid port id %u\n", port_id);
		return;
	}
	if (strcmp(mode, "on") == 0) {
		if (test_done == 0) {
			fprintf(stderr,
				"Before enabling GSO, please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 1;
	} else if (strcmp(mode, "off") == 0) {
		if (test_done == 0) {
			fprintf(stderr,
				"Before disabling GSO, please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 0;
	}
}
#endif /* RTE_LIB_GSO */

char *
list_pkt_forwarding_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned int i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

char *
list_pkt_forwarding_retry_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned int i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			if (fwd_eng == &rx_only_engine)
				continue;
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
	struct fwd_engine *fwd_eng;
	unsigned int i;

	i = 0;
	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
			printf("Set %s packet forwarding mode%s\n",
			       fwd_mode_name,
			       retry_enabled == 0 ? "" : " with retry");
			cur_fwd_eng = fwd_eng;
			return;
		}
		i++;
	}
	fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name);
}

void
add_rx_dump_callbacks(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
		if (!ports[portid].rx_dump_cb[queue])
			ports[portid].rx_dump_cb[queue] =
				rte_eth_add_rx_callback(portid, queue,
					dump_rx_pkts, NULL);
}

void
add_tx_dump_callbacks(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
		if (!ports[portid].tx_dump_cb[queue])
			ports[portid].tx_dump_cb[queue] =
				rte_eth_add_tx_callback(portid, queue,
					dump_tx_pkts, NULL);
}

void
remove_rx_dump_callbacks(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
		if (ports[portid].rx_dump_cb[queue]) {
			rte_eth_remove_rx_callback(portid, queue,
				ports[portid].rx_dump_cb[queue]);
			ports[portid].rx_dump_cb[queue] = NULL;
		}
}

void
remove_tx_dump_callbacks(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
		if (ports[portid].tx_dump_cb[queue]) {
			rte_eth_remove_tx_callback(portid, queue,
				ports[portid].tx_dump_cb[queue]);
			ports[portid].tx_dump_cb[queue] = NULL;
		}
}

/*
 * Install or remove the Rx/Tx dump callbacks according to the verbose
 * level: 0 removes all callbacks, 1 dumps received packets only, 2 dumps
 * transmitted packets only, and any level above 2 dumps both directions.
 */
void
configure_rxtx_dump_callbacks(uint16_t verbose)
{
	portid_t portid;

#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
	return;
#endif

	RTE_ETH_FOREACH_DEV(portid)
	{
		if (verbose == 1 || verbose > 2)
			add_rx_dump_callbacks(portid);
		else
			remove_rx_dump_callbacks(portid);
		if (verbose >= 2)
			add_tx_dump_callbacks(portid);
		else
			remove_tx_dump_callbacks(portid);
	}
}

void
set_verbose_level(uint16_t vb_level)
{
	printf("Change verbose level from %u to %u\n",
	       (unsigned int) verbose_level, (unsigned int) vb_level);
	verbose_level = vb_level;
	configure_rxtx_dump_callbacks(verbose_level);
}

void
vlan_extend_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
	} else {
		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr,
			"%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr,
			"%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		fprintf(stderr,
			"%s(port_id=%d, queue_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	} else {
		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr,
			"%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_qinq_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
	} else {
		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		fprintf(stderr, "%s(port_id=%d, on=%d) failed diag=%d\n",
			__func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
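/*
 * The four VLAN offload setters above all follow the same pattern: read
 * the current flags with rte_eth_dev_get_vlan_offload(), toggle one
 * RTE_ETH_*_OFFLOAD bit, program it back, and only on success mirror the
 * matching RTE_ETH_RX_OFFLOAD_* bit into dev_conf.rxmode.offloads so a
 * later port reconfiguration preserves the setting. For example (port
 * number hypothetical), rx_vlan_strip_set(0, 1) sets
 * RTE_ETH_VLAN_STRIP_OFFLOAD on port 0 and records
 * RTE_ETH_RX_OFFLOAD_VLAN_STRIP in the stored Rx configuration.
 */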
int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	fprintf(stderr,
		"rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed diag=%d\n",
		port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
	if (diag == 0)
		return;

	fprintf(stderr,
		"%s(port_id=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
		__func__, port_id, vlan_type, tp_id, diag);
}

void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;

	if (ports[port_id].dev_conf.txmode.offloads &
	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
		fprintf(stderr,
			"Error: QinQ insertion is enabled, cannot set a single VLAN\n");
		return;
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
		fprintf(stderr,
			"Error: VLAN insert is not supported by port %d\n",
			port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
	ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;
	if (vlan_id_is_invalid(vlan_id_outer))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
		fprintf(stderr,
			"Error: QinQ insert not supported by port %d\n",
			port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = vlan_id;
	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}

void
tx_vlan_reset(portid_t port_id)
{
	ports[port_id].dev_conf.txmode.offloads &=
		~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = 0;
	ports[port_id].tx_vlan_id_outer = 0;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}
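/*
 * Example of the Tx VLAN insertion state handled above (port and VLAN
 * IDs are hypothetical): tx_qinq_set(0, 100, 200) first clears any
 * previous state via tx_vlan_reset(), then enables both VLAN and QinQ
 * insertion offloads on port 0 with inner tag 100 and outer tag 200. A
 * subsequent tx_vlan_set(0, 300) is rejected until QinQ insertion is
 * disabled again, so a single-tag setup never silently drops the outer
 * tag.
 */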
/*
 * Map an Rx or Tx queue to one of the RTE_ETHDEV_QUEUE_STAT_CNTRS
 * per-queue statistics counters exposed in the port statistics.
 */
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? rx_queue_id_is_invalid(queue_id) :
		    tx_queue_id_is_invalid(queue_id))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		fprintf(stderr, "map_value not in required range 0..%d\n",
			RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* tx */
		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
							     map_value);
		if (ret) {
			fprintf(stderr,
				"failed to set tx queue stats mapping.\n");
			return;
		}
	} else { /* rx */
		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
							     map_value);
		if (ret) {
			fprintf(stderr,
				"failed to set rx queue stats mapping.\n");
			return;
		}
	}
}

void
set_xstats_hide_zero(uint8_t on_off)
{
	xstats_hide_zero = on_off;
}

void
set_record_core_cycles(uint8_t on_off)
{
	record_core_cycles = on_off;
}

void
set_record_burst_stats(uint8_t on_off)
{
	record_burst_stats = on_off;
}

uint16_t
str_to_flowtype(const char *string)
{
	uint8_t i;

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (!strcmp(flowtype_str_table[i].str, string))
			return flowtype_str_table[i].ftype;
	}

	if (isdigit((unsigned char)string[0])) {
		int val = atoi(string);

		if (val > 0 && val < 64)
			return (uint16_t)val;
	}

	return RTE_ETH_FLOW_UNKNOWN;
}

const char *
flowtype_to_str(uint16_t flow_type)
{
	uint8_t i;

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}
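/*
 * Conversion examples for the two helpers above:
 * str_to_flowtype("ipv4-udp") returns RTE_ETH_FLOW_NONFRAG_IPV4_UDP, and
 * a purely numeric string such as "13" is accepted as a raw flow type
 * value as long as it lies strictly between 0 and 64. Unknown names map
 * to RTE_ETH_FLOW_UNKNOWN, and flowtype_to_str() returns NULL for a
 * value missing from flowtype_str_table, so callers must handle NULL.
 */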
#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)

static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n L4_PAYLOAD: ");
		else
			printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf(" %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}

static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	const char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}

static inline void
print_fdir_flow_type(uint32_t flow_types_mask)
{
	int i;
	const char *p;

	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
		if (!(flow_types_mask & (1 << i)))
			continue;
		p = flowtype_to_str(i);
		if (p)
			printf(" %s", p);
		else
			printf(" unknown");
	}
	printf("\n");
}

static int
get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
	      struct rte_eth_fdir_stats *fdir_stat)
{
	int ret = -ENOTSUP;

#ifdef RTE_NET_I40E
	if (ret == -ENOTSUP) {
		ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
		if (!ret)
			ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
	}
#endif
#ifdef RTE_NET_IXGBE
	if (ret == -ENOTSUP) {
		ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
		if (!ret)
			ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
	}
#endif
	switch (ret) {
	case 0:
		break;
	case -ENOTSUP:
		fprintf(stderr, "\n FDIR is not supported on port %-2d\n",
			port_id);
		break;
	default:
		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
		break;
	}
	return ret;
}
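/*
 * get_fdir_info() above probes the vendor-specific flow director APIs in
 * a fixed order: while ret is still -ENOTSUP it tries the i40e calls
 * first and then the ixgbe ones, so the first PMD that recognizes the
 * port supplies both the info and the statistics. A PMD that does not
 * own the port returns -ENOTSUP, which simply hands the attempt on to
 * the next #ifdef block; any other error aborts with a message.
 */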
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	memset(&fdir_info, 0, sizeof(fdir_info));
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
		return;

	printf("\n %s FDIR info for port %-2d %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf(" MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf(" PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf(" PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(" PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf(" SIGNATURE\n");
	else
		printf(" DISABLE\n");
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf(" SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf(" FLEX PAYLOAD INFO:\n");
	printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
	       " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
	       " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
	       fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
	       fdir_info.flex_payload_unit,
	       fdir_info.max_flex_payload_segment_num,
	       fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf(" FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf,
					fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf(" FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf,
				     fdir_info.max_flexpayload);
	}
	printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
	       " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
	       " add: %-10"PRIu64" remove: %"PRIu64"\n"
	       " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf(" %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}

#endif /* RTE_NET_I40E || RTE_NET_IXGBE */

void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
#ifdef RTE_NET_IXGBE
	int diag;

	if (is_rx)
		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
	else
		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);

	if (diag == 0)
		return;
	fprintf(stderr,
		"rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
		is_rx ? "rx" : "tx", port_id, diag);
	return;
#endif
	fprintf(stderr, "VF %s setting not supported for port %d\n",
		is_rx ? "Rx" : "Tx", port_id);
	RTE_SET_USED(vf);
	RTE_SET_USED(on);
}

int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate)
{
	int diag;
	struct rte_eth_link link;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return 1;
	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
	    rate > link.link_speed) {
		fprintf(stderr,
			"Invalid rate value: %u, bigger than link speed: %u\n",
			rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	fprintf(stderr,
		"rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
		port_id, diag);
	return diag;
}

int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk)
{
	int diag = -ENOTSUP;

	RTE_SET_USED(vf);
	RTE_SET_USED(rate);
	RTE_SET_USED(q_msk);

#ifdef RTE_NET_IXGBE
	if (diag == -ENOTSUP)
		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
						       q_msk);
#endif
#ifdef RTE_NET_BNXT
	if (diag == -ENOTSUP)
		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
#endif
	if (diag == 0)
		return diag;

	fprintf(stderr,
		"%s for port_id=%d failed diag=%d\n",
		__func__, port_id, diag);
	return diag;
}

int
set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return -EINVAL;

	return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh);
}
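/*
 * Usage sketch for the two rate/threshold helpers above (port, queue and
 * values are hypothetical): set_queue_rate_limit(0, 1, 1000) caps Tx
 * queue 1 of port 0 at 1000 Mbps and is rejected up front when the rate
 * exceeds a known link speed; set_rxq_avail_thresh(0, 1, 50) asks the
 * driver to signal when the available descriptors on Rx queue 1 drop
 * below 50 percent of the ring size (0 disables the threshold).
 */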
/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * the multicast MAC addresses to be supplied in one contiguous array.
 * To comply with this constraint, the set of multicast addresses recorded
 * into the pool is systematically compacted at the beginning of the pool.
 * Hence, when a multicast address is removed from the pool, all following
 * addresses, if any, are copied back to keep the set contiguous.
 */
#define MCAST_POOL_INC 32

static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct rte_ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
							MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
						    mc_pool_size);
	if (mc_pool == NULL) {
		fprintf(stderr,
			"allocation of pool of %u multicast addresses failed\n",
			port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}

static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}

int
mcast_addr_pool_destroy(portid_t port_id)
{
	struct rte_port *port;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];

	if (port->mc_addr_nb != 0) {
		/* free the pool of multicast addresses. */
		free(port->mc_addr_pool);
		port->mc_addr_pool = NULL;
		port->mc_addr_nb = 0;
	}
	return 0;
}

static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		fprintf(stderr,
			"rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
			port_id, port->mc_addr_nb, diag);

	return diag;
}
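/*
 * Worked example of the pool arithmetic above, with MCAST_POOL_INC = 32:
 * the first append finds mc_addr_nb % 32 == 0, reallocates the pool to
 * 32 entries and stores the address at index 0; appends 2 to 32 reuse
 * the spare entries; the 33rd append hits mc_addr_nb % 32 == 0 again and
 * grows the pool to 64 entries. Removing index 5 out of 10 recorded
 * addresses copies entries 6..9 down one slot, so the array stays
 * contiguous for rte_eth_dev_set_mc_addr_list().
 */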
void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			fprintf(stderr,
				"multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, remove the address from the pool */
		mcast_addr_pool_remove(port, i);
}

void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		fprintf(stderr, "multicast address not filtered by port %d\n",
			port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, add the address back into the pool */
		mcast_addr_pool_append(port, mc_addr);
}

void
mcast_addr_flush(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
	if (ret != 0) {
		fprintf(stderr,
			"Failed to flush all multicast MAC addresses on port_id %u\n",
			port_id);
		return;
	}
	mcast_addr_pool_destroy(port_id);
}
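/*
 * In the testpmd CLI these helpers back the "mcast_addr add|remove
 * (port_id) (mcast_addr)" commands; for example (address hypothetical),
 * "mcast_addr add 0 01:00:5E:00:00:01" ends up in mcast_addr_add(), and
 * "show port 0 mcast_macs" prints the pool via show_mcast_macs() below.
 */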
void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		fprintf(stderr, "\n Failed to get DCB info on port %-2d\n",
			port_id);
		return;
	}
	printf("\n %s DCB info for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n TC : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		fprintf(stderr, "%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		fprintf(stderr, "%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		fprintf(stderr, "%s: Failed to malloc memory\n", __func__);
		return buf;
	}

	/* Treat a short read as a failure: the caller expects pkg_size bytes. */
	ret = read(fd, buf, pkg_size);
	if (ret != pkg_size) {
		close(fd);
		fprintf(stderr, "%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		fprintf(stderr, "%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}

int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}

void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	int32_t i, rc, num_macs = 0;

	if (eth_dev_info_get_print_err(port_id, &dev_info))
		return;

	struct rte_ether_addr addr[dev_info.max_mac_addrs];
	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
	if (rc < 0)
		return;

	for (i = 0; i < rc; i++) {
		/* skip zero address */
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %d\n", num_macs);

	for (i = 0; i < rc; i++) {
		/* skip zero address */
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
		printf(" %s\n", buf);
	}
}

void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of Multicast MAC addresses added: %d\n",
	       port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf(" %s\n", buf);
	}
}
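/*
 * Minimal usage sketch for the file helpers above (the paths are
 * hypothetical): open_file() returns a malloc'ed buffer that the caller
 * must release with close_file().
 *
 *	uint32_t size;
 *	uint8_t *pkg = open_file("/tmp/profile.pkg", &size);
 *
 *	if (pkg != NULL) {
 *		save_file("/tmp/profile.bak", pkg, size);
 *		close_file(pkg);
 *	}
 */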