1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <ctype.h> 7 #include <stdarg.h> 8 #include <errno.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <stdint.h> 13 #include <inttypes.h> 14 15 #include <sys/queue.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <fcntl.h> 19 #include <unistd.h> 20 21 #include <rte_common.h> 22 #include <rte_byteorder.h> 23 #include <rte_debug.h> 24 #include <rte_log.h> 25 #include <rte_memory.h> 26 #include <rte_memcpy.h> 27 #include <rte_memzone.h> 28 #include <rte_launch.h> 29 #include <rte_bus.h> 30 #include <rte_eal.h> 31 #include <rte_per_lcore.h> 32 #include <rte_lcore.h> 33 #include <rte_branch_prediction.h> 34 #include <rte_mempool.h> 35 #include <rte_mbuf.h> 36 #include <rte_interrupts.h> 37 #include <rte_ether.h> 38 #include <rte_ethdev.h> 39 #include <rte_string_fns.h> 40 #include <rte_cycles.h> 41 #include <rte_flow.h> 42 #include <rte_mtr.h> 43 #include <rte_errno.h> 44 #ifdef RTE_NET_IXGBE 45 #include <rte_pmd_ixgbe.h> 46 #endif 47 #ifdef RTE_NET_I40E 48 #include <rte_pmd_i40e.h> 49 #endif 50 #ifdef RTE_NET_BNXT 51 #include <rte_pmd_bnxt.h> 52 #endif 53 #ifdef RTE_LIB_GRO 54 #include <rte_gro.h> 55 #endif 56 #include <rte_hexdump.h> 57 58 #include "testpmd.h" 59 #include "cmdline_mtr.h" 60 61 #define ETHDEV_FWVERS_LEN 32 62 63 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 65 #else 66 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 67 #endif 68 69 #define NS_PER_SEC 1E9 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 /* Group types */ 91 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 92 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 93 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 94 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 95 { "none", 0 }, 96 { "ip", RTE_ETH_RSS_IP }, 97 { "udp", RTE_ETH_RSS_UDP }, 98 { "tcp", RTE_ETH_RSS_TCP }, 99 { "sctp", RTE_ETH_RSS_SCTP }, 100 { "tunnel", RTE_ETH_RSS_TUNNEL }, 101 { "vlan", RTE_ETH_RSS_VLAN }, 102 103 /* Individual type */ 104 { "ipv4", RTE_ETH_RSS_IPV4 }, 105 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 106 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 107 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 108 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 109 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 110 { "ipv6", RTE_ETH_RSS_IPV6 }, 111 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 112 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 113 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 114 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 115 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 116 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 117 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 118 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 119 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 120 { "port", RTE_ETH_RSS_PORT }, 121 { "vxlan", RTE_ETH_RSS_VXLAN }, 122 { "geneve", RTE_ETH_RSS_GENEVE }, 123 { "nvgre", RTE_ETH_RSS_NVGRE }, 124 { "gtpu", RTE_ETH_RSS_GTPU }, 125 { "eth", RTE_ETH_RSS_ETH }, 126 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 127 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 128 { "esp", RTE_ETH_RSS_ESP }, 129 { "ah", RTE_ETH_RSS_AH }, 130 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 131 { "pfcp", RTE_ETH_RSS_PFCP }, 132 { "pppoe", RTE_ETH_RSS_PPPOE }, 133 { "ecpri", RTE_ETH_RSS_ECPRI }, 134 { "mpls", RTE_ETH_RSS_MPLS }, 135 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 136 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 137 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 138 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 139 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 140 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 141 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 142 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 143 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 144 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 145 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 146 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 147 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 148 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 149 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 150 { NULL, 0}, 151 }; 152 153 static const struct { 154 enum rte_eth_fec_mode mode; 155 const char *name; 156 } fec_mode_name[] = { 157 { 158 .mode = RTE_ETH_FEC_NOFEC, 159 .name = "off", 160 }, 161 { 162 .mode = RTE_ETH_FEC_AUTO, 163 .name = "auto", 164 }, 165 { 166 .mode = RTE_ETH_FEC_BASER, 167 .name = "baser", 168 }, 169 { 170 .mode = RTE_ETH_FEC_RS, 171 .name = "rs", 172 }, 173 { 174 .mode = RTE_ETH_FEC_LLRS, 175 .name = "llrs", 176 }, 177 }; 178 179 static const struct { 180 char str[32]; 181 uint16_t ftype; 182 } flowtype_str_table[] = { 183 {"raw", RTE_ETH_FLOW_RAW}, 184 {"ipv4", RTE_ETH_FLOW_IPV4}, 185 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 186 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 187 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 188 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 189 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 190 {"ipv6", RTE_ETH_FLOW_IPV6}, 191 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 192 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 193 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 194 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 195 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 196 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 197 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 198 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 199 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 200 {"port", RTE_ETH_FLOW_PORT}, 201 {"vxlan", RTE_ETH_FLOW_VXLAN}, 202 {"geneve", RTE_ETH_FLOW_GENEVE}, 203 {"nvgre", RTE_ETH_FLOW_NVGRE}, 204 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 205 {"gtpu", RTE_ETH_FLOW_GTPU}, 206 }; 207 208 static void 209 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 210 { 211 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 212 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 213 printf("%s%s", name, buf); 214 } 215 216 static void 217 nic_xstats_display_periodic(portid_t port_id) 218 { 219 struct xstat_display_info *xstats_info; 220 uint64_t *prev_values, *curr_values; 221 uint64_t diff_value, value_rate; 222 struct timespec cur_time; 223 uint64_t *ids_supp; 224 size_t ids_supp_sz; 225 uint64_t diff_ns; 226 unsigned int i; 227 int rc; 228 229 xstats_info = &ports[port_id].xstats_info; 230 231 ids_supp_sz = xstats_info->ids_supp_sz; 232 if (ids_supp_sz == 0) 233 return; 234 235 printf("\n"); 236 237 ids_supp = xstats_info->ids_supp; 238 prev_values = xstats_info->prev_values; 239 curr_values = xstats_info->curr_values; 240 241 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 242 ids_supp_sz); 243 if (rc != (int)ids_supp_sz) { 244 fprintf(stderr, 245 "Failed to get values of %zu xstats for port %u - return code %d\n", 246 ids_supp_sz, port_id, rc); 247 return; 248 } 249 250 diff_ns = 0; 251 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 252 uint64_t ns; 253 254 ns = cur_time.tv_sec * NS_PER_SEC; 255 ns += cur_time.tv_nsec; 256 257 if (xstats_info->prev_ns != 0) 258 diff_ns = ns - xstats_info->prev_ns; 259 xstats_info->prev_ns = ns; 260 } 261 262 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 263 for (i = 0; i < ids_supp_sz; i++) { 264 diff_value = (curr_values[i] > prev_values[i]) ? 265 (curr_values[i] - prev_values[i]) : 0; 266 prev_values[i] = curr_values[i]; 267 value_rate = diff_ns > 0 ? 268 (double)diff_value / diff_ns * NS_PER_SEC : 0; 269 270 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 271 xstats_display[i].name, curr_values[i], value_rate); 272 } 273 } 274 275 void 276 nic_stats_display(portid_t port_id) 277 { 278 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 279 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 280 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 281 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 282 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 283 struct timespec cur_time; 284 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 285 diff_ns; 286 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 287 struct rte_eth_stats stats; 288 static const char *nic_stats_border = "########################"; 289 int ret; 290 291 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 292 print_valid_ports(); 293 return; 294 } 295 ret = rte_eth_stats_get(port_id, &stats); 296 if (ret != 0) { 297 fprintf(stderr, 298 "%s: Error: failed to get stats (port %u): %d", 299 __func__, port_id, ret); 300 return; 301 } 302 printf("\n %s NIC statistics for port %-2d %s\n", 303 nic_stats_border, port_id, nic_stats_border); 304 305 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 306 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 307 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 308 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 309 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 310 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 311 312 diff_ns = 0; 313 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 314 uint64_t ns; 315 316 ns = cur_time.tv_sec * NS_PER_SEC; 317 ns += cur_time.tv_nsec; 318 319 if (prev_ns[port_id] != 0) 320 diff_ns = ns - prev_ns[port_id]; 321 prev_ns[port_id] = ns; 322 } 323 324 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 325 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 326 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 327 (stats.opackets - prev_pkts_tx[port_id]) : 0; 328 prev_pkts_rx[port_id] = stats.ipackets; 329 prev_pkts_tx[port_id] = stats.opackets; 330 mpps_rx = diff_ns > 0 ? 331 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 332 mpps_tx = diff_ns > 0 ? 333 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 334 335 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 336 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 337 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 338 (stats.obytes - prev_bytes_tx[port_id]) : 0; 339 prev_bytes_rx[port_id] = stats.ibytes; 340 prev_bytes_tx[port_id] = stats.obytes; 341 mbps_rx = diff_ns > 0 ? 342 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 343 mbps_tx = diff_ns > 0 ? 344 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 345 346 printf("\n Throughput (since last show)\n"); 347 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 348 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 349 mpps_tx, mbps_tx * 8); 350 351 if (xstats_display_num > 0) 352 nic_xstats_display_periodic(port_id); 353 354 printf(" %s############################%s\n", 355 nic_stats_border, nic_stats_border); 356 } 357 358 void 359 nic_stats_clear(portid_t port_id) 360 { 361 int ret; 362 363 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 364 print_valid_ports(); 365 return; 366 } 367 368 ret = rte_eth_stats_reset(port_id); 369 if (ret != 0) { 370 fprintf(stderr, 371 "%s: Error: failed to reset stats (port %u): %s", 372 __func__, port_id, strerror(-ret)); 373 return; 374 } 375 376 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 377 if (ret != 0) { 378 if (ret < 0) 379 ret = -ret; 380 fprintf(stderr, 381 "%s: Error: failed to get stats (port %u): %s", 382 __func__, port_id, strerror(ret)); 383 return; 384 } 385 printf("\n NIC statistics for port %d cleared\n", port_id); 386 } 387 388 void 389 nic_xstats_display(portid_t port_id) 390 { 391 struct rte_eth_xstat *xstats; 392 int cnt_xstats, idx_xstat; 393 struct rte_eth_xstat_name *xstats_names; 394 395 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 396 print_valid_ports(); 397 return; 398 } 399 printf("###### NIC extended statistics for port %-2d\n", port_id); 400 if (!rte_eth_dev_is_valid_port(port_id)) { 401 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 402 return; 403 } 404 405 /* Get count */ 406 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 407 if (cnt_xstats < 0) { 408 fprintf(stderr, "Error: Cannot get count of xstats\n"); 409 return; 410 } 411 412 /* Get id-name lookup table */ 413 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 414 if (xstats_names == NULL) { 415 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 416 return; 417 } 418 if (cnt_xstats != rte_eth_xstats_get_names( 419 port_id, xstats_names, cnt_xstats)) { 420 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 421 free(xstats_names); 422 return; 423 } 424 425 /* Get stats themselves */ 426 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 427 if (xstats == NULL) { 428 fprintf(stderr, "Cannot allocate memory for xstats\n"); 429 free(xstats_names); 430 return; 431 } 432 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 433 fprintf(stderr, "Error: Unable to get xstats\n"); 434 free(xstats_names); 435 free(xstats); 436 return; 437 } 438 439 /* Display xstats */ 440 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 441 if (xstats_hide_zero && !xstats[idx_xstat].value) 442 continue; 443 printf("%s: %"PRIu64"\n", 444 xstats_names[idx_xstat].name, 445 xstats[idx_xstat].value); 446 } 447 free(xstats_names); 448 free(xstats); 449 } 450 451 void 452 nic_xstats_clear(portid_t port_id) 453 { 454 int ret; 455 456 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 457 print_valid_ports(); 458 return; 459 } 460 461 ret = rte_eth_xstats_reset(port_id); 462 if (ret != 0) { 463 fprintf(stderr, 464 "%s: Error: failed to reset xstats (port %u): %s\n", 465 __func__, port_id, strerror(-ret)); 466 return; 467 } 468 469 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 470 if (ret != 0) { 471 if (ret < 0) 472 ret = -ret; 473 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 474 __func__, port_id, strerror(ret)); 475 return; 476 } 477 } 478 479 static const char * 480 get_queue_state_name(uint8_t queue_state) 481 { 482 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 483 return "stopped"; 484 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 485 return "started"; 486 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 487 return "hairpin"; 488 else 489 return "unknown"; 490 } 491 492 void 493 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 494 { 495 struct rte_eth_burst_mode mode; 496 struct rte_eth_rxq_info qinfo; 497 int32_t rc; 498 static const char *info_border = "*********************"; 499 500 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 501 if (rc != 0) { 502 fprintf(stderr, 503 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 504 port_id, queue_id, strerror(-rc), rc); 505 return; 506 } 507 508 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 509 info_border, port_id, queue_id, info_border); 510 511 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 512 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 513 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 514 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 515 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 516 printf("\nRX drop packets: %s", 517 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 518 printf("\nRX deferred start: %s", 519 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 520 printf("\nRX scattered packets: %s", 521 (qinfo.scattered_rx != 0) ? "on" : "off"); 522 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 523 if (qinfo.rx_buf_size != 0) 524 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 525 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 526 527 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 528 printf("\nBurst mode: %s%s", 529 mode.info, 530 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 531 " (per queue)" : ""); 532 533 printf("\n"); 534 } 535 536 void 537 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 538 { 539 struct rte_eth_burst_mode mode; 540 struct rte_eth_txq_info qinfo; 541 int32_t rc; 542 static const char *info_border = "*********************"; 543 544 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 545 if (rc != 0) { 546 fprintf(stderr, 547 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 548 port_id, queue_id, strerror(-rc), rc); 549 return; 550 } 551 552 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 553 info_border, port_id, queue_id, info_border); 554 555 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 556 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 557 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 558 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 559 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 560 printf("\nTX deferred start: %s", 561 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 562 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 563 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 564 565 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 566 printf("\nBurst mode: %s%s", 567 mode.info, 568 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 569 " (per queue)" : ""); 570 571 printf("\n"); 572 } 573 574 static int bus_match_all(const struct rte_bus *bus, const void *data) 575 { 576 RTE_SET_USED(bus); 577 RTE_SET_USED(data); 578 return 0; 579 } 580 581 static void 582 device_infos_display_speeds(uint32_t speed_capa) 583 { 584 printf("\n\tDevice speed capability:"); 585 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 586 printf(" Autonegotiate (all speeds)"); 587 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 588 printf(" Disable autonegotiate (fixed speed) "); 589 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 590 printf(" 10 Mbps half-duplex "); 591 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 592 printf(" 10 Mbps full-duplex "); 593 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 594 printf(" 100 Mbps half-duplex "); 595 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 596 printf(" 100 Mbps full-duplex "); 597 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 598 printf(" 1 Gbps "); 599 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 600 printf(" 2.5 Gbps "); 601 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 602 printf(" 5 Gbps "); 603 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 604 printf(" 10 Gbps "); 605 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 606 printf(" 20 Gbps "); 607 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 608 printf(" 25 Gbps "); 609 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 610 printf(" 40 Gbps "); 611 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 612 printf(" 50 Gbps "); 613 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 614 printf(" 56 Gbps "); 615 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 616 printf(" 100 Gbps "); 617 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 618 printf(" 200 Gbps "); 619 if (speed_capa & RTE_ETH_LINK_SPEED_400G) 620 printf(" 400 Gbps "); 621 } 622 623 void 624 device_infos_display(const char *identifier) 625 { 626 static const char *info_border = "*********************"; 627 struct rte_bus *start = NULL, *next; 628 struct rte_dev_iterator dev_iter; 629 char name[RTE_ETH_NAME_MAX_LEN]; 630 struct rte_ether_addr mac_addr; 631 struct rte_device *dev; 632 struct rte_devargs da; 633 portid_t port_id; 634 struct rte_eth_dev_info dev_info; 635 char devstr[128]; 636 637 memset(&da, 0, sizeof(da)); 638 if (!identifier) 639 goto skip_parse; 640 641 if (rte_devargs_parsef(&da, "%s", identifier)) { 642 fprintf(stderr, "cannot parse identifier\n"); 643 return; 644 } 645 646 skip_parse: 647 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 648 649 start = next; 650 if (identifier && da.bus != next) 651 continue; 652 653 snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next)); 654 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 655 656 if (rte_dev_driver(dev) == NULL) 657 continue; 658 /* Check for matching device if identifier is present */ 659 if (identifier && 660 strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev)))) 661 continue; 662 printf("\n%s Infos for device %s %s\n", 663 info_border, rte_dev_name(dev), info_border); 664 printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev))); 665 printf("\nBus information: %s", 666 rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : ""); 667 printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev))); 668 printf("\nDevargs: %s", 669 rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : ""); 670 printf("\nConnect to socket: %d", rte_dev_numa_node(dev)); 671 printf("\n"); 672 673 /* List ports with matching device name */ 674 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 675 printf("\n\tPort id: %-2d", port_id); 676 if (eth_macaddr_get_print_err(port_id, 677 &mac_addr) == 0) 678 print_ethaddr("\n\tMAC address: ", 679 &mac_addr); 680 rte_eth_dev_get_name_by_port(port_id, name); 681 printf("\n\tDevice name: %s", name); 682 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 683 device_infos_display_speeds(dev_info.speed_capa); 684 printf("\n"); 685 } 686 } 687 }; 688 rte_devargs_reset(&da); 689 } 690 691 static void 692 print_dev_capabilities(uint64_t capabilities) 693 { 694 uint64_t single_capa; 695 int begin; 696 int end; 697 int bit; 698 699 if (capabilities == 0) 700 return; 701 702 begin = __builtin_ctzll(capabilities); 703 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 704 705 single_capa = 1ULL << begin; 706 for (bit = begin; bit < end; bit++) { 707 if (capabilities & single_capa) 708 printf(" %s", 709 rte_eth_dev_capability_name(single_capa)); 710 single_capa <<= 1; 711 } 712 } 713 714 uint64_t 715 str_to_rsstypes(const char *str) 716 { 717 uint16_t i; 718 719 for (i = 0; rss_type_table[i].str != NULL; i++) { 720 if (strcmp(rss_type_table[i].str, str) == 0) 721 return rss_type_table[i].rss_type; 722 } 723 724 return 0; 725 } 726 727 const char * 728 rsstypes_to_str(uint64_t rss_type) 729 { 730 uint16_t i; 731 732 for (i = 0; rss_type_table[i].str != NULL; i++) { 733 if (rss_type_table[i].rss_type == rss_type) 734 return rss_type_table[i].str; 735 } 736 737 return NULL; 738 } 739 740 static void 741 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line) 742 { 743 uint16_t user_defined_str_len; 744 uint16_t total_len = 0; 745 uint16_t str_len = 0; 746 uint64_t rss_offload; 747 uint16_t i; 748 749 for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) { 750 rss_offload = RTE_BIT64(i); 751 if ((offload_types & rss_offload) != 0) { 752 const char *p = rsstypes_to_str(rss_offload); 753 754 user_defined_str_len = 755 strlen("user-defined-") + (i / 10 + 1); 756 str_len = p ? strlen(p) : user_defined_str_len; 757 str_len += 2; /* add two spaces */ 758 if (total_len + str_len >= char_num_per_line) { 759 total_len = 0; 760 printf("\n"); 761 } 762 763 if (p) 764 printf(" %s", p); 765 else 766 printf(" user-defined-%u", i); 767 total_len += str_len; 768 } 769 } 770 printf("\n"); 771 } 772 773 void 774 port_infos_display(portid_t port_id) 775 { 776 struct rte_port *port; 777 struct rte_ether_addr mac_addr; 778 struct rte_eth_link link; 779 struct rte_eth_dev_info dev_info; 780 int vlan_offload; 781 struct rte_mempool * mp; 782 static const char *info_border = "*********************"; 783 uint16_t mtu; 784 char name[RTE_ETH_NAME_MAX_LEN]; 785 int ret; 786 char fw_version[ETHDEV_FWVERS_LEN]; 787 788 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 789 print_valid_ports(); 790 return; 791 } 792 port = &ports[port_id]; 793 ret = eth_link_get_nowait_print_err(port_id, &link); 794 if (ret < 0) 795 return; 796 797 ret = eth_dev_info_get_print_err(port_id, &dev_info); 798 if (ret != 0) 799 return; 800 801 printf("\n%s Infos for port %-2d %s\n", 802 info_border, port_id, info_border); 803 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 804 print_ethaddr("MAC address: ", &mac_addr); 805 rte_eth_dev_get_name_by_port(port_id, name); 806 printf("\nDevice name: %s", name); 807 printf("\nDriver name: %s", dev_info.driver_name); 808 809 if (rte_eth_dev_fw_version_get(port_id, fw_version, 810 ETHDEV_FWVERS_LEN) == 0) 811 printf("\nFirmware-version: %s", fw_version); 812 else 813 printf("\nFirmware-version: %s", "not available"); 814 815 if (rte_dev_devargs(dev_info.device) && rte_dev_devargs(dev_info.device)->args) 816 printf("\nDevargs: %s", rte_dev_devargs(dev_info.device)->args); 817 printf("\nConnect to socket: %u", port->socket_id); 818 819 if (port_numa[port_id] != NUMA_NO_CONFIG) { 820 mp = mbuf_pool_find(port_numa[port_id], 0); 821 if (mp) 822 printf("\nmemory allocation on the socket: %d", 823 port_numa[port_id]); 824 } else 825 printf("\nmemory allocation on the socket: %u",port->socket_id); 826 827 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 828 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 829 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 830 ("full-duplex") : ("half-duplex")); 831 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 832 ("On") : ("Off")); 833 834 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 835 printf("MTU: %u\n", mtu); 836 837 printf("Promiscuous mode: %s\n", 838 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 839 printf("Allmulticast mode: %s\n", 840 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 841 printf("Maximum number of MAC addresses: %u\n", 842 (unsigned int)(port->dev_info.max_mac_addrs)); 843 printf("Maximum number of MAC addresses of hash filtering: %u\n", 844 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 845 846 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 847 if (vlan_offload >= 0){ 848 printf("VLAN offload: \n"); 849 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 850 printf(" strip on, "); 851 else 852 printf(" strip off, "); 853 854 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 855 printf("filter on, "); 856 else 857 printf("filter off, "); 858 859 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 860 printf("extend on, "); 861 else 862 printf("extend off, "); 863 864 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 865 printf("qinq strip on\n"); 866 else 867 printf("qinq strip off\n"); 868 } 869 870 if (dev_info.hash_key_size > 0) 871 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 872 if (dev_info.reta_size > 0) 873 printf("Redirection table size: %u\n", dev_info.reta_size); 874 if (!dev_info.flow_type_rss_offloads) 875 printf("No RSS offload flow type is supported.\n"); 876 else { 877 printf("Supported RSS offload flow types:\n"); 878 rss_offload_types_display(dev_info.flow_type_rss_offloads, 879 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 880 } 881 882 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 883 printf("Maximum configurable length of RX packet: %u\n", 884 dev_info.max_rx_pktlen); 885 printf("Maximum configurable size of LRO aggregated packet: %u\n", 886 dev_info.max_lro_pkt_size); 887 if (dev_info.max_vfs) 888 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 889 if (dev_info.max_vmdq_pools) 890 printf("Maximum number of VMDq pools: %u\n", 891 dev_info.max_vmdq_pools); 892 893 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 894 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 895 printf("Max possible number of RXDs per queue: %hu\n", 896 dev_info.rx_desc_lim.nb_max); 897 printf("Min possible number of RXDs per queue: %hu\n", 898 dev_info.rx_desc_lim.nb_min); 899 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 900 901 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 902 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 903 printf("Max possible number of TXDs per queue: %hu\n", 904 dev_info.tx_desc_lim.nb_max); 905 printf("Min possible number of TXDs per queue: %hu\n", 906 dev_info.tx_desc_lim.nb_min); 907 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 908 printf("Max segment number per packet: %hu\n", 909 dev_info.tx_desc_lim.nb_seg_max); 910 printf("Max segment number per MTU/TSO: %hu\n", 911 dev_info.tx_desc_lim.nb_mtu_seg_max); 912 913 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 914 print_dev_capabilities(dev_info.dev_capa); 915 printf(" )\n"); 916 /* Show switch info only if valid switch domain and port id is set */ 917 if (dev_info.switch_info.domain_id != 918 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 919 if (dev_info.switch_info.name) 920 printf("Switch name: %s\n", dev_info.switch_info.name); 921 922 printf("Switch domain Id: %u\n", 923 dev_info.switch_info.domain_id); 924 printf("Switch Port Id: %u\n", 925 dev_info.switch_info.port_id); 926 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 927 printf("Switch Rx domain: %u\n", 928 dev_info.switch_info.rx_domain); 929 } 930 printf("Device error handling mode: "); 931 switch (dev_info.err_handle_mode) { 932 case RTE_ETH_ERROR_HANDLE_MODE_NONE: 933 printf("none\n"); 934 break; 935 case RTE_ETH_ERROR_HANDLE_MODE_PASSIVE: 936 printf("passive\n"); 937 break; 938 case RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE: 939 printf("proactive\n"); 940 break; 941 default: 942 printf("unknown\n"); 943 break; 944 } 945 printf("Device private info:\n"); 946 ret = rte_eth_dev_priv_dump(port_id, stdout); 947 if (ret == -ENOTSUP) 948 printf(" none\n"); 949 else if (ret < 0) 950 fprintf(stderr, " Failed to dump private info with error (%d): %s\n", 951 ret, strerror(-ret)); 952 } 953 954 void 955 port_summary_header_display(void) 956 { 957 uint16_t port_number; 958 959 port_number = rte_eth_dev_count_avail(); 960 printf("Number of available ports: %i\n", port_number); 961 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 962 "Driver", "Status", "Link"); 963 } 964 965 void 966 port_summary_display(portid_t port_id) 967 { 968 struct rte_ether_addr mac_addr; 969 struct rte_eth_link link; 970 struct rte_eth_dev_info dev_info; 971 char name[RTE_ETH_NAME_MAX_LEN]; 972 int ret; 973 974 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 975 print_valid_ports(); 976 return; 977 } 978 979 ret = eth_link_get_nowait_print_err(port_id, &link); 980 if (ret < 0) 981 return; 982 983 ret = eth_dev_info_get_print_err(port_id, &dev_info); 984 if (ret != 0) 985 return; 986 987 rte_eth_dev_get_name_by_port(port_id, name); 988 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 989 if (ret != 0) 990 return; 991 992 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 993 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 994 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 995 rte_eth_link_speed_to_str(link.link_speed)); 996 } 997 998 void 999 port_eeprom_display(portid_t port_id) 1000 { 1001 struct rte_dev_eeprom_info einfo; 1002 int ret; 1003 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1004 print_valid_ports(); 1005 return; 1006 } 1007 1008 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 1009 if (len_eeprom < 0) { 1010 switch (len_eeprom) { 1011 case -ENODEV: 1012 fprintf(stderr, "port index %d invalid\n", port_id); 1013 break; 1014 case -ENOTSUP: 1015 fprintf(stderr, "operation not supported by device\n"); 1016 break; 1017 case -EIO: 1018 fprintf(stderr, "device is removed\n"); 1019 break; 1020 default: 1021 fprintf(stderr, "Unable to get EEPROM: %d\n", 1022 len_eeprom); 1023 break; 1024 } 1025 return; 1026 } 1027 1028 einfo.offset = 0; 1029 einfo.length = len_eeprom; 1030 einfo.data = calloc(1, len_eeprom); 1031 if (!einfo.data) { 1032 fprintf(stderr, 1033 "Allocation of port %u eeprom data failed\n", 1034 port_id); 1035 return; 1036 } 1037 1038 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1039 if (ret != 0) { 1040 switch (ret) { 1041 case -ENODEV: 1042 fprintf(stderr, "port index %d invalid\n", port_id); 1043 break; 1044 case -ENOTSUP: 1045 fprintf(stderr, "operation not supported by device\n"); 1046 break; 1047 case -EIO: 1048 fprintf(stderr, "device is removed\n"); 1049 break; 1050 default: 1051 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1052 break; 1053 } 1054 free(einfo.data); 1055 return; 1056 } 1057 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1058 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1059 free(einfo.data); 1060 } 1061 1062 void 1063 port_module_eeprom_display(portid_t port_id) 1064 { 1065 struct rte_eth_dev_module_info minfo; 1066 struct rte_dev_eeprom_info einfo; 1067 int ret; 1068 1069 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1070 print_valid_ports(); 1071 return; 1072 } 1073 1074 1075 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1076 if (ret != 0) { 1077 switch (ret) { 1078 case -ENODEV: 1079 fprintf(stderr, "port index %d invalid\n", port_id); 1080 break; 1081 case -ENOTSUP: 1082 fprintf(stderr, "operation not supported by device\n"); 1083 break; 1084 case -EIO: 1085 fprintf(stderr, "device is removed\n"); 1086 break; 1087 default: 1088 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1089 ret); 1090 break; 1091 } 1092 return; 1093 } 1094 1095 einfo.offset = 0; 1096 einfo.length = minfo.eeprom_len; 1097 einfo.data = calloc(1, minfo.eeprom_len); 1098 if (!einfo.data) { 1099 fprintf(stderr, 1100 "Allocation of port %u eeprom data failed\n", 1101 port_id); 1102 return; 1103 } 1104 1105 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1106 if (ret != 0) { 1107 switch (ret) { 1108 case -ENODEV: 1109 fprintf(stderr, "port index %d invalid\n", port_id); 1110 break; 1111 case -ENOTSUP: 1112 fprintf(stderr, "operation not supported by device\n"); 1113 break; 1114 case -EIO: 1115 fprintf(stderr, "device is removed\n"); 1116 break; 1117 default: 1118 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1119 ret); 1120 break; 1121 } 1122 free(einfo.data); 1123 return; 1124 } 1125 1126 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1127 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1128 free(einfo.data); 1129 } 1130 1131 int 1132 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1133 { 1134 uint16_t pid; 1135 1136 if (port_id == (portid_t)RTE_PORT_ALL) 1137 return 0; 1138 1139 RTE_ETH_FOREACH_DEV(pid) 1140 if (port_id == pid) 1141 return 0; 1142 1143 if (warning == ENABLED_WARN) 1144 fprintf(stderr, "Invalid port %d\n", port_id); 1145 1146 return 1; 1147 } 1148 1149 void print_valid_ports(void) 1150 { 1151 portid_t pid; 1152 1153 printf("The valid ports array is ["); 1154 RTE_ETH_FOREACH_DEV(pid) { 1155 printf(" %d", pid); 1156 } 1157 printf(" ]\n"); 1158 } 1159 1160 static int 1161 vlan_id_is_invalid(uint16_t vlan_id) 1162 { 1163 if (vlan_id < 4096) 1164 return 0; 1165 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1166 return 1; 1167 } 1168 1169 static uint32_t 1170 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1171 { 1172 uint32_t overhead_len; 1173 1174 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1175 overhead_len = max_rx_pktlen - max_mtu; 1176 else 1177 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1178 1179 return overhead_len; 1180 } 1181 1182 static int 1183 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1184 { 1185 struct rte_eth_dev_info dev_info; 1186 uint32_t overhead_len; 1187 uint32_t frame_size; 1188 int ret; 1189 1190 ret = rte_eth_dev_info_get(port_id, &dev_info); 1191 if (ret != 0) 1192 return ret; 1193 1194 if (mtu < dev_info.min_mtu) { 1195 fprintf(stderr, 1196 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1197 mtu, dev_info.min_mtu, port_id); 1198 return -EINVAL; 1199 } 1200 if (mtu > dev_info.max_mtu) { 1201 fprintf(stderr, 1202 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1203 mtu, dev_info.max_mtu, port_id); 1204 return -EINVAL; 1205 } 1206 1207 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1208 dev_info.max_mtu); 1209 frame_size = mtu + overhead_len; 1210 if (frame_size > dev_info.max_rx_pktlen) { 1211 fprintf(stderr, 1212 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1213 frame_size, dev_info.max_rx_pktlen, port_id); 1214 return -EINVAL; 1215 } 1216 1217 return 0; 1218 } 1219 1220 void 1221 port_mtu_set(portid_t port_id, uint16_t mtu) 1222 { 1223 struct rte_port *port = &ports[port_id]; 1224 int diag; 1225 1226 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1227 return; 1228 1229 diag = eth_dev_validate_mtu(port_id, mtu); 1230 if (diag != 0) 1231 return; 1232 1233 if (port->need_reconfig == 0) { 1234 diag = rte_eth_dev_set_mtu(port_id, mtu); 1235 if (diag != 0) { 1236 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1237 return; 1238 } 1239 } 1240 1241 port->dev_conf.rxmode.mtu = mtu; 1242 } 1243 1244 /* Generic flow management functions. */ 1245 1246 static struct port_flow_tunnel * 1247 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1248 { 1249 struct port_flow_tunnel *flow_tunnel; 1250 1251 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1252 if (flow_tunnel->id == port_tunnel_id) 1253 goto out; 1254 } 1255 flow_tunnel = NULL; 1256 1257 out: 1258 return flow_tunnel; 1259 } 1260 1261 const char * 1262 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1263 { 1264 const char *type; 1265 switch (tunnel->type) { 1266 default: 1267 type = "unknown"; 1268 break; 1269 case RTE_FLOW_ITEM_TYPE_VXLAN: 1270 type = "vxlan"; 1271 break; 1272 case RTE_FLOW_ITEM_TYPE_GRE: 1273 type = "gre"; 1274 break; 1275 case RTE_FLOW_ITEM_TYPE_NVGRE: 1276 type = "nvgre"; 1277 break; 1278 case RTE_FLOW_ITEM_TYPE_GENEVE: 1279 type = "geneve"; 1280 break; 1281 } 1282 1283 return type; 1284 } 1285 1286 struct port_flow_tunnel * 1287 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1288 { 1289 struct rte_port *port = &ports[port_id]; 1290 struct port_flow_tunnel *flow_tunnel; 1291 1292 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1293 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1294 goto out; 1295 } 1296 flow_tunnel = NULL; 1297 1298 out: 1299 return flow_tunnel; 1300 } 1301 1302 void port_flow_tunnel_list(portid_t port_id) 1303 { 1304 struct rte_port *port = &ports[port_id]; 1305 struct port_flow_tunnel *flt; 1306 1307 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1308 printf("port %u tunnel #%u type=%s", 1309 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1310 if (flt->tunnel.tun_id) 1311 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1312 printf("\n"); 1313 } 1314 } 1315 1316 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1317 { 1318 struct rte_port *port = &ports[port_id]; 1319 struct port_flow_tunnel *flt; 1320 1321 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1322 if (flt->id == tunnel_id) 1323 break; 1324 } 1325 if (flt) { 1326 LIST_REMOVE(flt, chain); 1327 free(flt); 1328 printf("port %u: flow tunnel #%u destroyed\n", 1329 port_id, tunnel_id); 1330 } 1331 } 1332 1333 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1334 { 1335 struct rte_port *port = &ports[port_id]; 1336 enum rte_flow_item_type type; 1337 struct port_flow_tunnel *flt; 1338 1339 if (!strcmp(ops->type, "vxlan")) 1340 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1341 else if (!strcmp(ops->type, "gre")) 1342 type = RTE_FLOW_ITEM_TYPE_GRE; 1343 else if (!strcmp(ops->type, "nvgre")) 1344 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1345 else if (!strcmp(ops->type, "geneve")) 1346 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1347 else { 1348 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1349 ops->type); 1350 return; 1351 } 1352 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1353 if (flt->tunnel.type == type) 1354 break; 1355 } 1356 if (!flt) { 1357 flt = calloc(1, sizeof(*flt)); 1358 if (!flt) { 1359 fprintf(stderr, "failed to allocate port flt object\n"); 1360 return; 1361 } 1362 flt->tunnel.type = type; 1363 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1364 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1365 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1366 } 1367 printf("port %d: flow tunnel #%u type %s\n", 1368 port_id, flt->id, ops->type); 1369 } 1370 1371 /** Generate a port_flow entry from attributes/pattern/actions. */ 1372 static struct port_flow * 1373 port_flow_new(const struct rte_flow_attr *attr, 1374 const struct rte_flow_item *pattern, 1375 const struct rte_flow_action *actions, 1376 struct rte_flow_error *error) 1377 { 1378 const struct rte_flow_conv_rule rule = { 1379 .attr_ro = attr, 1380 .pattern_ro = pattern, 1381 .actions_ro = actions, 1382 }; 1383 struct port_flow *pf; 1384 int ret; 1385 1386 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1387 if (ret < 0) 1388 return NULL; 1389 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1390 if (!pf) { 1391 rte_flow_error_set 1392 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1393 "calloc() failed"); 1394 return NULL; 1395 } 1396 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1397 error) >= 0) 1398 return pf; 1399 free(pf); 1400 return NULL; 1401 } 1402 1403 /** Print a message out of a flow error. */ 1404 static int 1405 port_flow_complain(struct rte_flow_error *error) 1406 { 1407 static const char *const errstrlist[] = { 1408 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1409 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1410 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1411 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1412 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1413 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1414 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1415 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1416 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1417 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1418 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1419 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1420 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1421 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1422 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1423 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1424 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1425 }; 1426 const char *errstr; 1427 char buf[32]; 1428 int err = rte_errno; 1429 1430 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1431 !errstrlist[error->type]) 1432 errstr = "unknown type"; 1433 else 1434 errstr = errstrlist[error->type]; 1435 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1436 __func__, error->type, errstr, 1437 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1438 error->cause), buf) : "", 1439 error->message ? error->message : "(no stated reason)", 1440 rte_strerror(err)); 1441 1442 switch (error->type) { 1443 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1444 fprintf(stderr, "The status suggests the use of \"transfer\" " 1445 "as the possible cause of the failure. Make " 1446 "sure that the flow in question and its " 1447 "indirect components (if any) are managed " 1448 "via \"transfer\" proxy port. Use command " 1449 "\"show port (port_id) flow transfer proxy\" " 1450 "to figure out the proxy port ID\n"); 1451 break; 1452 default: 1453 break; 1454 } 1455 1456 return -err; 1457 } 1458 1459 static void 1460 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line) 1461 { 1462 uint16_t total_len = 0; 1463 uint16_t str_len; 1464 uint16_t i; 1465 1466 if (rss_types == 0) 1467 return; 1468 1469 for (i = 0; rss_type_table[i].str; i++) { 1470 if (rss_type_table[i].rss_type == 0) 1471 continue; 1472 1473 if ((rss_types & rss_type_table[i].rss_type) == 1474 rss_type_table[i].rss_type) { 1475 /* Contain two spaces */ 1476 str_len = strlen(rss_type_table[i].str) + 2; 1477 if (total_len + str_len > char_num_per_line) { 1478 printf("\n"); 1479 total_len = 0; 1480 } 1481 printf(" %s", rss_type_table[i].str); 1482 total_len += str_len; 1483 } 1484 } 1485 printf("\n"); 1486 } 1487 1488 static void 1489 rss_config_display(struct rte_flow_action_rss *rss_conf) 1490 { 1491 uint8_t i; 1492 1493 if (rss_conf == NULL) { 1494 fprintf(stderr, "Invalid rule\n"); 1495 return; 1496 } 1497 1498 printf("RSS:\n" 1499 " queues:"); 1500 if (rss_conf->queue_num == 0) 1501 printf(" none"); 1502 for (i = 0; i < rss_conf->queue_num; i++) 1503 printf(" %d", rss_conf->queue[i]); 1504 printf("\n"); 1505 1506 printf(" function: "); 1507 switch (rss_conf->func) { 1508 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1509 printf("default\n"); 1510 break; 1511 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1512 printf("toeplitz\n"); 1513 break; 1514 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1515 printf("simple_xor\n"); 1516 break; 1517 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1518 printf("symmetric_toeplitz\n"); 1519 break; 1520 default: 1521 printf("Unknown function\n"); 1522 return; 1523 } 1524 1525 printf(" RSS key:\n"); 1526 if (rss_conf->key_len == 0) { 1527 printf(" none"); 1528 } else { 1529 printf(" key_len: %u\n", rss_conf->key_len); 1530 printf(" key: "); 1531 if (rss_conf->key == NULL) { 1532 printf("none"); 1533 } else { 1534 for (i = 0; i < rss_conf->key_len; i++) 1535 printf("%02X", rss_conf->key[i]); 1536 } 1537 } 1538 printf("\n"); 1539 1540 printf(" types:\n"); 1541 if (rss_conf->types == 0) { 1542 printf(" none\n"); 1543 return; 1544 } 1545 rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 1546 } 1547 1548 static struct port_indirect_action * 1549 action_get_by_id(portid_t port_id, uint32_t id) 1550 { 1551 struct rte_port *port; 1552 struct port_indirect_action **ppia; 1553 struct port_indirect_action *pia = NULL; 1554 1555 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1556 port_id == (portid_t)RTE_PORT_ALL) 1557 return NULL; 1558 port = &ports[port_id]; 1559 ppia = &port->actions_list; 1560 while (*ppia) { 1561 if ((*ppia)->id == id) { 1562 pia = *ppia; 1563 break; 1564 } 1565 ppia = &(*ppia)->next; 1566 } 1567 if (!pia) 1568 fprintf(stderr, 1569 "Failed to find indirect action #%u on port %u\n", 1570 id, port_id); 1571 return pia; 1572 } 1573 1574 static int 1575 action_alloc(portid_t port_id, uint32_t id, 1576 struct port_indirect_action **action) 1577 { 1578 struct rte_port *port; 1579 struct port_indirect_action **ppia; 1580 struct port_indirect_action *pia = NULL; 1581 1582 *action = NULL; 1583 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1584 port_id == (portid_t)RTE_PORT_ALL) 1585 return -EINVAL; 1586 port = &ports[port_id]; 1587 if (id == UINT32_MAX) { 1588 /* taking first available ID */ 1589 if (port->actions_list) { 1590 if (port->actions_list->id == UINT32_MAX - 1) { 1591 fprintf(stderr, 1592 "Highest indirect action ID is already assigned, delete it first\n"); 1593 return -ENOMEM; 1594 } 1595 id = port->actions_list->id + 1; 1596 } else { 1597 id = 0; 1598 } 1599 } 1600 pia = calloc(1, sizeof(*pia)); 1601 if (!pia) { 1602 fprintf(stderr, 1603 "Allocation of port %u indirect action failed\n", 1604 port_id); 1605 return -ENOMEM; 1606 } 1607 ppia = &port->actions_list; 1608 while (*ppia && (*ppia)->id > id) 1609 ppia = &(*ppia)->next; 1610 if (*ppia && (*ppia)->id == id) { 1611 fprintf(stderr, 1612 "Indirect action #%u is already assigned, delete it first\n", 1613 id); 1614 free(pia); 1615 return -EINVAL; 1616 } 1617 pia->next = *ppia; 1618 pia->id = id; 1619 *ppia = pia; 1620 *action = pia; 1621 return 0; 1622 } 1623 1624 static int 1625 template_alloc(uint32_t id, struct port_template **template, 1626 struct port_template **list) 1627 { 1628 struct port_template *lst = *list; 1629 struct port_template **ppt; 1630 struct port_template *pt = NULL; 1631 1632 *template = NULL; 1633 if (id == UINT32_MAX) { 1634 /* taking first available ID */ 1635 if (lst) { 1636 if (lst->id == UINT32_MAX - 1) { 1637 printf("Highest template ID is already" 1638 " assigned, delete it first\n"); 1639 return -ENOMEM; 1640 } 1641 id = lst->id + 1; 1642 } else { 1643 id = 0; 1644 } 1645 } 1646 pt = calloc(1, sizeof(*pt)); 1647 if (!pt) { 1648 printf("Allocation of port template failed\n"); 1649 return -ENOMEM; 1650 } 1651 ppt = list; 1652 while (*ppt && (*ppt)->id > id) 1653 ppt = &(*ppt)->next; 1654 if (*ppt && (*ppt)->id == id) { 1655 printf("Template #%u is already assigned," 1656 " delete it first\n", id); 1657 free(pt); 1658 return -EINVAL; 1659 } 1660 pt->next = *ppt; 1661 pt->id = id; 1662 *ppt = pt; 1663 *template = pt; 1664 return 0; 1665 } 1666 1667 static int 1668 table_alloc(uint32_t id, struct port_table **table, 1669 struct port_table **list) 1670 { 1671 struct port_table *lst = *list; 1672 struct port_table **ppt; 1673 struct port_table *pt = NULL; 1674 1675 *table = NULL; 1676 if (id == UINT32_MAX) { 1677 /* taking first available ID */ 1678 if (lst) { 1679 if (lst->id == UINT32_MAX - 1) { 1680 printf("Highest table ID is already" 1681 " assigned, delete it first\n"); 1682 return -ENOMEM; 1683 } 1684 id = lst->id + 1; 1685 } else { 1686 id = 0; 1687 } 1688 } 1689 pt = calloc(1, sizeof(*pt)); 1690 if (!pt) { 1691 printf("Allocation of table failed\n"); 1692 return -ENOMEM; 1693 } 1694 ppt = list; 1695 while (*ppt && (*ppt)->id > id) 1696 ppt = &(*ppt)->next; 1697 if (*ppt && (*ppt)->id == id) { 1698 printf("Table #%u is already assigned," 1699 " delete it first\n", id); 1700 free(pt); 1701 return -EINVAL; 1702 } 1703 pt->next = *ppt; 1704 pt->id = id; 1705 *ppt = pt; 1706 *table = pt; 1707 return 0; 1708 } 1709 1710 /** Get info about flow management resources. */ 1711 int 1712 port_flow_get_info(portid_t port_id) 1713 { 1714 struct rte_flow_port_info port_info; 1715 struct rte_flow_queue_info queue_info; 1716 struct rte_flow_error error; 1717 1718 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1719 port_id == (portid_t)RTE_PORT_ALL) 1720 return -EINVAL; 1721 /* Poisoning to make sure PMDs update it in case of error. */ 1722 memset(&error, 0x99, sizeof(error)); 1723 memset(&port_info, 0, sizeof(port_info)); 1724 memset(&queue_info, 0, sizeof(queue_info)); 1725 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1726 return port_flow_complain(&error); 1727 printf("Flow engine resources on port %u:\n" 1728 "Number of queues: %d\n" 1729 "Size of queues: %d\n" 1730 "Number of counters: %d\n" 1731 "Number of aging objects: %d\n" 1732 "Number of meter actions: %d\n", 1733 port_id, port_info.max_nb_queues, 1734 queue_info.max_size, 1735 port_info.max_nb_counters, 1736 port_info.max_nb_aging_objects, 1737 port_info.max_nb_meters); 1738 return 0; 1739 } 1740 1741 /** Configure flow management resources. */ 1742 int 1743 port_flow_configure(portid_t port_id, 1744 const struct rte_flow_port_attr *port_attr, 1745 uint16_t nb_queue, 1746 const struct rte_flow_queue_attr *queue_attr) 1747 { 1748 struct rte_port *port; 1749 struct rte_flow_error error; 1750 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1751 int std_queue; 1752 1753 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1754 port_id == (portid_t)RTE_PORT_ALL) 1755 return -EINVAL; 1756 port = &ports[port_id]; 1757 port->queue_nb = nb_queue; 1758 port->queue_sz = queue_attr->size; 1759 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1760 attr_list[std_queue] = queue_attr; 1761 /* Poisoning to make sure PMDs update it in case of error. */ 1762 memset(&error, 0x66, sizeof(error)); 1763 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1764 return port_flow_complain(&error); 1765 printf("Configure flows on port %u: " 1766 "number of queues %d with %d elements\n", 1767 port_id, nb_queue, queue_attr->size); 1768 return 0; 1769 } 1770 1771 static int 1772 action_handle_create(portid_t port_id, 1773 struct port_indirect_action *pia, 1774 const struct rte_flow_indir_action_conf *conf, 1775 const struct rte_flow_action *action, 1776 struct rte_flow_error *error) 1777 { 1778 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1779 struct rte_flow_action_age *age = 1780 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1781 1782 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1783 age->context = &pia->age_type; 1784 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1785 struct rte_flow_action_conntrack *ct = 1786 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1787 1788 memcpy(ct, &conntrack_context, sizeof(*ct)); 1789 } 1790 pia->type = action->type; 1791 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1792 error); 1793 return pia->handle ? 0 : -1; 1794 } 1795 1796 static int 1797 action_list_handle_create(portid_t port_id, 1798 struct port_indirect_action *pia, 1799 const struct rte_flow_indir_action_conf *conf, 1800 const struct rte_flow_action *actions, 1801 struct rte_flow_error *error) 1802 { 1803 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 1804 pia->list_handle = 1805 rte_flow_action_list_handle_create(port_id, conf, 1806 actions, error); 1807 return pia->list_handle ? 0 : -1; 1808 } 1809 /** Create indirect action */ 1810 int 1811 port_action_handle_create(portid_t port_id, uint32_t id, bool indirect_list, 1812 const struct rte_flow_indir_action_conf *conf, 1813 const struct rte_flow_action *action) 1814 { 1815 struct port_indirect_action *pia; 1816 int ret; 1817 struct rte_flow_error error; 1818 1819 ret = action_alloc(port_id, id, &pia); 1820 if (ret) 1821 return ret; 1822 /* Poisoning to make sure PMDs update it in case of error. */ 1823 memset(&error, 0x22, sizeof(error)); 1824 ret = indirect_list ? 1825 action_list_handle_create(port_id, pia, conf, action, &error) : 1826 action_handle_create(port_id, pia, conf, action, &error); 1827 if (ret) { 1828 uint32_t destroy_id = pia->id; 1829 port_action_handle_destroy(port_id, 1, &destroy_id); 1830 return port_flow_complain(&error); 1831 } 1832 printf("Indirect action #%u created\n", pia->id); 1833 return 0; 1834 } 1835 1836 /** Destroy indirect action */ 1837 int 1838 port_action_handle_destroy(portid_t port_id, 1839 uint32_t n, 1840 const uint32_t *actions) 1841 { 1842 struct rte_port *port; 1843 struct port_indirect_action **tmp; 1844 int ret = 0; 1845 1846 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1847 port_id == (portid_t)RTE_PORT_ALL) 1848 return -EINVAL; 1849 port = &ports[port_id]; 1850 tmp = &port->actions_list; 1851 while (*tmp) { 1852 uint32_t i; 1853 1854 for (i = 0; i != n; ++i) { 1855 struct rte_flow_error error; 1856 struct port_indirect_action *pia = *tmp; 1857 1858 if (actions[i] != pia->id) 1859 continue; 1860 /* 1861 * Poisoning to make sure PMDs update it in case 1862 * of error. 1863 */ 1864 memset(&error, 0x33, sizeof(error)); 1865 1866 if (pia->handle) { 1867 ret = pia->type == 1868 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1869 rte_flow_action_list_handle_destroy 1870 (port_id, pia->list_handle, &error) : 1871 rte_flow_action_handle_destroy 1872 (port_id, pia->handle, &error); 1873 if (ret) { 1874 ret = port_flow_complain(&error); 1875 continue; 1876 } 1877 } 1878 *tmp = pia->next; 1879 printf("Indirect action #%u destroyed\n", pia->id); 1880 free(pia); 1881 break; 1882 } 1883 if (i == n) 1884 tmp = &(*tmp)->next; 1885 } 1886 return ret; 1887 } 1888 1889 int 1890 port_action_handle_flush(portid_t port_id) 1891 { 1892 struct rte_port *port; 1893 struct port_indirect_action **tmp; 1894 int ret = 0; 1895 1896 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1897 port_id == (portid_t)RTE_PORT_ALL) 1898 return -EINVAL; 1899 port = &ports[port_id]; 1900 tmp = &port->actions_list; 1901 while (*tmp != NULL) { 1902 struct rte_flow_error error; 1903 struct port_indirect_action *pia = *tmp; 1904 1905 /* Poisoning to make sure PMDs update it in case of error. */ 1906 memset(&error, 0x44, sizeof(error)); 1907 if (pia->handle != NULL) { 1908 ret = pia->type == 1909 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1910 rte_flow_action_list_handle_destroy 1911 (port_id, pia->list_handle, &error) : 1912 rte_flow_action_handle_destroy 1913 (port_id, pia->handle, &error); 1914 if (ret) { 1915 printf("Indirect action #%u not destroyed\n", 1916 pia->id); 1917 ret = port_flow_complain(&error); 1918 } 1919 tmp = &pia->next; 1920 } else { 1921 *tmp = pia->next; 1922 free(pia); 1923 } 1924 } 1925 return ret; 1926 } 1927 1928 /** Get indirect action by port + id */ 1929 struct rte_flow_action_handle * 1930 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1931 { 1932 1933 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1934 1935 return (pia) ? pia->handle : NULL; 1936 } 1937 1938 /** Update indirect action */ 1939 int 1940 port_action_handle_update(portid_t port_id, uint32_t id, 1941 const struct rte_flow_action *action) 1942 { 1943 struct rte_flow_error error; 1944 struct rte_flow_action_handle *action_handle; 1945 struct port_indirect_action *pia; 1946 const void *update; 1947 1948 action_handle = port_action_handle_get_by_id(port_id, id); 1949 if (!action_handle) 1950 return -EINVAL; 1951 pia = action_get_by_id(port_id, id); 1952 if (!pia) 1953 return -EINVAL; 1954 switch (pia->type) { 1955 case RTE_FLOW_ACTION_TYPE_AGE: 1956 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1957 update = action->conf; 1958 break; 1959 default: 1960 update = action; 1961 break; 1962 } 1963 if (rte_flow_action_handle_update(port_id, action_handle, update, 1964 &error)) { 1965 return port_flow_complain(&error); 1966 } 1967 printf("Indirect action #%u updated\n", id); 1968 return 0; 1969 } 1970 1971 static void 1972 port_action_handle_query_dump(portid_t port_id, 1973 const struct port_indirect_action *pia, 1974 union port_action_query *query) 1975 { 1976 if (!pia || !query) 1977 return; 1978 switch (pia->type) { 1979 case RTE_FLOW_ACTION_TYPE_AGE: 1980 printf("Indirect AGE action:\n" 1981 " aged: %u\n" 1982 " sec_since_last_hit_valid: %u\n" 1983 " sec_since_last_hit: %" PRIu32 "\n", 1984 query->age.aged, 1985 query->age.sec_since_last_hit_valid, 1986 query->age.sec_since_last_hit); 1987 break; 1988 case RTE_FLOW_ACTION_TYPE_COUNT: 1989 printf("Indirect COUNT action:\n" 1990 " hits_set: %u\n" 1991 " bytes_set: %u\n" 1992 " hits: %" PRIu64 "\n" 1993 " bytes: %" PRIu64 "\n", 1994 query->count.hits_set, 1995 query->count.bytes_set, 1996 query->count.hits, 1997 query->count.bytes); 1998 break; 1999 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 2000 printf("Conntrack Context:\n" 2001 " Peer: %u, Flow dir: %s, Enable: %u\n" 2002 " Live: %u, SACK: %u, CACK: %u\n" 2003 " Packet dir: %s, Liberal: %u, State: %u\n" 2004 " Factor: %u, Retrans: %u, TCP flags: %u\n" 2005 " Last Seq: %u, Last ACK: %u\n" 2006 " Last Win: %u, Last End: %u\n", 2007 query->ct.peer_port, 2008 query->ct.is_original_dir ? "Original" : "Reply", 2009 query->ct.enable, query->ct.live_connection, 2010 query->ct.selective_ack, query->ct.challenge_ack_passed, 2011 query->ct.last_direction ? "Original" : "Reply", 2012 query->ct.liberal_mode, query->ct.state, 2013 query->ct.max_ack_window, query->ct.retransmission_limit, 2014 query->ct.last_index, query->ct.last_seq, 2015 query->ct.last_ack, query->ct.last_window, 2016 query->ct.last_end); 2017 printf(" Original Dir:\n" 2018 " scale: %u, fin: %u, ack seen: %u\n" 2019 " unacked data: %u\n Sent end: %u," 2020 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2021 query->ct.original_dir.scale, 2022 query->ct.original_dir.close_initiated, 2023 query->ct.original_dir.last_ack_seen, 2024 query->ct.original_dir.data_unacked, 2025 query->ct.original_dir.sent_end, 2026 query->ct.original_dir.reply_end, 2027 query->ct.original_dir.max_win, 2028 query->ct.original_dir.max_ack); 2029 printf(" Reply Dir:\n" 2030 " scale: %u, fin: %u, ack seen: %u\n" 2031 " unacked data: %u\n Sent end: %u," 2032 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2033 query->ct.reply_dir.scale, 2034 query->ct.reply_dir.close_initiated, 2035 query->ct.reply_dir.last_ack_seen, 2036 query->ct.reply_dir.data_unacked, 2037 query->ct.reply_dir.sent_end, 2038 query->ct.reply_dir.reply_end, 2039 query->ct.reply_dir.max_win, 2040 query->ct.reply_dir.max_ack); 2041 break; 2042 case RTE_FLOW_ACTION_TYPE_QUOTA: 2043 printf("Indirect QUOTA action %u\n" 2044 " unused quota: %" PRId64 "\n", 2045 pia->id, query->quota.quota); 2046 break; 2047 default: 2048 printf("port-%u: indirect action %u (type: %d) doesn't support query\n", 2049 pia->type, pia->id, port_id); 2050 break; 2051 } 2052 2053 } 2054 2055 void 2056 port_action_handle_query_update(portid_t port_id, uint32_t id, 2057 enum rte_flow_query_update_mode qu_mode, 2058 const struct rte_flow_action *action) 2059 { 2060 int ret; 2061 struct rte_flow_error error; 2062 struct port_indirect_action *pia; 2063 union port_action_query query; 2064 2065 pia = action_get_by_id(port_id, id); 2066 if (!pia || !pia->handle) 2067 return; 2068 ret = rte_flow_action_handle_query_update(port_id, pia->handle, action, 2069 &query, qu_mode, &error); 2070 if (ret) 2071 port_flow_complain(&error); 2072 else 2073 port_action_handle_query_dump(port_id, pia, &query); 2074 2075 } 2076 2077 int 2078 port_action_handle_query(portid_t port_id, uint32_t id) 2079 { 2080 struct rte_flow_error error; 2081 struct port_indirect_action *pia; 2082 union port_action_query query; 2083 2084 pia = action_get_by_id(port_id, id); 2085 if (!pia) 2086 return -EINVAL; 2087 switch (pia->type) { 2088 case RTE_FLOW_ACTION_TYPE_AGE: 2089 case RTE_FLOW_ACTION_TYPE_COUNT: 2090 case RTE_FLOW_ACTION_TYPE_QUOTA: 2091 break; 2092 default: 2093 fprintf(stderr, 2094 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2095 id, pia->type, port_id); 2096 return -ENOTSUP; 2097 } 2098 /* Poisoning to make sure PMDs update it in case of error. */ 2099 memset(&error, 0x55, sizeof(error)); 2100 memset(&query, 0, sizeof(query)); 2101 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 2102 return port_flow_complain(&error); 2103 port_action_handle_query_dump(port_id, pia, &query); 2104 return 0; 2105 } 2106 2107 static struct port_flow_tunnel * 2108 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 2109 const struct rte_flow_item *pattern, 2110 const struct rte_flow_action *actions, 2111 const struct tunnel_ops *tunnel_ops) 2112 { 2113 int ret; 2114 struct rte_port *port; 2115 struct port_flow_tunnel *pft; 2116 struct rte_flow_error error; 2117 2118 port = &ports[port_id]; 2119 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2120 if (!pft) { 2121 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2122 tunnel_ops->id); 2123 return NULL; 2124 } 2125 if (tunnel_ops->actions) { 2126 uint32_t num_actions; 2127 const struct rte_flow_action *aptr; 2128 2129 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2130 &pft->pmd_actions, 2131 &pft->num_pmd_actions, 2132 &error); 2133 if (ret) { 2134 port_flow_complain(&error); 2135 return NULL; 2136 } 2137 for (aptr = actions, num_actions = 1; 2138 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2139 aptr++, num_actions++); 2140 pft->actions = malloc( 2141 (num_actions + pft->num_pmd_actions) * 2142 sizeof(actions[0])); 2143 if (!pft->actions) { 2144 rte_flow_tunnel_action_decap_release( 2145 port_id, pft->actions, 2146 pft->num_pmd_actions, &error); 2147 return NULL; 2148 } 2149 rte_memcpy(pft->actions, pft->pmd_actions, 2150 pft->num_pmd_actions * sizeof(actions[0])); 2151 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2152 num_actions * sizeof(actions[0])); 2153 } 2154 if (tunnel_ops->items) { 2155 uint32_t num_items; 2156 const struct rte_flow_item *iptr; 2157 2158 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2159 &pft->pmd_items, 2160 &pft->num_pmd_items, 2161 &error); 2162 if (ret) { 2163 port_flow_complain(&error); 2164 return NULL; 2165 } 2166 for (iptr = pattern, num_items = 1; 2167 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2168 iptr++, num_items++); 2169 pft->items = malloc((num_items + pft->num_pmd_items) * 2170 sizeof(pattern[0])); 2171 if (!pft->items) { 2172 rte_flow_tunnel_item_release( 2173 port_id, pft->pmd_items, 2174 pft->num_pmd_items, &error); 2175 return NULL; 2176 } 2177 rte_memcpy(pft->items, pft->pmd_items, 2178 pft->num_pmd_items * sizeof(pattern[0])); 2179 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2180 num_items * sizeof(pattern[0])); 2181 } 2182 2183 return pft; 2184 } 2185 2186 static void 2187 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2188 const struct tunnel_ops *tunnel_ops, 2189 struct port_flow_tunnel *pft) 2190 { 2191 struct rte_flow_error error; 2192 2193 if (tunnel_ops->actions) { 2194 free(pft->actions); 2195 rte_flow_tunnel_action_decap_release( 2196 port_id, pft->pmd_actions, 2197 pft->num_pmd_actions, &error); 2198 pft->actions = NULL; 2199 pft->pmd_actions = NULL; 2200 } 2201 if (tunnel_ops->items) { 2202 free(pft->items); 2203 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2204 pft->num_pmd_items, 2205 &error); 2206 pft->items = NULL; 2207 pft->pmd_items = NULL; 2208 } 2209 } 2210 2211 /** Add port meter policy */ 2212 int 2213 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2214 const struct rte_flow_action *actions) 2215 { 2216 struct rte_mtr_error error; 2217 const struct rte_flow_action *act = actions; 2218 const struct rte_flow_action *start; 2219 struct rte_mtr_meter_policy_params policy; 2220 uint32_t i = 0, act_n; 2221 int ret; 2222 2223 for (i = 0; i < RTE_COLORS; i++) { 2224 for (act_n = 0, start = act; 2225 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2226 act_n++; 2227 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2228 policy.actions[i] = start; 2229 else 2230 policy.actions[i] = NULL; 2231 act++; 2232 } 2233 ret = rte_mtr_meter_policy_add(port_id, 2234 policy_id, 2235 &policy, &error); 2236 if (ret) 2237 print_mtr_err_msg(&error); 2238 return ret; 2239 } 2240 2241 struct rte_flow_meter_profile * 2242 port_meter_profile_get_by_id(portid_t port_id, uint32_t id) 2243 { 2244 struct rte_mtr_error error; 2245 struct rte_flow_meter_profile *profile; 2246 2247 profile = rte_mtr_meter_profile_get(port_id, id, &error); 2248 if (!profile) 2249 print_mtr_err_msg(&error); 2250 return profile; 2251 } 2252 struct rte_flow_meter_policy * 2253 port_meter_policy_get_by_id(portid_t port_id, uint32_t id) 2254 { 2255 struct rte_mtr_error error; 2256 struct rte_flow_meter_policy *policy; 2257 2258 policy = rte_mtr_meter_policy_get(port_id, id, &error); 2259 if (!policy) 2260 print_mtr_err_msg(&error); 2261 return policy; 2262 } 2263 2264 /** Validate flow rule. */ 2265 int 2266 port_flow_validate(portid_t port_id, 2267 const struct rte_flow_attr *attr, 2268 const struct rte_flow_item *pattern, 2269 const struct rte_flow_action *actions, 2270 const struct tunnel_ops *tunnel_ops) 2271 { 2272 struct rte_flow_error error; 2273 struct port_flow_tunnel *pft = NULL; 2274 int ret; 2275 2276 /* Poisoning to make sure PMDs update it in case of error. */ 2277 memset(&error, 0x11, sizeof(error)); 2278 if (tunnel_ops->enabled) { 2279 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2280 actions, tunnel_ops); 2281 if (!pft) 2282 return -ENOENT; 2283 if (pft->items) 2284 pattern = pft->items; 2285 if (pft->actions) 2286 actions = pft->actions; 2287 } 2288 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2289 if (tunnel_ops->enabled) 2290 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2291 if (ret) 2292 return port_flow_complain(&error); 2293 printf("Flow rule validated\n"); 2294 return 0; 2295 } 2296 2297 /** Return age action structure if exists, otherwise NULL. */ 2298 static struct rte_flow_action_age * 2299 age_action_get(const struct rte_flow_action *actions) 2300 { 2301 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2302 switch (actions->type) { 2303 case RTE_FLOW_ACTION_TYPE_AGE: 2304 return (struct rte_flow_action_age *) 2305 (uintptr_t)actions->conf; 2306 default: 2307 break; 2308 } 2309 } 2310 return NULL; 2311 } 2312 2313 /** Create pattern template */ 2314 int 2315 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2316 const struct rte_flow_pattern_template_attr *attr, 2317 const struct rte_flow_item *pattern) 2318 { 2319 struct rte_port *port; 2320 struct port_template *pit; 2321 int ret; 2322 struct rte_flow_error error; 2323 2324 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2325 port_id == (portid_t)RTE_PORT_ALL) 2326 return -EINVAL; 2327 port = &ports[port_id]; 2328 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2329 if (ret) 2330 return ret; 2331 /* Poisoning to make sure PMDs update it in case of error. */ 2332 memset(&error, 0x22, sizeof(error)); 2333 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2334 attr, pattern, &error); 2335 if (!pit->template.pattern_template) { 2336 uint32_t destroy_id = pit->id; 2337 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2338 return port_flow_complain(&error); 2339 } 2340 printf("Pattern template #%u created\n", pit->id); 2341 return 0; 2342 } 2343 2344 /** Destroy pattern template */ 2345 int 2346 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2347 const uint32_t *template) 2348 { 2349 struct rte_port *port; 2350 struct port_template **tmp; 2351 int ret = 0; 2352 2353 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2354 port_id == (portid_t)RTE_PORT_ALL) 2355 return -EINVAL; 2356 port = &ports[port_id]; 2357 tmp = &port->pattern_templ_list; 2358 while (*tmp) { 2359 uint32_t i; 2360 2361 for (i = 0; i != n; ++i) { 2362 struct rte_flow_error error; 2363 struct port_template *pit = *tmp; 2364 2365 if (template[i] != pit->id) 2366 continue; 2367 /* 2368 * Poisoning to make sure PMDs update it in case 2369 * of error. 2370 */ 2371 memset(&error, 0x33, sizeof(error)); 2372 2373 if (pit->template.pattern_template && 2374 rte_flow_pattern_template_destroy(port_id, 2375 pit->template.pattern_template, 2376 &error)) { 2377 ret = port_flow_complain(&error); 2378 continue; 2379 } 2380 *tmp = pit->next; 2381 printf("Pattern template #%u destroyed\n", pit->id); 2382 free(pit); 2383 break; 2384 } 2385 if (i == n) 2386 tmp = &(*tmp)->next; 2387 } 2388 return ret; 2389 } 2390 2391 /** Flush pattern template */ 2392 int 2393 port_flow_pattern_template_flush(portid_t port_id) 2394 { 2395 struct rte_port *port; 2396 struct port_template **tmp; 2397 int ret = 0; 2398 2399 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2400 port_id == (portid_t)RTE_PORT_ALL) 2401 return -EINVAL; 2402 port = &ports[port_id]; 2403 tmp = &port->pattern_templ_list; 2404 while (*tmp) { 2405 struct rte_flow_error error; 2406 struct port_template *pit = *tmp; 2407 2408 /* 2409 * Poisoning to make sure PMDs update it in case 2410 * of error. 2411 */ 2412 memset(&error, 0x33, sizeof(error)); 2413 if (pit->template.pattern_template && 2414 rte_flow_pattern_template_destroy(port_id, 2415 pit->template.pattern_template, &error)) { 2416 printf("Pattern template #%u not destroyed\n", pit->id); 2417 ret = port_flow_complain(&error); 2418 tmp = &pit->next; 2419 } else { 2420 *tmp = pit->next; 2421 free(pit); 2422 } 2423 } 2424 return ret; 2425 } 2426 2427 /** Create actions template */ 2428 int 2429 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2430 const struct rte_flow_actions_template_attr *attr, 2431 const struct rte_flow_action *actions, 2432 const struct rte_flow_action *masks) 2433 { 2434 struct rte_port *port; 2435 struct port_template *pat; 2436 int ret; 2437 struct rte_flow_error error; 2438 2439 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2440 port_id == (portid_t)RTE_PORT_ALL) 2441 return -EINVAL; 2442 port = &ports[port_id]; 2443 ret = template_alloc(id, &pat, &port->actions_templ_list); 2444 if (ret) 2445 return ret; 2446 /* Poisoning to make sure PMDs update it in case of error. */ 2447 memset(&error, 0x22, sizeof(error)); 2448 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2449 attr, actions, masks, &error); 2450 if (!pat->template.actions_template) { 2451 uint32_t destroy_id = pat->id; 2452 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2453 return port_flow_complain(&error); 2454 } 2455 printf("Actions template #%u created\n", pat->id); 2456 return 0; 2457 } 2458 2459 /** Destroy actions template */ 2460 int 2461 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2462 const uint32_t *template) 2463 { 2464 struct rte_port *port; 2465 struct port_template **tmp; 2466 int ret = 0; 2467 2468 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2469 port_id == (portid_t)RTE_PORT_ALL) 2470 return -EINVAL; 2471 port = &ports[port_id]; 2472 tmp = &port->actions_templ_list; 2473 while (*tmp) { 2474 uint32_t i; 2475 2476 for (i = 0; i != n; ++i) { 2477 struct rte_flow_error error; 2478 struct port_template *pat = *tmp; 2479 2480 if (template[i] != pat->id) 2481 continue; 2482 /* 2483 * Poisoning to make sure PMDs update it in case 2484 * of error. 2485 */ 2486 memset(&error, 0x33, sizeof(error)); 2487 2488 if (pat->template.actions_template && 2489 rte_flow_actions_template_destroy(port_id, 2490 pat->template.actions_template, &error)) { 2491 ret = port_flow_complain(&error); 2492 continue; 2493 } 2494 *tmp = pat->next; 2495 printf("Actions template #%u destroyed\n", pat->id); 2496 free(pat); 2497 break; 2498 } 2499 if (i == n) 2500 tmp = &(*tmp)->next; 2501 } 2502 return ret; 2503 } 2504 2505 /** Flush actions template */ 2506 int 2507 port_flow_actions_template_flush(portid_t port_id) 2508 { 2509 struct rte_port *port; 2510 struct port_template **tmp; 2511 int ret = 0; 2512 2513 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2514 port_id == (portid_t)RTE_PORT_ALL) 2515 return -EINVAL; 2516 port = &ports[port_id]; 2517 tmp = &port->actions_templ_list; 2518 while (*tmp) { 2519 struct rte_flow_error error; 2520 struct port_template *pat = *tmp; 2521 2522 /* 2523 * Poisoning to make sure PMDs update it in case 2524 * of error. 2525 */ 2526 memset(&error, 0x33, sizeof(error)); 2527 2528 if (pat->template.actions_template && 2529 rte_flow_actions_template_destroy(port_id, 2530 pat->template.actions_template, &error)) { 2531 ret = port_flow_complain(&error); 2532 printf("Actions template #%u not destroyed\n", pat->id); 2533 tmp = &pat->next; 2534 } else { 2535 *tmp = pat->next; 2536 free(pat); 2537 } 2538 } 2539 return ret; 2540 } 2541 2542 /** Create table */ 2543 int 2544 port_flow_template_table_create(portid_t port_id, uint32_t id, 2545 const struct rte_flow_template_table_attr *table_attr, 2546 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2547 uint32_t nb_actions_templates, uint32_t *actions_templates) 2548 { 2549 struct rte_port *port; 2550 struct port_table *pt; 2551 struct port_template *temp = NULL; 2552 int ret; 2553 uint32_t i; 2554 struct rte_flow_error error; 2555 struct rte_flow_pattern_template 2556 *flow_pattern_templates[nb_pattern_templates]; 2557 struct rte_flow_actions_template 2558 *flow_actions_templates[nb_actions_templates]; 2559 2560 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2561 port_id == (portid_t)RTE_PORT_ALL) 2562 return -EINVAL; 2563 port = &ports[port_id]; 2564 for (i = 0; i < nb_pattern_templates; ++i) { 2565 bool found = false; 2566 temp = port->pattern_templ_list; 2567 while (temp) { 2568 if (pattern_templates[i] == temp->id) { 2569 flow_pattern_templates[i] = 2570 temp->template.pattern_template; 2571 found = true; 2572 break; 2573 } 2574 temp = temp->next; 2575 } 2576 if (!found) { 2577 printf("Pattern template #%u is invalid\n", 2578 pattern_templates[i]); 2579 return -EINVAL; 2580 } 2581 } 2582 for (i = 0; i < nb_actions_templates; ++i) { 2583 bool found = false; 2584 temp = port->actions_templ_list; 2585 while (temp) { 2586 if (actions_templates[i] == temp->id) { 2587 flow_actions_templates[i] = 2588 temp->template.actions_template; 2589 found = true; 2590 break; 2591 } 2592 temp = temp->next; 2593 } 2594 if (!found) { 2595 printf("Actions template #%u is invalid\n", 2596 actions_templates[i]); 2597 return -EINVAL; 2598 } 2599 } 2600 ret = table_alloc(id, &pt, &port->table_list); 2601 if (ret) 2602 return ret; 2603 /* Poisoning to make sure PMDs update it in case of error. */ 2604 memset(&error, 0x22, sizeof(error)); 2605 pt->table = rte_flow_template_table_create(port_id, table_attr, 2606 flow_pattern_templates, nb_pattern_templates, 2607 flow_actions_templates, nb_actions_templates, 2608 &error); 2609 2610 if (!pt->table) { 2611 uint32_t destroy_id = pt->id; 2612 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2613 return port_flow_complain(&error); 2614 } 2615 pt->nb_pattern_templates = nb_pattern_templates; 2616 pt->nb_actions_templates = nb_actions_templates; 2617 rte_memcpy(&pt->flow_attr, &table_attr->flow_attr, 2618 sizeof(struct rte_flow_attr)); 2619 printf("Template table #%u created\n", pt->id); 2620 return 0; 2621 } 2622 2623 /** Destroy table */ 2624 int 2625 port_flow_template_table_destroy(portid_t port_id, 2626 uint32_t n, const uint32_t *table) 2627 { 2628 struct rte_port *port; 2629 struct port_table **tmp; 2630 int ret = 0; 2631 2632 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2633 port_id == (portid_t)RTE_PORT_ALL) 2634 return -EINVAL; 2635 port = &ports[port_id]; 2636 tmp = &port->table_list; 2637 while (*tmp) { 2638 uint32_t i; 2639 2640 for (i = 0; i != n; ++i) { 2641 struct rte_flow_error error; 2642 struct port_table *pt = *tmp; 2643 2644 if (table[i] != pt->id) 2645 continue; 2646 /* 2647 * Poisoning to make sure PMDs update it in case 2648 * of error. 2649 */ 2650 memset(&error, 0x33, sizeof(error)); 2651 2652 if (pt->table && 2653 rte_flow_template_table_destroy(port_id, 2654 pt->table, 2655 &error)) { 2656 ret = port_flow_complain(&error); 2657 continue; 2658 } 2659 *tmp = pt->next; 2660 printf("Template table #%u destroyed\n", pt->id); 2661 free(pt); 2662 break; 2663 } 2664 if (i == n) 2665 tmp = &(*tmp)->next; 2666 } 2667 return ret; 2668 } 2669 2670 /** Flush table */ 2671 int 2672 port_flow_template_table_flush(portid_t port_id) 2673 { 2674 struct rte_port *port; 2675 struct port_table **tmp; 2676 int ret = 0; 2677 2678 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2679 port_id == (portid_t)RTE_PORT_ALL) 2680 return -EINVAL; 2681 port = &ports[port_id]; 2682 tmp = &port->table_list; 2683 while (*tmp) { 2684 struct rte_flow_error error; 2685 struct port_table *pt = *tmp; 2686 2687 /* 2688 * Poisoning to make sure PMDs update it in case 2689 * of error. 2690 */ 2691 memset(&error, 0x33, sizeof(error)); 2692 2693 if (pt->table && 2694 rte_flow_template_table_destroy(port_id, 2695 pt->table, 2696 &error)) { 2697 ret = port_flow_complain(&error); 2698 printf("Template table #%u not destroyed\n", pt->id); 2699 tmp = &pt->next; 2700 } else { 2701 *tmp = pt->next; 2702 free(pt); 2703 } 2704 } 2705 return ret; 2706 } 2707 2708 /** Enqueue create flow rule operation. */ 2709 int 2710 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2711 bool postpone, uint32_t table_id, uint32_t rule_idx, 2712 uint32_t pattern_idx, uint32_t actions_idx, 2713 const struct rte_flow_item *pattern, 2714 const struct rte_flow_action *actions) 2715 { 2716 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2717 struct rte_flow *flow; 2718 struct rte_port *port; 2719 struct port_flow *pf; 2720 struct port_table *pt; 2721 uint32_t id = 0; 2722 bool found; 2723 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2724 struct rte_flow_action_age *age = age_action_get(actions); 2725 struct queue_job *job; 2726 2727 port = &ports[port_id]; 2728 if (port->flow_list) { 2729 if (port->flow_list->id == UINT32_MAX) { 2730 printf("Highest rule ID is already assigned," 2731 " delete it first"); 2732 return -ENOMEM; 2733 } 2734 id = port->flow_list->id + 1; 2735 } 2736 2737 if (queue_id >= port->queue_nb) { 2738 printf("Queue #%u is invalid\n", queue_id); 2739 return -EINVAL; 2740 } 2741 2742 found = false; 2743 pt = port->table_list; 2744 while (pt) { 2745 if (table_id == pt->id) { 2746 found = true; 2747 break; 2748 } 2749 pt = pt->next; 2750 } 2751 if (!found) { 2752 printf("Table #%u is invalid\n", table_id); 2753 return -EINVAL; 2754 } 2755 2756 if (pattern_idx >= pt->nb_pattern_templates) { 2757 printf("Pattern template index #%u is invalid," 2758 " %u templates present in the table\n", 2759 pattern_idx, pt->nb_pattern_templates); 2760 return -EINVAL; 2761 } 2762 if (actions_idx >= pt->nb_actions_templates) { 2763 printf("Actions template index #%u is invalid," 2764 " %u templates present in the table\n", 2765 actions_idx, pt->nb_actions_templates); 2766 return -EINVAL; 2767 } 2768 2769 job = calloc(1, sizeof(*job)); 2770 if (!job) { 2771 printf("Queue flow create job allocate failed\n"); 2772 return -ENOMEM; 2773 } 2774 job->type = QUEUE_JOB_TYPE_FLOW_CREATE; 2775 2776 pf = port_flow_new(&pt->flow_attr, pattern, actions, &error); 2777 if (!pf) { 2778 free(job); 2779 return port_flow_complain(&error); 2780 } 2781 if (age) { 2782 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2783 age->context = &pf->age_type; 2784 } 2785 /* Poisoning to make sure PMDs update it in case of error. */ 2786 memset(&error, 0x11, sizeof(error)); 2787 if (rule_idx == UINT32_MAX) 2788 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2789 pattern, pattern_idx, actions, actions_idx, job, &error); 2790 else 2791 flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table, 2792 rule_idx, actions, actions_idx, job, &error); 2793 if (!flow) { 2794 uint32_t flow_id = pf->id; 2795 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2796 free(job); 2797 return port_flow_complain(&error); 2798 } 2799 2800 pf->next = port->flow_list; 2801 pf->id = id; 2802 pf->table = pt; 2803 pf->flow = flow; 2804 job->pf = pf; 2805 port->flow_list = pf; 2806 printf("Flow rule #%u creation enqueued\n", pf->id); 2807 return 0; 2808 } 2809 2810 /** Enqueue number of destroy flow rules operations. */ 2811 int 2812 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2813 bool postpone, uint32_t n, const uint32_t *rule) 2814 { 2815 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2816 struct rte_port *port; 2817 struct port_flow **tmp; 2818 int ret = 0; 2819 struct queue_job *job; 2820 2821 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2822 port_id == (portid_t)RTE_PORT_ALL) 2823 return -EINVAL; 2824 port = &ports[port_id]; 2825 2826 if (queue_id >= port->queue_nb) { 2827 printf("Queue #%u is invalid\n", queue_id); 2828 return -EINVAL; 2829 } 2830 2831 tmp = &port->flow_list; 2832 while (*tmp) { 2833 uint32_t i; 2834 2835 for (i = 0; i != n; ++i) { 2836 struct rte_flow_error error; 2837 struct port_flow *pf = *tmp; 2838 2839 if (rule[i] != pf->id) 2840 continue; 2841 /* 2842 * Poisoning to make sure PMD 2843 * update it in case of error. 2844 */ 2845 memset(&error, 0x33, sizeof(error)); 2846 job = calloc(1, sizeof(*job)); 2847 if (!job) { 2848 printf("Queue flow destroy job allocate failed\n"); 2849 return -ENOMEM; 2850 } 2851 job->type = QUEUE_JOB_TYPE_FLOW_DESTROY; 2852 job->pf = pf; 2853 2854 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2855 pf->flow, job, &error)) { 2856 free(job); 2857 ret = port_flow_complain(&error); 2858 continue; 2859 } 2860 printf("Flow rule #%u destruction enqueued\n", pf->id); 2861 *tmp = pf->next; 2862 break; 2863 } 2864 if (i == n) 2865 tmp = &(*tmp)->next; 2866 } 2867 return ret; 2868 } 2869 2870 static void 2871 queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2872 struct port_indirect_action *pia, 2873 struct queue_job *job, 2874 const struct rte_flow_op_attr *attr, 2875 const struct rte_flow_indir_action_conf *conf, 2876 const struct rte_flow_action *action, 2877 struct rte_flow_error *error) 2878 { 2879 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2880 struct rte_flow_action_age *age = 2881 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2882 2883 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2884 age->context = &pia->age_type; 2885 } 2886 /* Poisoning to make sure PMDs update it in case of error. */ 2887 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2888 attr, conf, action, 2889 job, error); 2890 pia->type = action->type; 2891 } 2892 2893 static void 2894 queue_action_list_handle_create(portid_t port_id, uint32_t queue_id, 2895 struct port_indirect_action *pia, 2896 struct queue_job *job, 2897 const struct rte_flow_op_attr *attr, 2898 const struct rte_flow_indir_action_conf *conf, 2899 const struct rte_flow_action *action, 2900 struct rte_flow_error *error) 2901 { 2902 /* Poisoning to make sure PMDs update it in case of error. */ 2903 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 2904 pia->list_handle = rte_flow_async_action_list_handle_create 2905 (port_id, queue_id, attr, conf, action, 2906 job, error); 2907 } 2908 2909 /** Enqueue update flow rule operation. */ 2910 int 2911 port_queue_flow_update(portid_t port_id, queueid_t queue_id, 2912 bool postpone, uint32_t rule_idx, uint32_t actions_idx, 2913 const struct rte_flow_action *actions) 2914 { 2915 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2916 struct rte_port *port; 2917 struct port_flow *pf, *uf; 2918 struct port_flow **tmp; 2919 struct port_table *pt; 2920 bool found; 2921 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2922 struct rte_flow_action_age *age = age_action_get(actions); 2923 struct queue_job *job; 2924 2925 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2926 port_id == (portid_t)RTE_PORT_ALL) 2927 return -EINVAL; 2928 port = &ports[port_id]; 2929 2930 if (queue_id >= port->queue_nb) { 2931 printf("Queue #%u is invalid\n", queue_id); 2932 return -EINVAL; 2933 } 2934 2935 found = false; 2936 tmp = &port->flow_list; 2937 while (*tmp) { 2938 pf = *tmp; 2939 if (rule_idx == pf->id) { 2940 found = true; 2941 break; 2942 } 2943 tmp = &(*tmp)->next; 2944 } 2945 if (!found) { 2946 printf("Flow rule #%u is invalid\n", rule_idx); 2947 return -EINVAL; 2948 } 2949 2950 pt = pf->table; 2951 if (actions_idx >= pt->nb_actions_templates) { 2952 printf("Actions template index #%u is invalid," 2953 " %u templates present in the table\n", 2954 actions_idx, pt->nb_actions_templates); 2955 return -EINVAL; 2956 } 2957 2958 job = calloc(1, sizeof(*job)); 2959 if (!job) { 2960 printf("Queue flow create job allocate failed\n"); 2961 return -ENOMEM; 2962 } 2963 job->type = QUEUE_JOB_TYPE_FLOW_UPDATE; 2964 2965 uf = port_flow_new(&pt->flow_attr, pf->rule.pattern_ro, actions, &error); 2966 if (!uf) { 2967 free(job); 2968 return port_flow_complain(&error); 2969 } 2970 2971 if (age) { 2972 uf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2973 age->context = &uf->age_type; 2974 } 2975 2976 /* 2977 * Poisoning to make sure PMD update it in case of error. 2978 */ 2979 memset(&error, 0x44, sizeof(error)); 2980 if (rte_flow_async_actions_update(port_id, queue_id, &op_attr, pf->flow, 2981 actions, actions_idx, job, &error)) { 2982 free(uf); 2983 free(job); 2984 return port_flow_complain(&error); 2985 } 2986 uf->next = pf->next; 2987 uf->id = pf->id; 2988 uf->table = pt; 2989 uf->flow = pf->flow; 2990 *tmp = uf; 2991 job->pf = pf; 2992 2993 printf("Flow rule #%u update enqueued\n", pf->id); 2994 return 0; 2995 } 2996 2997 /** Enqueue indirect action create operation. */ 2998 int 2999 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 3000 bool postpone, uint32_t id, 3001 const struct rte_flow_indir_action_conf *conf, 3002 const struct rte_flow_action *action) 3003 { 3004 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3005 struct rte_port *port; 3006 struct port_indirect_action *pia; 3007 int ret; 3008 struct rte_flow_error error; 3009 struct queue_job *job; 3010 bool is_indirect_list = action[1].type != RTE_FLOW_ACTION_TYPE_END; 3011 3012 3013 ret = action_alloc(port_id, id, &pia); 3014 if (ret) 3015 return ret; 3016 3017 port = &ports[port_id]; 3018 if (queue_id >= port->queue_nb) { 3019 printf("Queue #%u is invalid\n", queue_id); 3020 return -EINVAL; 3021 } 3022 job = calloc(1, sizeof(*job)); 3023 if (!job) { 3024 printf("Queue action create job allocate failed\n"); 3025 return -ENOMEM; 3026 } 3027 job->type = QUEUE_JOB_TYPE_ACTION_CREATE; 3028 job->pia = pia; 3029 3030 /* Poisoning to make sure PMDs update it in case of error. */ 3031 memset(&error, 0x88, sizeof(error)); 3032 3033 if (is_indirect_list) 3034 queue_action_list_handle_create(port_id, queue_id, pia, job, 3035 &attr, conf, action, &error); 3036 else 3037 queue_action_handle_create(port_id, queue_id, pia, job, &attr, 3038 conf, action, &error); 3039 3040 if (!pia->handle) { 3041 uint32_t destroy_id = pia->id; 3042 port_queue_action_handle_destroy(port_id, queue_id, 3043 postpone, 1, &destroy_id); 3044 free(job); 3045 return port_flow_complain(&error); 3046 } 3047 printf("Indirect action #%u creation queued\n", pia->id); 3048 return 0; 3049 } 3050 3051 /** Enqueue indirect action destroy operation. */ 3052 int 3053 port_queue_action_handle_destroy(portid_t port_id, 3054 uint32_t queue_id, bool postpone, 3055 uint32_t n, const uint32_t *actions) 3056 { 3057 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3058 struct rte_port *port; 3059 struct port_indirect_action **tmp; 3060 int ret = 0; 3061 struct queue_job *job; 3062 3063 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3064 port_id == (portid_t)RTE_PORT_ALL) 3065 return -EINVAL; 3066 port = &ports[port_id]; 3067 3068 if (queue_id >= port->queue_nb) { 3069 printf("Queue #%u is invalid\n", queue_id); 3070 return -EINVAL; 3071 } 3072 3073 tmp = &port->actions_list; 3074 while (*tmp) { 3075 uint32_t i; 3076 3077 for (i = 0; i != n; ++i) { 3078 struct rte_flow_error error; 3079 struct port_indirect_action *pia = *tmp; 3080 3081 if (actions[i] != pia->id) 3082 continue; 3083 /* 3084 * Poisoning to make sure PMDs update it in case 3085 * of error. 3086 */ 3087 memset(&error, 0x99, sizeof(error)); 3088 job = calloc(1, sizeof(*job)); 3089 if (!job) { 3090 printf("Queue action destroy job allocate failed\n"); 3091 return -ENOMEM; 3092 } 3093 job->type = QUEUE_JOB_TYPE_ACTION_DESTROY; 3094 job->pia = pia; 3095 ret = pia->type == RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 3096 rte_flow_async_action_list_handle_destroy 3097 (port_id, queue_id, 3098 &attr, pia->list_handle, 3099 job, &error) : 3100 rte_flow_async_action_handle_destroy 3101 (port_id, queue_id, &attr, pia->handle, 3102 job, &error); 3103 if (ret) { 3104 free(job); 3105 ret = port_flow_complain(&error); 3106 continue; 3107 } 3108 *tmp = pia->next; 3109 printf("Indirect action #%u destruction queued\n", 3110 pia->id); 3111 break; 3112 } 3113 if (i == n) 3114 tmp = &(*tmp)->next; 3115 } 3116 return ret; 3117 } 3118 3119 /** Enqueue indirect action update operation. */ 3120 int 3121 port_queue_action_handle_update(portid_t port_id, 3122 uint32_t queue_id, bool postpone, uint32_t id, 3123 const struct rte_flow_action *action) 3124 { 3125 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3126 struct rte_port *port; 3127 struct rte_flow_error error; 3128 struct rte_flow_action_handle *action_handle; 3129 struct queue_job *job; 3130 struct port_indirect_action *pia; 3131 struct rte_flow_update_meter_mark mtr_update; 3132 const void *update; 3133 3134 action_handle = port_action_handle_get_by_id(port_id, id); 3135 if (!action_handle) 3136 return -EINVAL; 3137 3138 port = &ports[port_id]; 3139 if (queue_id >= port->queue_nb) { 3140 printf("Queue #%u is invalid\n", queue_id); 3141 return -EINVAL; 3142 } 3143 3144 job = calloc(1, sizeof(*job)); 3145 if (!job) { 3146 printf("Queue action update job allocate failed\n"); 3147 return -ENOMEM; 3148 } 3149 job->type = QUEUE_JOB_TYPE_ACTION_UPDATE; 3150 3151 pia = action_get_by_id(port_id, id); 3152 if (!pia) { 3153 free(job); 3154 return -EINVAL; 3155 } 3156 3157 switch (pia->type) { 3158 case RTE_FLOW_ACTION_TYPE_AGE: 3159 update = action->conf; 3160 break; 3161 case RTE_FLOW_ACTION_TYPE_METER_MARK: 3162 rte_memcpy(&mtr_update.meter_mark, action->conf, 3163 sizeof(struct rte_flow_action_meter_mark)); 3164 mtr_update.profile_valid = 1; 3165 mtr_update.policy_valid = 1; 3166 mtr_update.color_mode_valid = 1; 3167 mtr_update.init_color_valid = 1; 3168 mtr_update.state_valid = 1; 3169 update = &mtr_update; 3170 break; 3171 default: 3172 update = action; 3173 break; 3174 } 3175 3176 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 3177 action_handle, update, job, &error)) { 3178 free(job); 3179 return port_flow_complain(&error); 3180 } 3181 printf("Indirect action #%u update queued\n", id); 3182 return 0; 3183 } 3184 3185 void 3186 port_queue_action_handle_query_update(portid_t port_id, 3187 uint32_t queue_id, bool postpone, 3188 uint32_t id, 3189 enum rte_flow_query_update_mode qu_mode, 3190 const struct rte_flow_action *action) 3191 { 3192 int ret; 3193 struct rte_flow_error error; 3194 struct port_indirect_action *pia = action_get_by_id(port_id, id); 3195 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3196 struct queue_job *job; 3197 3198 if (!pia || !pia->handle) 3199 return; 3200 job = calloc(1, sizeof(*job)); 3201 if (!job) 3202 return; 3203 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3204 job->pia = pia; 3205 3206 ret = rte_flow_async_action_handle_query_update(port_id, queue_id, 3207 &attr, pia->handle, 3208 action, 3209 &job->query, 3210 qu_mode, job, 3211 &error); 3212 if (ret) { 3213 port_flow_complain(&error); 3214 free(job); 3215 } else { 3216 printf("port-%u: indirect action #%u update-and-query queued\n", 3217 port_id, id); 3218 } 3219 } 3220 3221 /** Enqueue indirect action query operation. */ 3222 int 3223 port_queue_action_handle_query(portid_t port_id, 3224 uint32_t queue_id, bool postpone, uint32_t id) 3225 { 3226 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3227 struct rte_port *port; 3228 struct rte_flow_error error; 3229 struct rte_flow_action_handle *action_handle; 3230 struct port_indirect_action *pia; 3231 struct queue_job *job; 3232 3233 pia = action_get_by_id(port_id, id); 3234 action_handle = pia ? pia->handle : NULL; 3235 if (!action_handle) 3236 return -EINVAL; 3237 3238 port = &ports[port_id]; 3239 if (queue_id >= port->queue_nb) { 3240 printf("Queue #%u is invalid\n", queue_id); 3241 return -EINVAL; 3242 } 3243 3244 job = calloc(1, sizeof(*job)); 3245 if (!job) { 3246 printf("Queue action update job allocate failed\n"); 3247 return -ENOMEM; 3248 } 3249 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3250 job->pia = pia; 3251 3252 if (rte_flow_async_action_handle_query(port_id, queue_id, &attr, 3253 action_handle, &job->query, job, &error)) { 3254 free(job); 3255 return port_flow_complain(&error); 3256 } 3257 printf("Indirect action #%u update queued\n", id); 3258 return 0; 3259 } 3260 3261 /** Push all the queue operations in the queue to the NIC. */ 3262 int 3263 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 3264 { 3265 struct rte_port *port; 3266 struct rte_flow_error error; 3267 int ret = 0; 3268 3269 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3270 port_id == (portid_t)RTE_PORT_ALL) 3271 return -EINVAL; 3272 port = &ports[port_id]; 3273 3274 if (queue_id >= port->queue_nb) { 3275 printf("Queue #%u is invalid\n", queue_id); 3276 return -EINVAL; 3277 } 3278 3279 memset(&error, 0x55, sizeof(error)); 3280 ret = rte_flow_push(port_id, queue_id, &error); 3281 if (ret < 0) { 3282 printf("Failed to push operations in the queue\n"); 3283 return -EINVAL; 3284 } 3285 printf("Queue #%u operations pushed\n", queue_id); 3286 return ret; 3287 } 3288 3289 /** Pull queue operation results from the queue. */ 3290 static int 3291 port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id, 3292 const uint32_t *rule, int nb_flows) 3293 { 3294 struct rte_port *port = &ports[port_id]; 3295 struct rte_flow_op_result *res; 3296 struct rte_flow_error error; 3297 uint32_t n = nb_flows; 3298 int ret = 0; 3299 int i; 3300 3301 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3302 if (!res) { 3303 printf("Failed to allocate memory for pulled results\n"); 3304 return -ENOMEM; 3305 } 3306 3307 memset(&error, 0x66, sizeof(error)); 3308 while (nb_flows > 0) { 3309 int success = 0; 3310 3311 if (n > port->queue_sz) 3312 n = port->queue_sz; 3313 ret = port_queue_flow_destroy(port_id, queue_id, true, n, rule); 3314 if (ret < 0) { 3315 free(res); 3316 return ret; 3317 } 3318 ret = rte_flow_push(port_id, queue_id, &error); 3319 if (ret < 0) { 3320 printf("Failed to push operations in the queue: %s\n", 3321 strerror(-ret)); 3322 free(res); 3323 return ret; 3324 } 3325 while (success < nb_flows) { 3326 ret = rte_flow_pull(port_id, queue_id, res, 3327 port->queue_sz, &error); 3328 if (ret < 0) { 3329 printf("Failed to pull a operation results: %s\n", 3330 strerror(-ret)); 3331 free(res); 3332 return ret; 3333 } 3334 3335 for (i = 0; i < ret; i++) { 3336 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3337 success++; 3338 } 3339 } 3340 rule += n; 3341 nb_flows -= n; 3342 n = nb_flows; 3343 } 3344 3345 free(res); 3346 return ret; 3347 } 3348 3349 /** List simply and destroy all aged flows per queue. */ 3350 void 3351 port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy) 3352 { 3353 void **contexts; 3354 int nb_context, total = 0, idx; 3355 uint32_t *rules = NULL; 3356 struct rte_port *port; 3357 struct rte_flow_error error; 3358 enum age_action_context_type *type; 3359 union { 3360 struct port_flow *pf; 3361 struct port_indirect_action *pia; 3362 } ctx; 3363 3364 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3365 port_id == (portid_t)RTE_PORT_ALL) 3366 return; 3367 port = &ports[port_id]; 3368 if (queue_id >= port->queue_nb) { 3369 printf("Error: queue #%u is invalid\n", queue_id); 3370 return; 3371 } 3372 total = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error); 3373 if (total < 0) { 3374 port_flow_complain(&error); 3375 return; 3376 } 3377 printf("Port %u queue %u total aged flows: %d\n", 3378 port_id, queue_id, total); 3379 if (total == 0) 3380 return; 3381 contexts = calloc(total, sizeof(void *)); 3382 if (contexts == NULL) { 3383 printf("Cannot allocate contexts for aged flow\n"); 3384 return; 3385 } 3386 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3387 nb_context = rte_flow_get_q_aged_flows(port_id, queue_id, contexts, 3388 total, &error); 3389 if (nb_context > total) { 3390 printf("Port %u queue %u get aged flows count(%d) > total(%d)\n", 3391 port_id, queue_id, nb_context, total); 3392 free(contexts); 3393 return; 3394 } 3395 if (destroy) { 3396 rules = malloc(sizeof(uint32_t) * nb_context); 3397 if (rules == NULL) 3398 printf("Cannot allocate memory for destroy aged flow\n"); 3399 } 3400 total = 0; 3401 for (idx = 0; idx < nb_context; idx++) { 3402 if (!contexts[idx]) { 3403 printf("Error: get Null context in port %u queue %u\n", 3404 port_id, queue_id); 3405 continue; 3406 } 3407 type = (enum age_action_context_type *)contexts[idx]; 3408 switch (*type) { 3409 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3410 ctx.pf = container_of(type, struct port_flow, age_type); 3411 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 3412 "\t%c%c%c\t\n", 3413 "Flow", 3414 ctx.pf->id, 3415 ctx.pf->rule.attr->group, 3416 ctx.pf->rule.attr->priority, 3417 ctx.pf->rule.attr->ingress ? 'i' : '-', 3418 ctx.pf->rule.attr->egress ? 'e' : '-', 3419 ctx.pf->rule.attr->transfer ? 't' : '-'); 3420 if (rules != NULL) { 3421 rules[total] = ctx.pf->id; 3422 total++; 3423 } 3424 break; 3425 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3426 ctx.pia = container_of(type, 3427 struct port_indirect_action, 3428 age_type); 3429 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3430 ctx.pia->id); 3431 break; 3432 default: 3433 printf("Error: invalid context type %u\n", port_id); 3434 break; 3435 } 3436 } 3437 if (rules != NULL) { 3438 port_queue_aged_flow_destroy(port_id, queue_id, rules, total); 3439 free(rules); 3440 } 3441 printf("\n%d flows destroyed\n", total); 3442 free(contexts); 3443 } 3444 3445 /** Pull queue operation results from the queue. */ 3446 int 3447 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 3448 { 3449 struct rte_port *port; 3450 struct rte_flow_op_result *res; 3451 struct rte_flow_error error; 3452 int ret = 0; 3453 int success = 0; 3454 int i; 3455 struct queue_job *job; 3456 3457 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3458 port_id == (portid_t)RTE_PORT_ALL) 3459 return -EINVAL; 3460 port = &ports[port_id]; 3461 3462 if (queue_id >= port->queue_nb) { 3463 printf("Queue #%u is invalid\n", queue_id); 3464 return -EINVAL; 3465 } 3466 3467 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3468 if (!res) { 3469 printf("Failed to allocate memory for pulled results\n"); 3470 return -ENOMEM; 3471 } 3472 3473 memset(&error, 0x66, sizeof(error)); 3474 ret = rte_flow_pull(port_id, queue_id, res, 3475 port->queue_sz, &error); 3476 if (ret < 0) { 3477 printf("Failed to pull a operation results\n"); 3478 free(res); 3479 return -EINVAL; 3480 } 3481 3482 for (i = 0; i < ret; i++) { 3483 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3484 success++; 3485 job = (struct queue_job *)res[i].user_data; 3486 if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY || 3487 job->type == QUEUE_JOB_TYPE_FLOW_UPDATE) 3488 free(job->pf); 3489 else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY) 3490 free(job->pia); 3491 else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY) 3492 port_action_handle_query_dump(port_id, job->pia, 3493 &job->query); 3494 free(job); 3495 } 3496 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 3497 queue_id, ret, ret - success, success); 3498 free(res); 3499 return ret; 3500 } 3501 3502 /** Create flow rule. */ 3503 int 3504 port_flow_create(portid_t port_id, 3505 const struct rte_flow_attr *attr, 3506 const struct rte_flow_item *pattern, 3507 const struct rte_flow_action *actions, 3508 const struct tunnel_ops *tunnel_ops) 3509 { 3510 struct rte_flow *flow; 3511 struct rte_port *port; 3512 struct port_flow *pf; 3513 uint32_t id = 0; 3514 struct rte_flow_error error; 3515 struct port_flow_tunnel *pft = NULL; 3516 struct rte_flow_action_age *age = age_action_get(actions); 3517 3518 port = &ports[port_id]; 3519 if (port->flow_list) { 3520 if (port->flow_list->id == UINT32_MAX) { 3521 fprintf(stderr, 3522 "Highest rule ID is already assigned, delete it first"); 3523 return -ENOMEM; 3524 } 3525 id = port->flow_list->id + 1; 3526 } 3527 if (tunnel_ops->enabled) { 3528 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 3529 actions, tunnel_ops); 3530 if (!pft) 3531 return -ENOENT; 3532 if (pft->items) 3533 pattern = pft->items; 3534 if (pft->actions) 3535 actions = pft->actions; 3536 } 3537 pf = port_flow_new(attr, pattern, actions, &error); 3538 if (!pf) 3539 return port_flow_complain(&error); 3540 if (age) { 3541 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 3542 age->context = &pf->age_type; 3543 } 3544 /* Poisoning to make sure PMDs update it in case of error. */ 3545 memset(&error, 0x22, sizeof(error)); 3546 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 3547 if (!flow) { 3548 if (tunnel_ops->enabled) 3549 port_flow_tunnel_offload_cmd_release(port_id, 3550 tunnel_ops, pft); 3551 free(pf); 3552 return port_flow_complain(&error); 3553 } 3554 pf->next = port->flow_list; 3555 pf->id = id; 3556 pf->flow = flow; 3557 port->flow_list = pf; 3558 if (tunnel_ops->enabled) 3559 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 3560 printf("Flow rule #%u created\n", pf->id); 3561 return 0; 3562 } 3563 3564 /** Destroy a number of flow rules. */ 3565 int 3566 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 3567 { 3568 struct rte_port *port; 3569 struct port_flow **tmp; 3570 int ret = 0; 3571 3572 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3573 port_id == (portid_t)RTE_PORT_ALL) 3574 return -EINVAL; 3575 port = &ports[port_id]; 3576 tmp = &port->flow_list; 3577 while (*tmp) { 3578 uint32_t i; 3579 3580 for (i = 0; i != n; ++i) { 3581 struct rte_flow_error error; 3582 struct port_flow *pf = *tmp; 3583 3584 if (rule[i] != pf->id) 3585 continue; 3586 /* 3587 * Poisoning to make sure PMDs update it in case 3588 * of error. 3589 */ 3590 memset(&error, 0x33, sizeof(error)); 3591 if (rte_flow_destroy(port_id, pf->flow, &error)) { 3592 ret = port_flow_complain(&error); 3593 continue; 3594 } 3595 printf("Flow rule #%u destroyed\n", pf->id); 3596 *tmp = pf->next; 3597 free(pf); 3598 break; 3599 } 3600 if (i == n) 3601 tmp = &(*tmp)->next; 3602 } 3603 return ret; 3604 } 3605 3606 /** Remove all flow rules. */ 3607 int 3608 port_flow_flush(portid_t port_id) 3609 { 3610 struct rte_flow_error error; 3611 struct rte_port *port; 3612 int ret = 0; 3613 3614 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3615 port_id == (portid_t)RTE_PORT_ALL) 3616 return -EINVAL; 3617 3618 port = &ports[port_id]; 3619 3620 if (port->flow_list == NULL) 3621 return ret; 3622 3623 /* Poisoning to make sure PMDs update it in case of error. */ 3624 memset(&error, 0x44, sizeof(error)); 3625 if (rte_flow_flush(port_id, &error)) { 3626 port_flow_complain(&error); 3627 } 3628 3629 while (port->flow_list) { 3630 struct port_flow *pf = port->flow_list->next; 3631 3632 free(port->flow_list); 3633 port->flow_list = pf; 3634 } 3635 return ret; 3636 } 3637 3638 /** Dump flow rules. */ 3639 int 3640 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 3641 const char *file_name) 3642 { 3643 int ret = 0; 3644 FILE *file = stdout; 3645 struct rte_flow_error error; 3646 struct rte_port *port; 3647 struct port_flow *pflow; 3648 struct rte_flow *tmpFlow = NULL; 3649 bool found = false; 3650 3651 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3652 port_id == (portid_t)RTE_PORT_ALL) 3653 return -EINVAL; 3654 3655 if (!dump_all) { 3656 port = &ports[port_id]; 3657 pflow = port->flow_list; 3658 while (pflow) { 3659 if (rule_id != pflow->id) { 3660 pflow = pflow->next; 3661 } else { 3662 tmpFlow = pflow->flow; 3663 if (tmpFlow) 3664 found = true; 3665 break; 3666 } 3667 } 3668 if (found == false) { 3669 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 3670 return -EINVAL; 3671 } 3672 } 3673 3674 if (file_name && strlen(file_name)) { 3675 file = fopen(file_name, "w"); 3676 if (!file) { 3677 fprintf(stderr, "Failed to create file %s: %s\n", 3678 file_name, strerror(errno)); 3679 return -errno; 3680 } 3681 } 3682 3683 if (!dump_all) 3684 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3685 else 3686 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3687 if (ret) { 3688 port_flow_complain(&error); 3689 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3690 } else 3691 printf("Flow dump finished\n"); 3692 if (file_name && strlen(file_name)) 3693 fclose(file); 3694 return ret; 3695 } 3696 3697 /** Query a flow rule. */ 3698 int 3699 port_flow_query(portid_t port_id, uint32_t rule, 3700 const struct rte_flow_action *action) 3701 { 3702 struct rte_flow_error error; 3703 struct rte_port *port; 3704 struct port_flow *pf; 3705 const char *name; 3706 union { 3707 struct rte_flow_query_count count; 3708 struct rte_flow_action_rss rss_conf; 3709 struct rte_flow_query_age age; 3710 } query; 3711 int ret; 3712 3713 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3714 port_id == (portid_t)RTE_PORT_ALL) 3715 return -EINVAL; 3716 port = &ports[port_id]; 3717 for (pf = port->flow_list; pf; pf = pf->next) 3718 if (pf->id == rule) 3719 break; 3720 if (!pf) { 3721 fprintf(stderr, "Flow rule #%u not found\n", rule); 3722 return -ENOENT; 3723 } 3724 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3725 &name, sizeof(name), 3726 (void *)(uintptr_t)action->type, &error); 3727 if (ret < 0) 3728 return port_flow_complain(&error); 3729 switch (action->type) { 3730 case RTE_FLOW_ACTION_TYPE_COUNT: 3731 case RTE_FLOW_ACTION_TYPE_RSS: 3732 case RTE_FLOW_ACTION_TYPE_AGE: 3733 break; 3734 default: 3735 fprintf(stderr, "Cannot query action type %d (%s)\n", 3736 action->type, name); 3737 return -ENOTSUP; 3738 } 3739 /* Poisoning to make sure PMDs update it in case of error. */ 3740 memset(&error, 0x55, sizeof(error)); 3741 memset(&query, 0, sizeof(query)); 3742 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3743 return port_flow_complain(&error); 3744 switch (action->type) { 3745 case RTE_FLOW_ACTION_TYPE_COUNT: 3746 printf("%s:\n" 3747 " hits_set: %u\n" 3748 " bytes_set: %u\n" 3749 " hits: %" PRIu64 "\n" 3750 " bytes: %" PRIu64 "\n", 3751 name, 3752 query.count.hits_set, 3753 query.count.bytes_set, 3754 query.count.hits, 3755 query.count.bytes); 3756 break; 3757 case RTE_FLOW_ACTION_TYPE_RSS: 3758 rss_config_display(&query.rss_conf); 3759 break; 3760 case RTE_FLOW_ACTION_TYPE_AGE: 3761 printf("%s:\n" 3762 " aged: %u\n" 3763 " sec_since_last_hit_valid: %u\n" 3764 " sec_since_last_hit: %" PRIu32 "\n", 3765 name, 3766 query.age.aged, 3767 query.age.sec_since_last_hit_valid, 3768 query.age.sec_since_last_hit); 3769 break; 3770 default: 3771 fprintf(stderr, 3772 "Cannot display result for action type %d (%s)\n", 3773 action->type, name); 3774 break; 3775 } 3776 return 0; 3777 } 3778 3779 /** List simply and destroy all aged flows. */ 3780 void 3781 port_flow_aged(portid_t port_id, uint8_t destroy) 3782 { 3783 void **contexts; 3784 int nb_context, total = 0, idx; 3785 struct rte_flow_error error; 3786 enum age_action_context_type *type; 3787 union { 3788 struct port_flow *pf; 3789 struct port_indirect_action *pia; 3790 } ctx; 3791 3792 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3793 port_id == (portid_t)RTE_PORT_ALL) 3794 return; 3795 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3796 printf("Port %u total aged flows: %d\n", port_id, total); 3797 if (total < 0) { 3798 port_flow_complain(&error); 3799 return; 3800 } 3801 if (total == 0) 3802 return; 3803 contexts = malloc(sizeof(void *) * total); 3804 if (contexts == NULL) { 3805 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3806 return; 3807 } 3808 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3809 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3810 if (nb_context != total) { 3811 fprintf(stderr, 3812 "Port:%d get aged flows count(%d) != total(%d)\n", 3813 port_id, nb_context, total); 3814 free(contexts); 3815 return; 3816 } 3817 total = 0; 3818 for (idx = 0; idx < nb_context; idx++) { 3819 if (!contexts[idx]) { 3820 fprintf(stderr, "Error: get Null context in port %u\n", 3821 port_id); 3822 continue; 3823 } 3824 type = (enum age_action_context_type *)contexts[idx]; 3825 switch (*type) { 3826 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3827 ctx.pf = container_of(type, struct port_flow, age_type); 3828 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 3829 "\t%c%c%c\t\n", 3830 "Flow", 3831 ctx.pf->id, 3832 ctx.pf->rule.attr->group, 3833 ctx.pf->rule.attr->priority, 3834 ctx.pf->rule.attr->ingress ? 'i' : '-', 3835 ctx.pf->rule.attr->egress ? 'e' : '-', 3836 ctx.pf->rule.attr->transfer ? 't' : '-'); 3837 if (destroy && !port_flow_destroy(port_id, 1, 3838 &ctx.pf->id)) 3839 total++; 3840 break; 3841 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3842 ctx.pia = container_of(type, 3843 struct port_indirect_action, age_type); 3844 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3845 ctx.pia->id); 3846 break; 3847 default: 3848 fprintf(stderr, "Error: invalid context type %u\n", 3849 port_id); 3850 break; 3851 } 3852 } 3853 printf("\n%d flows destroyed\n", total); 3854 free(contexts); 3855 } 3856 3857 /** List flow rules. */ 3858 void 3859 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3860 { 3861 struct rte_port *port; 3862 struct port_flow *pf; 3863 struct port_flow *list = NULL; 3864 uint32_t i; 3865 3866 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3867 port_id == (portid_t)RTE_PORT_ALL) 3868 return; 3869 port = &ports[port_id]; 3870 if (!port->flow_list) 3871 return; 3872 /* Sort flows by group, priority and ID. */ 3873 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3874 struct port_flow **tmp; 3875 const struct rte_flow_attr *curr = pf->rule.attr; 3876 3877 if (n) { 3878 /* Filter out unwanted groups. */ 3879 for (i = 0; i != n; ++i) 3880 if (curr->group == group[i]) 3881 break; 3882 if (i == n) 3883 continue; 3884 } 3885 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3886 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3887 3888 if (curr->group > comp->group || 3889 (curr->group == comp->group && 3890 curr->priority > comp->priority) || 3891 (curr->group == comp->group && 3892 curr->priority == comp->priority && 3893 pf->id > (*tmp)->id)) 3894 continue; 3895 break; 3896 } 3897 pf->tmp = *tmp; 3898 *tmp = pf; 3899 } 3900 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3901 for (pf = list; pf != NULL; pf = pf->tmp) { 3902 const struct rte_flow_item *item = pf->rule.pattern; 3903 const struct rte_flow_action *action = pf->rule.actions; 3904 const char *name; 3905 3906 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3907 pf->id, 3908 pf->rule.attr->group, 3909 pf->rule.attr->priority, 3910 pf->rule.attr->ingress ? 'i' : '-', 3911 pf->rule.attr->egress ? 'e' : '-', 3912 pf->rule.attr->transfer ? 't' : '-'); 3913 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3914 if ((uint32_t)item->type > INT_MAX) 3915 name = "PMD_INTERNAL"; 3916 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3917 &name, sizeof(name), 3918 (void *)(uintptr_t)item->type, 3919 NULL) <= 0) 3920 name = "[UNKNOWN]"; 3921 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3922 printf("%s ", name); 3923 ++item; 3924 } 3925 printf("=>"); 3926 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3927 if ((uint32_t)action->type > INT_MAX) 3928 name = "PMD_INTERNAL"; 3929 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3930 &name, sizeof(name), 3931 (void *)(uintptr_t)action->type, 3932 NULL) <= 0) 3933 name = "[UNKNOWN]"; 3934 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3935 printf(" %s", name); 3936 ++action; 3937 } 3938 printf("\n"); 3939 } 3940 } 3941 3942 /** Restrict ingress traffic to the defined flow rules. */ 3943 int 3944 port_flow_isolate(portid_t port_id, int set) 3945 { 3946 struct rte_flow_error error; 3947 3948 /* Poisoning to make sure PMDs update it in case of error. */ 3949 memset(&error, 0x66, sizeof(error)); 3950 if (rte_flow_isolate(port_id, set, &error)) 3951 return port_flow_complain(&error); 3952 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3953 port_id, 3954 set ? "now restricted" : "not restricted anymore"); 3955 return 0; 3956 } 3957 3958 /* 3959 * RX/TX ring descriptors display functions. 3960 */ 3961 int 3962 rx_queue_id_is_invalid(queueid_t rxq_id) 3963 { 3964 if (rxq_id < nb_rxq) 3965 return 0; 3966 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3967 rxq_id, nb_rxq); 3968 return 1; 3969 } 3970 3971 int 3972 tx_queue_id_is_invalid(queueid_t txq_id) 3973 { 3974 if (txq_id < nb_txq) 3975 return 0; 3976 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 3977 txq_id, nb_txq); 3978 return 1; 3979 } 3980 3981 static int 3982 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 3983 { 3984 struct rte_port *port = &ports[port_id]; 3985 struct rte_eth_rxq_info rx_qinfo; 3986 int ret; 3987 3988 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 3989 if (ret == 0) { 3990 *ring_size = rx_qinfo.nb_desc; 3991 return ret; 3992 } 3993 3994 if (ret != -ENOTSUP) 3995 return ret; 3996 /* 3997 * If the rte_eth_rx_queue_info_get is not support for this PMD, 3998 * ring_size stored in testpmd will be used for validity verification. 3999 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 4000 * being 0, it will use a default value provided by PMDs to setup this 4001 * rxq. If the default value is 0, it will use the 4002 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 4003 */ 4004 if (port->nb_rx_desc[rxq_id]) 4005 *ring_size = port->nb_rx_desc[rxq_id]; 4006 else if (port->dev_info.default_rxportconf.ring_size) 4007 *ring_size = port->dev_info.default_rxportconf.ring_size; 4008 else 4009 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 4010 return 0; 4011 } 4012 4013 static int 4014 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 4015 { 4016 struct rte_port *port = &ports[port_id]; 4017 struct rte_eth_txq_info tx_qinfo; 4018 int ret; 4019 4020 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 4021 if (ret == 0) { 4022 *ring_size = tx_qinfo.nb_desc; 4023 return ret; 4024 } 4025 4026 if (ret != -ENOTSUP) 4027 return ret; 4028 /* 4029 * If the rte_eth_tx_queue_info_get is not support for this PMD, 4030 * ring_size stored in testpmd will be used for validity verification. 4031 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 4032 * being 0, it will use a default value provided by PMDs to setup this 4033 * txq. If the default value is 0, it will use the 4034 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 4035 */ 4036 if (port->nb_tx_desc[txq_id]) 4037 *ring_size = port->nb_tx_desc[txq_id]; 4038 else if (port->dev_info.default_txportconf.ring_size) 4039 *ring_size = port->dev_info.default_txportconf.ring_size; 4040 else 4041 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 4042 return 0; 4043 } 4044 4045 static int 4046 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 4047 { 4048 uint16_t ring_size; 4049 int ret; 4050 4051 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 4052 if (ret) 4053 return 1; 4054 4055 if (rxdesc_id < ring_size) 4056 return 0; 4057 4058 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 4059 rxdesc_id, ring_size); 4060 return 1; 4061 } 4062 4063 static int 4064 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 4065 { 4066 uint16_t ring_size; 4067 int ret; 4068 4069 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 4070 if (ret) 4071 return 1; 4072 4073 if (txdesc_id < ring_size) 4074 return 0; 4075 4076 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 4077 txdesc_id, ring_size); 4078 return 1; 4079 } 4080 4081 static const struct rte_memzone * 4082 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 4083 { 4084 char mz_name[RTE_MEMZONE_NAMESIZE]; 4085 const struct rte_memzone *mz; 4086 4087 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 4088 port_id, q_id, ring_name); 4089 mz = rte_memzone_lookup(mz_name); 4090 if (mz == NULL) 4091 fprintf(stderr, 4092 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 4093 ring_name, port_id, q_id, mz_name); 4094 return mz; 4095 } 4096 4097 union igb_ring_dword { 4098 uint64_t dword; 4099 struct { 4100 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 4101 uint32_t lo; 4102 uint32_t hi; 4103 #else 4104 uint32_t hi; 4105 uint32_t lo; 4106 #endif 4107 } words; 4108 }; 4109 4110 struct igb_ring_desc_32_bytes { 4111 union igb_ring_dword lo_dword; 4112 union igb_ring_dword hi_dword; 4113 union igb_ring_dword resv1; 4114 union igb_ring_dword resv2; 4115 }; 4116 4117 struct igb_ring_desc_16_bytes { 4118 union igb_ring_dword lo_dword; 4119 union igb_ring_dword hi_dword; 4120 }; 4121 4122 static void 4123 ring_rxd_display_dword(union igb_ring_dword dword) 4124 { 4125 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 4126 (unsigned)dword.words.hi); 4127 } 4128 4129 static void 4130 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 4131 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4132 portid_t port_id, 4133 #else 4134 __rte_unused portid_t port_id, 4135 #endif 4136 uint16_t desc_id) 4137 { 4138 struct igb_ring_desc_16_bytes *ring = 4139 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4140 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4141 int ret; 4142 struct rte_eth_dev_info dev_info; 4143 4144 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4145 if (ret != 0) 4146 return; 4147 4148 if (strstr(dev_info.driver_name, "i40e") != NULL) { 4149 /* 32 bytes RX descriptor, i40e only */ 4150 struct igb_ring_desc_32_bytes *ring = 4151 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 4152 ring[desc_id].lo_dword.dword = 4153 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4154 ring_rxd_display_dword(ring[desc_id].lo_dword); 4155 ring[desc_id].hi_dword.dword = 4156 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4157 ring_rxd_display_dword(ring[desc_id].hi_dword); 4158 ring[desc_id].resv1.dword = 4159 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 4160 ring_rxd_display_dword(ring[desc_id].resv1); 4161 ring[desc_id].resv2.dword = 4162 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 4163 ring_rxd_display_dword(ring[desc_id].resv2); 4164 4165 return; 4166 } 4167 #endif 4168 /* 16 bytes RX descriptor */ 4169 ring[desc_id].lo_dword.dword = 4170 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4171 ring_rxd_display_dword(ring[desc_id].lo_dword); 4172 ring[desc_id].hi_dword.dword = 4173 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4174 ring_rxd_display_dword(ring[desc_id].hi_dword); 4175 } 4176 4177 static void 4178 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 4179 { 4180 struct igb_ring_desc_16_bytes *ring; 4181 struct igb_ring_desc_16_bytes txd; 4182 4183 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4184 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4185 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4186 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 4187 (unsigned)txd.lo_dword.words.lo, 4188 (unsigned)txd.lo_dword.words.hi, 4189 (unsigned)txd.hi_dword.words.lo, 4190 (unsigned)txd.hi_dword.words.hi); 4191 } 4192 4193 void 4194 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 4195 { 4196 const struct rte_memzone *rx_mz; 4197 4198 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 4199 return; 4200 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 4201 if (rx_mz == NULL) 4202 return; 4203 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 4204 } 4205 4206 void 4207 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 4208 { 4209 const struct rte_memzone *tx_mz; 4210 4211 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 4212 return; 4213 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 4214 if (tx_mz == NULL) 4215 return; 4216 ring_tx_descriptor_display(tx_mz, txd_id); 4217 } 4218 4219 void 4220 fwd_lcores_config_display(void) 4221 { 4222 lcoreid_t lc_id; 4223 4224 printf("List of forwarding lcores:"); 4225 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 4226 printf(" %2u", fwd_lcores_cpuids[lc_id]); 4227 printf("\n"); 4228 } 4229 void 4230 rxtx_config_display(void) 4231 { 4232 portid_t pid; 4233 queueid_t qid; 4234 4235 printf(" %s%s%s packet forwarding%s packets/burst=%d\n", 4236 cur_fwd_eng->fwd_mode_name, 4237 cur_fwd_eng->status ? "-" : "", 4238 cur_fwd_eng->status ? cur_fwd_eng->status : "", 4239 retry_enabled == 0 ? "" : " with retry", 4240 nb_pkt_per_burst); 4241 4242 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 4243 printf(" packet len=%u - nb packet segments=%d\n", 4244 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 4245 4246 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 4247 nb_fwd_lcores, nb_fwd_ports); 4248 4249 RTE_ETH_FOREACH_DEV(pid) { 4250 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 4251 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 4252 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 4253 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 4254 struct rte_eth_rxq_info rx_qinfo; 4255 struct rte_eth_txq_info tx_qinfo; 4256 uint16_t rx_free_thresh_tmp; 4257 uint16_t tx_free_thresh_tmp; 4258 uint16_t tx_rs_thresh_tmp; 4259 uint16_t nb_rx_desc_tmp; 4260 uint16_t nb_tx_desc_tmp; 4261 uint64_t offloads_tmp; 4262 uint8_t pthresh_tmp; 4263 uint8_t hthresh_tmp; 4264 uint8_t wthresh_tmp; 4265 int32_t rc; 4266 4267 /* per port config */ 4268 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 4269 (unsigned int)pid, nb_rxq, nb_txq); 4270 4271 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 4272 ports[pid].dev_conf.rxmode.offloads, 4273 ports[pid].dev_conf.txmode.offloads); 4274 4275 /* per rx queue config only for first queue to be less verbose */ 4276 for (qid = 0; qid < 1; qid++) { 4277 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 4278 if (rc) { 4279 nb_rx_desc_tmp = nb_rx_desc[qid]; 4280 rx_free_thresh_tmp = 4281 rx_conf[qid].rx_free_thresh; 4282 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 4283 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 4284 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 4285 offloads_tmp = rx_conf[qid].offloads; 4286 } else { 4287 nb_rx_desc_tmp = rx_qinfo.nb_desc; 4288 rx_free_thresh_tmp = 4289 rx_qinfo.conf.rx_free_thresh; 4290 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 4291 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 4292 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 4293 offloads_tmp = rx_qinfo.conf.offloads; 4294 } 4295 4296 printf(" RX queue: %d\n", qid); 4297 printf(" RX desc=%d - RX free threshold=%d\n", 4298 nb_rx_desc_tmp, rx_free_thresh_tmp); 4299 printf(" RX threshold registers: pthresh=%d hthresh=%d " 4300 " wthresh=%d\n", 4301 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4302 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 4303 if (rx_conf->share_group > 0) 4304 printf(" share_group=%u share_qid=%u", 4305 rx_conf->share_group, 4306 rx_conf->share_qid); 4307 printf("\n"); 4308 } 4309 4310 /* per tx queue config only for first queue to be less verbose */ 4311 for (qid = 0; qid < 1; qid++) { 4312 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 4313 if (rc) { 4314 nb_tx_desc_tmp = nb_tx_desc[qid]; 4315 tx_free_thresh_tmp = 4316 tx_conf[qid].tx_free_thresh; 4317 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 4318 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 4319 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 4320 offloads_tmp = tx_conf[qid].offloads; 4321 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 4322 } else { 4323 nb_tx_desc_tmp = tx_qinfo.nb_desc; 4324 tx_free_thresh_tmp = 4325 tx_qinfo.conf.tx_free_thresh; 4326 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 4327 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 4328 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 4329 offloads_tmp = tx_qinfo.conf.offloads; 4330 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 4331 } 4332 4333 printf(" TX queue: %d\n", qid); 4334 printf(" TX desc=%d - TX free threshold=%d\n", 4335 nb_tx_desc_tmp, tx_free_thresh_tmp); 4336 printf(" TX threshold registers: pthresh=%d hthresh=%d " 4337 " wthresh=%d\n", 4338 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4339 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 4340 offloads_tmp, tx_rs_thresh_tmp); 4341 } 4342 } 4343 } 4344 4345 void 4346 port_rss_reta_info(portid_t port_id, 4347 struct rte_eth_rss_reta_entry64 *reta_conf, 4348 uint16_t nb_entries) 4349 { 4350 uint16_t i, idx, shift; 4351 int ret; 4352 4353 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4354 return; 4355 4356 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 4357 if (ret != 0) { 4358 fprintf(stderr, 4359 "Failed to get RSS RETA info, return code = %d\n", 4360 ret); 4361 return; 4362 } 4363 4364 for (i = 0; i < nb_entries; i++) { 4365 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4366 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4367 if (!(reta_conf[idx].mask & (1ULL << shift))) 4368 continue; 4369 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 4370 i, reta_conf[idx].reta[shift]); 4371 } 4372 } 4373 4374 /* 4375 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 4376 * key of the port. 4377 */ 4378 void 4379 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 4380 { 4381 struct rte_eth_rss_conf rss_conf = {0}; 4382 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 4383 uint64_t rss_hf; 4384 uint8_t i; 4385 int diag; 4386 struct rte_eth_dev_info dev_info; 4387 uint8_t hash_key_size; 4388 int ret; 4389 4390 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4391 return; 4392 4393 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4394 if (ret != 0) 4395 return; 4396 4397 if (dev_info.hash_key_size > 0 && 4398 dev_info.hash_key_size <= sizeof(rss_key)) 4399 hash_key_size = dev_info.hash_key_size; 4400 else { 4401 fprintf(stderr, 4402 "dev_info did not provide a valid hash key size\n"); 4403 return; 4404 } 4405 4406 /* Get RSS hash key if asked to display it */ 4407 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 4408 rss_conf.rss_key_len = hash_key_size; 4409 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4410 if (diag != 0) { 4411 switch (diag) { 4412 case -ENODEV: 4413 fprintf(stderr, "port index %d invalid\n", port_id); 4414 break; 4415 case -ENOTSUP: 4416 fprintf(stderr, "operation not supported by device\n"); 4417 break; 4418 default: 4419 fprintf(stderr, "operation failed - diag=%d\n", diag); 4420 break; 4421 } 4422 return; 4423 } 4424 rss_hf = rss_conf.rss_hf; 4425 if (rss_hf == 0) { 4426 printf("RSS disabled\n"); 4427 return; 4428 } 4429 printf("RSS functions:\n"); 4430 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 4431 if (!show_rss_key) 4432 return; 4433 printf("RSS key:\n"); 4434 for (i = 0; i < hash_key_size; i++) 4435 printf("%02X", rss_key[i]); 4436 printf("\n"); 4437 } 4438 4439 void 4440 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 4441 uint8_t hash_key_len) 4442 { 4443 struct rte_eth_rss_conf rss_conf; 4444 int diag; 4445 4446 rss_conf.rss_key = NULL; 4447 rss_conf.rss_key_len = 0; 4448 rss_conf.rss_hf = str_to_rsstypes(rss_type); 4449 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4450 if (diag == 0) { 4451 rss_conf.rss_key = hash_key; 4452 rss_conf.rss_key_len = hash_key_len; 4453 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 4454 } 4455 if (diag == 0) 4456 return; 4457 4458 switch (diag) { 4459 case -ENODEV: 4460 fprintf(stderr, "port index %d invalid\n", port_id); 4461 break; 4462 case -ENOTSUP: 4463 fprintf(stderr, "operation not supported by device\n"); 4464 break; 4465 default: 4466 fprintf(stderr, "operation failed - diag=%d\n", diag); 4467 break; 4468 } 4469 } 4470 4471 /* 4472 * Check whether a shared rxq scheduled on other lcores. 4473 */ 4474 static bool 4475 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 4476 portid_t src_port, queueid_t src_rxq, 4477 uint32_t share_group, queueid_t share_rxq) 4478 { 4479 streamid_t sm_id; 4480 streamid_t nb_fs_per_lcore; 4481 lcoreid_t nb_fc; 4482 lcoreid_t lc_id; 4483 struct fwd_stream *fs; 4484 struct rte_port *port; 4485 struct rte_eth_dev_info *dev_info; 4486 struct rte_eth_rxconf *rxq_conf; 4487 4488 nb_fc = cur_fwd_config.nb_fwd_lcores; 4489 /* Check remaining cores. */ 4490 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 4491 sm_id = fwd_lcores[lc_id]->stream_idx; 4492 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4493 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4494 sm_id++) { 4495 fs = fwd_streams[sm_id]; 4496 port = &ports[fs->rx_port]; 4497 dev_info = &port->dev_info; 4498 rxq_conf = &port->rxq[fs->rx_queue].conf; 4499 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4500 == 0 || rxq_conf->share_group == 0) 4501 /* Not shared rxq. */ 4502 continue; 4503 if (domain_id != port->dev_info.switch_info.domain_id) 4504 continue; 4505 if (rxq_conf->share_group != share_group) 4506 continue; 4507 if (rxq_conf->share_qid != share_rxq) 4508 continue; 4509 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 4510 share_group, share_rxq); 4511 printf(" lcore %hhu Port %hu queue %hu\n", 4512 src_lc, src_port, src_rxq); 4513 printf(" lcore %hhu Port %hu queue %hu\n", 4514 lc_id, fs->rx_port, fs->rx_queue); 4515 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 4516 nb_rxq); 4517 return true; 4518 } 4519 } 4520 return false; 4521 } 4522 4523 /* 4524 * Check shared rxq configuration. 4525 * 4526 * Shared group must not being scheduled on different core. 4527 */ 4528 bool 4529 pkt_fwd_shared_rxq_check(void) 4530 { 4531 streamid_t sm_id; 4532 streamid_t nb_fs_per_lcore; 4533 lcoreid_t nb_fc; 4534 lcoreid_t lc_id; 4535 struct fwd_stream *fs; 4536 uint16_t domain_id; 4537 struct rte_port *port; 4538 struct rte_eth_dev_info *dev_info; 4539 struct rte_eth_rxconf *rxq_conf; 4540 4541 if (rxq_share == 0) 4542 return true; 4543 nb_fc = cur_fwd_config.nb_fwd_lcores; 4544 /* 4545 * Check streams on each core, make sure the same switch domain + 4546 * group + queue doesn't get scheduled on other cores. 4547 */ 4548 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 4549 sm_id = fwd_lcores[lc_id]->stream_idx; 4550 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4551 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4552 sm_id++) { 4553 fs = fwd_streams[sm_id]; 4554 /* Update lcore info stream being scheduled. */ 4555 fs->lcore = fwd_lcores[lc_id]; 4556 port = &ports[fs->rx_port]; 4557 dev_info = &port->dev_info; 4558 rxq_conf = &port->rxq[fs->rx_queue].conf; 4559 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4560 == 0 || rxq_conf->share_group == 0) 4561 /* Not shared rxq. */ 4562 continue; 4563 /* Check shared rxq not scheduled on remaining cores. */ 4564 domain_id = port->dev_info.switch_info.domain_id; 4565 if (fwd_stream_on_other_lcores(domain_id, lc_id, 4566 fs->rx_port, 4567 fs->rx_queue, 4568 rxq_conf->share_group, 4569 rxq_conf->share_qid)) 4570 return false; 4571 } 4572 } 4573 return true; 4574 } 4575 4576 /* 4577 * Setup forwarding configuration for each logical core. 4578 */ 4579 static void 4580 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 4581 { 4582 streamid_t nb_fs_per_lcore; 4583 streamid_t nb_fs; 4584 streamid_t sm_id; 4585 lcoreid_t nb_extra; 4586 lcoreid_t nb_fc; 4587 lcoreid_t nb_lc; 4588 lcoreid_t lc_id; 4589 4590 nb_fs = cfg->nb_fwd_streams; 4591 nb_fc = cfg->nb_fwd_lcores; 4592 if (nb_fs <= nb_fc) { 4593 nb_fs_per_lcore = 1; 4594 nb_extra = 0; 4595 } else { 4596 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 4597 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 4598 } 4599 4600 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 4601 sm_id = 0; 4602 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 4603 fwd_lcores[lc_id]->stream_idx = sm_id; 4604 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 4605 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4606 } 4607 4608 /* 4609 * Assign extra remaining streams, if any. 4610 */ 4611 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 4612 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 4613 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 4614 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 4615 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4616 } 4617 } 4618 4619 static portid_t 4620 fwd_topology_tx_port_get(portid_t rxp) 4621 { 4622 static int warning_once = 1; 4623 4624 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 4625 4626 switch (port_topology) { 4627 default: 4628 case PORT_TOPOLOGY_PAIRED: 4629 if ((rxp & 0x1) == 0) { 4630 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 4631 return rxp + 1; 4632 if (warning_once) { 4633 fprintf(stderr, 4634 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 4635 warning_once = 0; 4636 } 4637 return rxp; 4638 } 4639 return rxp - 1; 4640 case PORT_TOPOLOGY_CHAINED: 4641 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 4642 case PORT_TOPOLOGY_LOOP: 4643 return rxp; 4644 } 4645 } 4646 4647 static void 4648 simple_fwd_config_setup(void) 4649 { 4650 portid_t i; 4651 4652 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 4653 cur_fwd_config.nb_fwd_streams = 4654 (streamid_t) cur_fwd_config.nb_fwd_ports; 4655 4656 /* reinitialize forwarding streams */ 4657 init_fwd_streams(); 4658 4659 /* 4660 * In the simple forwarding test, the number of forwarding cores 4661 * must be lower or equal to the number of forwarding ports. 4662 */ 4663 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4664 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 4665 cur_fwd_config.nb_fwd_lcores = 4666 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 4667 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4668 4669 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 4670 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 4671 fwd_streams[i]->rx_queue = 0; 4672 fwd_streams[i]->tx_port = 4673 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 4674 fwd_streams[i]->tx_queue = 0; 4675 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 4676 fwd_streams[i]->retry_enabled = retry_enabled; 4677 } 4678 } 4679 4680 /** 4681 * For the RSS forwarding test all streams distributed over lcores. Each stream 4682 * being composed of a RX queue to poll on a RX port for input messages, 4683 * associated with a TX queue of a TX port where to send forwarded packets. 4684 */ 4685 static void 4686 rss_fwd_config_setup(void) 4687 { 4688 portid_t rxp; 4689 portid_t txp; 4690 queueid_t rxq; 4691 queueid_t nb_q; 4692 streamid_t sm_id; 4693 int start; 4694 int end; 4695 4696 nb_q = nb_rxq; 4697 if (nb_q > nb_txq) 4698 nb_q = nb_txq; 4699 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4700 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4701 cur_fwd_config.nb_fwd_streams = 4702 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4703 4704 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4705 cur_fwd_config.nb_fwd_lcores = 4706 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4707 4708 /* reinitialize forwarding streams */ 4709 init_fwd_streams(); 4710 4711 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4712 4713 if (proc_id > 0 && nb_q % num_procs != 0) 4714 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4715 4716 /** 4717 * In multi-process, All queues are allocated to different 4718 * processes based on num_procs and proc_id. For example: 4719 * if supports 4 queues(nb_q), 2 processes(num_procs), 4720 * the 0~1 queue for primary process. 4721 * the 2~3 queue for secondary process. 4722 */ 4723 start = proc_id * nb_q / num_procs; 4724 end = start + nb_q / num_procs; 4725 rxp = 0; 4726 rxq = start; 4727 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4728 struct fwd_stream *fs; 4729 4730 fs = fwd_streams[sm_id]; 4731 txp = fwd_topology_tx_port_get(rxp); 4732 fs->rx_port = fwd_ports_ids[rxp]; 4733 fs->rx_queue = rxq; 4734 fs->tx_port = fwd_ports_ids[txp]; 4735 fs->tx_queue = rxq; 4736 fs->peer_addr = fs->tx_port; 4737 fs->retry_enabled = retry_enabled; 4738 rxp++; 4739 if (rxp < nb_fwd_ports) 4740 continue; 4741 rxp = 0; 4742 rxq++; 4743 if (rxq >= end) 4744 rxq = start; 4745 } 4746 } 4747 4748 static uint16_t 4749 get_fwd_port_total_tc_num(void) 4750 { 4751 struct rte_eth_dcb_info dcb_info; 4752 uint16_t total_tc_num = 0; 4753 unsigned int i; 4754 4755 for (i = 0; i < nb_fwd_ports; i++) { 4756 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4757 total_tc_num += dcb_info.nb_tcs; 4758 } 4759 4760 return total_tc_num; 4761 } 4762 4763 /** 4764 * For the DCB forwarding test, each core is assigned on each traffic class. 4765 * 4766 * Each core is assigned a multi-stream, each stream being composed of 4767 * a RX queue to poll on a RX port for input messages, associated with 4768 * a TX queue of a TX port where to send forwarded packets. All RX and 4769 * TX queues are mapping to the same traffic class. 4770 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4771 * the same core 4772 */ 4773 static void 4774 dcb_fwd_config_setup(void) 4775 { 4776 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4777 portid_t txp, rxp = 0; 4778 queueid_t txq, rxq = 0; 4779 lcoreid_t lc_id; 4780 uint16_t nb_rx_queue, nb_tx_queue; 4781 uint16_t i, j, k, sm_id = 0; 4782 uint16_t total_tc_num; 4783 struct rte_port *port; 4784 uint8_t tc = 0; 4785 portid_t pid; 4786 int ret; 4787 4788 /* 4789 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4790 * or RTE_PORT_STOPPED. 4791 * 4792 * Re-configure ports to get updated mapping between tc and queue in 4793 * case the queue number of the port is changed. Skip for started ports 4794 * since modifying queue number and calling dev_configure need to stop 4795 * ports first. 4796 */ 4797 for (pid = 0; pid < nb_fwd_ports; pid++) { 4798 if (port_is_started(pid) == 1) 4799 continue; 4800 4801 port = &ports[pid]; 4802 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4803 &port->dev_conf); 4804 if (ret < 0) { 4805 fprintf(stderr, 4806 "Failed to re-configure port %d, ret = %d.\n", 4807 pid, ret); 4808 return; 4809 } 4810 } 4811 4812 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4813 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4814 cur_fwd_config.nb_fwd_streams = 4815 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4816 total_tc_num = get_fwd_port_total_tc_num(); 4817 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4818 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4819 4820 /* reinitialize forwarding streams */ 4821 init_fwd_streams(); 4822 sm_id = 0; 4823 txp = 1; 4824 /* get the dcb info on the first RX and TX ports */ 4825 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4826 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4827 4828 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4829 fwd_lcores[lc_id]->stream_nb = 0; 4830 fwd_lcores[lc_id]->stream_idx = sm_id; 4831 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4832 /* if the nb_queue is zero, means this tc is 4833 * not enabled on the POOL 4834 */ 4835 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4836 break; 4837 k = fwd_lcores[lc_id]->stream_nb + 4838 fwd_lcores[lc_id]->stream_idx; 4839 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4840 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4841 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4842 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4843 for (j = 0; j < nb_rx_queue; j++) { 4844 struct fwd_stream *fs; 4845 4846 fs = fwd_streams[k + j]; 4847 fs->rx_port = fwd_ports_ids[rxp]; 4848 fs->rx_queue = rxq + j; 4849 fs->tx_port = fwd_ports_ids[txp]; 4850 fs->tx_queue = txq + j % nb_tx_queue; 4851 fs->peer_addr = fs->tx_port; 4852 fs->retry_enabled = retry_enabled; 4853 } 4854 fwd_lcores[lc_id]->stream_nb += 4855 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4856 } 4857 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4858 4859 tc++; 4860 if (tc < rxp_dcb_info.nb_tcs) 4861 continue; 4862 /* Restart from TC 0 on next RX port */ 4863 tc = 0; 4864 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4865 rxp = (portid_t) 4866 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4867 else 4868 rxp++; 4869 if (rxp >= nb_fwd_ports) 4870 return; 4871 /* get the dcb information on next RX and TX ports */ 4872 if ((rxp & 0x1) == 0) 4873 txp = (portid_t) (rxp + 1); 4874 else 4875 txp = (portid_t) (rxp - 1); 4876 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4877 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4878 } 4879 } 4880 4881 static void 4882 icmp_echo_config_setup(void) 4883 { 4884 portid_t rxp; 4885 queueid_t rxq; 4886 lcoreid_t lc_id; 4887 uint16_t sm_id; 4888 4889 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4890 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4891 (nb_txq * nb_fwd_ports); 4892 else 4893 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4894 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4895 cur_fwd_config.nb_fwd_streams = 4896 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4897 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4898 cur_fwd_config.nb_fwd_lcores = 4899 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4900 if (verbose_level > 0) { 4901 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4902 __FUNCTION__, 4903 cur_fwd_config.nb_fwd_lcores, 4904 cur_fwd_config.nb_fwd_ports, 4905 cur_fwd_config.nb_fwd_streams); 4906 } 4907 4908 /* reinitialize forwarding streams */ 4909 init_fwd_streams(); 4910 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4911 rxp = 0; rxq = 0; 4912 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4913 if (verbose_level > 0) 4914 printf(" core=%d: \n", lc_id); 4915 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4916 struct fwd_stream *fs; 4917 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4918 fs->rx_port = fwd_ports_ids[rxp]; 4919 fs->rx_queue = rxq; 4920 fs->tx_port = fs->rx_port; 4921 fs->tx_queue = rxq; 4922 fs->peer_addr = fs->tx_port; 4923 fs->retry_enabled = retry_enabled; 4924 if (verbose_level > 0) 4925 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4926 sm_id, fs->rx_port, fs->rx_queue, 4927 fs->tx_queue); 4928 rxq = (queueid_t) (rxq + 1); 4929 if (rxq == nb_rxq) { 4930 rxq = 0; 4931 rxp = (portid_t) (rxp + 1); 4932 } 4933 } 4934 } 4935 } 4936 4937 void 4938 fwd_config_setup(void) 4939 { 4940 struct rte_port *port; 4941 portid_t pt_id; 4942 unsigned int i; 4943 4944 cur_fwd_config.fwd_eng = cur_fwd_eng; 4945 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4946 icmp_echo_config_setup(); 4947 return; 4948 } 4949 4950 if ((nb_rxq > 1) && (nb_txq > 1)){ 4951 if (dcb_config) { 4952 for (i = 0; i < nb_fwd_ports; i++) { 4953 pt_id = fwd_ports_ids[i]; 4954 port = &ports[pt_id]; 4955 if (!port->dcb_flag) { 4956 fprintf(stderr, 4957 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4958 return; 4959 } 4960 } 4961 if (nb_fwd_lcores == 1) { 4962 fprintf(stderr, 4963 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4964 return; 4965 } 4966 4967 dcb_fwd_config_setup(); 4968 } else 4969 rss_fwd_config_setup(); 4970 } 4971 else 4972 simple_fwd_config_setup(); 4973 } 4974 4975 static const char * 4976 mp_alloc_to_str(uint8_t mode) 4977 { 4978 switch (mode) { 4979 case MP_ALLOC_NATIVE: 4980 return "native"; 4981 case MP_ALLOC_ANON: 4982 return "anon"; 4983 case MP_ALLOC_XMEM: 4984 return "xmem"; 4985 case MP_ALLOC_XMEM_HUGE: 4986 return "xmemhuge"; 4987 case MP_ALLOC_XBUF: 4988 return "xbuf"; 4989 default: 4990 return "invalid"; 4991 } 4992 } 4993 4994 void 4995 pkt_fwd_config_display(struct fwd_config *cfg) 4996 { 4997 struct fwd_stream *fs; 4998 lcoreid_t lc_id; 4999 streamid_t sm_id; 5000 5001 printf("%s%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 5002 "NUMA support %s, MP allocation mode: %s\n", 5003 cfg->fwd_eng->fwd_mode_name, 5004 cfg->fwd_eng->status ? "-" : "", 5005 cfg->fwd_eng->status ? cfg->fwd_eng->status : "", 5006 retry_enabled == 0 ? "" : " with retry", 5007 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 5008 numa_support == 1 ? "enabled" : "disabled", 5009 mp_alloc_to_str(mp_alloc_type)); 5010 5011 if (retry_enabled) 5012 printf("TX retry num: %u, delay between TX retries: %uus\n", 5013 burst_tx_retry_num, burst_tx_delay_time); 5014 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 5015 printf("Logical Core %u (socket %u) forwards packets on " 5016 "%d streams:", 5017 fwd_lcores_cpuids[lc_id], 5018 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 5019 fwd_lcores[lc_id]->stream_nb); 5020 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 5021 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 5022 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 5023 "P=%d/Q=%d (socket %u) ", 5024 fs->rx_port, fs->rx_queue, 5025 ports[fs->rx_port].socket_id, 5026 fs->tx_port, fs->tx_queue, 5027 ports[fs->tx_port].socket_id); 5028 print_ethaddr("peer=", 5029 &peer_eth_addrs[fs->peer_addr]); 5030 } 5031 printf("\n"); 5032 } 5033 printf("\n"); 5034 } 5035 5036 void 5037 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 5038 { 5039 struct rte_ether_addr new_peer_addr; 5040 if (!rte_eth_dev_is_valid_port(port_id)) { 5041 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 5042 return; 5043 } 5044 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 5045 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 5046 peer_addr); 5047 return; 5048 } 5049 peer_eth_addrs[port_id] = new_peer_addr; 5050 } 5051 5052 int 5053 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 5054 { 5055 unsigned int i; 5056 unsigned int lcore_cpuid; 5057 int record_now; 5058 5059 record_now = 0; 5060 again: 5061 for (i = 0; i < nb_lc; i++) { 5062 lcore_cpuid = lcorelist[i]; 5063 if (! rte_lcore_is_enabled(lcore_cpuid)) { 5064 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 5065 return -1; 5066 } 5067 if (lcore_cpuid == rte_get_main_lcore()) { 5068 fprintf(stderr, 5069 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 5070 lcore_cpuid); 5071 return -1; 5072 } 5073 if (record_now) 5074 fwd_lcores_cpuids[i] = lcore_cpuid; 5075 } 5076 if (record_now == 0) { 5077 record_now = 1; 5078 goto again; 5079 } 5080 nb_cfg_lcores = (lcoreid_t) nb_lc; 5081 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 5082 printf("previous number of forwarding cores %u - changed to " 5083 "number of configured cores %u\n", 5084 (unsigned int) nb_fwd_lcores, nb_lc); 5085 nb_fwd_lcores = (lcoreid_t) nb_lc; 5086 } 5087 5088 return 0; 5089 } 5090 5091 int 5092 set_fwd_lcores_mask(uint64_t lcoremask) 5093 { 5094 unsigned int lcorelist[64]; 5095 unsigned int nb_lc; 5096 unsigned int i; 5097 5098 if (lcoremask == 0) { 5099 fprintf(stderr, "Invalid NULL mask of cores\n"); 5100 return -1; 5101 } 5102 nb_lc = 0; 5103 for (i = 0; i < 64; i++) { 5104 if (! ((uint64_t)(1ULL << i) & lcoremask)) 5105 continue; 5106 lcorelist[nb_lc++] = i; 5107 } 5108 return set_fwd_lcores_list(lcorelist, nb_lc); 5109 } 5110 5111 void 5112 set_fwd_lcores_number(uint16_t nb_lc) 5113 { 5114 if (test_done == 0) { 5115 fprintf(stderr, "Please stop forwarding first\n"); 5116 return; 5117 } 5118 if (nb_lc > nb_cfg_lcores) { 5119 fprintf(stderr, 5120 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 5121 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 5122 return; 5123 } 5124 nb_fwd_lcores = (lcoreid_t) nb_lc; 5125 printf("Number of forwarding cores set to %u\n", 5126 (unsigned int) nb_fwd_lcores); 5127 } 5128 5129 void 5130 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 5131 { 5132 unsigned int i; 5133 portid_t port_id; 5134 int record_now; 5135 5136 record_now = 0; 5137 again: 5138 for (i = 0; i < nb_pt; i++) { 5139 port_id = (portid_t) portlist[i]; 5140 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5141 return; 5142 if (record_now) 5143 fwd_ports_ids[i] = port_id; 5144 } 5145 if (record_now == 0) { 5146 record_now = 1; 5147 goto again; 5148 } 5149 nb_cfg_ports = (portid_t) nb_pt; 5150 if (nb_fwd_ports != (portid_t) nb_pt) { 5151 printf("previous number of forwarding ports %u - changed to " 5152 "number of configured ports %u\n", 5153 (unsigned int) nb_fwd_ports, nb_pt); 5154 nb_fwd_ports = (portid_t) nb_pt; 5155 } 5156 } 5157 5158 /** 5159 * Parse the user input and obtain the list of forwarding ports 5160 * 5161 * @param[in] list 5162 * String containing the user input. User can specify 5163 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 5164 * For example, if the user wants to use all the available 5165 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 5166 * If the user wants to use only the ports 1,2 then the input 5167 * is 1,2. 5168 * valid characters are '-' and ',' 5169 * @param[out] values 5170 * This array will be filled with a list of port IDs 5171 * based on the user input 5172 * Note that duplicate entries are discarded and only the first 5173 * count entries in this array are port IDs and all the rest 5174 * will contain default values 5175 * @param[in] maxsize 5176 * This parameter denotes 2 things 5177 * 1) Number of elements in the values array 5178 * 2) Maximum value of each element in the values array 5179 * @return 5180 * On success, returns total count of parsed port IDs 5181 * On failure, returns 0 5182 */ 5183 static unsigned int 5184 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 5185 { 5186 unsigned int count = 0; 5187 char *end = NULL; 5188 int min, max; 5189 int value, i; 5190 unsigned int marked[maxsize]; 5191 5192 if (list == NULL || values == NULL) 5193 return 0; 5194 5195 for (i = 0; i < (int)maxsize; i++) 5196 marked[i] = 0; 5197 5198 min = INT_MAX; 5199 5200 do { 5201 /*Remove the blank spaces if any*/ 5202 while (isblank(*list)) 5203 list++; 5204 if (*list == '\0') 5205 break; 5206 errno = 0; 5207 value = strtol(list, &end, 10); 5208 if (errno || end == NULL) 5209 return 0; 5210 if (value < 0 || value >= (int)maxsize) 5211 return 0; 5212 while (isblank(*end)) 5213 end++; 5214 if (*end == '-' && min == INT_MAX) { 5215 min = value; 5216 } else if ((*end == ',') || (*end == '\0')) { 5217 max = value; 5218 if (min == INT_MAX) 5219 min = value; 5220 for (i = min; i <= max; i++) { 5221 if (count < maxsize) { 5222 if (marked[i]) 5223 continue; 5224 values[count] = i; 5225 marked[i] = 1; 5226 count++; 5227 } 5228 } 5229 min = INT_MAX; 5230 } else 5231 return 0; 5232 list = end + 1; 5233 } while (*end != '\0'); 5234 5235 return count; 5236 } 5237 5238 void 5239 parse_fwd_portlist(const char *portlist) 5240 { 5241 unsigned int portcount; 5242 unsigned int portindex[RTE_MAX_ETHPORTS]; 5243 unsigned int i, valid_port_count = 0; 5244 5245 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 5246 if (!portcount) 5247 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 5248 5249 /* 5250 * Here we verify the validity of the ports 5251 * and thereby calculate the total number of 5252 * valid ports 5253 */ 5254 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 5255 if (rte_eth_dev_is_valid_port(portindex[i])) { 5256 portindex[valid_port_count] = portindex[i]; 5257 valid_port_count++; 5258 } 5259 } 5260 5261 set_fwd_ports_list(portindex, valid_port_count); 5262 } 5263 5264 void 5265 set_fwd_ports_mask(uint64_t portmask) 5266 { 5267 unsigned int portlist[64]; 5268 unsigned int nb_pt; 5269 unsigned int i; 5270 5271 if (portmask == 0) { 5272 fprintf(stderr, "Invalid NULL mask of ports\n"); 5273 return; 5274 } 5275 nb_pt = 0; 5276 RTE_ETH_FOREACH_DEV(i) { 5277 if (! ((uint64_t)(1ULL << i) & portmask)) 5278 continue; 5279 portlist[nb_pt++] = i; 5280 } 5281 set_fwd_ports_list(portlist, nb_pt); 5282 } 5283 5284 void 5285 set_fwd_ports_number(uint16_t nb_pt) 5286 { 5287 if (nb_pt > nb_cfg_ports) { 5288 fprintf(stderr, 5289 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 5290 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 5291 return; 5292 } 5293 nb_fwd_ports = (portid_t) nb_pt; 5294 printf("Number of forwarding ports set to %u\n", 5295 (unsigned int) nb_fwd_ports); 5296 } 5297 5298 int 5299 port_is_forwarding(portid_t port_id) 5300 { 5301 unsigned int i; 5302 5303 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5304 return -1; 5305 5306 for (i = 0; i < nb_fwd_ports; i++) { 5307 if (fwd_ports_ids[i] == port_id) 5308 return 1; 5309 } 5310 5311 return 0; 5312 } 5313 5314 void 5315 set_nb_pkt_per_burst(uint16_t nb) 5316 { 5317 if (nb > MAX_PKT_BURST) { 5318 fprintf(stderr, 5319 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 5320 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 5321 return; 5322 } 5323 nb_pkt_per_burst = nb; 5324 printf("Number of packets per burst set to %u\n", 5325 (unsigned int) nb_pkt_per_burst); 5326 } 5327 5328 static const char * 5329 tx_split_get_name(enum tx_pkt_split split) 5330 { 5331 uint32_t i; 5332 5333 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5334 if (tx_split_name[i].split == split) 5335 return tx_split_name[i].name; 5336 } 5337 return NULL; 5338 } 5339 5340 void 5341 set_tx_pkt_split(const char *name) 5342 { 5343 uint32_t i; 5344 5345 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5346 if (strcmp(tx_split_name[i].name, name) == 0) { 5347 tx_pkt_split = tx_split_name[i].split; 5348 return; 5349 } 5350 } 5351 fprintf(stderr, "unknown value: \"%s\"\n", name); 5352 } 5353 5354 int 5355 parse_fec_mode(const char *name, uint32_t *fec_capa) 5356 { 5357 uint8_t i; 5358 5359 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 5360 if (strcmp(fec_mode_name[i].name, name) == 0) { 5361 *fec_capa = 5362 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 5363 return 0; 5364 } 5365 } 5366 return -1; 5367 } 5368 5369 void 5370 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 5371 { 5372 unsigned int i, j; 5373 5374 printf("FEC capabilities:\n"); 5375 5376 for (i = 0; i < num; i++) { 5377 printf("%s : ", 5378 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 5379 5380 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 5381 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 5382 speed_fec_capa[i].capa) 5383 printf("%s ", fec_mode_name[j].name); 5384 } 5385 printf("\n"); 5386 } 5387 } 5388 5389 void 5390 show_rx_pkt_offsets(void) 5391 { 5392 uint32_t i, n; 5393 5394 n = rx_pkt_nb_offs; 5395 printf("Number of offsets: %u\n", n); 5396 if (n) { 5397 printf("Segment offsets: "); 5398 for (i = 0; i != n - 1; i++) 5399 printf("%hu,", rx_pkt_seg_offsets[i]); 5400 printf("%hu\n", rx_pkt_seg_lengths[i]); 5401 } 5402 } 5403 5404 void 5405 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 5406 { 5407 unsigned int i; 5408 5409 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 5410 printf("nb segments per RX packets=%u >= " 5411 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 5412 return; 5413 } 5414 5415 /* 5416 * No extra check here, the segment length will be checked by PMD 5417 * in the extended queue setup. 5418 */ 5419 for (i = 0; i < nb_offs; i++) { 5420 if (seg_offsets[i] >= UINT16_MAX) { 5421 printf("offset[%u]=%u > UINT16_MAX - give up\n", 5422 i, seg_offsets[i]); 5423 return; 5424 } 5425 } 5426 5427 for (i = 0; i < nb_offs; i++) 5428 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 5429 5430 rx_pkt_nb_offs = (uint8_t) nb_offs; 5431 } 5432 5433 void 5434 show_rx_pkt_segments(void) 5435 { 5436 uint32_t i, n; 5437 5438 n = rx_pkt_nb_segs; 5439 printf("Number of segments: %u\n", n); 5440 if (n) { 5441 printf("Segment sizes: "); 5442 for (i = 0; i != n - 1; i++) 5443 printf("%hu,", rx_pkt_seg_lengths[i]); 5444 printf("%hu\n", rx_pkt_seg_lengths[i]); 5445 } 5446 } 5447 5448 static const char *get_ptype_str(uint32_t ptype) 5449 { 5450 const char *str; 5451 5452 switch (ptype) { 5453 case RTE_PTYPE_L2_ETHER: 5454 str = "eth"; 5455 break; 5456 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 5457 str = "ipv4"; 5458 break; 5459 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 5460 str = "ipv6"; 5461 break; 5462 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5463 str = "ipv4-tcp"; 5464 break; 5465 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5466 str = "ipv4-udp"; 5467 break; 5468 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5469 str = "ipv4-sctp"; 5470 break; 5471 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5472 str = "ipv6-tcp"; 5473 break; 5474 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5475 str = "ipv6-udp"; 5476 break; 5477 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5478 str = "ipv6-sctp"; 5479 break; 5480 case RTE_PTYPE_TUNNEL_GRENAT: 5481 str = "grenat"; 5482 break; 5483 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER: 5484 str = "inner-eth"; 5485 break; 5486 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5487 | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 5488 str = "inner-ipv4"; 5489 break; 5490 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5491 | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 5492 str = "inner-ipv6"; 5493 break; 5494 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5495 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5496 str = "inner-ipv4-tcp"; 5497 break; 5498 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5499 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5500 str = "inner-ipv4-udp"; 5501 break; 5502 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5503 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5504 str = "inner-ipv4-sctp"; 5505 break; 5506 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5507 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5508 str = "inner-ipv6-tcp"; 5509 break; 5510 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5511 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5512 str = "inner-ipv6-udp"; 5513 break; 5514 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5515 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5516 str = "inner-ipv6-sctp"; 5517 break; 5518 default: 5519 str = "unsupported"; 5520 } 5521 5522 return str; 5523 } 5524 5525 void 5526 show_rx_pkt_hdrs(void) 5527 { 5528 uint32_t i, n; 5529 5530 n = rx_pkt_nb_segs; 5531 printf("Number of segments: %u\n", n); 5532 if (n) { 5533 printf("Packet segs: "); 5534 for (i = 0; i < n - 1; i++) 5535 printf("%s, ", get_ptype_str(rx_pkt_hdr_protos[i])); 5536 printf("payload\n"); 5537 } 5538 } 5539 5540 void 5541 set_rx_pkt_hdrs(unsigned int *seg_hdrs, unsigned int nb_segs) 5542 { 5543 unsigned int i; 5544 5545 if (nb_segs + 1 > MAX_SEGS_BUFFER_SPLIT) { 5546 printf("nb segments per RX packets=%u > " 5547 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs + 1); 5548 return; 5549 } 5550 5551 memset(rx_pkt_hdr_protos, 0, sizeof(rx_pkt_hdr_protos)); 5552 5553 for (i = 0; i < nb_segs; i++) 5554 rx_pkt_hdr_protos[i] = (uint32_t)seg_hdrs[i]; 5555 /* 5556 * We calculate the number of hdrs, but payload is not included, 5557 * so rx_pkt_nb_segs would increase 1. 5558 */ 5559 rx_pkt_nb_segs = nb_segs + 1; 5560 } 5561 5562 void 5563 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5564 { 5565 unsigned int i; 5566 5567 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 5568 printf("nb segments per RX packets=%u >= " 5569 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 5570 return; 5571 } 5572 5573 /* 5574 * No extra check here, the segment length will be checked by PMD 5575 * in the extended queue setup. 5576 */ 5577 for (i = 0; i < nb_segs; i++) { 5578 if (seg_lengths[i] >= UINT16_MAX) { 5579 printf("length[%u]=%u > UINT16_MAX - give up\n", 5580 i, seg_lengths[i]); 5581 return; 5582 } 5583 } 5584 5585 for (i = 0; i < nb_segs; i++) 5586 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5587 5588 rx_pkt_nb_segs = (uint8_t) nb_segs; 5589 } 5590 5591 void 5592 show_tx_pkt_segments(void) 5593 { 5594 uint32_t i, n; 5595 const char *split; 5596 5597 n = tx_pkt_nb_segs; 5598 split = tx_split_get_name(tx_pkt_split); 5599 5600 printf("Number of segments: %u\n", n); 5601 printf("Segment sizes: "); 5602 for (i = 0; i != n - 1; i++) 5603 printf("%hu,", tx_pkt_seg_lengths[i]); 5604 printf("%hu\n", tx_pkt_seg_lengths[i]); 5605 printf("Split packet: %s\n", split); 5606 } 5607 5608 static bool 5609 nb_segs_is_invalid(unsigned int nb_segs) 5610 { 5611 uint16_t ring_size; 5612 uint16_t queue_id; 5613 uint16_t port_id; 5614 int ret; 5615 5616 RTE_ETH_FOREACH_DEV(port_id) { 5617 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 5618 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 5619 if (ret) { 5620 /* Port may not be initialized yet, can't say 5621 * the port is invalid in this stage. 5622 */ 5623 continue; 5624 } 5625 if (ring_size < nb_segs) { 5626 printf("nb segments per TX packets=%u >= TX " 5627 "queue(%u) ring_size=%u - txpkts ignored\n", 5628 nb_segs, queue_id, ring_size); 5629 return true; 5630 } 5631 } 5632 } 5633 5634 return false; 5635 } 5636 5637 void 5638 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5639 { 5640 uint16_t tx_pkt_len; 5641 unsigned int i; 5642 5643 /* 5644 * For single segment settings failed check is ignored. 5645 * It is a very basic capability to send the single segment 5646 * packets, suppose it is always supported. 5647 */ 5648 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 5649 fprintf(stderr, 5650 "Tx segment size(%u) is not supported - txpkts ignored\n", 5651 nb_segs); 5652 return; 5653 } 5654 5655 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 5656 fprintf(stderr, 5657 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 5658 nb_segs, RTE_MAX_SEGS_PER_PKT); 5659 return; 5660 } 5661 5662 /* 5663 * Check that each segment length is greater or equal than 5664 * the mbuf data size. 5665 * Check also that the total packet length is greater or equal than the 5666 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 5667 * 20 + 8). 5668 */ 5669 tx_pkt_len = 0; 5670 for (i = 0; i < nb_segs; i++) { 5671 if (seg_lengths[i] > mbuf_data_size[0]) { 5672 fprintf(stderr, 5673 "length[%u]=%u > mbuf_data_size=%u - give up\n", 5674 i, seg_lengths[i], mbuf_data_size[0]); 5675 return; 5676 } 5677 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 5678 } 5679 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 5680 fprintf(stderr, "total packet length=%u < %d - give up\n", 5681 (unsigned) tx_pkt_len, 5682 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 5683 return; 5684 } 5685 5686 for (i = 0; i < nb_segs; i++) 5687 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5688 5689 tx_pkt_length = tx_pkt_len; 5690 tx_pkt_nb_segs = (uint8_t) nb_segs; 5691 } 5692 5693 void 5694 show_tx_pkt_times(void) 5695 { 5696 printf("Interburst gap: %u\n", tx_pkt_times_inter); 5697 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 5698 } 5699 5700 void 5701 set_tx_pkt_times(unsigned int *tx_times) 5702 { 5703 tx_pkt_times_inter = tx_times[0]; 5704 tx_pkt_times_intra = tx_times[1]; 5705 } 5706 5707 #ifdef RTE_LIB_GRO 5708 void 5709 setup_gro(const char *onoff, portid_t port_id) 5710 { 5711 if (!rte_eth_dev_is_valid_port(port_id)) { 5712 fprintf(stderr, "invalid port id %u\n", port_id); 5713 return; 5714 } 5715 if (test_done == 0) { 5716 fprintf(stderr, 5717 "Before enable/disable GRO, please stop forwarding first\n"); 5718 return; 5719 } 5720 if (strcmp(onoff, "on") == 0) { 5721 if (gro_ports[port_id].enable != 0) { 5722 fprintf(stderr, 5723 "Port %u has enabled GRO. Please disable GRO first\n", 5724 port_id); 5725 return; 5726 } 5727 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5728 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 5729 gro_ports[port_id].param.max_flow_num = 5730 GRO_DEFAULT_FLOW_NUM; 5731 gro_ports[port_id].param.max_item_per_flow = 5732 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 5733 } 5734 gro_ports[port_id].enable = 1; 5735 } else { 5736 if (gro_ports[port_id].enable == 0) { 5737 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 5738 return; 5739 } 5740 gro_ports[port_id].enable = 0; 5741 } 5742 } 5743 5744 void 5745 setup_gro_flush_cycles(uint8_t cycles) 5746 { 5747 if (test_done == 0) { 5748 fprintf(stderr, 5749 "Before change flush interval for GRO, please stop forwarding first.\n"); 5750 return; 5751 } 5752 5753 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 5754 GRO_DEFAULT_FLUSH_CYCLES) { 5755 fprintf(stderr, 5756 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 5757 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 5758 cycles = GRO_DEFAULT_FLUSH_CYCLES; 5759 } 5760 5761 gro_flush_cycles = cycles; 5762 } 5763 5764 void 5765 show_gro(portid_t port_id) 5766 { 5767 struct rte_gro_param *param; 5768 uint32_t max_pkts_num; 5769 5770 param = &gro_ports[port_id].param; 5771 5772 if (!rte_eth_dev_is_valid_port(port_id)) { 5773 fprintf(stderr, "Invalid port id %u.\n", port_id); 5774 return; 5775 } 5776 if (gro_ports[port_id].enable) { 5777 printf("GRO type: TCP/IPv4\n"); 5778 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5779 max_pkts_num = param->max_flow_num * 5780 param->max_item_per_flow; 5781 } else 5782 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 5783 printf("Max number of packets to perform GRO: %u\n", 5784 max_pkts_num); 5785 printf("Flushing cycles: %u\n", gro_flush_cycles); 5786 } else 5787 printf("Port %u doesn't enable GRO.\n", port_id); 5788 } 5789 #endif /* RTE_LIB_GRO */ 5790 5791 #ifdef RTE_LIB_GSO 5792 void 5793 setup_gso(const char *mode, portid_t port_id) 5794 { 5795 if (!rte_eth_dev_is_valid_port(port_id)) { 5796 fprintf(stderr, "invalid port id %u\n", port_id); 5797 return; 5798 } 5799 if (strcmp(mode, "on") == 0) { 5800 if (test_done == 0) { 5801 fprintf(stderr, 5802 "before enabling GSO, please stop forwarding first\n"); 5803 return; 5804 } 5805 gso_ports[port_id].enable = 1; 5806 } else if (strcmp(mode, "off") == 0) { 5807 if (test_done == 0) { 5808 fprintf(stderr, 5809 "before disabling GSO, please stop forwarding first\n"); 5810 return; 5811 } 5812 gso_ports[port_id].enable = 0; 5813 } 5814 } 5815 #endif /* RTE_LIB_GSO */ 5816 5817 char* 5818 list_pkt_forwarding_modes(void) 5819 { 5820 static char fwd_modes[128] = ""; 5821 const char *separator = "|"; 5822 struct fwd_engine *fwd_eng; 5823 unsigned i = 0; 5824 5825 if (strlen (fwd_modes) == 0) { 5826 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5827 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5828 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5829 strncat(fwd_modes, separator, 5830 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5831 } 5832 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5833 } 5834 5835 return fwd_modes; 5836 } 5837 5838 char* 5839 list_pkt_forwarding_retry_modes(void) 5840 { 5841 static char fwd_modes[128] = ""; 5842 const char *separator = "|"; 5843 struct fwd_engine *fwd_eng; 5844 unsigned i = 0; 5845 5846 if (strlen(fwd_modes) == 0) { 5847 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5848 if (fwd_eng == &rx_only_engine) 5849 continue; 5850 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5851 sizeof(fwd_modes) - 5852 strlen(fwd_modes) - 1); 5853 strncat(fwd_modes, separator, 5854 sizeof(fwd_modes) - 5855 strlen(fwd_modes) - 1); 5856 } 5857 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5858 } 5859 5860 return fwd_modes; 5861 } 5862 5863 void 5864 set_pkt_forwarding_mode(const char *fwd_mode_name) 5865 { 5866 struct fwd_engine *fwd_eng; 5867 unsigned i; 5868 5869 i = 0; 5870 while ((fwd_eng = fwd_engines[i]) != NULL) { 5871 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5872 printf("Set %s packet forwarding mode%s\n", 5873 fwd_mode_name, 5874 retry_enabled == 0 ? "" : " with retry"); 5875 cur_fwd_eng = fwd_eng; 5876 return; 5877 } 5878 i++; 5879 } 5880 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5881 } 5882 5883 void 5884 add_rx_dump_callbacks(portid_t portid) 5885 { 5886 struct rte_eth_dev_info dev_info; 5887 uint16_t queue; 5888 int ret; 5889 5890 if (port_id_is_invalid(portid, ENABLED_WARN)) 5891 return; 5892 5893 ret = eth_dev_info_get_print_err(portid, &dev_info); 5894 if (ret != 0) 5895 return; 5896 5897 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5898 if (!ports[portid].rx_dump_cb[queue]) 5899 ports[portid].rx_dump_cb[queue] = 5900 rte_eth_add_rx_callback(portid, queue, 5901 dump_rx_pkts, NULL); 5902 } 5903 5904 void 5905 add_tx_dump_callbacks(portid_t portid) 5906 { 5907 struct rte_eth_dev_info dev_info; 5908 uint16_t queue; 5909 int ret; 5910 5911 if (port_id_is_invalid(portid, ENABLED_WARN)) 5912 return; 5913 5914 ret = eth_dev_info_get_print_err(portid, &dev_info); 5915 if (ret != 0) 5916 return; 5917 5918 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5919 if (!ports[portid].tx_dump_cb[queue]) 5920 ports[portid].tx_dump_cb[queue] = 5921 rte_eth_add_tx_callback(portid, queue, 5922 dump_tx_pkts, NULL); 5923 } 5924 5925 void 5926 remove_rx_dump_callbacks(portid_t portid) 5927 { 5928 struct rte_eth_dev_info dev_info; 5929 uint16_t queue; 5930 int ret; 5931 5932 if (port_id_is_invalid(portid, ENABLED_WARN)) 5933 return; 5934 5935 ret = eth_dev_info_get_print_err(portid, &dev_info); 5936 if (ret != 0) 5937 return; 5938 5939 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5940 if (ports[portid].rx_dump_cb[queue]) { 5941 rte_eth_remove_rx_callback(portid, queue, 5942 ports[portid].rx_dump_cb[queue]); 5943 ports[portid].rx_dump_cb[queue] = NULL; 5944 } 5945 } 5946 5947 void 5948 remove_tx_dump_callbacks(portid_t portid) 5949 { 5950 struct rte_eth_dev_info dev_info; 5951 uint16_t queue; 5952 int ret; 5953 5954 if (port_id_is_invalid(portid, ENABLED_WARN)) 5955 return; 5956 5957 ret = eth_dev_info_get_print_err(portid, &dev_info); 5958 if (ret != 0) 5959 return; 5960 5961 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5962 if (ports[portid].tx_dump_cb[queue]) { 5963 rte_eth_remove_tx_callback(portid, queue, 5964 ports[portid].tx_dump_cb[queue]); 5965 ports[portid].tx_dump_cb[queue] = NULL; 5966 } 5967 } 5968 5969 void 5970 configure_rxtx_dump_callbacks(uint16_t verbose) 5971 { 5972 portid_t portid; 5973 5974 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5975 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 5976 return; 5977 #endif 5978 5979 RTE_ETH_FOREACH_DEV(portid) 5980 { 5981 if (verbose == 1 || verbose > 2) 5982 add_rx_dump_callbacks(portid); 5983 else 5984 remove_rx_dump_callbacks(portid); 5985 if (verbose >= 2) 5986 add_tx_dump_callbacks(portid); 5987 else 5988 remove_tx_dump_callbacks(portid); 5989 } 5990 } 5991 5992 void 5993 set_verbose_level(uint16_t vb_level) 5994 { 5995 printf("Change verbose level from %u to %u\n", 5996 (unsigned int) verbose_level, (unsigned int) vb_level); 5997 verbose_level = vb_level; 5998 configure_rxtx_dump_callbacks(verbose_level); 5999 } 6000 6001 void 6002 vlan_extend_set(portid_t port_id, int on) 6003 { 6004 int diag; 6005 int vlan_offload; 6006 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6007 6008 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6009 return; 6010 6011 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6012 6013 if (on) { 6014 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 6015 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6016 } else { 6017 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 6018 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6019 } 6020 6021 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6022 if (diag < 0) { 6023 fprintf(stderr, 6024 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 6025 port_id, on, diag); 6026 return; 6027 } 6028 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6029 } 6030 6031 void 6032 rx_vlan_strip_set(portid_t port_id, int on) 6033 { 6034 int diag; 6035 int vlan_offload; 6036 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6037 6038 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6039 return; 6040 6041 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6042 6043 if (on) { 6044 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 6045 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6046 } else { 6047 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 6048 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6049 } 6050 6051 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6052 if (diag < 0) { 6053 fprintf(stderr, 6054 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6055 __func__, port_id, on, diag); 6056 return; 6057 } 6058 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6059 } 6060 6061 void 6062 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 6063 { 6064 int diag; 6065 6066 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6067 return; 6068 6069 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 6070 if (diag < 0) 6071 fprintf(stderr, 6072 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 6073 __func__, port_id, queue_id, on, diag); 6074 } 6075 6076 void 6077 rx_vlan_filter_set(portid_t port_id, int on) 6078 { 6079 int diag; 6080 int vlan_offload; 6081 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6082 6083 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6084 return; 6085 6086 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6087 6088 if (on) { 6089 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 6090 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6091 } else { 6092 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 6093 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6094 } 6095 6096 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6097 if (diag < 0) { 6098 fprintf(stderr, 6099 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6100 __func__, port_id, on, diag); 6101 return; 6102 } 6103 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6104 } 6105 6106 void 6107 rx_vlan_qinq_strip_set(portid_t port_id, int on) 6108 { 6109 int diag; 6110 int vlan_offload; 6111 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6112 6113 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6114 return; 6115 6116 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6117 6118 if (on) { 6119 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 6120 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6121 } else { 6122 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 6123 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6124 } 6125 6126 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6127 if (diag < 0) { 6128 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 6129 __func__, port_id, on, diag); 6130 return; 6131 } 6132 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6133 } 6134 6135 int 6136 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 6137 { 6138 int diag; 6139 6140 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6141 return 1; 6142 if (vlan_id_is_invalid(vlan_id)) 6143 return 1; 6144 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 6145 if (diag == 0) 6146 return 0; 6147 fprintf(stderr, 6148 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 6149 port_id, vlan_id, on, diag); 6150 return -1; 6151 } 6152 6153 void 6154 rx_vlan_all_filter_set(portid_t port_id, int on) 6155 { 6156 uint16_t vlan_id; 6157 6158 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6159 return; 6160 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 6161 if (rx_vft_set(port_id, vlan_id, on)) 6162 break; 6163 } 6164 } 6165 6166 void 6167 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 6168 { 6169 int diag; 6170 6171 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6172 return; 6173 6174 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 6175 if (diag == 0) 6176 return; 6177 6178 fprintf(stderr, 6179 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 6180 port_id, vlan_type, tp_id, diag); 6181 } 6182 6183 void 6184 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 6185 { 6186 struct rte_eth_dev_info dev_info; 6187 int ret; 6188 6189 if (vlan_id_is_invalid(vlan_id)) 6190 return; 6191 6192 if (ports[port_id].dev_conf.txmode.offloads & 6193 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 6194 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 6195 return; 6196 } 6197 6198 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6199 if (ret != 0) 6200 return; 6201 6202 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 6203 fprintf(stderr, 6204 "Error: vlan insert is not supported by port %d\n", 6205 port_id); 6206 return; 6207 } 6208 6209 tx_vlan_reset(port_id); 6210 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 6211 ports[port_id].tx_vlan_id = vlan_id; 6212 } 6213 6214 void 6215 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 6216 { 6217 struct rte_eth_dev_info dev_info; 6218 int ret; 6219 6220 if (vlan_id_is_invalid(vlan_id)) 6221 return; 6222 if (vlan_id_is_invalid(vlan_id_outer)) 6223 return; 6224 6225 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6226 if (ret != 0) 6227 return; 6228 6229 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 6230 fprintf(stderr, 6231 "Error: qinq insert not supported by port %d\n", 6232 port_id); 6233 return; 6234 } 6235 6236 tx_vlan_reset(port_id); 6237 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6238 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6239 ports[port_id].tx_vlan_id = vlan_id; 6240 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 6241 } 6242 6243 void 6244 tx_vlan_reset(portid_t port_id) 6245 { 6246 ports[port_id].dev_conf.txmode.offloads &= 6247 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6248 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6249 ports[port_id].tx_vlan_id = 0; 6250 ports[port_id].tx_vlan_id_outer = 0; 6251 } 6252 6253 void 6254 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 6255 { 6256 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6257 return; 6258 6259 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 6260 } 6261 6262 void 6263 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 6264 { 6265 int ret; 6266 6267 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6268 return; 6269 6270 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 6271 return; 6272 6273 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 6274 fprintf(stderr, "map_value not in required range 0..%d\n", 6275 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 6276 return; 6277 } 6278 6279 if (!is_rx) { /* tx */ 6280 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 6281 map_value); 6282 if (ret) { 6283 fprintf(stderr, 6284 "failed to set tx queue stats mapping.\n"); 6285 return; 6286 } 6287 } else { /* rx */ 6288 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 6289 map_value); 6290 if (ret) { 6291 fprintf(stderr, 6292 "failed to set rx queue stats mapping.\n"); 6293 return; 6294 } 6295 } 6296 } 6297 6298 void 6299 set_xstats_hide_zero(uint8_t on_off) 6300 { 6301 xstats_hide_zero = on_off; 6302 } 6303 6304 void 6305 set_record_core_cycles(uint8_t on_off) 6306 { 6307 record_core_cycles = on_off; 6308 } 6309 6310 void 6311 set_record_burst_stats(uint8_t on_off) 6312 { 6313 record_burst_stats = on_off; 6314 } 6315 6316 uint16_t 6317 str_to_flowtype(const char *string) 6318 { 6319 uint8_t i; 6320 6321 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6322 if (!strcmp(flowtype_str_table[i].str, string)) 6323 return flowtype_str_table[i].ftype; 6324 } 6325 6326 if (isdigit(string[0])) { 6327 int val = atoi(string); 6328 if (val > 0 && val < 64) 6329 return (uint16_t)val; 6330 } 6331 6332 return RTE_ETH_FLOW_UNKNOWN; 6333 } 6334 6335 const char* 6336 flowtype_to_str(uint16_t flow_type) 6337 { 6338 uint8_t i; 6339 6340 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6341 if (flowtype_str_table[i].ftype == flow_type) 6342 return flowtype_str_table[i].str; 6343 } 6344 6345 return NULL; 6346 } 6347 6348 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 6349 6350 static inline void 6351 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6352 { 6353 struct rte_eth_flex_payload_cfg *cfg; 6354 uint32_t i, j; 6355 6356 for (i = 0; i < flex_conf->nb_payloads; i++) { 6357 cfg = &flex_conf->flex_set[i]; 6358 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 6359 printf("\n RAW: "); 6360 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 6361 printf("\n L2_PAYLOAD: "); 6362 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 6363 printf("\n L3_PAYLOAD: "); 6364 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 6365 printf("\n L4_PAYLOAD: "); 6366 else 6367 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 6368 for (j = 0; j < num; j++) 6369 printf(" %-5u", cfg->src_offset[j]); 6370 } 6371 printf("\n"); 6372 } 6373 6374 static inline void 6375 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6376 { 6377 struct rte_eth_fdir_flex_mask *mask; 6378 uint32_t i, j; 6379 const char *p; 6380 6381 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 6382 mask = &flex_conf->flex_mask[i]; 6383 p = flowtype_to_str(mask->flow_type); 6384 printf("\n %s:\t", p ? p : "unknown"); 6385 for (j = 0; j < num; j++) 6386 printf(" %02x", mask->mask[j]); 6387 } 6388 printf("\n"); 6389 } 6390 6391 static inline void 6392 print_fdir_flow_type(uint32_t flow_types_mask) 6393 { 6394 int i; 6395 const char *p; 6396 6397 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 6398 if (!(flow_types_mask & (1 << i))) 6399 continue; 6400 p = flowtype_to_str(i); 6401 if (p) 6402 printf(" %s", p); 6403 else 6404 printf(" unknown"); 6405 } 6406 printf("\n"); 6407 } 6408 6409 static int 6410 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 6411 struct rte_eth_fdir_stats *fdir_stat) 6412 { 6413 int ret = -ENOTSUP; 6414 6415 #ifdef RTE_NET_I40E 6416 if (ret == -ENOTSUP) { 6417 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 6418 if (!ret) 6419 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 6420 } 6421 #endif 6422 #ifdef RTE_NET_IXGBE 6423 if (ret == -ENOTSUP) { 6424 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 6425 if (!ret) 6426 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 6427 } 6428 #endif 6429 switch (ret) { 6430 case 0: 6431 break; 6432 case -ENOTSUP: 6433 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 6434 port_id); 6435 break; 6436 default: 6437 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 6438 break; 6439 } 6440 return ret; 6441 } 6442 6443 void 6444 fdir_get_infos(portid_t port_id) 6445 { 6446 struct rte_eth_fdir_stats fdir_stat; 6447 struct rte_eth_fdir_info fdir_info; 6448 6449 static const char *fdir_stats_border = "########################"; 6450 6451 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6452 return; 6453 6454 memset(&fdir_info, 0, sizeof(fdir_info)); 6455 memset(&fdir_stat, 0, sizeof(fdir_stat)); 6456 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 6457 return; 6458 6459 printf("\n %s FDIR infos for port %-2d %s\n", 6460 fdir_stats_border, port_id, fdir_stats_border); 6461 printf(" MODE: "); 6462 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 6463 printf(" PERFECT\n"); 6464 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 6465 printf(" PERFECT-MAC-VLAN\n"); 6466 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 6467 printf(" PERFECT-TUNNEL\n"); 6468 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 6469 printf(" SIGNATURE\n"); 6470 else 6471 printf(" DISABLE\n"); 6472 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 6473 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 6474 printf(" SUPPORTED FLOW TYPE: "); 6475 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 6476 } 6477 printf(" FLEX PAYLOAD INFO:\n"); 6478 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 6479 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 6480 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 6481 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 6482 fdir_info.flex_payload_unit, 6483 fdir_info.max_flex_payload_segment_num, 6484 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 6485 if (fdir_info.flex_conf.nb_payloads > 0) { 6486 printf(" FLEX PAYLOAD SRC OFFSET:"); 6487 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6488 } 6489 if (fdir_info.flex_conf.nb_flexmasks > 0) { 6490 printf(" FLEX MASK CFG:"); 6491 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6492 } 6493 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 6494 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 6495 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 6496 fdir_info.guarant_spc, fdir_info.best_spc); 6497 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 6498 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 6499 " add: %-10"PRIu64" remove: %"PRIu64"\n" 6500 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 6501 fdir_stat.collision, fdir_stat.free, 6502 fdir_stat.maxhash, fdir_stat.maxlen, 6503 fdir_stat.add, fdir_stat.remove, 6504 fdir_stat.f_add, fdir_stat.f_remove); 6505 printf(" %s############################%s\n", 6506 fdir_stats_border, fdir_stats_border); 6507 } 6508 6509 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 6510 6511 void 6512 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 6513 { 6514 #ifdef RTE_NET_IXGBE 6515 int diag; 6516 6517 if (is_rx) 6518 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 6519 else 6520 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 6521 6522 if (diag == 0) 6523 return; 6524 fprintf(stderr, 6525 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 6526 is_rx ? "rx" : "tx", port_id, diag); 6527 return; 6528 #endif 6529 fprintf(stderr, "VF %s setting not supported for port %d\n", 6530 is_rx ? "Rx" : "Tx", port_id); 6531 RTE_SET_USED(vf); 6532 RTE_SET_USED(on); 6533 } 6534 6535 int 6536 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate) 6537 { 6538 int diag; 6539 struct rte_eth_link link; 6540 int ret; 6541 6542 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6543 return 1; 6544 ret = eth_link_get_nowait_print_err(port_id, &link); 6545 if (ret < 0) 6546 return 1; 6547 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 6548 rate > link.link_speed) { 6549 fprintf(stderr, 6550 "Invalid rate value:%u bigger than link speed: %u\n", 6551 rate, link.link_speed); 6552 return 1; 6553 } 6554 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 6555 if (diag == 0) 6556 return diag; 6557 fprintf(stderr, 6558 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 6559 port_id, diag); 6560 return diag; 6561 } 6562 6563 int 6564 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk) 6565 { 6566 int diag = -ENOTSUP; 6567 6568 RTE_SET_USED(vf); 6569 RTE_SET_USED(rate); 6570 RTE_SET_USED(q_msk); 6571 6572 #ifdef RTE_NET_IXGBE 6573 if (diag == -ENOTSUP) 6574 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 6575 q_msk); 6576 #endif 6577 #ifdef RTE_NET_BNXT 6578 if (diag == -ENOTSUP) 6579 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 6580 #endif 6581 if (diag == 0) 6582 return diag; 6583 6584 fprintf(stderr, 6585 "%s for port_id=%d failed diag=%d\n", 6586 __func__, port_id, diag); 6587 return diag; 6588 } 6589 6590 int 6591 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh) 6592 { 6593 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6594 return -EINVAL; 6595 6596 return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh); 6597 } 6598 6599 /* 6600 * Functions to manage the set of filtered Multicast MAC addresses. 6601 * 6602 * A pool of filtered multicast MAC addresses is associated with each port. 6603 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 6604 * The address of the pool and the number of valid multicast MAC addresses 6605 * recorded in the pool are stored in the fields "mc_addr_pool" and 6606 * "mc_addr_nb" of the "rte_port" data structure. 6607 * 6608 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 6609 * to be supplied a contiguous array of multicast MAC addresses. 6610 * To comply with this constraint, the set of multicast addresses recorded 6611 * into the pool are systematically compacted at the beginning of the pool. 6612 * Hence, when a multicast address is removed from the pool, all following 6613 * addresses, if any, are copied back to keep the set contiguous. 6614 */ 6615 #define MCAST_POOL_INC 32 6616 6617 static int 6618 mcast_addr_pool_extend(struct rte_port *port) 6619 { 6620 struct rte_ether_addr *mc_pool; 6621 size_t mc_pool_size; 6622 6623 /* 6624 * If a free entry is available at the end of the pool, just 6625 * increment the number of recorded multicast addresses. 6626 */ 6627 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 6628 port->mc_addr_nb++; 6629 return 0; 6630 } 6631 6632 /* 6633 * [re]allocate a pool with MCAST_POOL_INC more entries. 6634 * The previous test guarantees that port->mc_addr_nb is a multiple 6635 * of MCAST_POOL_INC. 6636 */ 6637 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 6638 MCAST_POOL_INC); 6639 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 6640 mc_pool_size); 6641 if (mc_pool == NULL) { 6642 fprintf(stderr, 6643 "allocation of pool of %u multicast addresses failed\n", 6644 port->mc_addr_nb + MCAST_POOL_INC); 6645 return -ENOMEM; 6646 } 6647 6648 port->mc_addr_pool = mc_pool; 6649 port->mc_addr_nb++; 6650 return 0; 6651 6652 } 6653 6654 static void 6655 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 6656 { 6657 if (mcast_addr_pool_extend(port) != 0) 6658 return; 6659 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 6660 } 6661 6662 static void 6663 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 6664 { 6665 port->mc_addr_nb--; 6666 if (addr_idx == port->mc_addr_nb) { 6667 /* No need to recompact the set of multicast addresses. */ 6668 if (port->mc_addr_nb == 0) { 6669 /* free the pool of multicast addresses. */ 6670 free(port->mc_addr_pool); 6671 port->mc_addr_pool = NULL; 6672 } 6673 return; 6674 } 6675 memmove(&port->mc_addr_pool[addr_idx], 6676 &port->mc_addr_pool[addr_idx + 1], 6677 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 6678 } 6679 6680 int 6681 mcast_addr_pool_destroy(portid_t port_id) 6682 { 6683 struct rte_port *port; 6684 6685 if (port_id_is_invalid(port_id, ENABLED_WARN) || 6686 port_id == (portid_t)RTE_PORT_ALL) 6687 return -EINVAL; 6688 port = &ports[port_id]; 6689 6690 if (port->mc_addr_nb != 0) { 6691 /* free the pool of multicast addresses. */ 6692 free(port->mc_addr_pool); 6693 port->mc_addr_pool = NULL; 6694 port->mc_addr_nb = 0; 6695 } 6696 return 0; 6697 } 6698 6699 static int 6700 eth_port_multicast_addr_list_set(portid_t port_id) 6701 { 6702 struct rte_port *port; 6703 int diag; 6704 6705 port = &ports[port_id]; 6706 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 6707 port->mc_addr_nb); 6708 if (diag < 0) 6709 fprintf(stderr, 6710 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 6711 port_id, port->mc_addr_nb, diag); 6712 6713 return diag; 6714 } 6715 6716 void 6717 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6718 { 6719 struct rte_port *port; 6720 uint32_t i; 6721 6722 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6723 return; 6724 6725 port = &ports[port_id]; 6726 6727 /* 6728 * Check that the added multicast MAC address is not already recorded 6729 * in the pool of multicast addresses. 6730 */ 6731 for (i = 0; i < port->mc_addr_nb; i++) { 6732 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6733 fprintf(stderr, 6734 "multicast address already filtered by port\n"); 6735 return; 6736 } 6737 } 6738 6739 mcast_addr_pool_append(port, mc_addr); 6740 if (eth_port_multicast_addr_list_set(port_id) < 0) 6741 /* Rollback on failure, remove the address from the pool */ 6742 mcast_addr_pool_remove(port, i); 6743 } 6744 6745 void 6746 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6747 { 6748 struct rte_port *port; 6749 uint32_t i; 6750 6751 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6752 return; 6753 6754 port = &ports[port_id]; 6755 6756 /* 6757 * Search the pool of multicast MAC addresses for the removed address. 6758 */ 6759 for (i = 0; i < port->mc_addr_nb; i++) { 6760 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6761 break; 6762 } 6763 if (i == port->mc_addr_nb) { 6764 fprintf(stderr, "multicast address not filtered by port %d\n", 6765 port_id); 6766 return; 6767 } 6768 6769 mcast_addr_pool_remove(port, i); 6770 if (eth_port_multicast_addr_list_set(port_id) < 0) 6771 /* Rollback on failure, add the address back into the pool */ 6772 mcast_addr_pool_append(port, mc_addr); 6773 } 6774 6775 void 6776 port_dcb_info_display(portid_t port_id) 6777 { 6778 struct rte_eth_dcb_info dcb_info; 6779 uint16_t i; 6780 int ret; 6781 static const char *border = "================"; 6782 6783 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6784 return; 6785 6786 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6787 if (ret) { 6788 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6789 port_id); 6790 return; 6791 } 6792 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6793 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6794 printf("\n TC : "); 6795 for (i = 0; i < dcb_info.nb_tcs; i++) 6796 printf("\t%4d", i); 6797 printf("\n Priority : "); 6798 for (i = 0; i < dcb_info.nb_tcs; i++) 6799 printf("\t%4d", dcb_info.prio_tc[i]); 6800 printf("\n BW percent :"); 6801 for (i = 0; i < dcb_info.nb_tcs; i++) 6802 printf("\t%4d%%", dcb_info.tc_bws[i]); 6803 printf("\n RXQ base : "); 6804 for (i = 0; i < dcb_info.nb_tcs; i++) 6805 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6806 printf("\n RXQ number :"); 6807 for (i = 0; i < dcb_info.nb_tcs; i++) 6808 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6809 printf("\n TXQ base : "); 6810 for (i = 0; i < dcb_info.nb_tcs; i++) 6811 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6812 printf("\n TXQ number :"); 6813 for (i = 0; i < dcb_info.nb_tcs; i++) 6814 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6815 printf("\n"); 6816 } 6817 6818 uint8_t * 6819 open_file(const char *file_path, uint32_t *size) 6820 { 6821 int fd = open(file_path, O_RDONLY); 6822 off_t pkg_size; 6823 uint8_t *buf = NULL; 6824 int ret = 0; 6825 struct stat st_buf; 6826 6827 if (size) 6828 *size = 0; 6829 6830 if (fd == -1) { 6831 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6832 return buf; 6833 } 6834 6835 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6836 close(fd); 6837 fprintf(stderr, "%s: File operations failed\n", __func__); 6838 return buf; 6839 } 6840 6841 pkg_size = st_buf.st_size; 6842 if (pkg_size < 0) { 6843 close(fd); 6844 fprintf(stderr, "%s: File operations failed\n", __func__); 6845 return buf; 6846 } 6847 6848 buf = (uint8_t *)malloc(pkg_size); 6849 if (!buf) { 6850 close(fd); 6851 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6852 return buf; 6853 } 6854 6855 ret = read(fd, buf, pkg_size); 6856 if (ret < 0) { 6857 close(fd); 6858 fprintf(stderr, "%s: File read operation failed\n", __func__); 6859 close_file(buf); 6860 return NULL; 6861 } 6862 6863 if (size) 6864 *size = pkg_size; 6865 6866 close(fd); 6867 6868 return buf; 6869 } 6870 6871 int 6872 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6873 { 6874 FILE *fh = fopen(file_path, "wb"); 6875 6876 if (fh == NULL) { 6877 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6878 return -1; 6879 } 6880 6881 if (fwrite(buf, 1, size, fh) != size) { 6882 fclose(fh); 6883 fprintf(stderr, "%s: File write operation failed\n", __func__); 6884 return -1; 6885 } 6886 6887 fclose(fh); 6888 6889 return 0; 6890 } 6891 6892 int 6893 close_file(uint8_t *buf) 6894 { 6895 if (buf) { 6896 free((void *)buf); 6897 return 0; 6898 } 6899 6900 return -1; 6901 } 6902 6903 void 6904 show_macs(portid_t port_id) 6905 { 6906 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6907 struct rte_eth_dev_info dev_info; 6908 int32_t i, rc, num_macs = 0; 6909 6910 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6911 return; 6912 6913 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6914 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6915 if (rc < 0) 6916 return; 6917 6918 for (i = 0; i < rc; i++) { 6919 6920 /* skip zero address */ 6921 if (rte_is_zero_ether_addr(&addr[i])) 6922 continue; 6923 6924 num_macs++; 6925 } 6926 6927 printf("Number of MAC address added: %d\n", num_macs); 6928 6929 for (i = 0; i < rc; i++) { 6930 6931 /* skip zero address */ 6932 if (rte_is_zero_ether_addr(&addr[i])) 6933 continue; 6934 6935 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6936 printf(" %s\n", buf); 6937 } 6938 } 6939 6940 void 6941 show_mcast_macs(portid_t port_id) 6942 { 6943 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6944 struct rte_ether_addr *addr; 6945 struct rte_port *port; 6946 uint32_t i; 6947 6948 port = &ports[port_id]; 6949 6950 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6951 6952 for (i = 0; i < port->mc_addr_nb; i++) { 6953 addr = &port->mc_addr_pool[i]; 6954 6955 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6956 printf(" %s\n", buf); 6957 } 6958 } 6959