1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <ctype.h> 7 #include <stdarg.h> 8 #include <errno.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <stdint.h> 13 #include <inttypes.h> 14 15 #include <sys/queue.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <fcntl.h> 19 #include <unistd.h> 20 21 #include <rte_common.h> 22 #include <rte_byteorder.h> 23 #include <rte_debug.h> 24 #include <rte_log.h> 25 #include <rte_memory.h> 26 #include <rte_memcpy.h> 27 #include <rte_memzone.h> 28 #include <rte_launch.h> 29 #include <rte_bus.h> 30 #include <rte_eal.h> 31 #include <rte_per_lcore.h> 32 #include <rte_lcore.h> 33 #include <rte_branch_prediction.h> 34 #include <rte_mempool.h> 35 #include <rte_mbuf.h> 36 #include <rte_interrupts.h> 37 #include <rte_ether.h> 38 #include <rte_ethdev.h> 39 #include <rte_string_fns.h> 40 #include <rte_cycles.h> 41 #include <rte_flow.h> 42 #include <rte_mtr.h> 43 #include <rte_errno.h> 44 #ifdef RTE_NET_IXGBE 45 #include <rte_pmd_ixgbe.h> 46 #endif 47 #ifdef RTE_NET_I40E 48 #include <rte_pmd_i40e.h> 49 #endif 50 #ifdef RTE_NET_BNXT 51 #include <rte_pmd_bnxt.h> 52 #endif 53 #ifdef RTE_LIB_GRO 54 #include <rte_gro.h> 55 #endif 56 #include <rte_hexdump.h> 57 58 #include "testpmd.h" 59 #include "cmdline_mtr.h" 60 61 #define ETHDEV_FWVERS_LEN 32 62 63 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 65 #else 66 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 67 #endif 68 69 #define NS_PER_SEC 1E9 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 /* Group types */ 91 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 92 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 93 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 94 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 95 { "none", 0 }, 96 { "ip", RTE_ETH_RSS_IP }, 97 { "udp", RTE_ETH_RSS_UDP }, 98 { "tcp", RTE_ETH_RSS_TCP }, 99 { "sctp", RTE_ETH_RSS_SCTP }, 100 { "tunnel", RTE_ETH_RSS_TUNNEL }, 101 { "vlan", RTE_ETH_RSS_VLAN }, 102 103 /* Individual type */ 104 { "ipv4", RTE_ETH_RSS_IPV4 }, 105 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 106 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 107 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 108 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 109 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 110 { "ipv6", RTE_ETH_RSS_IPV6 }, 111 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 112 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 113 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 114 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 115 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 116 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 117 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 118 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 119 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 120 { "port", RTE_ETH_RSS_PORT }, 121 { "vxlan", RTE_ETH_RSS_VXLAN }, 122 { "geneve", RTE_ETH_RSS_GENEVE }, 123 { "nvgre", RTE_ETH_RSS_NVGRE }, 124 { "gtpu", RTE_ETH_RSS_GTPU }, 125 { "eth", RTE_ETH_RSS_ETH }, 126 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 127 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 128 { "esp", RTE_ETH_RSS_ESP }, 129 { "ah", RTE_ETH_RSS_AH }, 130 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 131 { "pfcp", RTE_ETH_RSS_PFCP }, 132 { "pppoe", RTE_ETH_RSS_PPPOE }, 133 { "ecpri", RTE_ETH_RSS_ECPRI }, 134 { "mpls", RTE_ETH_RSS_MPLS }, 135 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 136 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 137 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 138 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 139 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 140 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 141 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 142 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 143 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 144 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 145 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 146 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 147 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 148 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 149 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 150 { NULL, 0}, 151 }; 152 153 static const struct { 154 enum rte_eth_fec_mode mode; 155 const char *name; 156 } fec_mode_name[] = { 157 { 158 .mode = RTE_ETH_FEC_NOFEC, 159 .name = "off", 160 }, 161 { 162 .mode = RTE_ETH_FEC_AUTO, 163 .name = "auto", 164 }, 165 { 166 .mode = RTE_ETH_FEC_BASER, 167 .name = "baser", 168 }, 169 { 170 .mode = RTE_ETH_FEC_RS, 171 .name = "rs", 172 }, 173 { 174 .mode = RTE_ETH_FEC_LLRS, 175 .name = "llrs", 176 }, 177 }; 178 179 static const struct { 180 char str[32]; 181 uint16_t ftype; 182 } flowtype_str_table[] = { 183 {"raw", RTE_ETH_FLOW_RAW}, 184 {"ipv4", RTE_ETH_FLOW_IPV4}, 185 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 186 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 187 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 188 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 189 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 190 {"ipv6", RTE_ETH_FLOW_IPV6}, 191 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 192 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 193 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 194 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 195 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 196 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 197 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 198 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 199 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 200 {"port", RTE_ETH_FLOW_PORT}, 201 {"vxlan", RTE_ETH_FLOW_VXLAN}, 202 {"geneve", RTE_ETH_FLOW_GENEVE}, 203 {"nvgre", RTE_ETH_FLOW_NVGRE}, 204 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 205 {"gtpu", RTE_ETH_FLOW_GTPU}, 206 }; 207 208 static void 209 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 210 { 211 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 212 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 213 printf("%s%s", name, buf); 214 } 215 216 static void 217 nic_xstats_display_periodic(portid_t port_id) 218 { 219 struct xstat_display_info *xstats_info; 220 uint64_t *prev_values, *curr_values; 221 uint64_t diff_value, value_rate; 222 struct timespec cur_time; 223 uint64_t *ids_supp; 224 size_t ids_supp_sz; 225 uint64_t diff_ns; 226 unsigned int i; 227 int rc; 228 229 xstats_info = &ports[port_id].xstats_info; 230 231 ids_supp_sz = xstats_info->ids_supp_sz; 232 if (ids_supp_sz == 0) 233 return; 234 235 printf("\n"); 236 237 ids_supp = xstats_info->ids_supp; 238 prev_values = xstats_info->prev_values; 239 curr_values = xstats_info->curr_values; 240 241 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 242 ids_supp_sz); 243 if (rc != (int)ids_supp_sz) { 244 fprintf(stderr, 245 "Failed to get values of %zu xstats for port %u - return code %d\n", 246 ids_supp_sz, port_id, rc); 247 return; 248 } 249 250 diff_ns = 0; 251 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 252 uint64_t ns; 253 254 ns = cur_time.tv_sec * NS_PER_SEC; 255 ns += cur_time.tv_nsec; 256 257 if (xstats_info->prev_ns != 0) 258 diff_ns = ns - xstats_info->prev_ns; 259 xstats_info->prev_ns = ns; 260 } 261 262 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 263 for (i = 0; i < ids_supp_sz; i++) { 264 diff_value = (curr_values[i] > prev_values[i]) ? 265 (curr_values[i] - prev_values[i]) : 0; 266 prev_values[i] = curr_values[i]; 267 value_rate = diff_ns > 0 ? 268 (double)diff_value / diff_ns * NS_PER_SEC : 0; 269 270 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 271 xstats_display[i].name, curr_values[i], value_rate); 272 } 273 } 274 275 void 276 nic_stats_display(portid_t port_id) 277 { 278 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 279 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 280 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 281 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 282 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 283 struct timespec cur_time; 284 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 285 diff_ns; 286 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 287 struct rte_eth_stats stats; 288 static const char *nic_stats_border = "########################"; 289 int ret; 290 291 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 292 print_valid_ports(); 293 return; 294 } 295 ret = rte_eth_stats_get(port_id, &stats); 296 if (ret != 0) { 297 fprintf(stderr, 298 "%s: Error: failed to get stats (port %u): %d", 299 __func__, port_id, ret); 300 return; 301 } 302 printf("\n %s NIC statistics for port %-2d %s\n", 303 nic_stats_border, port_id, nic_stats_border); 304 305 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 306 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 307 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 308 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 309 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 310 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 311 312 diff_ns = 0; 313 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 314 uint64_t ns; 315 316 ns = cur_time.tv_sec * NS_PER_SEC; 317 ns += cur_time.tv_nsec; 318 319 if (prev_ns[port_id] != 0) 320 diff_ns = ns - prev_ns[port_id]; 321 prev_ns[port_id] = ns; 322 } 323 324 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 325 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 326 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 327 (stats.opackets - prev_pkts_tx[port_id]) : 0; 328 prev_pkts_rx[port_id] = stats.ipackets; 329 prev_pkts_tx[port_id] = stats.opackets; 330 mpps_rx = diff_ns > 0 ? 331 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 332 mpps_tx = diff_ns > 0 ? 333 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 334 335 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 336 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 337 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 338 (stats.obytes - prev_bytes_tx[port_id]) : 0; 339 prev_bytes_rx[port_id] = stats.ibytes; 340 prev_bytes_tx[port_id] = stats.obytes; 341 mbps_rx = diff_ns > 0 ? 342 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 343 mbps_tx = diff_ns > 0 ? 344 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 345 346 printf("\n Throughput (since last show)\n"); 347 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 348 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 349 mpps_tx, mbps_tx * 8); 350 351 if (xstats_display_num > 0) 352 nic_xstats_display_periodic(port_id); 353 354 printf(" %s############################%s\n", 355 nic_stats_border, nic_stats_border); 356 } 357 358 void 359 nic_stats_clear(portid_t port_id) 360 { 361 int ret; 362 363 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 364 print_valid_ports(); 365 return; 366 } 367 368 ret = rte_eth_stats_reset(port_id); 369 if (ret != 0) { 370 fprintf(stderr, 371 "%s: Error: failed to reset stats (port %u): %s", 372 __func__, port_id, strerror(-ret)); 373 return; 374 } 375 376 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 377 if (ret != 0) { 378 if (ret < 0) 379 ret = -ret; 380 fprintf(stderr, 381 "%s: Error: failed to get stats (port %u): %s", 382 __func__, port_id, strerror(ret)); 383 return; 384 } 385 printf("\n NIC statistics for port %d cleared\n", port_id); 386 } 387 388 void 389 nic_xstats_display(portid_t port_id) 390 { 391 struct rte_eth_xstat *xstats; 392 int cnt_xstats, idx_xstat; 393 struct rte_eth_xstat_name *xstats_names; 394 395 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 396 print_valid_ports(); 397 return; 398 } 399 printf("###### NIC extended statistics for port %-2d\n", port_id); 400 if (!rte_eth_dev_is_valid_port(port_id)) { 401 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 402 return; 403 } 404 405 /* Get count */ 406 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 407 if (cnt_xstats < 0) { 408 fprintf(stderr, "Error: Cannot get count of xstats\n"); 409 return; 410 } 411 412 /* Get id-name lookup table */ 413 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 414 if (xstats_names == NULL) { 415 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 416 return; 417 } 418 if (cnt_xstats != rte_eth_xstats_get_names( 419 port_id, xstats_names, cnt_xstats)) { 420 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 421 free(xstats_names); 422 return; 423 } 424 425 /* Get stats themselves */ 426 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 427 if (xstats == NULL) { 428 fprintf(stderr, "Cannot allocate memory for xstats\n"); 429 free(xstats_names); 430 return; 431 } 432 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 433 fprintf(stderr, "Error: Unable to get xstats\n"); 434 free(xstats_names); 435 free(xstats); 436 return; 437 } 438 439 /* Display xstats */ 440 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 441 if (xstats_hide_zero && !xstats[idx_xstat].value) 442 continue; 443 printf("%s: %"PRIu64"\n", 444 xstats_names[idx_xstat].name, 445 xstats[idx_xstat].value); 446 } 447 free(xstats_names); 448 free(xstats); 449 } 450 451 void 452 nic_xstats_clear(portid_t port_id) 453 { 454 int ret; 455 456 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 457 print_valid_ports(); 458 return; 459 } 460 461 ret = rte_eth_xstats_reset(port_id); 462 if (ret != 0) { 463 fprintf(stderr, 464 "%s: Error: failed to reset xstats (port %u): %s\n", 465 __func__, port_id, strerror(-ret)); 466 return; 467 } 468 469 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 470 if (ret != 0) { 471 if (ret < 0) 472 ret = -ret; 473 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 474 __func__, port_id, strerror(ret)); 475 return; 476 } 477 } 478 479 static const char * 480 get_queue_state_name(uint8_t queue_state) 481 { 482 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 483 return "stopped"; 484 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 485 return "started"; 486 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 487 return "hairpin"; 488 else 489 return "unknown"; 490 } 491 492 void 493 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 494 { 495 struct rte_eth_burst_mode mode; 496 struct rte_eth_rxq_info qinfo; 497 int32_t rc; 498 static const char *info_border = "*********************"; 499 500 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 501 if (rc != 0) { 502 fprintf(stderr, 503 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 504 port_id, queue_id, strerror(-rc), rc); 505 return; 506 } 507 508 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 509 info_border, port_id, queue_id, info_border); 510 511 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 512 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 513 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 514 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 515 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 516 printf("\nRX drop packets: %s", 517 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 518 printf("\nRX deferred start: %s", 519 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 520 printf("\nRX scattered packets: %s", 521 (qinfo.scattered_rx != 0) ? "on" : "off"); 522 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 523 if (qinfo.rx_buf_size != 0) 524 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 525 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 526 527 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 528 printf("\nBurst mode: %s%s", 529 mode.info, 530 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 531 " (per queue)" : ""); 532 533 printf("\n"); 534 } 535 536 void 537 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 538 { 539 struct rte_eth_burst_mode mode; 540 struct rte_eth_txq_info qinfo; 541 int32_t rc; 542 static const char *info_border = "*********************"; 543 544 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 545 if (rc != 0) { 546 fprintf(stderr, 547 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 548 port_id, queue_id, strerror(-rc), rc); 549 return; 550 } 551 552 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 553 info_border, port_id, queue_id, info_border); 554 555 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 556 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 557 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 558 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 559 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 560 printf("\nTX deferred start: %s", 561 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 562 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 563 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 564 565 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 566 printf("\nBurst mode: %s%s", 567 mode.info, 568 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 569 " (per queue)" : ""); 570 571 printf("\n"); 572 } 573 574 static int bus_match_all(const struct rte_bus *bus, const void *data) 575 { 576 RTE_SET_USED(bus); 577 RTE_SET_USED(data); 578 return 0; 579 } 580 581 static void 582 device_infos_display_speeds(uint32_t speed_capa) 583 { 584 printf("\n\tDevice speed capability:"); 585 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 586 printf(" Autonegotiate (all speeds)"); 587 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 588 printf(" Disable autonegotiate (fixed speed) "); 589 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 590 printf(" 10 Mbps half-duplex "); 591 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 592 printf(" 10 Mbps full-duplex "); 593 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 594 printf(" 100 Mbps half-duplex "); 595 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 596 printf(" 100 Mbps full-duplex "); 597 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 598 printf(" 1 Gbps "); 599 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 600 printf(" 2.5 Gbps "); 601 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 602 printf(" 5 Gbps "); 603 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 604 printf(" 10 Gbps "); 605 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 606 printf(" 20 Gbps "); 607 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 608 printf(" 25 Gbps "); 609 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 610 printf(" 40 Gbps "); 611 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 612 printf(" 50 Gbps "); 613 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 614 printf(" 56 Gbps "); 615 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 616 printf(" 100 Gbps "); 617 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 618 printf(" 200 Gbps "); 619 if (speed_capa & RTE_ETH_LINK_SPEED_400G) 620 printf(" 400 Gbps "); 621 } 622 623 void 624 device_infos_display(const char *identifier) 625 { 626 static const char *info_border = "*********************"; 627 struct rte_bus *start = NULL, *next; 628 struct rte_dev_iterator dev_iter; 629 char name[RTE_ETH_NAME_MAX_LEN]; 630 struct rte_ether_addr mac_addr; 631 struct rte_device *dev; 632 struct rte_devargs da; 633 portid_t port_id; 634 struct rte_eth_dev_info dev_info; 635 char devstr[128]; 636 637 memset(&da, 0, sizeof(da)); 638 if (!identifier) 639 goto skip_parse; 640 641 if (rte_devargs_parsef(&da, "%s", identifier)) { 642 fprintf(stderr, "cannot parse identifier\n"); 643 return; 644 } 645 646 skip_parse: 647 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 648 649 start = next; 650 if (identifier && da.bus != next) 651 continue; 652 653 snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next)); 654 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 655 656 if (rte_dev_driver(dev) == NULL) 657 continue; 658 /* Check for matching device if identifier is present */ 659 if (identifier && 660 strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev)))) 661 continue; 662 printf("\n%s Infos for device %s %s\n", 663 info_border, rte_dev_name(dev), info_border); 664 printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev))); 665 printf("\nBus information: %s", 666 rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : ""); 667 printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev))); 668 printf("\nDevargs: %s", 669 rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : ""); 670 printf("\nConnect to socket: %d", rte_dev_numa_node(dev)); 671 printf("\n"); 672 673 /* List ports with matching device name */ 674 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 675 printf("\n\tPort id: %-2d", port_id); 676 if (eth_macaddr_get_print_err(port_id, 677 &mac_addr) == 0) 678 print_ethaddr("\n\tMAC address: ", 679 &mac_addr); 680 rte_eth_dev_get_name_by_port(port_id, name); 681 printf("\n\tDevice name: %s", name); 682 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 683 device_infos_display_speeds(dev_info.speed_capa); 684 printf("\n"); 685 } 686 } 687 }; 688 rte_devargs_reset(&da); 689 } 690 691 static void 692 print_dev_capabilities(uint64_t capabilities) 693 { 694 uint64_t single_capa; 695 int begin; 696 int end; 697 int bit; 698 699 if (capabilities == 0) 700 return; 701 702 begin = __builtin_ctzll(capabilities); 703 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 704 705 single_capa = 1ULL << begin; 706 for (bit = begin; bit < end; bit++) { 707 if (capabilities & single_capa) 708 printf(" %s", 709 rte_eth_dev_capability_name(single_capa)); 710 single_capa <<= 1; 711 } 712 } 713 714 uint64_t 715 str_to_rsstypes(const char *str) 716 { 717 uint16_t i; 718 719 for (i = 0; rss_type_table[i].str != NULL; i++) { 720 if (strcmp(rss_type_table[i].str, str) == 0) 721 return rss_type_table[i].rss_type; 722 } 723 724 return 0; 725 } 726 727 const char * 728 rsstypes_to_str(uint64_t rss_type) 729 { 730 uint16_t i; 731 732 for (i = 0; rss_type_table[i].str != NULL; i++) { 733 if (rss_type_table[i].rss_type == rss_type) 734 return rss_type_table[i].str; 735 } 736 737 return NULL; 738 } 739 740 static void 741 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line) 742 { 743 uint16_t user_defined_str_len; 744 uint16_t total_len = 0; 745 uint16_t str_len = 0; 746 uint64_t rss_offload; 747 uint16_t i; 748 749 for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) { 750 rss_offload = RTE_BIT64(i); 751 if ((offload_types & rss_offload) != 0) { 752 const char *p = rsstypes_to_str(rss_offload); 753 754 user_defined_str_len = 755 strlen("user-defined-") + (i / 10 + 1); 756 str_len = p ? strlen(p) : user_defined_str_len; 757 str_len += 2; /* add two spaces */ 758 if (total_len + str_len >= char_num_per_line) { 759 total_len = 0; 760 printf("\n"); 761 } 762 763 if (p) 764 printf(" %s", p); 765 else 766 printf(" user-defined-%u", i); 767 total_len += str_len; 768 } 769 } 770 printf("\n"); 771 } 772 773 void 774 port_infos_display(portid_t port_id) 775 { 776 struct rte_port *port; 777 struct rte_ether_addr mac_addr; 778 struct rte_eth_link link; 779 struct rte_eth_dev_info dev_info; 780 int vlan_offload; 781 struct rte_mempool * mp; 782 static const char *info_border = "*********************"; 783 uint16_t mtu; 784 char name[RTE_ETH_NAME_MAX_LEN]; 785 int ret; 786 char fw_version[ETHDEV_FWVERS_LEN]; 787 788 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 789 print_valid_ports(); 790 return; 791 } 792 port = &ports[port_id]; 793 ret = eth_link_get_nowait_print_err(port_id, &link); 794 if (ret < 0) 795 return; 796 797 ret = eth_dev_info_get_print_err(port_id, &dev_info); 798 if (ret != 0) 799 return; 800 801 printf("\n%s Infos for port %-2d %s\n", 802 info_border, port_id, info_border); 803 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 804 print_ethaddr("MAC address: ", &mac_addr); 805 rte_eth_dev_get_name_by_port(port_id, name); 806 printf("\nDevice name: %s", name); 807 printf("\nDriver name: %s", dev_info.driver_name); 808 809 if (rte_eth_dev_fw_version_get(port_id, fw_version, 810 ETHDEV_FWVERS_LEN) == 0) 811 printf("\nFirmware-version: %s", fw_version); 812 else 813 printf("\nFirmware-version: %s", "not available"); 814 815 if (rte_dev_devargs(dev_info.device) && rte_dev_devargs(dev_info.device)->args) 816 printf("\nDevargs: %s", rte_dev_devargs(dev_info.device)->args); 817 printf("\nConnect to socket: %u", port->socket_id); 818 819 if (port_numa[port_id] != NUMA_NO_CONFIG) { 820 mp = mbuf_pool_find(port_numa[port_id], 0); 821 if (mp) 822 printf("\nmemory allocation on the socket: %d", 823 port_numa[port_id]); 824 } else 825 printf("\nmemory allocation on the socket: %u",port->socket_id); 826 827 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 828 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 829 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 830 ("full-duplex") : ("half-duplex")); 831 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 832 ("On") : ("Off")); 833 834 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 835 printf("MTU: %u\n", mtu); 836 837 printf("Promiscuous mode: %s\n", 838 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 839 printf("Allmulticast mode: %s\n", 840 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 841 printf("Maximum number of MAC addresses: %u\n", 842 (unsigned int)(port->dev_info.max_mac_addrs)); 843 printf("Maximum number of MAC addresses of hash filtering: %u\n", 844 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 845 846 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 847 if (vlan_offload >= 0){ 848 printf("VLAN offload: \n"); 849 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 850 printf(" strip on, "); 851 else 852 printf(" strip off, "); 853 854 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 855 printf("filter on, "); 856 else 857 printf("filter off, "); 858 859 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 860 printf("extend on, "); 861 else 862 printf("extend off, "); 863 864 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 865 printf("qinq strip on\n"); 866 else 867 printf("qinq strip off\n"); 868 } 869 870 if (dev_info.hash_key_size > 0) 871 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 872 if (dev_info.reta_size > 0) 873 printf("Redirection table size: %u\n", dev_info.reta_size); 874 if (!dev_info.flow_type_rss_offloads) 875 printf("No RSS offload flow type is supported.\n"); 876 else { 877 printf("Supported RSS offload flow types:\n"); 878 rss_offload_types_display(dev_info.flow_type_rss_offloads, 879 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 880 } 881 882 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 883 printf("Maximum configurable length of RX packet: %u\n", 884 dev_info.max_rx_pktlen); 885 printf("Maximum configurable size of LRO aggregated packet: %u\n", 886 dev_info.max_lro_pkt_size); 887 if (dev_info.max_vfs) 888 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 889 if (dev_info.max_vmdq_pools) 890 printf("Maximum number of VMDq pools: %u\n", 891 dev_info.max_vmdq_pools); 892 893 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 894 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 895 printf("Max possible number of RXDs per queue: %hu\n", 896 dev_info.rx_desc_lim.nb_max); 897 printf("Min possible number of RXDs per queue: %hu\n", 898 dev_info.rx_desc_lim.nb_min); 899 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 900 901 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 902 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 903 printf("Max possible number of TXDs per queue: %hu\n", 904 dev_info.tx_desc_lim.nb_max); 905 printf("Min possible number of TXDs per queue: %hu\n", 906 dev_info.tx_desc_lim.nb_min); 907 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 908 printf("Max segment number per packet: %hu\n", 909 dev_info.tx_desc_lim.nb_seg_max); 910 printf("Max segment number per MTU/TSO: %hu\n", 911 dev_info.tx_desc_lim.nb_mtu_seg_max); 912 913 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 914 print_dev_capabilities(dev_info.dev_capa); 915 printf(" )\n"); 916 /* Show switch info only if valid switch domain and port id is set */ 917 if (dev_info.switch_info.domain_id != 918 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 919 if (dev_info.switch_info.name) 920 printf("Switch name: %s\n", dev_info.switch_info.name); 921 922 printf("Switch domain Id: %u\n", 923 dev_info.switch_info.domain_id); 924 printf("Switch Port Id: %u\n", 925 dev_info.switch_info.port_id); 926 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 927 printf("Switch Rx domain: %u\n", 928 dev_info.switch_info.rx_domain); 929 } 930 printf("Device error handling mode: "); 931 switch (dev_info.err_handle_mode) { 932 case RTE_ETH_ERROR_HANDLE_MODE_NONE: 933 printf("none\n"); 934 break; 935 case RTE_ETH_ERROR_HANDLE_MODE_PASSIVE: 936 printf("passive\n"); 937 break; 938 case RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE: 939 printf("proactive\n"); 940 break; 941 default: 942 printf("unknown\n"); 943 break; 944 } 945 printf("Device private info:\n"); 946 ret = rte_eth_dev_priv_dump(port_id, stdout); 947 if (ret == -ENOTSUP) 948 printf(" none\n"); 949 else if (ret < 0) 950 fprintf(stderr, " Failed to dump private info with error (%d): %s\n", 951 ret, strerror(-ret)); 952 } 953 954 void 955 port_summary_header_display(void) 956 { 957 uint16_t port_number; 958 959 port_number = rte_eth_dev_count_avail(); 960 printf("Number of available ports: %i\n", port_number); 961 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 962 "Driver", "Status", "Link"); 963 } 964 965 void 966 port_summary_display(portid_t port_id) 967 { 968 struct rte_ether_addr mac_addr; 969 struct rte_eth_link link; 970 struct rte_eth_dev_info dev_info; 971 char name[RTE_ETH_NAME_MAX_LEN]; 972 int ret; 973 974 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 975 print_valid_ports(); 976 return; 977 } 978 979 ret = eth_link_get_nowait_print_err(port_id, &link); 980 if (ret < 0) 981 return; 982 983 ret = eth_dev_info_get_print_err(port_id, &dev_info); 984 if (ret != 0) 985 return; 986 987 rte_eth_dev_get_name_by_port(port_id, name); 988 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 989 if (ret != 0) 990 return; 991 992 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 993 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 994 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 995 rte_eth_link_speed_to_str(link.link_speed)); 996 } 997 998 void 999 port_eeprom_display(portid_t port_id) 1000 { 1001 struct rte_dev_eeprom_info einfo; 1002 int ret; 1003 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1004 print_valid_ports(); 1005 return; 1006 } 1007 1008 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 1009 if (len_eeprom < 0) { 1010 switch (len_eeprom) { 1011 case -ENODEV: 1012 fprintf(stderr, "port index %d invalid\n", port_id); 1013 break; 1014 case -ENOTSUP: 1015 fprintf(stderr, "operation not supported by device\n"); 1016 break; 1017 case -EIO: 1018 fprintf(stderr, "device is removed\n"); 1019 break; 1020 default: 1021 fprintf(stderr, "Unable to get EEPROM: %d\n", 1022 len_eeprom); 1023 break; 1024 } 1025 return; 1026 } 1027 1028 einfo.offset = 0; 1029 einfo.length = len_eeprom; 1030 einfo.data = calloc(1, len_eeprom); 1031 if (!einfo.data) { 1032 fprintf(stderr, 1033 "Allocation of port %u eeprom data failed\n", 1034 port_id); 1035 return; 1036 } 1037 1038 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1039 if (ret != 0) { 1040 switch (ret) { 1041 case -ENODEV: 1042 fprintf(stderr, "port index %d invalid\n", port_id); 1043 break; 1044 case -ENOTSUP: 1045 fprintf(stderr, "operation not supported by device\n"); 1046 break; 1047 case -EIO: 1048 fprintf(stderr, "device is removed\n"); 1049 break; 1050 default: 1051 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1052 break; 1053 } 1054 free(einfo.data); 1055 return; 1056 } 1057 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1058 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1059 free(einfo.data); 1060 } 1061 1062 void 1063 port_module_eeprom_display(portid_t port_id) 1064 { 1065 struct rte_eth_dev_module_info minfo; 1066 struct rte_dev_eeprom_info einfo; 1067 int ret; 1068 1069 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1070 print_valid_ports(); 1071 return; 1072 } 1073 1074 1075 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1076 if (ret != 0) { 1077 switch (ret) { 1078 case -ENODEV: 1079 fprintf(stderr, "port index %d invalid\n", port_id); 1080 break; 1081 case -ENOTSUP: 1082 fprintf(stderr, "operation not supported by device\n"); 1083 break; 1084 case -EIO: 1085 fprintf(stderr, "device is removed\n"); 1086 break; 1087 default: 1088 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1089 ret); 1090 break; 1091 } 1092 return; 1093 } 1094 1095 einfo.offset = 0; 1096 einfo.length = minfo.eeprom_len; 1097 einfo.data = calloc(1, minfo.eeprom_len); 1098 if (!einfo.data) { 1099 fprintf(stderr, 1100 "Allocation of port %u eeprom data failed\n", 1101 port_id); 1102 return; 1103 } 1104 1105 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1106 if (ret != 0) { 1107 switch (ret) { 1108 case -ENODEV: 1109 fprintf(stderr, "port index %d invalid\n", port_id); 1110 break; 1111 case -ENOTSUP: 1112 fprintf(stderr, "operation not supported by device\n"); 1113 break; 1114 case -EIO: 1115 fprintf(stderr, "device is removed\n"); 1116 break; 1117 default: 1118 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1119 ret); 1120 break; 1121 } 1122 free(einfo.data); 1123 return; 1124 } 1125 1126 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1127 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1128 free(einfo.data); 1129 } 1130 1131 int 1132 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1133 { 1134 uint16_t pid; 1135 1136 if (port_id == (portid_t)RTE_PORT_ALL) 1137 return 0; 1138 1139 RTE_ETH_FOREACH_DEV(pid) 1140 if (port_id == pid) 1141 return 0; 1142 1143 if (warning == ENABLED_WARN) 1144 fprintf(stderr, "Invalid port %d\n", port_id); 1145 1146 return 1; 1147 } 1148 1149 void print_valid_ports(void) 1150 { 1151 portid_t pid; 1152 1153 printf("The valid ports array is ["); 1154 RTE_ETH_FOREACH_DEV(pid) { 1155 printf(" %d", pid); 1156 } 1157 printf(" ]\n"); 1158 } 1159 1160 static int 1161 vlan_id_is_invalid(uint16_t vlan_id) 1162 { 1163 if (vlan_id < 4096) 1164 return 0; 1165 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1166 return 1; 1167 } 1168 1169 static uint32_t 1170 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1171 { 1172 uint32_t overhead_len; 1173 1174 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1175 overhead_len = max_rx_pktlen - max_mtu; 1176 else 1177 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1178 1179 return overhead_len; 1180 } 1181 1182 static int 1183 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1184 { 1185 struct rte_eth_dev_info dev_info; 1186 uint32_t overhead_len; 1187 uint32_t frame_size; 1188 int ret; 1189 1190 ret = rte_eth_dev_info_get(port_id, &dev_info); 1191 if (ret != 0) 1192 return ret; 1193 1194 if (mtu < dev_info.min_mtu) { 1195 fprintf(stderr, 1196 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1197 mtu, dev_info.min_mtu, port_id); 1198 return -EINVAL; 1199 } 1200 if (mtu > dev_info.max_mtu) { 1201 fprintf(stderr, 1202 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1203 mtu, dev_info.max_mtu, port_id); 1204 return -EINVAL; 1205 } 1206 1207 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1208 dev_info.max_mtu); 1209 frame_size = mtu + overhead_len; 1210 if (frame_size > dev_info.max_rx_pktlen) { 1211 fprintf(stderr, 1212 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1213 frame_size, dev_info.max_rx_pktlen, port_id); 1214 return -EINVAL; 1215 } 1216 1217 return 0; 1218 } 1219 1220 void 1221 port_mtu_set(portid_t port_id, uint16_t mtu) 1222 { 1223 struct rte_port *port = &ports[port_id]; 1224 int diag; 1225 1226 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1227 return; 1228 1229 diag = eth_dev_validate_mtu(port_id, mtu); 1230 if (diag != 0) 1231 return; 1232 1233 if (port->need_reconfig == 0) { 1234 diag = rte_eth_dev_set_mtu(port_id, mtu); 1235 if (diag != 0) { 1236 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1237 return; 1238 } 1239 } 1240 1241 port->dev_conf.rxmode.mtu = mtu; 1242 } 1243 1244 /* Generic flow management functions. */ 1245 1246 static struct port_flow_tunnel * 1247 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1248 { 1249 struct port_flow_tunnel *flow_tunnel; 1250 1251 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1252 if (flow_tunnel->id == port_tunnel_id) 1253 goto out; 1254 } 1255 flow_tunnel = NULL; 1256 1257 out: 1258 return flow_tunnel; 1259 } 1260 1261 const char * 1262 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1263 { 1264 const char *type; 1265 switch (tunnel->type) { 1266 default: 1267 type = "unknown"; 1268 break; 1269 case RTE_FLOW_ITEM_TYPE_VXLAN: 1270 type = "vxlan"; 1271 break; 1272 case RTE_FLOW_ITEM_TYPE_GRE: 1273 type = "gre"; 1274 break; 1275 case RTE_FLOW_ITEM_TYPE_NVGRE: 1276 type = "nvgre"; 1277 break; 1278 case RTE_FLOW_ITEM_TYPE_GENEVE: 1279 type = "geneve"; 1280 break; 1281 } 1282 1283 return type; 1284 } 1285 1286 struct port_flow_tunnel * 1287 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1288 { 1289 struct rte_port *port = &ports[port_id]; 1290 struct port_flow_tunnel *flow_tunnel; 1291 1292 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1293 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1294 goto out; 1295 } 1296 flow_tunnel = NULL; 1297 1298 out: 1299 return flow_tunnel; 1300 } 1301 1302 void port_flow_tunnel_list(portid_t port_id) 1303 { 1304 struct rte_port *port = &ports[port_id]; 1305 struct port_flow_tunnel *flt; 1306 1307 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1308 printf("port %u tunnel #%u type=%s", 1309 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1310 if (flt->tunnel.tun_id) 1311 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1312 printf("\n"); 1313 } 1314 } 1315 1316 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1317 { 1318 struct rte_port *port = &ports[port_id]; 1319 struct port_flow_tunnel *flt; 1320 1321 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1322 if (flt->id == tunnel_id) 1323 break; 1324 } 1325 if (flt) { 1326 LIST_REMOVE(flt, chain); 1327 free(flt); 1328 printf("port %u: flow tunnel #%u destroyed\n", 1329 port_id, tunnel_id); 1330 } 1331 } 1332 1333 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1334 { 1335 struct rte_port *port = &ports[port_id]; 1336 enum rte_flow_item_type type; 1337 struct port_flow_tunnel *flt; 1338 1339 if (!strcmp(ops->type, "vxlan")) 1340 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1341 else if (!strcmp(ops->type, "gre")) 1342 type = RTE_FLOW_ITEM_TYPE_GRE; 1343 else if (!strcmp(ops->type, "nvgre")) 1344 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1345 else if (!strcmp(ops->type, "geneve")) 1346 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1347 else { 1348 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1349 ops->type); 1350 return; 1351 } 1352 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1353 if (flt->tunnel.type == type) 1354 break; 1355 } 1356 if (!flt) { 1357 flt = calloc(1, sizeof(*flt)); 1358 if (!flt) { 1359 fprintf(stderr, "failed to allocate port flt object\n"); 1360 return; 1361 } 1362 flt->tunnel.type = type; 1363 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1364 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1365 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1366 } 1367 printf("port %d: flow tunnel #%u type %s\n", 1368 port_id, flt->id, ops->type); 1369 } 1370 1371 /** Generate a port_flow entry from attributes/pattern/actions. */ 1372 static struct port_flow * 1373 port_flow_new(const struct rte_flow_attr *attr, 1374 const struct rte_flow_item *pattern, 1375 const struct rte_flow_action *actions, 1376 struct rte_flow_error *error) 1377 { 1378 const struct rte_flow_conv_rule rule = { 1379 .attr_ro = attr, 1380 .pattern_ro = pattern, 1381 .actions_ro = actions, 1382 }; 1383 struct port_flow *pf; 1384 int ret; 1385 1386 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1387 if (ret < 0) 1388 return NULL; 1389 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1390 if (!pf) { 1391 rte_flow_error_set 1392 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1393 "calloc() failed"); 1394 return NULL; 1395 } 1396 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1397 error) >= 0) 1398 return pf; 1399 free(pf); 1400 return NULL; 1401 } 1402 1403 /** Print a message out of a flow error. */ 1404 static int 1405 port_flow_complain(struct rte_flow_error *error) 1406 { 1407 static const char *const errstrlist[] = { 1408 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1409 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1410 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1411 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1412 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1413 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1414 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1415 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1416 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1417 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1418 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1419 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1420 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1421 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1422 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1423 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1424 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1425 }; 1426 const char *errstr; 1427 char buf[32]; 1428 int err = rte_errno; 1429 1430 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1431 !errstrlist[error->type]) 1432 errstr = "unknown type"; 1433 else 1434 errstr = errstrlist[error->type]; 1435 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1436 __func__, error->type, errstr, 1437 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1438 error->cause), buf) : "", 1439 error->message ? error->message : "(no stated reason)", 1440 rte_strerror(err)); 1441 1442 switch (error->type) { 1443 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1444 fprintf(stderr, "The status suggests the use of \"transfer\" " 1445 "as the possible cause of the failure. Make " 1446 "sure that the flow in question and its " 1447 "indirect components (if any) are managed " 1448 "via \"transfer\" proxy port. Use command " 1449 "\"show port (port_id) flow transfer proxy\" " 1450 "to figure out the proxy port ID\n"); 1451 break; 1452 default: 1453 break; 1454 } 1455 1456 return -err; 1457 } 1458 1459 static void 1460 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line) 1461 { 1462 uint16_t total_len = 0; 1463 uint16_t str_len; 1464 uint16_t i; 1465 1466 if (rss_types == 0) 1467 return; 1468 1469 for (i = 0; rss_type_table[i].str; i++) { 1470 if (rss_type_table[i].rss_type == 0) 1471 continue; 1472 1473 if ((rss_types & rss_type_table[i].rss_type) == 1474 rss_type_table[i].rss_type) { 1475 /* Contain two spaces */ 1476 str_len = strlen(rss_type_table[i].str) + 2; 1477 if (total_len + str_len > char_num_per_line) { 1478 printf("\n"); 1479 total_len = 0; 1480 } 1481 printf(" %s", rss_type_table[i].str); 1482 total_len += str_len; 1483 } 1484 } 1485 printf("\n"); 1486 } 1487 1488 static void 1489 rss_config_display(struct rte_flow_action_rss *rss_conf) 1490 { 1491 uint8_t i; 1492 1493 if (rss_conf == NULL) { 1494 fprintf(stderr, "Invalid rule\n"); 1495 return; 1496 } 1497 1498 printf("RSS:\n" 1499 " queues:"); 1500 if (rss_conf->queue_num == 0) 1501 printf(" none"); 1502 for (i = 0; i < rss_conf->queue_num; i++) 1503 printf(" %d", rss_conf->queue[i]); 1504 printf("\n"); 1505 1506 printf(" function: "); 1507 switch (rss_conf->func) { 1508 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1509 printf("default\n"); 1510 break; 1511 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1512 printf("toeplitz\n"); 1513 break; 1514 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1515 printf("simple_xor\n"); 1516 break; 1517 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1518 printf("symmetric_toeplitz\n"); 1519 break; 1520 default: 1521 printf("Unknown function\n"); 1522 return; 1523 } 1524 1525 printf(" RSS key:\n"); 1526 if (rss_conf->key_len == 0) { 1527 printf(" none"); 1528 } else { 1529 printf(" key_len: %u\n", rss_conf->key_len); 1530 printf(" key: "); 1531 if (rss_conf->key == NULL) { 1532 printf("none"); 1533 } else { 1534 for (i = 0; i < rss_conf->key_len; i++) 1535 printf("%02X", rss_conf->key[i]); 1536 } 1537 } 1538 printf("\n"); 1539 1540 printf(" types:\n"); 1541 if (rss_conf->types == 0) { 1542 printf(" none\n"); 1543 return; 1544 } 1545 rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 1546 } 1547 1548 static struct port_indirect_action * 1549 action_get_by_id(portid_t port_id, uint32_t id) 1550 { 1551 struct rte_port *port; 1552 struct port_indirect_action **ppia; 1553 struct port_indirect_action *pia = NULL; 1554 1555 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1556 port_id == (portid_t)RTE_PORT_ALL) 1557 return NULL; 1558 port = &ports[port_id]; 1559 ppia = &port->actions_list; 1560 while (*ppia) { 1561 if ((*ppia)->id == id) { 1562 pia = *ppia; 1563 break; 1564 } 1565 ppia = &(*ppia)->next; 1566 } 1567 if (!pia) 1568 fprintf(stderr, 1569 "Failed to find indirect action #%u on port %u\n", 1570 id, port_id); 1571 return pia; 1572 } 1573 1574 static int 1575 action_alloc(portid_t port_id, uint32_t id, 1576 struct port_indirect_action **action) 1577 { 1578 struct rte_port *port; 1579 struct port_indirect_action **ppia; 1580 struct port_indirect_action *pia = NULL; 1581 1582 *action = NULL; 1583 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1584 port_id == (portid_t)RTE_PORT_ALL) 1585 return -EINVAL; 1586 port = &ports[port_id]; 1587 if (id == UINT32_MAX) { 1588 /* taking first available ID */ 1589 if (port->actions_list) { 1590 if (port->actions_list->id == UINT32_MAX - 1) { 1591 fprintf(stderr, 1592 "Highest indirect action ID is already assigned, delete it first\n"); 1593 return -ENOMEM; 1594 } 1595 id = port->actions_list->id + 1; 1596 } else { 1597 id = 0; 1598 } 1599 } 1600 pia = calloc(1, sizeof(*pia)); 1601 if (!pia) { 1602 fprintf(stderr, 1603 "Allocation of port %u indirect action failed\n", 1604 port_id); 1605 return -ENOMEM; 1606 } 1607 ppia = &port->actions_list; 1608 while (*ppia && (*ppia)->id > id) 1609 ppia = &(*ppia)->next; 1610 if (*ppia && (*ppia)->id == id) { 1611 fprintf(stderr, 1612 "Indirect action #%u is already assigned, delete it first\n", 1613 id); 1614 free(pia); 1615 return -EINVAL; 1616 } 1617 pia->next = *ppia; 1618 pia->id = id; 1619 *ppia = pia; 1620 *action = pia; 1621 return 0; 1622 } 1623 1624 static int 1625 template_alloc(uint32_t id, struct port_template **template, 1626 struct port_template **list) 1627 { 1628 struct port_template *lst = *list; 1629 struct port_template **ppt; 1630 struct port_template *pt = NULL; 1631 1632 *template = NULL; 1633 if (id == UINT32_MAX) { 1634 /* taking first available ID */ 1635 if (lst) { 1636 if (lst->id == UINT32_MAX - 1) { 1637 printf("Highest template ID is already" 1638 " assigned, delete it first\n"); 1639 return -ENOMEM; 1640 } 1641 id = lst->id + 1; 1642 } else { 1643 id = 0; 1644 } 1645 } 1646 pt = calloc(1, sizeof(*pt)); 1647 if (!pt) { 1648 printf("Allocation of port template failed\n"); 1649 return -ENOMEM; 1650 } 1651 ppt = list; 1652 while (*ppt && (*ppt)->id > id) 1653 ppt = &(*ppt)->next; 1654 if (*ppt && (*ppt)->id == id) { 1655 printf("Template #%u is already assigned," 1656 " delete it first\n", id); 1657 free(pt); 1658 return -EINVAL; 1659 } 1660 pt->next = *ppt; 1661 pt->id = id; 1662 *ppt = pt; 1663 *template = pt; 1664 return 0; 1665 } 1666 1667 static int 1668 table_alloc(uint32_t id, struct port_table **table, 1669 struct port_table **list) 1670 { 1671 struct port_table *lst = *list; 1672 struct port_table **ppt; 1673 struct port_table *pt = NULL; 1674 1675 *table = NULL; 1676 if (id == UINT32_MAX) { 1677 /* taking first available ID */ 1678 if (lst) { 1679 if (lst->id == UINT32_MAX - 1) { 1680 printf("Highest table ID is already" 1681 " assigned, delete it first\n"); 1682 return -ENOMEM; 1683 } 1684 id = lst->id + 1; 1685 } else { 1686 id = 0; 1687 } 1688 } 1689 pt = calloc(1, sizeof(*pt)); 1690 if (!pt) { 1691 printf("Allocation of table failed\n"); 1692 return -ENOMEM; 1693 } 1694 ppt = list; 1695 while (*ppt && (*ppt)->id > id) 1696 ppt = &(*ppt)->next; 1697 if (*ppt && (*ppt)->id == id) { 1698 printf("Table #%u is already assigned," 1699 " delete it first\n", id); 1700 free(pt); 1701 return -EINVAL; 1702 } 1703 pt->next = *ppt; 1704 pt->id = id; 1705 *ppt = pt; 1706 *table = pt; 1707 return 0; 1708 } 1709 1710 /** Get info about flow management resources. */ 1711 int 1712 port_flow_get_info(portid_t port_id) 1713 { 1714 struct rte_flow_port_info port_info; 1715 struct rte_flow_queue_info queue_info; 1716 struct rte_flow_error error; 1717 1718 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1719 port_id == (portid_t)RTE_PORT_ALL) 1720 return -EINVAL; 1721 /* Poisoning to make sure PMDs update it in case of error. */ 1722 memset(&error, 0x99, sizeof(error)); 1723 memset(&port_info, 0, sizeof(port_info)); 1724 memset(&queue_info, 0, sizeof(queue_info)); 1725 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1726 return port_flow_complain(&error); 1727 printf("Flow engine resources on port %u:\n" 1728 "Number of queues: %d\n" 1729 "Size of queues: %d\n" 1730 "Number of counters: %d\n" 1731 "Number of aging objects: %d\n" 1732 "Number of meter actions: %d\n", 1733 port_id, port_info.max_nb_queues, 1734 queue_info.max_size, 1735 port_info.max_nb_counters, 1736 port_info.max_nb_aging_objects, 1737 port_info.max_nb_meters); 1738 return 0; 1739 } 1740 1741 /** Configure flow management resources. */ 1742 int 1743 port_flow_configure(portid_t port_id, 1744 const struct rte_flow_port_attr *port_attr, 1745 uint16_t nb_queue, 1746 const struct rte_flow_queue_attr *queue_attr) 1747 { 1748 struct rte_port *port; 1749 struct rte_flow_error error; 1750 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1751 int std_queue; 1752 1753 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1754 port_id == (portid_t)RTE_PORT_ALL) 1755 return -EINVAL; 1756 port = &ports[port_id]; 1757 port->queue_nb = nb_queue; 1758 port->queue_sz = queue_attr->size; 1759 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1760 attr_list[std_queue] = queue_attr; 1761 /* Poisoning to make sure PMDs update it in case of error. */ 1762 memset(&error, 0x66, sizeof(error)); 1763 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1764 return port_flow_complain(&error); 1765 printf("Configure flows on port %u: " 1766 "number of queues %d with %d elements\n", 1767 port_id, nb_queue, queue_attr->size); 1768 return 0; 1769 } 1770 1771 static int 1772 action_handle_create(portid_t port_id, 1773 struct port_indirect_action *pia, 1774 const struct rte_flow_indir_action_conf *conf, 1775 const struct rte_flow_action *action, 1776 struct rte_flow_error *error) 1777 { 1778 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1779 struct rte_flow_action_age *age = 1780 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1781 1782 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1783 age->context = &pia->age_type; 1784 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1785 struct rte_flow_action_conntrack *ct = 1786 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1787 1788 memcpy(ct, &conntrack_context, sizeof(*ct)); 1789 } 1790 pia->type = action->type; 1791 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1792 error); 1793 return pia->handle ? 0 : -1; 1794 } 1795 1796 static int 1797 action_list_handle_create(portid_t port_id, 1798 struct port_indirect_action *pia, 1799 const struct rte_flow_indir_action_conf *conf, 1800 const struct rte_flow_action *actions, 1801 struct rte_flow_error *error) 1802 { 1803 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 1804 pia->list_handle = 1805 rte_flow_action_list_handle_create(port_id, conf, 1806 actions, error); 1807 return pia->list_handle ? 0 : -1; 1808 } 1809 /** Create indirect action */ 1810 int 1811 port_action_handle_create(portid_t port_id, uint32_t id, bool indirect_list, 1812 const struct rte_flow_indir_action_conf *conf, 1813 const struct rte_flow_action *action) 1814 { 1815 struct port_indirect_action *pia; 1816 int ret; 1817 struct rte_flow_error error; 1818 1819 ret = action_alloc(port_id, id, &pia); 1820 if (ret) 1821 return ret; 1822 /* Poisoning to make sure PMDs update it in case of error. */ 1823 memset(&error, 0x22, sizeof(error)); 1824 ret = indirect_list ? 1825 action_list_handle_create(port_id, pia, conf, action, &error) : 1826 action_handle_create(port_id, pia, conf, action, &error); 1827 if (ret) { 1828 uint32_t destroy_id = pia->id; 1829 port_action_handle_destroy(port_id, 1, &destroy_id); 1830 return port_flow_complain(&error); 1831 } 1832 printf("Indirect action #%u created\n", pia->id); 1833 return 0; 1834 } 1835 1836 /** Destroy indirect action */ 1837 int 1838 port_action_handle_destroy(portid_t port_id, 1839 uint32_t n, 1840 const uint32_t *actions) 1841 { 1842 struct rte_port *port; 1843 struct port_indirect_action **tmp; 1844 int ret = 0; 1845 1846 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1847 port_id == (portid_t)RTE_PORT_ALL) 1848 return -EINVAL; 1849 port = &ports[port_id]; 1850 tmp = &port->actions_list; 1851 while (*tmp) { 1852 uint32_t i; 1853 1854 for (i = 0; i != n; ++i) { 1855 struct rte_flow_error error; 1856 struct port_indirect_action *pia = *tmp; 1857 1858 if (actions[i] != pia->id) 1859 continue; 1860 /* 1861 * Poisoning to make sure PMDs update it in case 1862 * of error. 1863 */ 1864 memset(&error, 0x33, sizeof(error)); 1865 1866 if (pia->handle) { 1867 ret = pia->type == 1868 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1869 rte_flow_action_list_handle_destroy 1870 (port_id, pia->list_handle, &error) : 1871 rte_flow_action_handle_destroy 1872 (port_id, pia->handle, &error); 1873 if (ret) { 1874 ret = port_flow_complain(&error); 1875 continue; 1876 } 1877 } 1878 *tmp = pia->next; 1879 printf("Indirect action #%u destroyed\n", pia->id); 1880 free(pia); 1881 break; 1882 } 1883 if (i == n) 1884 tmp = &(*tmp)->next; 1885 } 1886 return ret; 1887 } 1888 1889 int 1890 port_action_handle_flush(portid_t port_id) 1891 { 1892 struct rte_port *port; 1893 struct port_indirect_action **tmp; 1894 int ret = 0; 1895 1896 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1897 port_id == (portid_t)RTE_PORT_ALL) 1898 return -EINVAL; 1899 port = &ports[port_id]; 1900 tmp = &port->actions_list; 1901 while (*tmp != NULL) { 1902 struct rte_flow_error error; 1903 struct port_indirect_action *pia = *tmp; 1904 1905 /* Poisoning to make sure PMDs update it in case of error. */ 1906 memset(&error, 0x44, sizeof(error)); 1907 if (pia->handle != NULL) { 1908 ret = pia->type == 1909 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1910 rte_flow_action_list_handle_destroy 1911 (port_id, pia->list_handle, &error) : 1912 rte_flow_action_handle_destroy 1913 (port_id, pia->handle, &error); 1914 if (ret) { 1915 printf("Indirect action #%u not destroyed\n", 1916 pia->id); 1917 ret = port_flow_complain(&error); 1918 } 1919 tmp = &pia->next; 1920 } else { 1921 *tmp = pia->next; 1922 free(pia); 1923 } 1924 } 1925 return ret; 1926 } 1927 1928 /** Get indirect action by port + id */ 1929 struct rte_flow_action_handle * 1930 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1931 { 1932 1933 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1934 1935 return (pia) ? pia->handle : NULL; 1936 } 1937 1938 /** Update indirect action */ 1939 int 1940 port_action_handle_update(portid_t port_id, uint32_t id, 1941 const struct rte_flow_action *action) 1942 { 1943 struct rte_flow_error error; 1944 struct rte_flow_action_handle *action_handle; 1945 struct port_indirect_action *pia; 1946 const void *update; 1947 1948 action_handle = port_action_handle_get_by_id(port_id, id); 1949 if (!action_handle) 1950 return -EINVAL; 1951 pia = action_get_by_id(port_id, id); 1952 if (!pia) 1953 return -EINVAL; 1954 switch (pia->type) { 1955 case RTE_FLOW_ACTION_TYPE_AGE: 1956 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1957 update = action->conf; 1958 break; 1959 default: 1960 update = action; 1961 break; 1962 } 1963 if (rte_flow_action_handle_update(port_id, action_handle, update, 1964 &error)) { 1965 return port_flow_complain(&error); 1966 } 1967 printf("Indirect action #%u updated\n", id); 1968 return 0; 1969 } 1970 1971 static void 1972 port_action_handle_query_dump(portid_t port_id, 1973 const struct port_indirect_action *pia, 1974 union port_action_query *query) 1975 { 1976 if (!pia || !query) 1977 return; 1978 switch (pia->type) { 1979 case RTE_FLOW_ACTION_TYPE_AGE: 1980 printf("Indirect AGE action:\n" 1981 " aged: %u\n" 1982 " sec_since_last_hit_valid: %u\n" 1983 " sec_since_last_hit: %" PRIu32 "\n", 1984 query->age.aged, 1985 query->age.sec_since_last_hit_valid, 1986 query->age.sec_since_last_hit); 1987 break; 1988 case RTE_FLOW_ACTION_TYPE_COUNT: 1989 printf("Indirect COUNT action:\n" 1990 " hits_set: %u\n" 1991 " bytes_set: %u\n" 1992 " hits: %" PRIu64 "\n" 1993 " bytes: %" PRIu64 "\n", 1994 query->count.hits_set, 1995 query->count.bytes_set, 1996 query->count.hits, 1997 query->count.bytes); 1998 break; 1999 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 2000 printf("Conntrack Context:\n" 2001 " Peer: %u, Flow dir: %s, Enable: %u\n" 2002 " Live: %u, SACK: %u, CACK: %u\n" 2003 " Packet dir: %s, Liberal: %u, State: %u\n" 2004 " Factor: %u, Retrans: %u, TCP flags: %u\n" 2005 " Last Seq: %u, Last ACK: %u\n" 2006 " Last Win: %u, Last End: %u\n", 2007 query->ct.peer_port, 2008 query->ct.is_original_dir ? "Original" : "Reply", 2009 query->ct.enable, query->ct.live_connection, 2010 query->ct.selective_ack, query->ct.challenge_ack_passed, 2011 query->ct.last_direction ? "Original" : "Reply", 2012 query->ct.liberal_mode, query->ct.state, 2013 query->ct.max_ack_window, query->ct.retransmission_limit, 2014 query->ct.last_index, query->ct.last_seq, 2015 query->ct.last_ack, query->ct.last_window, 2016 query->ct.last_end); 2017 printf(" Original Dir:\n" 2018 " scale: %u, fin: %u, ack seen: %u\n" 2019 " unacked data: %u\n Sent end: %u," 2020 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2021 query->ct.original_dir.scale, 2022 query->ct.original_dir.close_initiated, 2023 query->ct.original_dir.last_ack_seen, 2024 query->ct.original_dir.data_unacked, 2025 query->ct.original_dir.sent_end, 2026 query->ct.original_dir.reply_end, 2027 query->ct.original_dir.max_win, 2028 query->ct.original_dir.max_ack); 2029 printf(" Reply Dir:\n" 2030 " scale: %u, fin: %u, ack seen: %u\n" 2031 " unacked data: %u\n Sent end: %u," 2032 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2033 query->ct.reply_dir.scale, 2034 query->ct.reply_dir.close_initiated, 2035 query->ct.reply_dir.last_ack_seen, 2036 query->ct.reply_dir.data_unacked, 2037 query->ct.reply_dir.sent_end, 2038 query->ct.reply_dir.reply_end, 2039 query->ct.reply_dir.max_win, 2040 query->ct.reply_dir.max_ack); 2041 break; 2042 case RTE_FLOW_ACTION_TYPE_QUOTA: 2043 printf("Indirect QUOTA action %u\n" 2044 " unused quota: %" PRId64 "\n", 2045 pia->id, query->quota.quota); 2046 break; 2047 default: 2048 printf("port-%u: indirect action %u (type: %d) doesn't support query\n", 2049 pia->type, pia->id, port_id); 2050 break; 2051 } 2052 2053 } 2054 2055 void 2056 port_action_handle_query_update(portid_t port_id, uint32_t id, 2057 enum rte_flow_query_update_mode qu_mode, 2058 const struct rte_flow_action *action) 2059 { 2060 int ret; 2061 struct rte_flow_error error; 2062 struct port_indirect_action *pia; 2063 union port_action_query query; 2064 2065 pia = action_get_by_id(port_id, id); 2066 if (!pia || !pia->handle) 2067 return; 2068 ret = rte_flow_action_handle_query_update(port_id, pia->handle, action, 2069 &query, qu_mode, &error); 2070 if (ret) 2071 port_flow_complain(&error); 2072 else 2073 port_action_handle_query_dump(port_id, pia, &query); 2074 2075 } 2076 2077 int 2078 port_action_handle_query(portid_t port_id, uint32_t id) 2079 { 2080 struct rte_flow_error error; 2081 struct port_indirect_action *pia; 2082 union port_action_query query; 2083 2084 pia = action_get_by_id(port_id, id); 2085 if (!pia) 2086 return -EINVAL; 2087 switch (pia->type) { 2088 case RTE_FLOW_ACTION_TYPE_AGE: 2089 case RTE_FLOW_ACTION_TYPE_COUNT: 2090 case RTE_FLOW_ACTION_TYPE_QUOTA: 2091 break; 2092 default: 2093 fprintf(stderr, 2094 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2095 id, pia->type, port_id); 2096 return -ENOTSUP; 2097 } 2098 /* Poisoning to make sure PMDs update it in case of error. */ 2099 memset(&error, 0x55, sizeof(error)); 2100 memset(&query, 0, sizeof(query)); 2101 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 2102 return port_flow_complain(&error); 2103 port_action_handle_query_dump(port_id, pia, &query); 2104 return 0; 2105 } 2106 2107 static struct port_flow_tunnel * 2108 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 2109 const struct rte_flow_item *pattern, 2110 const struct rte_flow_action *actions, 2111 const struct tunnel_ops *tunnel_ops) 2112 { 2113 int ret; 2114 struct rte_port *port; 2115 struct port_flow_tunnel *pft; 2116 struct rte_flow_error error; 2117 2118 port = &ports[port_id]; 2119 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2120 if (!pft) { 2121 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2122 tunnel_ops->id); 2123 return NULL; 2124 } 2125 if (tunnel_ops->actions) { 2126 uint32_t num_actions; 2127 const struct rte_flow_action *aptr; 2128 2129 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2130 &pft->pmd_actions, 2131 &pft->num_pmd_actions, 2132 &error); 2133 if (ret) { 2134 port_flow_complain(&error); 2135 return NULL; 2136 } 2137 for (aptr = actions, num_actions = 1; 2138 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2139 aptr++, num_actions++); 2140 pft->actions = malloc( 2141 (num_actions + pft->num_pmd_actions) * 2142 sizeof(actions[0])); 2143 if (!pft->actions) { 2144 rte_flow_tunnel_action_decap_release( 2145 port_id, pft->actions, 2146 pft->num_pmd_actions, &error); 2147 return NULL; 2148 } 2149 rte_memcpy(pft->actions, pft->pmd_actions, 2150 pft->num_pmd_actions * sizeof(actions[0])); 2151 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2152 num_actions * sizeof(actions[0])); 2153 } 2154 if (tunnel_ops->items) { 2155 uint32_t num_items; 2156 const struct rte_flow_item *iptr; 2157 2158 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2159 &pft->pmd_items, 2160 &pft->num_pmd_items, 2161 &error); 2162 if (ret) { 2163 port_flow_complain(&error); 2164 return NULL; 2165 } 2166 for (iptr = pattern, num_items = 1; 2167 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2168 iptr++, num_items++); 2169 pft->items = malloc((num_items + pft->num_pmd_items) * 2170 sizeof(pattern[0])); 2171 if (!pft->items) { 2172 rte_flow_tunnel_item_release( 2173 port_id, pft->pmd_items, 2174 pft->num_pmd_items, &error); 2175 return NULL; 2176 } 2177 rte_memcpy(pft->items, pft->pmd_items, 2178 pft->num_pmd_items * sizeof(pattern[0])); 2179 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2180 num_items * sizeof(pattern[0])); 2181 } 2182 2183 return pft; 2184 } 2185 2186 static void 2187 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2188 const struct tunnel_ops *tunnel_ops, 2189 struct port_flow_tunnel *pft) 2190 { 2191 struct rte_flow_error error; 2192 2193 if (tunnel_ops->actions) { 2194 free(pft->actions); 2195 rte_flow_tunnel_action_decap_release( 2196 port_id, pft->pmd_actions, 2197 pft->num_pmd_actions, &error); 2198 pft->actions = NULL; 2199 pft->pmd_actions = NULL; 2200 } 2201 if (tunnel_ops->items) { 2202 free(pft->items); 2203 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2204 pft->num_pmd_items, 2205 &error); 2206 pft->items = NULL; 2207 pft->pmd_items = NULL; 2208 } 2209 } 2210 2211 /** Add port meter policy */ 2212 int 2213 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2214 const struct rte_flow_action *actions) 2215 { 2216 struct rte_mtr_error error; 2217 const struct rte_flow_action *act = actions; 2218 const struct rte_flow_action *start; 2219 struct rte_mtr_meter_policy_params policy; 2220 uint32_t i = 0, act_n; 2221 int ret; 2222 2223 for (i = 0; i < RTE_COLORS; i++) { 2224 for (act_n = 0, start = act; 2225 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2226 act_n++; 2227 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2228 policy.actions[i] = start; 2229 else 2230 policy.actions[i] = NULL; 2231 act++; 2232 } 2233 ret = rte_mtr_meter_policy_add(port_id, 2234 policy_id, 2235 &policy, &error); 2236 if (ret) 2237 print_mtr_err_msg(&error); 2238 return ret; 2239 } 2240 2241 struct rte_flow_meter_profile * 2242 port_meter_profile_get_by_id(portid_t port_id, uint32_t id) 2243 { 2244 struct rte_mtr_error error; 2245 struct rte_flow_meter_profile *profile; 2246 2247 profile = rte_mtr_meter_profile_get(port_id, id, &error); 2248 if (!profile) 2249 print_mtr_err_msg(&error); 2250 return profile; 2251 } 2252 struct rte_flow_meter_policy * 2253 port_meter_policy_get_by_id(portid_t port_id, uint32_t id) 2254 { 2255 struct rte_mtr_error error; 2256 struct rte_flow_meter_policy *policy; 2257 2258 policy = rte_mtr_meter_policy_get(port_id, id, &error); 2259 if (!policy) 2260 print_mtr_err_msg(&error); 2261 return policy; 2262 } 2263 2264 /** Validate flow rule. */ 2265 int 2266 port_flow_validate(portid_t port_id, 2267 const struct rte_flow_attr *attr, 2268 const struct rte_flow_item *pattern, 2269 const struct rte_flow_action *actions, 2270 const struct tunnel_ops *tunnel_ops) 2271 { 2272 struct rte_flow_error error; 2273 struct port_flow_tunnel *pft = NULL; 2274 int ret; 2275 2276 /* Poisoning to make sure PMDs update it in case of error. */ 2277 memset(&error, 0x11, sizeof(error)); 2278 if (tunnel_ops->enabled) { 2279 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2280 actions, tunnel_ops); 2281 if (!pft) 2282 return -ENOENT; 2283 if (pft->items) 2284 pattern = pft->items; 2285 if (pft->actions) 2286 actions = pft->actions; 2287 } 2288 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2289 if (tunnel_ops->enabled) 2290 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2291 if (ret) 2292 return port_flow_complain(&error); 2293 printf("Flow rule validated\n"); 2294 return 0; 2295 } 2296 2297 /** Return age action structure if exists, otherwise NULL. */ 2298 static struct rte_flow_action_age * 2299 age_action_get(const struct rte_flow_action *actions) 2300 { 2301 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2302 switch (actions->type) { 2303 case RTE_FLOW_ACTION_TYPE_AGE: 2304 return (struct rte_flow_action_age *) 2305 (uintptr_t)actions->conf; 2306 default: 2307 break; 2308 } 2309 } 2310 return NULL; 2311 } 2312 2313 /** Create pattern template */ 2314 int 2315 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2316 const struct rte_flow_pattern_template_attr *attr, 2317 const struct rte_flow_item *pattern) 2318 { 2319 struct rte_port *port; 2320 struct port_template *pit; 2321 int ret; 2322 struct rte_flow_error error; 2323 2324 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2325 port_id == (portid_t)RTE_PORT_ALL) 2326 return -EINVAL; 2327 port = &ports[port_id]; 2328 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2329 if (ret) 2330 return ret; 2331 /* Poisoning to make sure PMDs update it in case of error. */ 2332 memset(&error, 0x22, sizeof(error)); 2333 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2334 attr, pattern, &error); 2335 if (!pit->template.pattern_template) { 2336 uint32_t destroy_id = pit->id; 2337 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2338 return port_flow_complain(&error); 2339 } 2340 printf("Pattern template #%u created\n", pit->id); 2341 return 0; 2342 } 2343 2344 /** Destroy pattern template */ 2345 int 2346 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2347 const uint32_t *template) 2348 { 2349 struct rte_port *port; 2350 struct port_template **tmp; 2351 int ret = 0; 2352 2353 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2354 port_id == (portid_t)RTE_PORT_ALL) 2355 return -EINVAL; 2356 port = &ports[port_id]; 2357 tmp = &port->pattern_templ_list; 2358 while (*tmp) { 2359 uint32_t i; 2360 2361 for (i = 0; i != n; ++i) { 2362 struct rte_flow_error error; 2363 struct port_template *pit = *tmp; 2364 2365 if (template[i] != pit->id) 2366 continue; 2367 /* 2368 * Poisoning to make sure PMDs update it in case 2369 * of error. 2370 */ 2371 memset(&error, 0x33, sizeof(error)); 2372 2373 if (pit->template.pattern_template && 2374 rte_flow_pattern_template_destroy(port_id, 2375 pit->template.pattern_template, 2376 &error)) { 2377 ret = port_flow_complain(&error); 2378 continue; 2379 } 2380 *tmp = pit->next; 2381 printf("Pattern template #%u destroyed\n", pit->id); 2382 free(pit); 2383 break; 2384 } 2385 if (i == n) 2386 tmp = &(*tmp)->next; 2387 } 2388 return ret; 2389 } 2390 2391 /** Flush pattern template */ 2392 int 2393 port_flow_pattern_template_flush(portid_t port_id) 2394 { 2395 struct rte_port *port; 2396 struct port_template **tmp; 2397 int ret = 0; 2398 2399 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2400 port_id == (portid_t)RTE_PORT_ALL) 2401 return -EINVAL; 2402 port = &ports[port_id]; 2403 tmp = &port->pattern_templ_list; 2404 while (*tmp) { 2405 struct rte_flow_error error; 2406 struct port_template *pit = *tmp; 2407 2408 /* 2409 * Poisoning to make sure PMDs update it in case 2410 * of error. 2411 */ 2412 memset(&error, 0x33, sizeof(error)); 2413 if (pit->template.pattern_template && 2414 rte_flow_pattern_template_destroy(port_id, 2415 pit->template.pattern_template, &error)) { 2416 printf("Pattern template #%u not destroyed\n", pit->id); 2417 ret = port_flow_complain(&error); 2418 tmp = &pit->next; 2419 } else { 2420 *tmp = pit->next; 2421 free(pit); 2422 } 2423 } 2424 return ret; 2425 } 2426 2427 /** Create actions template */ 2428 int 2429 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2430 const struct rte_flow_actions_template_attr *attr, 2431 const struct rte_flow_action *actions, 2432 const struct rte_flow_action *masks) 2433 { 2434 struct rte_port *port; 2435 struct port_template *pat; 2436 int ret; 2437 struct rte_flow_error error; 2438 2439 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2440 port_id == (portid_t)RTE_PORT_ALL) 2441 return -EINVAL; 2442 port = &ports[port_id]; 2443 ret = template_alloc(id, &pat, &port->actions_templ_list); 2444 if (ret) 2445 return ret; 2446 /* Poisoning to make sure PMDs update it in case of error. */ 2447 memset(&error, 0x22, sizeof(error)); 2448 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2449 attr, actions, masks, &error); 2450 if (!pat->template.actions_template) { 2451 uint32_t destroy_id = pat->id; 2452 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2453 return port_flow_complain(&error); 2454 } 2455 printf("Actions template #%u created\n", pat->id); 2456 return 0; 2457 } 2458 2459 /** Destroy actions template */ 2460 int 2461 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2462 const uint32_t *template) 2463 { 2464 struct rte_port *port; 2465 struct port_template **tmp; 2466 int ret = 0; 2467 2468 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2469 port_id == (portid_t)RTE_PORT_ALL) 2470 return -EINVAL; 2471 port = &ports[port_id]; 2472 tmp = &port->actions_templ_list; 2473 while (*tmp) { 2474 uint32_t i; 2475 2476 for (i = 0; i != n; ++i) { 2477 struct rte_flow_error error; 2478 struct port_template *pat = *tmp; 2479 2480 if (template[i] != pat->id) 2481 continue; 2482 /* 2483 * Poisoning to make sure PMDs update it in case 2484 * of error. 2485 */ 2486 memset(&error, 0x33, sizeof(error)); 2487 2488 if (pat->template.actions_template && 2489 rte_flow_actions_template_destroy(port_id, 2490 pat->template.actions_template, &error)) { 2491 ret = port_flow_complain(&error); 2492 continue; 2493 } 2494 *tmp = pat->next; 2495 printf("Actions template #%u destroyed\n", pat->id); 2496 free(pat); 2497 break; 2498 } 2499 if (i == n) 2500 tmp = &(*tmp)->next; 2501 } 2502 return ret; 2503 } 2504 2505 /** Flush actions template */ 2506 int 2507 port_flow_actions_template_flush(portid_t port_id) 2508 { 2509 struct rte_port *port; 2510 struct port_template **tmp; 2511 int ret = 0; 2512 2513 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2514 port_id == (portid_t)RTE_PORT_ALL) 2515 return -EINVAL; 2516 port = &ports[port_id]; 2517 tmp = &port->actions_templ_list; 2518 while (*tmp) { 2519 struct rte_flow_error error; 2520 struct port_template *pat = *tmp; 2521 2522 /* 2523 * Poisoning to make sure PMDs update it in case 2524 * of error. 2525 */ 2526 memset(&error, 0x33, sizeof(error)); 2527 2528 if (pat->template.actions_template && 2529 rte_flow_actions_template_destroy(port_id, 2530 pat->template.actions_template, &error)) { 2531 ret = port_flow_complain(&error); 2532 printf("Actions template #%u not destroyed\n", pat->id); 2533 tmp = &pat->next; 2534 } else { 2535 *tmp = pat->next; 2536 free(pat); 2537 } 2538 } 2539 return ret; 2540 } 2541 2542 /** Create table */ 2543 int 2544 port_flow_template_table_create(portid_t port_id, uint32_t id, 2545 const struct rte_flow_template_table_attr *table_attr, 2546 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2547 uint32_t nb_actions_templates, uint32_t *actions_templates) 2548 { 2549 struct rte_port *port; 2550 struct port_table *pt; 2551 struct port_template *temp = NULL; 2552 int ret; 2553 uint32_t i; 2554 struct rte_flow_error error; 2555 struct rte_flow_pattern_template 2556 *flow_pattern_templates[nb_pattern_templates]; 2557 struct rte_flow_actions_template 2558 *flow_actions_templates[nb_actions_templates]; 2559 2560 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2561 port_id == (portid_t)RTE_PORT_ALL) 2562 return -EINVAL; 2563 port = &ports[port_id]; 2564 for (i = 0; i < nb_pattern_templates; ++i) { 2565 bool found = false; 2566 temp = port->pattern_templ_list; 2567 while (temp) { 2568 if (pattern_templates[i] == temp->id) { 2569 flow_pattern_templates[i] = 2570 temp->template.pattern_template; 2571 found = true; 2572 break; 2573 } 2574 temp = temp->next; 2575 } 2576 if (!found) { 2577 printf("Pattern template #%u is invalid\n", 2578 pattern_templates[i]); 2579 return -EINVAL; 2580 } 2581 } 2582 for (i = 0; i < nb_actions_templates; ++i) { 2583 bool found = false; 2584 temp = port->actions_templ_list; 2585 while (temp) { 2586 if (actions_templates[i] == temp->id) { 2587 flow_actions_templates[i] = 2588 temp->template.actions_template; 2589 found = true; 2590 break; 2591 } 2592 temp = temp->next; 2593 } 2594 if (!found) { 2595 printf("Actions template #%u is invalid\n", 2596 actions_templates[i]); 2597 return -EINVAL; 2598 } 2599 } 2600 ret = table_alloc(id, &pt, &port->table_list); 2601 if (ret) 2602 return ret; 2603 /* Poisoning to make sure PMDs update it in case of error. */ 2604 memset(&error, 0x22, sizeof(error)); 2605 pt->table = rte_flow_template_table_create(port_id, table_attr, 2606 flow_pattern_templates, nb_pattern_templates, 2607 flow_actions_templates, nb_actions_templates, 2608 &error); 2609 2610 if (!pt->table) { 2611 uint32_t destroy_id = pt->id; 2612 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2613 return port_flow_complain(&error); 2614 } 2615 pt->nb_pattern_templates = nb_pattern_templates; 2616 pt->nb_actions_templates = nb_actions_templates; 2617 rte_memcpy(&pt->flow_attr, &table_attr->flow_attr, 2618 sizeof(struct rte_flow_attr)); 2619 printf("Template table #%u created\n", pt->id); 2620 return 0; 2621 } 2622 2623 /** Destroy table */ 2624 int 2625 port_flow_template_table_destroy(portid_t port_id, 2626 uint32_t n, const uint32_t *table) 2627 { 2628 struct rte_port *port; 2629 struct port_table **tmp; 2630 int ret = 0; 2631 2632 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2633 port_id == (portid_t)RTE_PORT_ALL) 2634 return -EINVAL; 2635 port = &ports[port_id]; 2636 tmp = &port->table_list; 2637 while (*tmp) { 2638 uint32_t i; 2639 2640 for (i = 0; i != n; ++i) { 2641 struct rte_flow_error error; 2642 struct port_table *pt = *tmp; 2643 2644 if (table[i] != pt->id) 2645 continue; 2646 /* 2647 * Poisoning to make sure PMDs update it in case 2648 * of error. 2649 */ 2650 memset(&error, 0x33, sizeof(error)); 2651 2652 if (pt->table && 2653 rte_flow_template_table_destroy(port_id, 2654 pt->table, 2655 &error)) { 2656 ret = port_flow_complain(&error); 2657 continue; 2658 } 2659 *tmp = pt->next; 2660 printf("Template table #%u destroyed\n", pt->id); 2661 free(pt); 2662 break; 2663 } 2664 if (i == n) 2665 tmp = &(*tmp)->next; 2666 } 2667 return ret; 2668 } 2669 2670 /** Flush table */ 2671 int 2672 port_flow_template_table_flush(portid_t port_id) 2673 { 2674 struct rte_port *port; 2675 struct port_table **tmp; 2676 int ret = 0; 2677 2678 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2679 port_id == (portid_t)RTE_PORT_ALL) 2680 return -EINVAL; 2681 port = &ports[port_id]; 2682 tmp = &port->table_list; 2683 while (*tmp) { 2684 struct rte_flow_error error; 2685 struct port_table *pt = *tmp; 2686 2687 /* 2688 * Poisoning to make sure PMDs update it in case 2689 * of error. 2690 */ 2691 memset(&error, 0x33, sizeof(error)); 2692 2693 if (pt->table && 2694 rte_flow_template_table_destroy(port_id, 2695 pt->table, 2696 &error)) { 2697 ret = port_flow_complain(&error); 2698 printf("Template table #%u not destroyed\n", pt->id); 2699 tmp = &pt->next; 2700 } else { 2701 *tmp = pt->next; 2702 free(pt); 2703 } 2704 } 2705 return ret; 2706 } 2707 2708 /** Enqueue create flow rule operation. */ 2709 int 2710 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2711 bool postpone, uint32_t table_id, uint32_t rule_idx, 2712 uint32_t pattern_idx, uint32_t actions_idx, 2713 const struct rte_flow_item *pattern, 2714 const struct rte_flow_action *actions) 2715 { 2716 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2717 struct rte_flow *flow; 2718 struct rte_port *port; 2719 struct port_flow *pf; 2720 struct port_table *pt; 2721 uint32_t id = 0; 2722 bool found; 2723 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2724 struct rte_flow_action_age *age = age_action_get(actions); 2725 struct queue_job *job; 2726 2727 port = &ports[port_id]; 2728 if (port->flow_list) { 2729 if (port->flow_list->id == UINT32_MAX) { 2730 printf("Highest rule ID is already assigned," 2731 " delete it first"); 2732 return -ENOMEM; 2733 } 2734 id = port->flow_list->id + 1; 2735 } 2736 2737 if (queue_id >= port->queue_nb) { 2738 printf("Queue #%u is invalid\n", queue_id); 2739 return -EINVAL; 2740 } 2741 2742 found = false; 2743 pt = port->table_list; 2744 while (pt) { 2745 if (table_id == pt->id) { 2746 found = true; 2747 break; 2748 } 2749 pt = pt->next; 2750 } 2751 if (!found) { 2752 printf("Table #%u is invalid\n", table_id); 2753 return -EINVAL; 2754 } 2755 2756 if (pattern_idx >= pt->nb_pattern_templates) { 2757 printf("Pattern template index #%u is invalid," 2758 " %u templates present in the table\n", 2759 pattern_idx, pt->nb_pattern_templates); 2760 return -EINVAL; 2761 } 2762 if (actions_idx >= pt->nb_actions_templates) { 2763 printf("Actions template index #%u is invalid," 2764 " %u templates present in the table\n", 2765 actions_idx, pt->nb_actions_templates); 2766 return -EINVAL; 2767 } 2768 2769 job = calloc(1, sizeof(*job)); 2770 if (!job) { 2771 printf("Queue flow create job allocate failed\n"); 2772 return -ENOMEM; 2773 } 2774 job->type = QUEUE_JOB_TYPE_FLOW_CREATE; 2775 2776 pf = port_flow_new(&pt->flow_attr, pattern, actions, &error); 2777 if (!pf) { 2778 free(job); 2779 return port_flow_complain(&error); 2780 } 2781 if (age) { 2782 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2783 age->context = &pf->age_type; 2784 } 2785 /* Poisoning to make sure PMDs update it in case of error. */ 2786 memset(&error, 0x11, sizeof(error)); 2787 if (rule_idx == UINT32_MAX) 2788 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2789 pattern, pattern_idx, actions, actions_idx, job, &error); 2790 else 2791 flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table, 2792 rule_idx, actions, actions_idx, job, &error); 2793 if (!flow) { 2794 uint64_t flow_id = pf->id; 2795 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2796 free(job); 2797 return port_flow_complain(&error); 2798 } 2799 2800 pf->next = port->flow_list; 2801 pf->id = id; 2802 pf->table = pt; 2803 pf->flow = flow; 2804 job->pf = pf; 2805 port->flow_list = pf; 2806 printf("Flow rule #%"PRIu64" creation enqueued\n", pf->id); 2807 return 0; 2808 } 2809 2810 /** Enqueue number of destroy flow rules operations. */ 2811 int 2812 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2813 bool postpone, uint32_t n, const uint64_t *rule) 2814 { 2815 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2816 struct rte_port *port; 2817 struct port_flow **tmp; 2818 int ret = 0; 2819 struct queue_job *job; 2820 2821 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2822 port_id == (portid_t)RTE_PORT_ALL) 2823 return -EINVAL; 2824 port = &ports[port_id]; 2825 2826 if (queue_id >= port->queue_nb) { 2827 printf("Queue #%u is invalid\n", queue_id); 2828 return -EINVAL; 2829 } 2830 2831 tmp = &port->flow_list; 2832 while (*tmp) { 2833 uint32_t i; 2834 2835 for (i = 0; i != n; ++i) { 2836 struct rte_flow_error error; 2837 struct port_flow *pf = *tmp; 2838 2839 if (rule[i] != pf->id) 2840 continue; 2841 /* 2842 * Poisoning to make sure PMD 2843 * update it in case of error. 2844 */ 2845 memset(&error, 0x33, sizeof(error)); 2846 job = calloc(1, sizeof(*job)); 2847 if (!job) { 2848 printf("Queue flow destroy job allocate failed\n"); 2849 return -ENOMEM; 2850 } 2851 job->type = QUEUE_JOB_TYPE_FLOW_DESTROY; 2852 job->pf = pf; 2853 2854 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2855 pf->flow, job, &error)) { 2856 free(job); 2857 ret = port_flow_complain(&error); 2858 continue; 2859 } 2860 printf("Flow rule #%"PRIu64" destruction enqueued\n", 2861 pf->id); 2862 *tmp = pf->next; 2863 break; 2864 } 2865 if (i == n) 2866 tmp = &(*tmp)->next; 2867 } 2868 return ret; 2869 } 2870 2871 static void 2872 queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2873 struct port_indirect_action *pia, 2874 struct queue_job *job, 2875 const struct rte_flow_op_attr *attr, 2876 const struct rte_flow_indir_action_conf *conf, 2877 const struct rte_flow_action *action, 2878 struct rte_flow_error *error) 2879 { 2880 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2881 struct rte_flow_action_age *age = 2882 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2883 2884 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2885 age->context = &pia->age_type; 2886 } 2887 /* Poisoning to make sure PMDs update it in case of error. */ 2888 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2889 attr, conf, action, 2890 job, error); 2891 pia->type = action->type; 2892 } 2893 2894 static void 2895 queue_action_list_handle_create(portid_t port_id, uint32_t queue_id, 2896 struct port_indirect_action *pia, 2897 struct queue_job *job, 2898 const struct rte_flow_op_attr *attr, 2899 const struct rte_flow_indir_action_conf *conf, 2900 const struct rte_flow_action *action, 2901 struct rte_flow_error *error) 2902 { 2903 /* Poisoning to make sure PMDs update it in case of error. */ 2904 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 2905 pia->list_handle = rte_flow_async_action_list_handle_create 2906 (port_id, queue_id, attr, conf, action, 2907 job, error); 2908 } 2909 2910 /** Enqueue update flow rule operation. */ 2911 int 2912 port_queue_flow_update(portid_t port_id, queueid_t queue_id, 2913 bool postpone, uint32_t rule_idx, uint32_t actions_idx, 2914 const struct rte_flow_action *actions) 2915 { 2916 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2917 struct rte_port *port; 2918 struct port_flow *pf, *uf; 2919 struct port_flow **tmp; 2920 struct port_table *pt; 2921 bool found; 2922 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2923 struct rte_flow_action_age *age = age_action_get(actions); 2924 struct queue_job *job; 2925 2926 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2927 port_id == (portid_t)RTE_PORT_ALL) 2928 return -EINVAL; 2929 port = &ports[port_id]; 2930 2931 if (queue_id >= port->queue_nb) { 2932 printf("Queue #%u is invalid\n", queue_id); 2933 return -EINVAL; 2934 } 2935 2936 found = false; 2937 tmp = &port->flow_list; 2938 while (*tmp) { 2939 pf = *tmp; 2940 if (rule_idx == pf->id) { 2941 found = true; 2942 break; 2943 } 2944 tmp = &(*tmp)->next; 2945 } 2946 if (!found) { 2947 printf("Flow rule #%u is invalid\n", rule_idx); 2948 return -EINVAL; 2949 } 2950 2951 pt = pf->table; 2952 if (actions_idx >= pt->nb_actions_templates) { 2953 printf("Actions template index #%u is invalid," 2954 " %u templates present in the table\n", 2955 actions_idx, pt->nb_actions_templates); 2956 return -EINVAL; 2957 } 2958 2959 job = calloc(1, sizeof(*job)); 2960 if (!job) { 2961 printf("Queue flow create job allocate failed\n"); 2962 return -ENOMEM; 2963 } 2964 job->type = QUEUE_JOB_TYPE_FLOW_UPDATE; 2965 2966 uf = port_flow_new(&pt->flow_attr, pf->rule.pattern_ro, actions, &error); 2967 if (!uf) { 2968 free(job); 2969 return port_flow_complain(&error); 2970 } 2971 2972 if (age) { 2973 uf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2974 age->context = &uf->age_type; 2975 } 2976 2977 /* 2978 * Poisoning to make sure PMD update it in case of error. 2979 */ 2980 memset(&error, 0x44, sizeof(error)); 2981 if (rte_flow_async_actions_update(port_id, queue_id, &op_attr, pf->flow, 2982 actions, actions_idx, job, &error)) { 2983 free(uf); 2984 free(job); 2985 return port_flow_complain(&error); 2986 } 2987 uf->next = pf->next; 2988 uf->id = pf->id; 2989 uf->table = pt; 2990 uf->flow = pf->flow; 2991 *tmp = uf; 2992 job->pf = pf; 2993 2994 printf("Flow rule #%"PRIu64" update enqueued\n", pf->id); 2995 return 0; 2996 } 2997 2998 /** Enqueue indirect action create operation. */ 2999 int 3000 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 3001 bool postpone, uint32_t id, 3002 const struct rte_flow_indir_action_conf *conf, 3003 const struct rte_flow_action *action) 3004 { 3005 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3006 struct rte_port *port; 3007 struct port_indirect_action *pia; 3008 int ret; 3009 struct rte_flow_error error; 3010 struct queue_job *job; 3011 bool is_indirect_list = action[1].type != RTE_FLOW_ACTION_TYPE_END; 3012 3013 3014 ret = action_alloc(port_id, id, &pia); 3015 if (ret) 3016 return ret; 3017 3018 port = &ports[port_id]; 3019 if (queue_id >= port->queue_nb) { 3020 printf("Queue #%u is invalid\n", queue_id); 3021 return -EINVAL; 3022 } 3023 job = calloc(1, sizeof(*job)); 3024 if (!job) { 3025 printf("Queue action create job allocate failed\n"); 3026 return -ENOMEM; 3027 } 3028 job->type = QUEUE_JOB_TYPE_ACTION_CREATE; 3029 job->pia = pia; 3030 3031 /* Poisoning to make sure PMDs update it in case of error. */ 3032 memset(&error, 0x88, sizeof(error)); 3033 3034 if (is_indirect_list) 3035 queue_action_list_handle_create(port_id, queue_id, pia, job, 3036 &attr, conf, action, &error); 3037 else 3038 queue_action_handle_create(port_id, queue_id, pia, job, &attr, 3039 conf, action, &error); 3040 3041 if (!pia->handle) { 3042 uint32_t destroy_id = pia->id; 3043 port_queue_action_handle_destroy(port_id, queue_id, 3044 postpone, 1, &destroy_id); 3045 free(job); 3046 return port_flow_complain(&error); 3047 } 3048 printf("Indirect action #%u creation queued\n", pia->id); 3049 return 0; 3050 } 3051 3052 /** Enqueue indirect action destroy operation. */ 3053 int 3054 port_queue_action_handle_destroy(portid_t port_id, 3055 uint32_t queue_id, bool postpone, 3056 uint32_t n, const uint32_t *actions) 3057 { 3058 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3059 struct rte_port *port; 3060 struct port_indirect_action **tmp; 3061 int ret = 0; 3062 struct queue_job *job; 3063 3064 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3065 port_id == (portid_t)RTE_PORT_ALL) 3066 return -EINVAL; 3067 port = &ports[port_id]; 3068 3069 if (queue_id >= port->queue_nb) { 3070 printf("Queue #%u is invalid\n", queue_id); 3071 return -EINVAL; 3072 } 3073 3074 tmp = &port->actions_list; 3075 while (*tmp) { 3076 uint32_t i; 3077 3078 for (i = 0; i != n; ++i) { 3079 struct rte_flow_error error; 3080 struct port_indirect_action *pia = *tmp; 3081 3082 if (actions[i] != pia->id) 3083 continue; 3084 /* 3085 * Poisoning to make sure PMDs update it in case 3086 * of error. 3087 */ 3088 memset(&error, 0x99, sizeof(error)); 3089 job = calloc(1, sizeof(*job)); 3090 if (!job) { 3091 printf("Queue action destroy job allocate failed\n"); 3092 return -ENOMEM; 3093 } 3094 job->type = QUEUE_JOB_TYPE_ACTION_DESTROY; 3095 job->pia = pia; 3096 ret = pia->type == RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 3097 rte_flow_async_action_list_handle_destroy 3098 (port_id, queue_id, 3099 &attr, pia->list_handle, 3100 job, &error) : 3101 rte_flow_async_action_handle_destroy 3102 (port_id, queue_id, &attr, pia->handle, 3103 job, &error); 3104 if (ret) { 3105 free(job); 3106 ret = port_flow_complain(&error); 3107 continue; 3108 } 3109 *tmp = pia->next; 3110 printf("Indirect action #%u destruction queued\n", 3111 pia->id); 3112 break; 3113 } 3114 if (i == n) 3115 tmp = &(*tmp)->next; 3116 } 3117 return ret; 3118 } 3119 3120 /** Enqueue indirect action update operation. */ 3121 int 3122 port_queue_action_handle_update(portid_t port_id, 3123 uint32_t queue_id, bool postpone, uint32_t id, 3124 const struct rte_flow_action *action) 3125 { 3126 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3127 struct rte_port *port; 3128 struct rte_flow_error error; 3129 struct rte_flow_action_handle *action_handle; 3130 struct queue_job *job; 3131 struct port_indirect_action *pia; 3132 struct rte_flow_update_meter_mark mtr_update; 3133 const void *update; 3134 3135 action_handle = port_action_handle_get_by_id(port_id, id); 3136 if (!action_handle) 3137 return -EINVAL; 3138 3139 port = &ports[port_id]; 3140 if (queue_id >= port->queue_nb) { 3141 printf("Queue #%u is invalid\n", queue_id); 3142 return -EINVAL; 3143 } 3144 3145 job = calloc(1, sizeof(*job)); 3146 if (!job) { 3147 printf("Queue action update job allocate failed\n"); 3148 return -ENOMEM; 3149 } 3150 job->type = QUEUE_JOB_TYPE_ACTION_UPDATE; 3151 3152 pia = action_get_by_id(port_id, id); 3153 if (!pia) { 3154 free(job); 3155 return -EINVAL; 3156 } 3157 3158 switch (pia->type) { 3159 case RTE_FLOW_ACTION_TYPE_AGE: 3160 update = action->conf; 3161 break; 3162 case RTE_FLOW_ACTION_TYPE_METER_MARK: 3163 rte_memcpy(&mtr_update.meter_mark, action->conf, 3164 sizeof(struct rte_flow_action_meter_mark)); 3165 mtr_update.profile_valid = 1; 3166 mtr_update.policy_valid = 1; 3167 mtr_update.color_mode_valid = 1; 3168 mtr_update.init_color_valid = 1; 3169 mtr_update.state_valid = 1; 3170 update = &mtr_update; 3171 break; 3172 default: 3173 update = action; 3174 break; 3175 } 3176 3177 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 3178 action_handle, update, job, &error)) { 3179 free(job); 3180 return port_flow_complain(&error); 3181 } 3182 printf("Indirect action #%u update queued\n", id); 3183 return 0; 3184 } 3185 3186 void 3187 port_queue_action_handle_query_update(portid_t port_id, 3188 uint32_t queue_id, bool postpone, 3189 uint32_t id, 3190 enum rte_flow_query_update_mode qu_mode, 3191 const struct rte_flow_action *action) 3192 { 3193 int ret; 3194 struct rte_flow_error error; 3195 struct port_indirect_action *pia = action_get_by_id(port_id, id); 3196 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3197 struct queue_job *job; 3198 3199 if (!pia || !pia->handle) 3200 return; 3201 job = calloc(1, sizeof(*job)); 3202 if (!job) 3203 return; 3204 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3205 job->pia = pia; 3206 3207 ret = rte_flow_async_action_handle_query_update(port_id, queue_id, 3208 &attr, pia->handle, 3209 action, 3210 &job->query, 3211 qu_mode, job, 3212 &error); 3213 if (ret) { 3214 port_flow_complain(&error); 3215 free(job); 3216 } else { 3217 printf("port-%u: indirect action #%u update-and-query queued\n", 3218 port_id, id); 3219 } 3220 } 3221 3222 /** Enqueue indirect action query operation. */ 3223 int 3224 port_queue_action_handle_query(portid_t port_id, 3225 uint32_t queue_id, bool postpone, uint32_t id) 3226 { 3227 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3228 struct rte_port *port; 3229 struct rte_flow_error error; 3230 struct rte_flow_action_handle *action_handle; 3231 struct port_indirect_action *pia; 3232 struct queue_job *job; 3233 3234 pia = action_get_by_id(port_id, id); 3235 action_handle = pia ? pia->handle : NULL; 3236 if (!action_handle) 3237 return -EINVAL; 3238 3239 port = &ports[port_id]; 3240 if (queue_id >= port->queue_nb) { 3241 printf("Queue #%u is invalid\n", queue_id); 3242 return -EINVAL; 3243 } 3244 3245 job = calloc(1, sizeof(*job)); 3246 if (!job) { 3247 printf("Queue action update job allocate failed\n"); 3248 return -ENOMEM; 3249 } 3250 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3251 job->pia = pia; 3252 3253 if (rte_flow_async_action_handle_query(port_id, queue_id, &attr, 3254 action_handle, &job->query, job, &error)) { 3255 free(job); 3256 return port_flow_complain(&error); 3257 } 3258 printf("Indirect action #%u update queued\n", id); 3259 return 0; 3260 } 3261 3262 /** Push all the queue operations in the queue to the NIC. */ 3263 int 3264 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 3265 { 3266 struct rte_port *port; 3267 struct rte_flow_error error; 3268 int ret = 0; 3269 3270 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3271 port_id == (portid_t)RTE_PORT_ALL) 3272 return -EINVAL; 3273 port = &ports[port_id]; 3274 3275 if (queue_id >= port->queue_nb) { 3276 printf("Queue #%u is invalid\n", queue_id); 3277 return -EINVAL; 3278 } 3279 3280 memset(&error, 0x55, sizeof(error)); 3281 ret = rte_flow_push(port_id, queue_id, &error); 3282 if (ret < 0) { 3283 printf("Failed to push operations in the queue\n"); 3284 return -EINVAL; 3285 } 3286 printf("Queue #%u operations pushed\n", queue_id); 3287 return ret; 3288 } 3289 3290 /** Pull queue operation results from the queue. */ 3291 static int 3292 port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id, 3293 const uint64_t *rule, int nb_flows) 3294 { 3295 struct rte_port *port = &ports[port_id]; 3296 struct rte_flow_op_result *res; 3297 struct rte_flow_error error; 3298 uint32_t n = nb_flows; 3299 int ret = 0; 3300 int i; 3301 3302 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3303 if (!res) { 3304 printf("Failed to allocate memory for pulled results\n"); 3305 return -ENOMEM; 3306 } 3307 3308 memset(&error, 0x66, sizeof(error)); 3309 while (nb_flows > 0) { 3310 int success = 0; 3311 3312 if (n > port->queue_sz) 3313 n = port->queue_sz; 3314 ret = port_queue_flow_destroy(port_id, queue_id, true, n, rule); 3315 if (ret < 0) { 3316 free(res); 3317 return ret; 3318 } 3319 ret = rte_flow_push(port_id, queue_id, &error); 3320 if (ret < 0) { 3321 printf("Failed to push operations in the queue: %s\n", 3322 strerror(-ret)); 3323 free(res); 3324 return ret; 3325 } 3326 while (success < nb_flows) { 3327 ret = rte_flow_pull(port_id, queue_id, res, 3328 port->queue_sz, &error); 3329 if (ret < 0) { 3330 printf("Failed to pull a operation results: %s\n", 3331 strerror(-ret)); 3332 free(res); 3333 return ret; 3334 } 3335 3336 for (i = 0; i < ret; i++) { 3337 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3338 success++; 3339 } 3340 } 3341 rule += n; 3342 nb_flows -= n; 3343 n = nb_flows; 3344 } 3345 3346 free(res); 3347 return ret; 3348 } 3349 3350 /** List simply and destroy all aged flows per queue. */ 3351 void 3352 port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy) 3353 { 3354 void **contexts; 3355 int nb_context, total = 0, idx; 3356 uint64_t *rules = NULL; 3357 struct rte_port *port; 3358 struct rte_flow_error error; 3359 enum age_action_context_type *type; 3360 union { 3361 struct port_flow *pf; 3362 struct port_indirect_action *pia; 3363 } ctx; 3364 3365 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3366 port_id == (portid_t)RTE_PORT_ALL) 3367 return; 3368 port = &ports[port_id]; 3369 if (queue_id >= port->queue_nb) { 3370 printf("Error: queue #%u is invalid\n", queue_id); 3371 return; 3372 } 3373 total = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error); 3374 if (total < 0) { 3375 port_flow_complain(&error); 3376 return; 3377 } 3378 printf("Port %u queue %u total aged flows: %d\n", 3379 port_id, queue_id, total); 3380 if (total == 0) 3381 return; 3382 contexts = calloc(total, sizeof(void *)); 3383 if (contexts == NULL) { 3384 printf("Cannot allocate contexts for aged flow\n"); 3385 return; 3386 } 3387 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3388 nb_context = rte_flow_get_q_aged_flows(port_id, queue_id, contexts, 3389 total, &error); 3390 if (nb_context > total) { 3391 printf("Port %u queue %u get aged flows count(%d) > total(%d)\n", 3392 port_id, queue_id, nb_context, total); 3393 free(contexts); 3394 return; 3395 } 3396 if (destroy) { 3397 rules = malloc(sizeof(uint32_t) * nb_context); 3398 if (rules == NULL) 3399 printf("Cannot allocate memory for destroy aged flow\n"); 3400 } 3401 total = 0; 3402 for (idx = 0; idx < nb_context; idx++) { 3403 if (!contexts[idx]) { 3404 printf("Error: get Null context in port %u queue %u\n", 3405 port_id, queue_id); 3406 continue; 3407 } 3408 type = (enum age_action_context_type *)contexts[idx]; 3409 switch (*type) { 3410 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3411 ctx.pf = container_of(type, struct port_flow, age_type); 3412 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3413 "\t%c%c%c\t\n", 3414 "Flow", 3415 ctx.pf->id, 3416 ctx.pf->rule.attr->group, 3417 ctx.pf->rule.attr->priority, 3418 ctx.pf->rule.attr->ingress ? 'i' : '-', 3419 ctx.pf->rule.attr->egress ? 'e' : '-', 3420 ctx.pf->rule.attr->transfer ? 't' : '-'); 3421 if (rules != NULL) { 3422 rules[total] = ctx.pf->id; 3423 total++; 3424 } 3425 break; 3426 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3427 ctx.pia = container_of(type, 3428 struct port_indirect_action, 3429 age_type); 3430 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3431 ctx.pia->id); 3432 break; 3433 default: 3434 printf("Error: invalid context type %u\n", port_id); 3435 break; 3436 } 3437 } 3438 if (rules != NULL) { 3439 port_queue_aged_flow_destroy(port_id, queue_id, rules, total); 3440 free(rules); 3441 } 3442 printf("\n%d flows destroyed\n", total); 3443 free(contexts); 3444 } 3445 3446 /** Pull queue operation results from the queue. */ 3447 int 3448 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 3449 { 3450 struct rte_port *port; 3451 struct rte_flow_op_result *res; 3452 struct rte_flow_error error; 3453 int ret = 0; 3454 int success = 0; 3455 int i; 3456 struct queue_job *job; 3457 3458 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3459 port_id == (portid_t)RTE_PORT_ALL) 3460 return -EINVAL; 3461 port = &ports[port_id]; 3462 3463 if (queue_id >= port->queue_nb) { 3464 printf("Queue #%u is invalid\n", queue_id); 3465 return -EINVAL; 3466 } 3467 3468 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3469 if (!res) { 3470 printf("Failed to allocate memory for pulled results\n"); 3471 return -ENOMEM; 3472 } 3473 3474 memset(&error, 0x66, sizeof(error)); 3475 ret = rte_flow_pull(port_id, queue_id, res, 3476 port->queue_sz, &error); 3477 if (ret < 0) { 3478 printf("Failed to pull a operation results\n"); 3479 free(res); 3480 return -EINVAL; 3481 } 3482 3483 for (i = 0; i < ret; i++) { 3484 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3485 success++; 3486 job = (struct queue_job *)res[i].user_data; 3487 if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY || 3488 job->type == QUEUE_JOB_TYPE_FLOW_UPDATE) 3489 free(job->pf); 3490 else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY) 3491 free(job->pia); 3492 else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY) 3493 port_action_handle_query_dump(port_id, job->pia, 3494 &job->query); 3495 free(job); 3496 } 3497 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 3498 queue_id, ret, ret - success, success); 3499 free(res); 3500 return ret; 3501 } 3502 3503 /** Create flow rule. */ 3504 int 3505 port_flow_create(portid_t port_id, 3506 const struct rte_flow_attr *attr, 3507 const struct rte_flow_item *pattern, 3508 const struct rte_flow_action *actions, 3509 const struct tunnel_ops *tunnel_ops) 3510 { 3511 struct rte_flow *flow; 3512 struct rte_port *port; 3513 struct port_flow *pf; 3514 uint32_t id = 0; 3515 struct rte_flow_error error; 3516 struct port_flow_tunnel *pft = NULL; 3517 struct rte_flow_action_age *age = age_action_get(actions); 3518 3519 port = &ports[port_id]; 3520 if (port->flow_list) { 3521 if (port->flow_list->id == UINT32_MAX) { 3522 fprintf(stderr, 3523 "Highest rule ID is already assigned, delete it first"); 3524 return -ENOMEM; 3525 } 3526 id = port->flow_list->id + 1; 3527 } 3528 if (tunnel_ops->enabled) { 3529 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 3530 actions, tunnel_ops); 3531 if (!pft) 3532 return -ENOENT; 3533 if (pft->items) 3534 pattern = pft->items; 3535 if (pft->actions) 3536 actions = pft->actions; 3537 } 3538 pf = port_flow_new(attr, pattern, actions, &error); 3539 if (!pf) 3540 return port_flow_complain(&error); 3541 if (age) { 3542 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 3543 age->context = &pf->age_type; 3544 } 3545 /* Poisoning to make sure PMDs update it in case of error. */ 3546 memset(&error, 0x22, sizeof(error)); 3547 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 3548 if (!flow) { 3549 if (tunnel_ops->enabled) 3550 port_flow_tunnel_offload_cmd_release(port_id, 3551 tunnel_ops, pft); 3552 free(pf); 3553 return port_flow_complain(&error); 3554 } 3555 pf->next = port->flow_list; 3556 pf->id = id; 3557 pf->flow = flow; 3558 port->flow_list = pf; 3559 if (tunnel_ops->enabled) 3560 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 3561 printf("Flow rule #%"PRIu64" created\n", pf->id); 3562 return 0; 3563 } 3564 3565 /** Destroy a number of flow rules. */ 3566 int 3567 port_flow_destroy(portid_t port_id, uint32_t n, const uint64_t *rule) 3568 { 3569 struct rte_port *port; 3570 struct port_flow **tmp; 3571 int ret = 0; 3572 3573 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3574 port_id == (portid_t)RTE_PORT_ALL) 3575 return -EINVAL; 3576 port = &ports[port_id]; 3577 tmp = &port->flow_list; 3578 while (*tmp) { 3579 uint32_t i; 3580 3581 for (i = 0; i != n; ++i) { 3582 struct rte_flow_error error; 3583 struct port_flow *pf = *tmp; 3584 3585 if (rule[i] != pf->id) 3586 continue; 3587 /* 3588 * Poisoning to make sure PMDs update it in case 3589 * of error. 3590 */ 3591 memset(&error, 0x33, sizeof(error)); 3592 if (rte_flow_destroy(port_id, pf->flow, &error)) { 3593 ret = port_flow_complain(&error); 3594 continue; 3595 } 3596 printf("Flow rule #%"PRIu64" destroyed\n", pf->id); 3597 *tmp = pf->next; 3598 free(pf); 3599 break; 3600 } 3601 if (i == n) 3602 tmp = &(*tmp)->next; 3603 } 3604 return ret; 3605 } 3606 3607 /** Remove all flow rules. */ 3608 int 3609 port_flow_flush(portid_t port_id) 3610 { 3611 struct rte_flow_error error; 3612 struct rte_port *port; 3613 int ret = 0; 3614 3615 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3616 port_id == (portid_t)RTE_PORT_ALL) 3617 return -EINVAL; 3618 3619 port = &ports[port_id]; 3620 3621 if (port->flow_list == NULL) 3622 return ret; 3623 3624 /* Poisoning to make sure PMDs update it in case of error. */ 3625 memset(&error, 0x44, sizeof(error)); 3626 if (rte_flow_flush(port_id, &error)) { 3627 port_flow_complain(&error); 3628 } 3629 3630 while (port->flow_list) { 3631 struct port_flow *pf = port->flow_list->next; 3632 3633 free(port->flow_list); 3634 port->flow_list = pf; 3635 } 3636 return ret; 3637 } 3638 3639 /** Dump flow rules. */ 3640 int 3641 port_flow_dump(portid_t port_id, bool dump_all, uint64_t rule_id, 3642 const char *file_name) 3643 { 3644 int ret = 0; 3645 FILE *file = stdout; 3646 struct rte_flow_error error; 3647 struct rte_port *port; 3648 struct port_flow *pflow; 3649 struct rte_flow *tmpFlow = NULL; 3650 bool found = false; 3651 3652 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3653 port_id == (portid_t)RTE_PORT_ALL) 3654 return -EINVAL; 3655 3656 if (!dump_all) { 3657 port = &ports[port_id]; 3658 pflow = port->flow_list; 3659 while (pflow) { 3660 if (rule_id != pflow->id) { 3661 pflow = pflow->next; 3662 } else { 3663 tmpFlow = pflow->flow; 3664 if (tmpFlow) 3665 found = true; 3666 break; 3667 } 3668 } 3669 if (found == false) { 3670 fprintf(stderr, "Failed to dump to flow %"PRIu64"\n", 3671 rule_id); 3672 return -EINVAL; 3673 } 3674 } 3675 3676 if (file_name && strlen(file_name)) { 3677 file = fopen(file_name, "w"); 3678 if (!file) { 3679 fprintf(stderr, "Failed to create file %s: %s\n", 3680 file_name, strerror(errno)); 3681 return -errno; 3682 } 3683 } 3684 3685 if (!dump_all) 3686 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3687 else 3688 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3689 if (ret) { 3690 port_flow_complain(&error); 3691 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3692 } else 3693 printf("Flow dump finished\n"); 3694 if (file_name && strlen(file_name)) 3695 fclose(file); 3696 return ret; 3697 } 3698 3699 /** Query a flow rule. */ 3700 int 3701 port_flow_query(portid_t port_id, uint64_t rule, 3702 const struct rte_flow_action *action) 3703 { 3704 struct rte_flow_error error; 3705 struct rte_port *port; 3706 struct port_flow *pf; 3707 const char *name; 3708 union { 3709 struct rte_flow_query_count count; 3710 struct rte_flow_action_rss rss_conf; 3711 struct rte_flow_query_age age; 3712 } query; 3713 int ret; 3714 3715 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3716 port_id == (portid_t)RTE_PORT_ALL) 3717 return -EINVAL; 3718 port = &ports[port_id]; 3719 for (pf = port->flow_list; pf; pf = pf->next) 3720 if (pf->id == rule) 3721 break; 3722 if (!pf) { 3723 fprintf(stderr, "Flow rule #%"PRIu64" not found\n", rule); 3724 return -ENOENT; 3725 } 3726 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3727 &name, sizeof(name), 3728 (void *)(uintptr_t)action->type, &error); 3729 if (ret < 0) 3730 return port_flow_complain(&error); 3731 switch (action->type) { 3732 case RTE_FLOW_ACTION_TYPE_COUNT: 3733 case RTE_FLOW_ACTION_TYPE_RSS: 3734 case RTE_FLOW_ACTION_TYPE_AGE: 3735 break; 3736 default: 3737 fprintf(stderr, "Cannot query action type %d (%s)\n", 3738 action->type, name); 3739 return -ENOTSUP; 3740 } 3741 /* Poisoning to make sure PMDs update it in case of error. */ 3742 memset(&error, 0x55, sizeof(error)); 3743 memset(&query, 0, sizeof(query)); 3744 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3745 return port_flow_complain(&error); 3746 switch (action->type) { 3747 case RTE_FLOW_ACTION_TYPE_COUNT: 3748 printf("%s:\n" 3749 " hits_set: %u\n" 3750 " bytes_set: %u\n" 3751 " hits: %" PRIu64 "\n" 3752 " bytes: %" PRIu64 "\n", 3753 name, 3754 query.count.hits_set, 3755 query.count.bytes_set, 3756 query.count.hits, 3757 query.count.bytes); 3758 break; 3759 case RTE_FLOW_ACTION_TYPE_RSS: 3760 rss_config_display(&query.rss_conf); 3761 break; 3762 case RTE_FLOW_ACTION_TYPE_AGE: 3763 printf("%s:\n" 3764 " aged: %u\n" 3765 " sec_since_last_hit_valid: %u\n" 3766 " sec_since_last_hit: %" PRIu32 "\n", 3767 name, 3768 query.age.aged, 3769 query.age.sec_since_last_hit_valid, 3770 query.age.sec_since_last_hit); 3771 break; 3772 default: 3773 fprintf(stderr, 3774 "Cannot display result for action type %d (%s)\n", 3775 action->type, name); 3776 break; 3777 } 3778 return 0; 3779 } 3780 3781 /** List simply and destroy all aged flows. */ 3782 void 3783 port_flow_aged(portid_t port_id, uint8_t destroy) 3784 { 3785 void **contexts; 3786 int nb_context, total = 0, idx; 3787 struct rte_flow_error error; 3788 enum age_action_context_type *type; 3789 union { 3790 struct port_flow *pf; 3791 struct port_indirect_action *pia; 3792 } ctx; 3793 3794 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3795 port_id == (portid_t)RTE_PORT_ALL) 3796 return; 3797 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3798 printf("Port %u total aged flows: %d\n", port_id, total); 3799 if (total < 0) { 3800 port_flow_complain(&error); 3801 return; 3802 } 3803 if (total == 0) 3804 return; 3805 contexts = malloc(sizeof(void *) * total); 3806 if (contexts == NULL) { 3807 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3808 return; 3809 } 3810 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3811 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3812 if (nb_context != total) { 3813 fprintf(stderr, 3814 "Port:%d get aged flows count(%d) != total(%d)\n", 3815 port_id, nb_context, total); 3816 free(contexts); 3817 return; 3818 } 3819 total = 0; 3820 for (idx = 0; idx < nb_context; idx++) { 3821 if (!contexts[idx]) { 3822 fprintf(stderr, "Error: get Null context in port %u\n", 3823 port_id); 3824 continue; 3825 } 3826 type = (enum age_action_context_type *)contexts[idx]; 3827 switch (*type) { 3828 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3829 ctx.pf = container_of(type, struct port_flow, age_type); 3830 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3831 "\t%c%c%c\t\n", 3832 "Flow", 3833 ctx.pf->id, 3834 ctx.pf->rule.attr->group, 3835 ctx.pf->rule.attr->priority, 3836 ctx.pf->rule.attr->ingress ? 'i' : '-', 3837 ctx.pf->rule.attr->egress ? 'e' : '-', 3838 ctx.pf->rule.attr->transfer ? 't' : '-'); 3839 if (destroy && !port_flow_destroy(port_id, 1, 3840 &ctx.pf->id)) 3841 total++; 3842 break; 3843 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3844 ctx.pia = container_of(type, 3845 struct port_indirect_action, age_type); 3846 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3847 ctx.pia->id); 3848 break; 3849 default: 3850 fprintf(stderr, "Error: invalid context type %u\n", 3851 port_id); 3852 break; 3853 } 3854 } 3855 printf("\n%d flows destroyed\n", total); 3856 free(contexts); 3857 } 3858 3859 /** List flow rules. */ 3860 void 3861 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3862 { 3863 struct rte_port *port; 3864 struct port_flow *pf; 3865 struct port_flow *list = NULL; 3866 uint32_t i; 3867 3868 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3869 port_id == (portid_t)RTE_PORT_ALL) 3870 return; 3871 port = &ports[port_id]; 3872 if (!port->flow_list) 3873 return; 3874 /* Sort flows by group, priority and ID. */ 3875 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3876 struct port_flow **tmp; 3877 const struct rte_flow_attr *curr = pf->rule.attr; 3878 3879 if (n) { 3880 /* Filter out unwanted groups. */ 3881 for (i = 0; i != n; ++i) 3882 if (curr->group == group[i]) 3883 break; 3884 if (i == n) 3885 continue; 3886 } 3887 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3888 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3889 3890 if (curr->group > comp->group || 3891 (curr->group == comp->group && 3892 curr->priority > comp->priority) || 3893 (curr->group == comp->group && 3894 curr->priority == comp->priority && 3895 pf->id > (*tmp)->id)) 3896 continue; 3897 break; 3898 } 3899 pf->tmp = *tmp; 3900 *tmp = pf; 3901 } 3902 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3903 for (pf = list; pf != NULL; pf = pf->tmp) { 3904 const struct rte_flow_item *item = pf->rule.pattern; 3905 const struct rte_flow_action *action = pf->rule.actions; 3906 const char *name; 3907 3908 printf("%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3909 pf->id, 3910 pf->rule.attr->group, 3911 pf->rule.attr->priority, 3912 pf->rule.attr->ingress ? 'i' : '-', 3913 pf->rule.attr->egress ? 'e' : '-', 3914 pf->rule.attr->transfer ? 't' : '-'); 3915 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3916 if ((uint32_t)item->type > INT_MAX) 3917 name = "PMD_INTERNAL"; 3918 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3919 &name, sizeof(name), 3920 (void *)(uintptr_t)item->type, 3921 NULL) <= 0) 3922 name = "[UNKNOWN]"; 3923 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3924 printf("%s ", name); 3925 ++item; 3926 } 3927 printf("=>"); 3928 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3929 if ((uint32_t)action->type > INT_MAX) 3930 name = "PMD_INTERNAL"; 3931 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3932 &name, sizeof(name), 3933 (void *)(uintptr_t)action->type, 3934 NULL) <= 0) 3935 name = "[UNKNOWN]"; 3936 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3937 printf(" %s", name); 3938 ++action; 3939 } 3940 printf("\n"); 3941 } 3942 } 3943 3944 /** Restrict ingress traffic to the defined flow rules. */ 3945 int 3946 port_flow_isolate(portid_t port_id, int set) 3947 { 3948 struct rte_flow_error error; 3949 3950 /* Poisoning to make sure PMDs update it in case of error. */ 3951 memset(&error, 0x66, sizeof(error)); 3952 if (rte_flow_isolate(port_id, set, &error)) 3953 return port_flow_complain(&error); 3954 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3955 port_id, 3956 set ? "now restricted" : "not restricted anymore"); 3957 return 0; 3958 } 3959 3960 /* 3961 * RX/TX ring descriptors display functions. 3962 */ 3963 int 3964 rx_queue_id_is_invalid(queueid_t rxq_id) 3965 { 3966 if (rxq_id < nb_rxq) 3967 return 0; 3968 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3969 rxq_id, nb_rxq); 3970 return 1; 3971 } 3972 3973 int 3974 tx_queue_id_is_invalid(queueid_t txq_id) 3975 { 3976 if (txq_id < nb_txq) 3977 return 0; 3978 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 3979 txq_id, nb_txq); 3980 return 1; 3981 } 3982 3983 static int 3984 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 3985 { 3986 struct rte_port *port = &ports[port_id]; 3987 struct rte_eth_rxq_info rx_qinfo; 3988 int ret; 3989 3990 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 3991 if (ret == 0) { 3992 *ring_size = rx_qinfo.nb_desc; 3993 return ret; 3994 } 3995 3996 if (ret != -ENOTSUP) 3997 return ret; 3998 /* 3999 * If the rte_eth_rx_queue_info_get is not support for this PMD, 4000 * ring_size stored in testpmd will be used for validity verification. 4001 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 4002 * being 0, it will use a default value provided by PMDs to setup this 4003 * rxq. If the default value is 0, it will use the 4004 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 4005 */ 4006 if (port->nb_rx_desc[rxq_id]) 4007 *ring_size = port->nb_rx_desc[rxq_id]; 4008 else if (port->dev_info.default_rxportconf.ring_size) 4009 *ring_size = port->dev_info.default_rxportconf.ring_size; 4010 else 4011 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 4012 return 0; 4013 } 4014 4015 static int 4016 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 4017 { 4018 struct rte_port *port = &ports[port_id]; 4019 struct rte_eth_txq_info tx_qinfo; 4020 int ret; 4021 4022 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 4023 if (ret == 0) { 4024 *ring_size = tx_qinfo.nb_desc; 4025 return ret; 4026 } 4027 4028 if (ret != -ENOTSUP) 4029 return ret; 4030 /* 4031 * If the rte_eth_tx_queue_info_get is not support for this PMD, 4032 * ring_size stored in testpmd will be used for validity verification. 4033 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 4034 * being 0, it will use a default value provided by PMDs to setup this 4035 * txq. If the default value is 0, it will use the 4036 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 4037 */ 4038 if (port->nb_tx_desc[txq_id]) 4039 *ring_size = port->nb_tx_desc[txq_id]; 4040 else if (port->dev_info.default_txportconf.ring_size) 4041 *ring_size = port->dev_info.default_txportconf.ring_size; 4042 else 4043 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 4044 return 0; 4045 } 4046 4047 static int 4048 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 4049 { 4050 uint16_t ring_size; 4051 int ret; 4052 4053 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 4054 if (ret) 4055 return 1; 4056 4057 if (rxdesc_id < ring_size) 4058 return 0; 4059 4060 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 4061 rxdesc_id, ring_size); 4062 return 1; 4063 } 4064 4065 static int 4066 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 4067 { 4068 uint16_t ring_size; 4069 int ret; 4070 4071 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 4072 if (ret) 4073 return 1; 4074 4075 if (txdesc_id < ring_size) 4076 return 0; 4077 4078 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 4079 txdesc_id, ring_size); 4080 return 1; 4081 } 4082 4083 static const struct rte_memzone * 4084 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 4085 { 4086 char mz_name[RTE_MEMZONE_NAMESIZE]; 4087 const struct rte_memzone *mz; 4088 4089 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 4090 port_id, q_id, ring_name); 4091 mz = rte_memzone_lookup(mz_name); 4092 if (mz == NULL) 4093 fprintf(stderr, 4094 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 4095 ring_name, port_id, q_id, mz_name); 4096 return mz; 4097 } 4098 4099 union igb_ring_dword { 4100 uint64_t dword; 4101 struct { 4102 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 4103 uint32_t lo; 4104 uint32_t hi; 4105 #else 4106 uint32_t hi; 4107 uint32_t lo; 4108 #endif 4109 } words; 4110 }; 4111 4112 struct igb_ring_desc_32_bytes { 4113 union igb_ring_dword lo_dword; 4114 union igb_ring_dword hi_dword; 4115 union igb_ring_dword resv1; 4116 union igb_ring_dword resv2; 4117 }; 4118 4119 struct igb_ring_desc_16_bytes { 4120 union igb_ring_dword lo_dword; 4121 union igb_ring_dword hi_dword; 4122 }; 4123 4124 static void 4125 ring_rxd_display_dword(union igb_ring_dword dword) 4126 { 4127 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 4128 (unsigned)dword.words.hi); 4129 } 4130 4131 static void 4132 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 4133 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4134 portid_t port_id, 4135 #else 4136 __rte_unused portid_t port_id, 4137 #endif 4138 uint16_t desc_id) 4139 { 4140 struct igb_ring_desc_16_bytes *ring = 4141 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4142 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4143 int ret; 4144 struct rte_eth_dev_info dev_info; 4145 4146 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4147 if (ret != 0) 4148 return; 4149 4150 if (strstr(dev_info.driver_name, "i40e") != NULL) { 4151 /* 32 bytes RX descriptor, i40e only */ 4152 struct igb_ring_desc_32_bytes *ring = 4153 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 4154 ring[desc_id].lo_dword.dword = 4155 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4156 ring_rxd_display_dword(ring[desc_id].lo_dword); 4157 ring[desc_id].hi_dword.dword = 4158 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4159 ring_rxd_display_dword(ring[desc_id].hi_dword); 4160 ring[desc_id].resv1.dword = 4161 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 4162 ring_rxd_display_dword(ring[desc_id].resv1); 4163 ring[desc_id].resv2.dword = 4164 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 4165 ring_rxd_display_dword(ring[desc_id].resv2); 4166 4167 return; 4168 } 4169 #endif 4170 /* 16 bytes RX descriptor */ 4171 ring[desc_id].lo_dword.dword = 4172 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4173 ring_rxd_display_dword(ring[desc_id].lo_dword); 4174 ring[desc_id].hi_dword.dword = 4175 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4176 ring_rxd_display_dword(ring[desc_id].hi_dword); 4177 } 4178 4179 static void 4180 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 4181 { 4182 struct igb_ring_desc_16_bytes *ring; 4183 struct igb_ring_desc_16_bytes txd; 4184 4185 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4186 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4187 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4188 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 4189 (unsigned)txd.lo_dword.words.lo, 4190 (unsigned)txd.lo_dword.words.hi, 4191 (unsigned)txd.hi_dword.words.lo, 4192 (unsigned)txd.hi_dword.words.hi); 4193 } 4194 4195 void 4196 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 4197 { 4198 const struct rte_memzone *rx_mz; 4199 4200 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 4201 return; 4202 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 4203 if (rx_mz == NULL) 4204 return; 4205 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 4206 } 4207 4208 void 4209 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 4210 { 4211 const struct rte_memzone *tx_mz; 4212 4213 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 4214 return; 4215 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 4216 if (tx_mz == NULL) 4217 return; 4218 ring_tx_descriptor_display(tx_mz, txd_id); 4219 } 4220 4221 void 4222 fwd_lcores_config_display(void) 4223 { 4224 lcoreid_t lc_id; 4225 4226 printf("List of forwarding lcores:"); 4227 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 4228 printf(" %2u", fwd_lcores_cpuids[lc_id]); 4229 printf("\n"); 4230 } 4231 void 4232 rxtx_config_display(void) 4233 { 4234 portid_t pid; 4235 queueid_t qid; 4236 4237 printf(" %s%s%s packet forwarding%s packets/burst=%d\n", 4238 cur_fwd_eng->fwd_mode_name, 4239 cur_fwd_eng->status ? "-" : "", 4240 cur_fwd_eng->status ? cur_fwd_eng->status : "", 4241 retry_enabled == 0 ? "" : " with retry", 4242 nb_pkt_per_burst); 4243 4244 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 4245 printf(" packet len=%u - nb packet segments=%d\n", 4246 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 4247 4248 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 4249 nb_fwd_lcores, nb_fwd_ports); 4250 4251 RTE_ETH_FOREACH_DEV(pid) { 4252 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 4253 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 4254 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 4255 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 4256 struct rte_eth_rxq_info rx_qinfo; 4257 struct rte_eth_txq_info tx_qinfo; 4258 uint16_t rx_free_thresh_tmp; 4259 uint16_t tx_free_thresh_tmp; 4260 uint16_t tx_rs_thresh_tmp; 4261 uint16_t nb_rx_desc_tmp; 4262 uint16_t nb_tx_desc_tmp; 4263 uint64_t offloads_tmp; 4264 uint8_t pthresh_tmp; 4265 uint8_t hthresh_tmp; 4266 uint8_t wthresh_tmp; 4267 int32_t rc; 4268 4269 /* per port config */ 4270 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 4271 (unsigned int)pid, nb_rxq, nb_txq); 4272 4273 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 4274 ports[pid].dev_conf.rxmode.offloads, 4275 ports[pid].dev_conf.txmode.offloads); 4276 4277 /* per rx queue config only for first queue to be less verbose */ 4278 for (qid = 0; qid < 1; qid++) { 4279 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 4280 if (rc) { 4281 nb_rx_desc_tmp = nb_rx_desc[qid]; 4282 rx_free_thresh_tmp = 4283 rx_conf[qid].rx_free_thresh; 4284 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 4285 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 4286 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 4287 offloads_tmp = rx_conf[qid].offloads; 4288 } else { 4289 nb_rx_desc_tmp = rx_qinfo.nb_desc; 4290 rx_free_thresh_tmp = 4291 rx_qinfo.conf.rx_free_thresh; 4292 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 4293 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 4294 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 4295 offloads_tmp = rx_qinfo.conf.offloads; 4296 } 4297 4298 printf(" RX queue: %d\n", qid); 4299 printf(" RX desc=%d - RX free threshold=%d\n", 4300 nb_rx_desc_tmp, rx_free_thresh_tmp); 4301 printf(" RX threshold registers: pthresh=%d hthresh=%d " 4302 " wthresh=%d\n", 4303 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4304 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 4305 if (rx_conf->share_group > 0) 4306 printf(" share_group=%u share_qid=%u", 4307 rx_conf->share_group, 4308 rx_conf->share_qid); 4309 printf("\n"); 4310 } 4311 4312 /* per tx queue config only for first queue to be less verbose */ 4313 for (qid = 0; qid < 1; qid++) { 4314 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 4315 if (rc) { 4316 nb_tx_desc_tmp = nb_tx_desc[qid]; 4317 tx_free_thresh_tmp = 4318 tx_conf[qid].tx_free_thresh; 4319 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 4320 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 4321 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 4322 offloads_tmp = tx_conf[qid].offloads; 4323 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 4324 } else { 4325 nb_tx_desc_tmp = tx_qinfo.nb_desc; 4326 tx_free_thresh_tmp = 4327 tx_qinfo.conf.tx_free_thresh; 4328 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 4329 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 4330 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 4331 offloads_tmp = tx_qinfo.conf.offloads; 4332 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 4333 } 4334 4335 printf(" TX queue: %d\n", qid); 4336 printf(" TX desc=%d - TX free threshold=%d\n", 4337 nb_tx_desc_tmp, tx_free_thresh_tmp); 4338 printf(" TX threshold registers: pthresh=%d hthresh=%d " 4339 " wthresh=%d\n", 4340 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4341 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 4342 offloads_tmp, tx_rs_thresh_tmp); 4343 } 4344 } 4345 } 4346 4347 void 4348 port_rss_reta_info(portid_t port_id, 4349 struct rte_eth_rss_reta_entry64 *reta_conf, 4350 uint16_t nb_entries) 4351 { 4352 uint16_t i, idx, shift; 4353 int ret; 4354 4355 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4356 return; 4357 4358 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 4359 if (ret != 0) { 4360 fprintf(stderr, 4361 "Failed to get RSS RETA info, return code = %d\n", 4362 ret); 4363 return; 4364 } 4365 4366 for (i = 0; i < nb_entries; i++) { 4367 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4368 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4369 if (!(reta_conf[idx].mask & (1ULL << shift))) 4370 continue; 4371 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 4372 i, reta_conf[idx].reta[shift]); 4373 } 4374 } 4375 4376 /* 4377 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 4378 * key of the port. 4379 */ 4380 void 4381 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 4382 { 4383 struct rte_eth_rss_conf rss_conf = {0}; 4384 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 4385 uint64_t rss_hf; 4386 uint8_t i; 4387 int diag; 4388 struct rte_eth_dev_info dev_info; 4389 uint8_t hash_key_size; 4390 int ret; 4391 4392 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4393 return; 4394 4395 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4396 if (ret != 0) 4397 return; 4398 4399 if (dev_info.hash_key_size > 0 && 4400 dev_info.hash_key_size <= sizeof(rss_key)) 4401 hash_key_size = dev_info.hash_key_size; 4402 else { 4403 fprintf(stderr, 4404 "dev_info did not provide a valid hash key size\n"); 4405 return; 4406 } 4407 4408 /* Get RSS hash key if asked to display it */ 4409 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 4410 rss_conf.rss_key_len = hash_key_size; 4411 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4412 if (diag != 0) { 4413 switch (diag) { 4414 case -ENODEV: 4415 fprintf(stderr, "port index %d invalid\n", port_id); 4416 break; 4417 case -ENOTSUP: 4418 fprintf(stderr, "operation not supported by device\n"); 4419 break; 4420 default: 4421 fprintf(stderr, "operation failed - diag=%d\n", diag); 4422 break; 4423 } 4424 return; 4425 } 4426 rss_hf = rss_conf.rss_hf; 4427 if (rss_hf == 0) { 4428 printf("RSS disabled\n"); 4429 return; 4430 } 4431 printf("RSS functions:\n"); 4432 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 4433 if (!show_rss_key) 4434 return; 4435 printf("RSS key:\n"); 4436 for (i = 0; i < hash_key_size; i++) 4437 printf("%02X", rss_key[i]); 4438 printf("\n"); 4439 } 4440 4441 void 4442 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 4443 uint8_t hash_key_len) 4444 { 4445 struct rte_eth_rss_conf rss_conf; 4446 int diag; 4447 4448 rss_conf.rss_key = NULL; 4449 rss_conf.rss_key_len = 0; 4450 rss_conf.rss_hf = str_to_rsstypes(rss_type); 4451 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4452 if (diag == 0) { 4453 rss_conf.rss_key = hash_key; 4454 rss_conf.rss_key_len = hash_key_len; 4455 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 4456 } 4457 if (diag == 0) 4458 return; 4459 4460 switch (diag) { 4461 case -ENODEV: 4462 fprintf(stderr, "port index %d invalid\n", port_id); 4463 break; 4464 case -ENOTSUP: 4465 fprintf(stderr, "operation not supported by device\n"); 4466 break; 4467 default: 4468 fprintf(stderr, "operation failed - diag=%d\n", diag); 4469 break; 4470 } 4471 } 4472 4473 /* 4474 * Check whether a shared rxq scheduled on other lcores. 4475 */ 4476 static bool 4477 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 4478 portid_t src_port, queueid_t src_rxq, 4479 uint32_t share_group, queueid_t share_rxq) 4480 { 4481 streamid_t sm_id; 4482 streamid_t nb_fs_per_lcore; 4483 lcoreid_t nb_fc; 4484 lcoreid_t lc_id; 4485 struct fwd_stream *fs; 4486 struct rte_port *port; 4487 struct rte_eth_dev_info *dev_info; 4488 struct rte_eth_rxconf *rxq_conf; 4489 4490 nb_fc = cur_fwd_config.nb_fwd_lcores; 4491 /* Check remaining cores. */ 4492 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 4493 sm_id = fwd_lcores[lc_id]->stream_idx; 4494 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4495 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4496 sm_id++) { 4497 fs = fwd_streams[sm_id]; 4498 port = &ports[fs->rx_port]; 4499 dev_info = &port->dev_info; 4500 rxq_conf = &port->rxq[fs->rx_queue].conf; 4501 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4502 == 0 || rxq_conf->share_group == 0) 4503 /* Not shared rxq. */ 4504 continue; 4505 if (domain_id != port->dev_info.switch_info.domain_id) 4506 continue; 4507 if (rxq_conf->share_group != share_group) 4508 continue; 4509 if (rxq_conf->share_qid != share_rxq) 4510 continue; 4511 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 4512 share_group, share_rxq); 4513 printf(" lcore %hhu Port %hu queue %hu\n", 4514 src_lc, src_port, src_rxq); 4515 printf(" lcore %hhu Port %hu queue %hu\n", 4516 lc_id, fs->rx_port, fs->rx_queue); 4517 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 4518 nb_rxq); 4519 return true; 4520 } 4521 } 4522 return false; 4523 } 4524 4525 /* 4526 * Check shared rxq configuration. 4527 * 4528 * Shared group must not being scheduled on different core. 4529 */ 4530 bool 4531 pkt_fwd_shared_rxq_check(void) 4532 { 4533 streamid_t sm_id; 4534 streamid_t nb_fs_per_lcore; 4535 lcoreid_t nb_fc; 4536 lcoreid_t lc_id; 4537 struct fwd_stream *fs; 4538 uint16_t domain_id; 4539 struct rte_port *port; 4540 struct rte_eth_dev_info *dev_info; 4541 struct rte_eth_rxconf *rxq_conf; 4542 4543 if (rxq_share == 0) 4544 return true; 4545 nb_fc = cur_fwd_config.nb_fwd_lcores; 4546 /* 4547 * Check streams on each core, make sure the same switch domain + 4548 * group + queue doesn't get scheduled on other cores. 4549 */ 4550 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 4551 sm_id = fwd_lcores[lc_id]->stream_idx; 4552 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4553 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4554 sm_id++) { 4555 fs = fwd_streams[sm_id]; 4556 /* Update lcore info stream being scheduled. */ 4557 fs->lcore = fwd_lcores[lc_id]; 4558 port = &ports[fs->rx_port]; 4559 dev_info = &port->dev_info; 4560 rxq_conf = &port->rxq[fs->rx_queue].conf; 4561 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4562 == 0 || rxq_conf->share_group == 0) 4563 /* Not shared rxq. */ 4564 continue; 4565 /* Check shared rxq not scheduled on remaining cores. */ 4566 domain_id = port->dev_info.switch_info.domain_id; 4567 if (fwd_stream_on_other_lcores(domain_id, lc_id, 4568 fs->rx_port, 4569 fs->rx_queue, 4570 rxq_conf->share_group, 4571 rxq_conf->share_qid)) 4572 return false; 4573 } 4574 } 4575 return true; 4576 } 4577 4578 /* 4579 * Setup forwarding configuration for each logical core. 4580 */ 4581 static void 4582 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 4583 { 4584 streamid_t nb_fs_per_lcore; 4585 streamid_t nb_fs; 4586 streamid_t sm_id; 4587 lcoreid_t nb_extra; 4588 lcoreid_t nb_fc; 4589 lcoreid_t nb_lc; 4590 lcoreid_t lc_id; 4591 4592 nb_fs = cfg->nb_fwd_streams; 4593 nb_fc = cfg->nb_fwd_lcores; 4594 if (nb_fs <= nb_fc) { 4595 nb_fs_per_lcore = 1; 4596 nb_extra = 0; 4597 } else { 4598 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 4599 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 4600 } 4601 4602 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 4603 sm_id = 0; 4604 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 4605 fwd_lcores[lc_id]->stream_idx = sm_id; 4606 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 4607 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4608 } 4609 4610 /* 4611 * Assign extra remaining streams, if any. 4612 */ 4613 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 4614 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 4615 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 4616 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 4617 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4618 } 4619 } 4620 4621 static portid_t 4622 fwd_topology_tx_port_get(portid_t rxp) 4623 { 4624 static int warning_once = 1; 4625 4626 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 4627 4628 switch (port_topology) { 4629 default: 4630 case PORT_TOPOLOGY_PAIRED: 4631 if ((rxp & 0x1) == 0) { 4632 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 4633 return rxp + 1; 4634 if (warning_once) { 4635 fprintf(stderr, 4636 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 4637 warning_once = 0; 4638 } 4639 return rxp; 4640 } 4641 return rxp - 1; 4642 case PORT_TOPOLOGY_CHAINED: 4643 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 4644 case PORT_TOPOLOGY_LOOP: 4645 return rxp; 4646 } 4647 } 4648 4649 static void 4650 simple_fwd_config_setup(void) 4651 { 4652 portid_t i; 4653 4654 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 4655 cur_fwd_config.nb_fwd_streams = 4656 (streamid_t) cur_fwd_config.nb_fwd_ports; 4657 4658 /* reinitialize forwarding streams */ 4659 init_fwd_streams(); 4660 4661 /* 4662 * In the simple forwarding test, the number of forwarding cores 4663 * must be lower or equal to the number of forwarding ports. 4664 */ 4665 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4666 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 4667 cur_fwd_config.nb_fwd_lcores = 4668 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 4669 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4670 4671 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 4672 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 4673 fwd_streams[i]->rx_queue = 0; 4674 fwd_streams[i]->tx_port = 4675 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 4676 fwd_streams[i]->tx_queue = 0; 4677 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 4678 fwd_streams[i]->retry_enabled = retry_enabled; 4679 } 4680 } 4681 4682 /** 4683 * For the RSS forwarding test all streams distributed over lcores. Each stream 4684 * being composed of a RX queue to poll on a RX port for input messages, 4685 * associated with a TX queue of a TX port where to send forwarded packets. 4686 */ 4687 static void 4688 rss_fwd_config_setup(void) 4689 { 4690 portid_t rxp; 4691 portid_t txp; 4692 queueid_t rxq; 4693 queueid_t nb_q; 4694 streamid_t sm_id; 4695 int start; 4696 int end; 4697 4698 nb_q = nb_rxq; 4699 if (nb_q > nb_txq) 4700 nb_q = nb_txq; 4701 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4702 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4703 cur_fwd_config.nb_fwd_streams = 4704 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4705 4706 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4707 cur_fwd_config.nb_fwd_lcores = 4708 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4709 4710 /* reinitialize forwarding streams */ 4711 init_fwd_streams(); 4712 4713 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4714 4715 if (proc_id > 0 && nb_q % num_procs != 0) 4716 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4717 4718 /** 4719 * In multi-process, All queues are allocated to different 4720 * processes based on num_procs and proc_id. For example: 4721 * if supports 4 queues(nb_q), 2 processes(num_procs), 4722 * the 0~1 queue for primary process. 4723 * the 2~3 queue for secondary process. 4724 */ 4725 start = proc_id * nb_q / num_procs; 4726 end = start + nb_q / num_procs; 4727 rxp = 0; 4728 rxq = start; 4729 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4730 struct fwd_stream *fs; 4731 4732 fs = fwd_streams[sm_id]; 4733 txp = fwd_topology_tx_port_get(rxp); 4734 fs->rx_port = fwd_ports_ids[rxp]; 4735 fs->rx_queue = rxq; 4736 fs->tx_port = fwd_ports_ids[txp]; 4737 fs->tx_queue = rxq; 4738 fs->peer_addr = fs->tx_port; 4739 fs->retry_enabled = retry_enabled; 4740 rxp++; 4741 if (rxp < nb_fwd_ports) 4742 continue; 4743 rxp = 0; 4744 rxq++; 4745 if (rxq >= end) 4746 rxq = start; 4747 } 4748 } 4749 4750 static uint16_t 4751 get_fwd_port_total_tc_num(void) 4752 { 4753 struct rte_eth_dcb_info dcb_info; 4754 uint16_t total_tc_num = 0; 4755 unsigned int i; 4756 4757 for (i = 0; i < nb_fwd_ports; i++) { 4758 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4759 total_tc_num += dcb_info.nb_tcs; 4760 } 4761 4762 return total_tc_num; 4763 } 4764 4765 /** 4766 * For the DCB forwarding test, each core is assigned on each traffic class. 4767 * 4768 * Each core is assigned a multi-stream, each stream being composed of 4769 * a RX queue to poll on a RX port for input messages, associated with 4770 * a TX queue of a TX port where to send forwarded packets. All RX and 4771 * TX queues are mapping to the same traffic class. 4772 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4773 * the same core 4774 */ 4775 static void 4776 dcb_fwd_config_setup(void) 4777 { 4778 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4779 portid_t txp, rxp = 0; 4780 queueid_t txq, rxq = 0; 4781 lcoreid_t lc_id; 4782 uint16_t nb_rx_queue, nb_tx_queue; 4783 uint16_t i, j, k, sm_id = 0; 4784 uint16_t total_tc_num; 4785 struct rte_port *port; 4786 uint8_t tc = 0; 4787 portid_t pid; 4788 int ret; 4789 4790 /* 4791 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4792 * or RTE_PORT_STOPPED. 4793 * 4794 * Re-configure ports to get updated mapping between tc and queue in 4795 * case the queue number of the port is changed. Skip for started ports 4796 * since modifying queue number and calling dev_configure need to stop 4797 * ports first. 4798 */ 4799 for (pid = 0; pid < nb_fwd_ports; pid++) { 4800 if (port_is_started(pid) == 1) 4801 continue; 4802 4803 port = &ports[pid]; 4804 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4805 &port->dev_conf); 4806 if (ret < 0) { 4807 fprintf(stderr, 4808 "Failed to re-configure port %d, ret = %d.\n", 4809 pid, ret); 4810 return; 4811 } 4812 } 4813 4814 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4815 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4816 cur_fwd_config.nb_fwd_streams = 4817 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4818 total_tc_num = get_fwd_port_total_tc_num(); 4819 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4820 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4821 4822 /* reinitialize forwarding streams */ 4823 init_fwd_streams(); 4824 sm_id = 0; 4825 txp = 1; 4826 /* get the dcb info on the first RX and TX ports */ 4827 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4828 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4829 4830 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4831 fwd_lcores[lc_id]->stream_nb = 0; 4832 fwd_lcores[lc_id]->stream_idx = sm_id; 4833 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4834 /* if the nb_queue is zero, means this tc is 4835 * not enabled on the POOL 4836 */ 4837 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4838 break; 4839 k = fwd_lcores[lc_id]->stream_nb + 4840 fwd_lcores[lc_id]->stream_idx; 4841 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4842 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4843 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4844 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4845 for (j = 0; j < nb_rx_queue; j++) { 4846 struct fwd_stream *fs; 4847 4848 fs = fwd_streams[k + j]; 4849 fs->rx_port = fwd_ports_ids[rxp]; 4850 fs->rx_queue = rxq + j; 4851 fs->tx_port = fwd_ports_ids[txp]; 4852 fs->tx_queue = txq + j % nb_tx_queue; 4853 fs->peer_addr = fs->tx_port; 4854 fs->retry_enabled = retry_enabled; 4855 } 4856 fwd_lcores[lc_id]->stream_nb += 4857 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4858 } 4859 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4860 4861 tc++; 4862 if (tc < rxp_dcb_info.nb_tcs) 4863 continue; 4864 /* Restart from TC 0 on next RX port */ 4865 tc = 0; 4866 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4867 rxp = (portid_t) 4868 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4869 else 4870 rxp++; 4871 if (rxp >= nb_fwd_ports) 4872 return; 4873 /* get the dcb information on next RX and TX ports */ 4874 if ((rxp & 0x1) == 0) 4875 txp = (portid_t) (rxp + 1); 4876 else 4877 txp = (portid_t) (rxp - 1); 4878 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4879 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4880 } 4881 } 4882 4883 static void 4884 icmp_echo_config_setup(void) 4885 { 4886 portid_t rxp; 4887 queueid_t rxq; 4888 lcoreid_t lc_id; 4889 uint16_t sm_id; 4890 4891 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4892 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4893 (nb_txq * nb_fwd_ports); 4894 else 4895 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4896 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4897 cur_fwd_config.nb_fwd_streams = 4898 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4899 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4900 cur_fwd_config.nb_fwd_lcores = 4901 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4902 if (verbose_level > 0) { 4903 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4904 __FUNCTION__, 4905 cur_fwd_config.nb_fwd_lcores, 4906 cur_fwd_config.nb_fwd_ports, 4907 cur_fwd_config.nb_fwd_streams); 4908 } 4909 4910 /* reinitialize forwarding streams */ 4911 init_fwd_streams(); 4912 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4913 rxp = 0; rxq = 0; 4914 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4915 if (verbose_level > 0) 4916 printf(" core=%d: \n", lc_id); 4917 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4918 struct fwd_stream *fs; 4919 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4920 fs->rx_port = fwd_ports_ids[rxp]; 4921 fs->rx_queue = rxq; 4922 fs->tx_port = fs->rx_port; 4923 fs->tx_queue = rxq; 4924 fs->peer_addr = fs->tx_port; 4925 fs->retry_enabled = retry_enabled; 4926 if (verbose_level > 0) 4927 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4928 sm_id, fs->rx_port, fs->rx_queue, 4929 fs->tx_queue); 4930 rxq = (queueid_t) (rxq + 1); 4931 if (rxq == nb_rxq) { 4932 rxq = 0; 4933 rxp = (portid_t) (rxp + 1); 4934 } 4935 } 4936 } 4937 } 4938 4939 void 4940 fwd_config_setup(void) 4941 { 4942 struct rte_port *port; 4943 portid_t pt_id; 4944 unsigned int i; 4945 4946 cur_fwd_config.fwd_eng = cur_fwd_eng; 4947 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4948 icmp_echo_config_setup(); 4949 return; 4950 } 4951 4952 if ((nb_rxq > 1) && (nb_txq > 1)){ 4953 if (dcb_config) { 4954 for (i = 0; i < nb_fwd_ports; i++) { 4955 pt_id = fwd_ports_ids[i]; 4956 port = &ports[pt_id]; 4957 if (!port->dcb_flag) { 4958 fprintf(stderr, 4959 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4960 return; 4961 } 4962 } 4963 if (nb_fwd_lcores == 1) { 4964 fprintf(stderr, 4965 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4966 return; 4967 } 4968 4969 dcb_fwd_config_setup(); 4970 } else 4971 rss_fwd_config_setup(); 4972 } 4973 else 4974 simple_fwd_config_setup(); 4975 } 4976 4977 static const char * 4978 mp_alloc_to_str(uint8_t mode) 4979 { 4980 switch (mode) { 4981 case MP_ALLOC_NATIVE: 4982 return "native"; 4983 case MP_ALLOC_ANON: 4984 return "anon"; 4985 case MP_ALLOC_XMEM: 4986 return "xmem"; 4987 case MP_ALLOC_XMEM_HUGE: 4988 return "xmemhuge"; 4989 case MP_ALLOC_XBUF: 4990 return "xbuf"; 4991 default: 4992 return "invalid"; 4993 } 4994 } 4995 4996 void 4997 pkt_fwd_config_display(struct fwd_config *cfg) 4998 { 4999 struct fwd_stream *fs; 5000 lcoreid_t lc_id; 5001 streamid_t sm_id; 5002 5003 printf("%s%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 5004 "NUMA support %s, MP allocation mode: %s\n", 5005 cfg->fwd_eng->fwd_mode_name, 5006 cfg->fwd_eng->status ? "-" : "", 5007 cfg->fwd_eng->status ? cfg->fwd_eng->status : "", 5008 retry_enabled == 0 ? "" : " with retry", 5009 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 5010 numa_support == 1 ? "enabled" : "disabled", 5011 mp_alloc_to_str(mp_alloc_type)); 5012 5013 if (retry_enabled) 5014 printf("TX retry num: %u, delay between TX retries: %uus\n", 5015 burst_tx_retry_num, burst_tx_delay_time); 5016 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 5017 printf("Logical Core %u (socket %u) forwards packets on " 5018 "%d streams:", 5019 fwd_lcores_cpuids[lc_id], 5020 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 5021 fwd_lcores[lc_id]->stream_nb); 5022 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 5023 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 5024 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 5025 "P=%d/Q=%d (socket %u) ", 5026 fs->rx_port, fs->rx_queue, 5027 ports[fs->rx_port].socket_id, 5028 fs->tx_port, fs->tx_queue, 5029 ports[fs->tx_port].socket_id); 5030 print_ethaddr("peer=", 5031 &peer_eth_addrs[fs->peer_addr]); 5032 } 5033 printf("\n"); 5034 } 5035 printf("\n"); 5036 } 5037 5038 void 5039 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 5040 { 5041 struct rte_ether_addr new_peer_addr; 5042 if (!rte_eth_dev_is_valid_port(port_id)) { 5043 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 5044 return; 5045 } 5046 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 5047 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 5048 peer_addr); 5049 return; 5050 } 5051 peer_eth_addrs[port_id] = new_peer_addr; 5052 } 5053 5054 int 5055 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 5056 { 5057 unsigned int i; 5058 unsigned int lcore_cpuid; 5059 int record_now; 5060 5061 record_now = 0; 5062 again: 5063 for (i = 0; i < nb_lc; i++) { 5064 lcore_cpuid = lcorelist[i]; 5065 if (! rte_lcore_is_enabled(lcore_cpuid)) { 5066 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 5067 return -1; 5068 } 5069 if (lcore_cpuid == rte_get_main_lcore()) { 5070 fprintf(stderr, 5071 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 5072 lcore_cpuid); 5073 return -1; 5074 } 5075 if (record_now) 5076 fwd_lcores_cpuids[i] = lcore_cpuid; 5077 } 5078 if (record_now == 0) { 5079 record_now = 1; 5080 goto again; 5081 } 5082 nb_cfg_lcores = (lcoreid_t) nb_lc; 5083 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 5084 printf("previous number of forwarding cores %u - changed to " 5085 "number of configured cores %u\n", 5086 (unsigned int) nb_fwd_lcores, nb_lc); 5087 nb_fwd_lcores = (lcoreid_t) nb_lc; 5088 } 5089 5090 return 0; 5091 } 5092 5093 int 5094 set_fwd_lcores_mask(uint64_t lcoremask) 5095 { 5096 unsigned int lcorelist[64]; 5097 unsigned int nb_lc; 5098 unsigned int i; 5099 5100 if (lcoremask == 0) { 5101 fprintf(stderr, "Invalid NULL mask of cores\n"); 5102 return -1; 5103 } 5104 nb_lc = 0; 5105 for (i = 0; i < 64; i++) { 5106 if (! ((uint64_t)(1ULL << i) & lcoremask)) 5107 continue; 5108 lcorelist[nb_lc++] = i; 5109 } 5110 return set_fwd_lcores_list(lcorelist, nb_lc); 5111 } 5112 5113 void 5114 set_fwd_lcores_number(uint16_t nb_lc) 5115 { 5116 if (test_done == 0) { 5117 fprintf(stderr, "Please stop forwarding first\n"); 5118 return; 5119 } 5120 if (nb_lc > nb_cfg_lcores) { 5121 fprintf(stderr, 5122 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 5123 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 5124 return; 5125 } 5126 nb_fwd_lcores = (lcoreid_t) nb_lc; 5127 printf("Number of forwarding cores set to %u\n", 5128 (unsigned int) nb_fwd_lcores); 5129 } 5130 5131 void 5132 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 5133 { 5134 unsigned int i; 5135 portid_t port_id; 5136 int record_now; 5137 5138 record_now = 0; 5139 again: 5140 for (i = 0; i < nb_pt; i++) { 5141 port_id = (portid_t) portlist[i]; 5142 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5143 return; 5144 if (record_now) 5145 fwd_ports_ids[i] = port_id; 5146 } 5147 if (record_now == 0) { 5148 record_now = 1; 5149 goto again; 5150 } 5151 nb_cfg_ports = (portid_t) nb_pt; 5152 if (nb_fwd_ports != (portid_t) nb_pt) { 5153 printf("previous number of forwarding ports %u - changed to " 5154 "number of configured ports %u\n", 5155 (unsigned int) nb_fwd_ports, nb_pt); 5156 nb_fwd_ports = (portid_t) nb_pt; 5157 } 5158 } 5159 5160 /** 5161 * Parse the user input and obtain the list of forwarding ports 5162 * 5163 * @param[in] list 5164 * String containing the user input. User can specify 5165 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 5166 * For example, if the user wants to use all the available 5167 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 5168 * If the user wants to use only the ports 1,2 then the input 5169 * is 1,2. 5170 * valid characters are '-' and ',' 5171 * @param[out] values 5172 * This array will be filled with a list of port IDs 5173 * based on the user input 5174 * Note that duplicate entries are discarded and only the first 5175 * count entries in this array are port IDs and all the rest 5176 * will contain default values 5177 * @param[in] maxsize 5178 * This parameter denotes 2 things 5179 * 1) Number of elements in the values array 5180 * 2) Maximum value of each element in the values array 5181 * @return 5182 * On success, returns total count of parsed port IDs 5183 * On failure, returns 0 5184 */ 5185 static unsigned int 5186 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 5187 { 5188 unsigned int count = 0; 5189 char *end = NULL; 5190 int min, max; 5191 int value, i; 5192 unsigned int marked[maxsize]; 5193 5194 if (list == NULL || values == NULL) 5195 return 0; 5196 5197 for (i = 0; i < (int)maxsize; i++) 5198 marked[i] = 0; 5199 5200 min = INT_MAX; 5201 5202 do { 5203 /*Remove the blank spaces if any*/ 5204 while (isblank(*list)) 5205 list++; 5206 if (*list == '\0') 5207 break; 5208 errno = 0; 5209 value = strtol(list, &end, 10); 5210 if (errno || end == NULL) 5211 return 0; 5212 if (value < 0 || value >= (int)maxsize) 5213 return 0; 5214 while (isblank(*end)) 5215 end++; 5216 if (*end == '-' && min == INT_MAX) { 5217 min = value; 5218 } else if ((*end == ',') || (*end == '\0')) { 5219 max = value; 5220 if (min == INT_MAX) 5221 min = value; 5222 for (i = min; i <= max; i++) { 5223 if (count < maxsize) { 5224 if (marked[i]) 5225 continue; 5226 values[count] = i; 5227 marked[i] = 1; 5228 count++; 5229 } 5230 } 5231 min = INT_MAX; 5232 } else 5233 return 0; 5234 list = end + 1; 5235 } while (*end != '\0'); 5236 5237 return count; 5238 } 5239 5240 void 5241 parse_fwd_portlist(const char *portlist) 5242 { 5243 unsigned int portcount; 5244 unsigned int portindex[RTE_MAX_ETHPORTS]; 5245 unsigned int i, valid_port_count = 0; 5246 5247 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 5248 if (!portcount) 5249 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 5250 5251 /* 5252 * Here we verify the validity of the ports 5253 * and thereby calculate the total number of 5254 * valid ports 5255 */ 5256 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 5257 if (rte_eth_dev_is_valid_port(portindex[i])) { 5258 portindex[valid_port_count] = portindex[i]; 5259 valid_port_count++; 5260 } 5261 } 5262 5263 set_fwd_ports_list(portindex, valid_port_count); 5264 } 5265 5266 void 5267 set_fwd_ports_mask(uint64_t portmask) 5268 { 5269 unsigned int portlist[64]; 5270 unsigned int nb_pt; 5271 unsigned int i; 5272 5273 if (portmask == 0) { 5274 fprintf(stderr, "Invalid NULL mask of ports\n"); 5275 return; 5276 } 5277 nb_pt = 0; 5278 RTE_ETH_FOREACH_DEV(i) { 5279 if (! ((uint64_t)(1ULL << i) & portmask)) 5280 continue; 5281 portlist[nb_pt++] = i; 5282 } 5283 set_fwd_ports_list(portlist, nb_pt); 5284 } 5285 5286 void 5287 set_fwd_ports_number(uint16_t nb_pt) 5288 { 5289 if (nb_pt > nb_cfg_ports) { 5290 fprintf(stderr, 5291 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 5292 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 5293 return; 5294 } 5295 nb_fwd_ports = (portid_t) nb_pt; 5296 printf("Number of forwarding ports set to %u\n", 5297 (unsigned int) nb_fwd_ports); 5298 } 5299 5300 int 5301 port_is_forwarding(portid_t port_id) 5302 { 5303 unsigned int i; 5304 5305 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5306 return -1; 5307 5308 for (i = 0; i < nb_fwd_ports; i++) { 5309 if (fwd_ports_ids[i] == port_id) 5310 return 1; 5311 } 5312 5313 return 0; 5314 } 5315 5316 void 5317 set_nb_pkt_per_burst(uint16_t nb) 5318 { 5319 if (nb > MAX_PKT_BURST) { 5320 fprintf(stderr, 5321 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 5322 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 5323 return; 5324 } 5325 nb_pkt_per_burst = nb; 5326 printf("Number of packets per burst set to %u\n", 5327 (unsigned int) nb_pkt_per_burst); 5328 } 5329 5330 static const char * 5331 tx_split_get_name(enum tx_pkt_split split) 5332 { 5333 uint32_t i; 5334 5335 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5336 if (tx_split_name[i].split == split) 5337 return tx_split_name[i].name; 5338 } 5339 return NULL; 5340 } 5341 5342 void 5343 set_tx_pkt_split(const char *name) 5344 { 5345 uint32_t i; 5346 5347 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5348 if (strcmp(tx_split_name[i].name, name) == 0) { 5349 tx_pkt_split = tx_split_name[i].split; 5350 return; 5351 } 5352 } 5353 fprintf(stderr, "unknown value: \"%s\"\n", name); 5354 } 5355 5356 int 5357 parse_fec_mode(const char *name, uint32_t *fec_capa) 5358 { 5359 uint8_t i; 5360 5361 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 5362 if (strcmp(fec_mode_name[i].name, name) == 0) { 5363 *fec_capa = 5364 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 5365 return 0; 5366 } 5367 } 5368 return -1; 5369 } 5370 5371 void 5372 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 5373 { 5374 unsigned int i, j; 5375 5376 printf("FEC capabilities:\n"); 5377 5378 for (i = 0; i < num; i++) { 5379 printf("%s : ", 5380 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 5381 5382 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 5383 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 5384 speed_fec_capa[i].capa) 5385 printf("%s ", fec_mode_name[j].name); 5386 } 5387 printf("\n"); 5388 } 5389 } 5390 5391 void 5392 show_rx_pkt_offsets(void) 5393 { 5394 uint32_t i, n; 5395 5396 n = rx_pkt_nb_offs; 5397 printf("Number of offsets: %u\n", n); 5398 if (n) { 5399 printf("Segment offsets: "); 5400 for (i = 0; i != n - 1; i++) 5401 printf("%hu,", rx_pkt_seg_offsets[i]); 5402 printf("%hu\n", rx_pkt_seg_lengths[i]); 5403 } 5404 } 5405 5406 void 5407 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 5408 { 5409 unsigned int i; 5410 5411 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 5412 printf("nb segments per RX packets=%u >= " 5413 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 5414 return; 5415 } 5416 5417 /* 5418 * No extra check here, the segment length will be checked by PMD 5419 * in the extended queue setup. 5420 */ 5421 for (i = 0; i < nb_offs; i++) { 5422 if (seg_offsets[i] >= UINT16_MAX) { 5423 printf("offset[%u]=%u > UINT16_MAX - give up\n", 5424 i, seg_offsets[i]); 5425 return; 5426 } 5427 } 5428 5429 for (i = 0; i < nb_offs; i++) 5430 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 5431 5432 rx_pkt_nb_offs = (uint8_t) nb_offs; 5433 } 5434 5435 void 5436 show_rx_pkt_segments(void) 5437 { 5438 uint32_t i, n; 5439 5440 n = rx_pkt_nb_segs; 5441 printf("Number of segments: %u\n", n); 5442 if (n) { 5443 printf("Segment sizes: "); 5444 for (i = 0; i != n - 1; i++) 5445 printf("%hu,", rx_pkt_seg_lengths[i]); 5446 printf("%hu\n", rx_pkt_seg_lengths[i]); 5447 } 5448 } 5449 5450 static const char *get_ptype_str(uint32_t ptype) 5451 { 5452 const char *str; 5453 5454 switch (ptype) { 5455 case RTE_PTYPE_L2_ETHER: 5456 str = "eth"; 5457 break; 5458 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 5459 str = "ipv4"; 5460 break; 5461 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 5462 str = "ipv6"; 5463 break; 5464 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5465 str = "ipv4-tcp"; 5466 break; 5467 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5468 str = "ipv4-udp"; 5469 break; 5470 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5471 str = "ipv4-sctp"; 5472 break; 5473 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5474 str = "ipv6-tcp"; 5475 break; 5476 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5477 str = "ipv6-udp"; 5478 break; 5479 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5480 str = "ipv6-sctp"; 5481 break; 5482 case RTE_PTYPE_TUNNEL_GRENAT: 5483 str = "grenat"; 5484 break; 5485 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER: 5486 str = "inner-eth"; 5487 break; 5488 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5489 | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 5490 str = "inner-ipv4"; 5491 break; 5492 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5493 | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 5494 str = "inner-ipv6"; 5495 break; 5496 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5497 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5498 str = "inner-ipv4-tcp"; 5499 break; 5500 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5501 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5502 str = "inner-ipv4-udp"; 5503 break; 5504 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5505 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5506 str = "inner-ipv4-sctp"; 5507 break; 5508 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5509 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5510 str = "inner-ipv6-tcp"; 5511 break; 5512 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5513 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5514 str = "inner-ipv6-udp"; 5515 break; 5516 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5517 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5518 str = "inner-ipv6-sctp"; 5519 break; 5520 default: 5521 str = "unsupported"; 5522 } 5523 5524 return str; 5525 } 5526 5527 void 5528 show_rx_pkt_hdrs(void) 5529 { 5530 uint32_t i, n; 5531 5532 n = rx_pkt_nb_segs; 5533 printf("Number of segments: %u\n", n); 5534 if (n) { 5535 printf("Packet segs: "); 5536 for (i = 0; i < n - 1; i++) 5537 printf("%s, ", get_ptype_str(rx_pkt_hdr_protos[i])); 5538 printf("payload\n"); 5539 } 5540 } 5541 5542 void 5543 set_rx_pkt_hdrs(unsigned int *seg_hdrs, unsigned int nb_segs) 5544 { 5545 unsigned int i; 5546 5547 if (nb_segs + 1 > MAX_SEGS_BUFFER_SPLIT) { 5548 printf("nb segments per RX packets=%u > " 5549 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs + 1); 5550 return; 5551 } 5552 5553 memset(rx_pkt_hdr_protos, 0, sizeof(rx_pkt_hdr_protos)); 5554 5555 for (i = 0; i < nb_segs; i++) 5556 rx_pkt_hdr_protos[i] = (uint32_t)seg_hdrs[i]; 5557 /* 5558 * We calculate the number of hdrs, but payload is not included, 5559 * so rx_pkt_nb_segs would increase 1. 5560 */ 5561 rx_pkt_nb_segs = nb_segs + 1; 5562 } 5563 5564 void 5565 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5566 { 5567 unsigned int i; 5568 5569 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 5570 printf("nb segments per RX packets=%u >= " 5571 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 5572 return; 5573 } 5574 5575 /* 5576 * No extra check here, the segment length will be checked by PMD 5577 * in the extended queue setup. 5578 */ 5579 for (i = 0; i < nb_segs; i++) { 5580 if (seg_lengths[i] >= UINT16_MAX) { 5581 printf("length[%u]=%u > UINT16_MAX - give up\n", 5582 i, seg_lengths[i]); 5583 return; 5584 } 5585 } 5586 5587 for (i = 0; i < nb_segs; i++) 5588 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5589 5590 rx_pkt_nb_segs = (uint8_t) nb_segs; 5591 } 5592 5593 void 5594 show_tx_pkt_segments(void) 5595 { 5596 uint32_t i, n; 5597 const char *split; 5598 5599 n = tx_pkt_nb_segs; 5600 split = tx_split_get_name(tx_pkt_split); 5601 5602 printf("Number of segments: %u\n", n); 5603 printf("Segment sizes: "); 5604 for (i = 0; i != n - 1; i++) 5605 printf("%hu,", tx_pkt_seg_lengths[i]); 5606 printf("%hu\n", tx_pkt_seg_lengths[i]); 5607 printf("Split packet: %s\n", split); 5608 } 5609 5610 static bool 5611 nb_segs_is_invalid(unsigned int nb_segs) 5612 { 5613 uint16_t ring_size; 5614 uint16_t queue_id; 5615 uint16_t port_id; 5616 int ret; 5617 5618 RTE_ETH_FOREACH_DEV(port_id) { 5619 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 5620 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 5621 if (ret) { 5622 /* Port may not be initialized yet, can't say 5623 * the port is invalid in this stage. 5624 */ 5625 continue; 5626 } 5627 if (ring_size < nb_segs) { 5628 printf("nb segments per TX packets=%u >= TX " 5629 "queue(%u) ring_size=%u - txpkts ignored\n", 5630 nb_segs, queue_id, ring_size); 5631 return true; 5632 } 5633 } 5634 } 5635 5636 return false; 5637 } 5638 5639 void 5640 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5641 { 5642 uint16_t tx_pkt_len; 5643 unsigned int i; 5644 5645 /* 5646 * For single segment settings failed check is ignored. 5647 * It is a very basic capability to send the single segment 5648 * packets, suppose it is always supported. 5649 */ 5650 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 5651 fprintf(stderr, 5652 "Tx segment size(%u) is not supported - txpkts ignored\n", 5653 nb_segs); 5654 return; 5655 } 5656 5657 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 5658 fprintf(stderr, 5659 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 5660 nb_segs, RTE_MAX_SEGS_PER_PKT); 5661 return; 5662 } 5663 5664 /* 5665 * Check that each segment length is greater or equal than 5666 * the mbuf data size. 5667 * Check also that the total packet length is greater or equal than the 5668 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 5669 * 20 + 8). 5670 */ 5671 tx_pkt_len = 0; 5672 for (i = 0; i < nb_segs; i++) { 5673 if (seg_lengths[i] > mbuf_data_size[0]) { 5674 fprintf(stderr, 5675 "length[%u]=%u > mbuf_data_size=%u - give up\n", 5676 i, seg_lengths[i], mbuf_data_size[0]); 5677 return; 5678 } 5679 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 5680 } 5681 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 5682 fprintf(stderr, "total packet length=%u < %d - give up\n", 5683 (unsigned) tx_pkt_len, 5684 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 5685 return; 5686 } 5687 5688 for (i = 0; i < nb_segs; i++) 5689 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5690 5691 tx_pkt_length = tx_pkt_len; 5692 tx_pkt_nb_segs = (uint8_t) nb_segs; 5693 } 5694 5695 void 5696 show_tx_pkt_times(void) 5697 { 5698 printf("Interburst gap: %u\n", tx_pkt_times_inter); 5699 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 5700 } 5701 5702 void 5703 set_tx_pkt_times(unsigned int *tx_times) 5704 { 5705 tx_pkt_times_inter = tx_times[0]; 5706 tx_pkt_times_intra = tx_times[1]; 5707 } 5708 5709 #ifdef RTE_LIB_GRO 5710 void 5711 setup_gro(const char *onoff, portid_t port_id) 5712 { 5713 if (!rte_eth_dev_is_valid_port(port_id)) { 5714 fprintf(stderr, "invalid port id %u\n", port_id); 5715 return; 5716 } 5717 if (test_done == 0) { 5718 fprintf(stderr, 5719 "Before enable/disable GRO, please stop forwarding first\n"); 5720 return; 5721 } 5722 if (strcmp(onoff, "on") == 0) { 5723 if (gro_ports[port_id].enable != 0) { 5724 fprintf(stderr, 5725 "Port %u has enabled GRO. Please disable GRO first\n", 5726 port_id); 5727 return; 5728 } 5729 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5730 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 5731 gro_ports[port_id].param.max_flow_num = 5732 GRO_DEFAULT_FLOW_NUM; 5733 gro_ports[port_id].param.max_item_per_flow = 5734 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 5735 } 5736 gro_ports[port_id].enable = 1; 5737 } else { 5738 if (gro_ports[port_id].enable == 0) { 5739 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 5740 return; 5741 } 5742 gro_ports[port_id].enable = 0; 5743 } 5744 } 5745 5746 void 5747 setup_gro_flush_cycles(uint8_t cycles) 5748 { 5749 if (test_done == 0) { 5750 fprintf(stderr, 5751 "Before change flush interval for GRO, please stop forwarding first.\n"); 5752 return; 5753 } 5754 5755 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 5756 GRO_DEFAULT_FLUSH_CYCLES) { 5757 fprintf(stderr, 5758 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 5759 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 5760 cycles = GRO_DEFAULT_FLUSH_CYCLES; 5761 } 5762 5763 gro_flush_cycles = cycles; 5764 } 5765 5766 void 5767 show_gro(portid_t port_id) 5768 { 5769 struct rte_gro_param *param; 5770 uint32_t max_pkts_num; 5771 5772 param = &gro_ports[port_id].param; 5773 5774 if (!rte_eth_dev_is_valid_port(port_id)) { 5775 fprintf(stderr, "Invalid port id %u.\n", port_id); 5776 return; 5777 } 5778 if (gro_ports[port_id].enable) { 5779 printf("GRO type: TCP/IPv4\n"); 5780 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5781 max_pkts_num = param->max_flow_num * 5782 param->max_item_per_flow; 5783 } else 5784 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 5785 printf("Max number of packets to perform GRO: %u\n", 5786 max_pkts_num); 5787 printf("Flushing cycles: %u\n", gro_flush_cycles); 5788 } else 5789 printf("Port %u doesn't enable GRO.\n", port_id); 5790 } 5791 #endif /* RTE_LIB_GRO */ 5792 5793 #ifdef RTE_LIB_GSO 5794 void 5795 setup_gso(const char *mode, portid_t port_id) 5796 { 5797 if (!rte_eth_dev_is_valid_port(port_id)) { 5798 fprintf(stderr, "invalid port id %u\n", port_id); 5799 return; 5800 } 5801 if (strcmp(mode, "on") == 0) { 5802 if (test_done == 0) { 5803 fprintf(stderr, 5804 "before enabling GSO, please stop forwarding first\n"); 5805 return; 5806 } 5807 gso_ports[port_id].enable = 1; 5808 } else if (strcmp(mode, "off") == 0) { 5809 if (test_done == 0) { 5810 fprintf(stderr, 5811 "before disabling GSO, please stop forwarding first\n"); 5812 return; 5813 } 5814 gso_ports[port_id].enable = 0; 5815 } 5816 } 5817 #endif /* RTE_LIB_GSO */ 5818 5819 char* 5820 list_pkt_forwarding_modes(void) 5821 { 5822 static char fwd_modes[128] = ""; 5823 const char *separator = "|"; 5824 struct fwd_engine *fwd_eng; 5825 unsigned i = 0; 5826 5827 if (strlen (fwd_modes) == 0) { 5828 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5829 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5830 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5831 strncat(fwd_modes, separator, 5832 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5833 } 5834 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5835 } 5836 5837 return fwd_modes; 5838 } 5839 5840 char* 5841 list_pkt_forwarding_retry_modes(void) 5842 { 5843 static char fwd_modes[128] = ""; 5844 const char *separator = "|"; 5845 struct fwd_engine *fwd_eng; 5846 unsigned i = 0; 5847 5848 if (strlen(fwd_modes) == 0) { 5849 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5850 if (fwd_eng == &rx_only_engine) 5851 continue; 5852 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5853 sizeof(fwd_modes) - 5854 strlen(fwd_modes) - 1); 5855 strncat(fwd_modes, separator, 5856 sizeof(fwd_modes) - 5857 strlen(fwd_modes) - 1); 5858 } 5859 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5860 } 5861 5862 return fwd_modes; 5863 } 5864 5865 void 5866 set_pkt_forwarding_mode(const char *fwd_mode_name) 5867 { 5868 struct fwd_engine *fwd_eng; 5869 unsigned i; 5870 5871 i = 0; 5872 while ((fwd_eng = fwd_engines[i]) != NULL) { 5873 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5874 printf("Set %s packet forwarding mode%s\n", 5875 fwd_mode_name, 5876 retry_enabled == 0 ? "" : " with retry"); 5877 cur_fwd_eng = fwd_eng; 5878 return; 5879 } 5880 i++; 5881 } 5882 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5883 } 5884 5885 void 5886 add_rx_dump_callbacks(portid_t portid) 5887 { 5888 struct rte_eth_dev_info dev_info; 5889 uint16_t queue; 5890 int ret; 5891 5892 if (port_id_is_invalid(portid, ENABLED_WARN)) 5893 return; 5894 5895 ret = eth_dev_info_get_print_err(portid, &dev_info); 5896 if (ret != 0) 5897 return; 5898 5899 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5900 if (!ports[portid].rx_dump_cb[queue]) 5901 ports[portid].rx_dump_cb[queue] = 5902 rte_eth_add_rx_callback(portid, queue, 5903 dump_rx_pkts, NULL); 5904 } 5905 5906 void 5907 add_tx_dump_callbacks(portid_t portid) 5908 { 5909 struct rte_eth_dev_info dev_info; 5910 uint16_t queue; 5911 int ret; 5912 5913 if (port_id_is_invalid(portid, ENABLED_WARN)) 5914 return; 5915 5916 ret = eth_dev_info_get_print_err(portid, &dev_info); 5917 if (ret != 0) 5918 return; 5919 5920 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5921 if (!ports[portid].tx_dump_cb[queue]) 5922 ports[portid].tx_dump_cb[queue] = 5923 rte_eth_add_tx_callback(portid, queue, 5924 dump_tx_pkts, NULL); 5925 } 5926 5927 void 5928 remove_rx_dump_callbacks(portid_t portid) 5929 { 5930 struct rte_eth_dev_info dev_info; 5931 uint16_t queue; 5932 int ret; 5933 5934 if (port_id_is_invalid(portid, ENABLED_WARN)) 5935 return; 5936 5937 ret = eth_dev_info_get_print_err(portid, &dev_info); 5938 if (ret != 0) 5939 return; 5940 5941 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5942 if (ports[portid].rx_dump_cb[queue]) { 5943 rte_eth_remove_rx_callback(portid, queue, 5944 ports[portid].rx_dump_cb[queue]); 5945 ports[portid].rx_dump_cb[queue] = NULL; 5946 } 5947 } 5948 5949 void 5950 remove_tx_dump_callbacks(portid_t portid) 5951 { 5952 struct rte_eth_dev_info dev_info; 5953 uint16_t queue; 5954 int ret; 5955 5956 if (port_id_is_invalid(portid, ENABLED_WARN)) 5957 return; 5958 5959 ret = eth_dev_info_get_print_err(portid, &dev_info); 5960 if (ret != 0) 5961 return; 5962 5963 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5964 if (ports[portid].tx_dump_cb[queue]) { 5965 rte_eth_remove_tx_callback(portid, queue, 5966 ports[portid].tx_dump_cb[queue]); 5967 ports[portid].tx_dump_cb[queue] = NULL; 5968 } 5969 } 5970 5971 void 5972 configure_rxtx_dump_callbacks(uint16_t verbose) 5973 { 5974 portid_t portid; 5975 5976 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5977 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 5978 return; 5979 #endif 5980 5981 RTE_ETH_FOREACH_DEV(portid) 5982 { 5983 if (verbose == 1 || verbose > 2) 5984 add_rx_dump_callbacks(portid); 5985 else 5986 remove_rx_dump_callbacks(portid); 5987 if (verbose >= 2) 5988 add_tx_dump_callbacks(portid); 5989 else 5990 remove_tx_dump_callbacks(portid); 5991 } 5992 } 5993 5994 void 5995 set_verbose_level(uint16_t vb_level) 5996 { 5997 printf("Change verbose level from %u to %u\n", 5998 (unsigned int) verbose_level, (unsigned int) vb_level); 5999 verbose_level = vb_level; 6000 configure_rxtx_dump_callbacks(verbose_level); 6001 } 6002 6003 void 6004 vlan_extend_set(portid_t port_id, int on) 6005 { 6006 int diag; 6007 int vlan_offload; 6008 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6009 6010 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6011 return; 6012 6013 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6014 6015 if (on) { 6016 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 6017 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6018 } else { 6019 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 6020 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6021 } 6022 6023 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6024 if (diag < 0) { 6025 fprintf(stderr, 6026 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 6027 port_id, on, diag); 6028 return; 6029 } 6030 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6031 } 6032 6033 void 6034 rx_vlan_strip_set(portid_t port_id, int on) 6035 { 6036 int diag; 6037 int vlan_offload; 6038 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6039 6040 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6041 return; 6042 6043 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6044 6045 if (on) { 6046 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 6047 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6048 } else { 6049 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 6050 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6051 } 6052 6053 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6054 if (diag < 0) { 6055 fprintf(stderr, 6056 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6057 __func__, port_id, on, diag); 6058 return; 6059 } 6060 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6061 } 6062 6063 void 6064 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 6065 { 6066 int diag; 6067 6068 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6069 return; 6070 6071 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 6072 if (diag < 0) 6073 fprintf(stderr, 6074 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 6075 __func__, port_id, queue_id, on, diag); 6076 } 6077 6078 void 6079 rx_vlan_filter_set(portid_t port_id, int on) 6080 { 6081 int diag; 6082 int vlan_offload; 6083 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6084 6085 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6086 return; 6087 6088 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6089 6090 if (on) { 6091 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 6092 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6093 } else { 6094 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 6095 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6096 } 6097 6098 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6099 if (diag < 0) { 6100 fprintf(stderr, 6101 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6102 __func__, port_id, on, diag); 6103 return; 6104 } 6105 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6106 } 6107 6108 void 6109 rx_vlan_qinq_strip_set(portid_t port_id, int on) 6110 { 6111 int diag; 6112 int vlan_offload; 6113 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6114 6115 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6116 return; 6117 6118 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6119 6120 if (on) { 6121 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 6122 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6123 } else { 6124 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 6125 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6126 } 6127 6128 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6129 if (diag < 0) { 6130 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 6131 __func__, port_id, on, diag); 6132 return; 6133 } 6134 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6135 } 6136 6137 int 6138 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 6139 { 6140 int diag; 6141 6142 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6143 return 1; 6144 if (vlan_id_is_invalid(vlan_id)) 6145 return 1; 6146 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 6147 if (diag == 0) 6148 return 0; 6149 fprintf(stderr, 6150 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 6151 port_id, vlan_id, on, diag); 6152 return -1; 6153 } 6154 6155 void 6156 rx_vlan_all_filter_set(portid_t port_id, int on) 6157 { 6158 uint16_t vlan_id; 6159 6160 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6161 return; 6162 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 6163 if (rx_vft_set(port_id, vlan_id, on)) 6164 break; 6165 } 6166 } 6167 6168 void 6169 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 6170 { 6171 int diag; 6172 6173 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6174 return; 6175 6176 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 6177 if (diag == 0) 6178 return; 6179 6180 fprintf(stderr, 6181 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 6182 port_id, vlan_type, tp_id, diag); 6183 } 6184 6185 void 6186 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 6187 { 6188 struct rte_eth_dev_info dev_info; 6189 int ret; 6190 6191 if (vlan_id_is_invalid(vlan_id)) 6192 return; 6193 6194 if (ports[port_id].dev_conf.txmode.offloads & 6195 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 6196 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 6197 return; 6198 } 6199 6200 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6201 if (ret != 0) 6202 return; 6203 6204 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 6205 fprintf(stderr, 6206 "Error: vlan insert is not supported by port %d\n", 6207 port_id); 6208 return; 6209 } 6210 6211 tx_vlan_reset(port_id); 6212 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 6213 ports[port_id].tx_vlan_id = vlan_id; 6214 } 6215 6216 void 6217 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 6218 { 6219 struct rte_eth_dev_info dev_info; 6220 int ret; 6221 6222 if (vlan_id_is_invalid(vlan_id)) 6223 return; 6224 if (vlan_id_is_invalid(vlan_id_outer)) 6225 return; 6226 6227 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6228 if (ret != 0) 6229 return; 6230 6231 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 6232 fprintf(stderr, 6233 "Error: qinq insert not supported by port %d\n", 6234 port_id); 6235 return; 6236 } 6237 6238 tx_vlan_reset(port_id); 6239 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6240 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6241 ports[port_id].tx_vlan_id = vlan_id; 6242 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 6243 } 6244 6245 void 6246 tx_vlan_reset(portid_t port_id) 6247 { 6248 ports[port_id].dev_conf.txmode.offloads &= 6249 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6250 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6251 ports[port_id].tx_vlan_id = 0; 6252 ports[port_id].tx_vlan_id_outer = 0; 6253 } 6254 6255 void 6256 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 6257 { 6258 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6259 return; 6260 6261 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 6262 } 6263 6264 void 6265 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 6266 { 6267 int ret; 6268 6269 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6270 return; 6271 6272 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 6273 return; 6274 6275 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 6276 fprintf(stderr, "map_value not in required range 0..%d\n", 6277 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 6278 return; 6279 } 6280 6281 if (!is_rx) { /* tx */ 6282 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 6283 map_value); 6284 if (ret) { 6285 fprintf(stderr, 6286 "failed to set tx queue stats mapping.\n"); 6287 return; 6288 } 6289 } else { /* rx */ 6290 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 6291 map_value); 6292 if (ret) { 6293 fprintf(stderr, 6294 "failed to set rx queue stats mapping.\n"); 6295 return; 6296 } 6297 } 6298 } 6299 6300 void 6301 set_xstats_hide_zero(uint8_t on_off) 6302 { 6303 xstats_hide_zero = on_off; 6304 } 6305 6306 void 6307 set_record_core_cycles(uint8_t on_off) 6308 { 6309 record_core_cycles = on_off; 6310 } 6311 6312 void 6313 set_record_burst_stats(uint8_t on_off) 6314 { 6315 record_burst_stats = on_off; 6316 } 6317 6318 uint16_t 6319 str_to_flowtype(const char *string) 6320 { 6321 uint8_t i; 6322 6323 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6324 if (!strcmp(flowtype_str_table[i].str, string)) 6325 return flowtype_str_table[i].ftype; 6326 } 6327 6328 if (isdigit(string[0])) { 6329 int val = atoi(string); 6330 if (val > 0 && val < 64) 6331 return (uint16_t)val; 6332 } 6333 6334 return RTE_ETH_FLOW_UNKNOWN; 6335 } 6336 6337 const char* 6338 flowtype_to_str(uint16_t flow_type) 6339 { 6340 uint8_t i; 6341 6342 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6343 if (flowtype_str_table[i].ftype == flow_type) 6344 return flowtype_str_table[i].str; 6345 } 6346 6347 return NULL; 6348 } 6349 6350 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 6351 6352 static inline void 6353 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6354 { 6355 struct rte_eth_flex_payload_cfg *cfg; 6356 uint32_t i, j; 6357 6358 for (i = 0; i < flex_conf->nb_payloads; i++) { 6359 cfg = &flex_conf->flex_set[i]; 6360 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 6361 printf("\n RAW: "); 6362 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 6363 printf("\n L2_PAYLOAD: "); 6364 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 6365 printf("\n L3_PAYLOAD: "); 6366 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 6367 printf("\n L4_PAYLOAD: "); 6368 else 6369 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 6370 for (j = 0; j < num; j++) 6371 printf(" %-5u", cfg->src_offset[j]); 6372 } 6373 printf("\n"); 6374 } 6375 6376 static inline void 6377 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6378 { 6379 struct rte_eth_fdir_flex_mask *mask; 6380 uint32_t i, j; 6381 const char *p; 6382 6383 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 6384 mask = &flex_conf->flex_mask[i]; 6385 p = flowtype_to_str(mask->flow_type); 6386 printf("\n %s:\t", p ? p : "unknown"); 6387 for (j = 0; j < num; j++) 6388 printf(" %02x", mask->mask[j]); 6389 } 6390 printf("\n"); 6391 } 6392 6393 static inline void 6394 print_fdir_flow_type(uint32_t flow_types_mask) 6395 { 6396 int i; 6397 const char *p; 6398 6399 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 6400 if (!(flow_types_mask & (1 << i))) 6401 continue; 6402 p = flowtype_to_str(i); 6403 if (p) 6404 printf(" %s", p); 6405 else 6406 printf(" unknown"); 6407 } 6408 printf("\n"); 6409 } 6410 6411 static int 6412 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 6413 struct rte_eth_fdir_stats *fdir_stat) 6414 { 6415 int ret = -ENOTSUP; 6416 6417 #ifdef RTE_NET_I40E 6418 if (ret == -ENOTSUP) { 6419 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 6420 if (!ret) 6421 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 6422 } 6423 #endif 6424 #ifdef RTE_NET_IXGBE 6425 if (ret == -ENOTSUP) { 6426 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 6427 if (!ret) 6428 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 6429 } 6430 #endif 6431 switch (ret) { 6432 case 0: 6433 break; 6434 case -ENOTSUP: 6435 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 6436 port_id); 6437 break; 6438 default: 6439 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 6440 break; 6441 } 6442 return ret; 6443 } 6444 6445 void 6446 fdir_get_infos(portid_t port_id) 6447 { 6448 struct rte_eth_fdir_stats fdir_stat; 6449 struct rte_eth_fdir_info fdir_info; 6450 6451 static const char *fdir_stats_border = "########################"; 6452 6453 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6454 return; 6455 6456 memset(&fdir_info, 0, sizeof(fdir_info)); 6457 memset(&fdir_stat, 0, sizeof(fdir_stat)); 6458 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 6459 return; 6460 6461 printf("\n %s FDIR infos for port %-2d %s\n", 6462 fdir_stats_border, port_id, fdir_stats_border); 6463 printf(" MODE: "); 6464 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 6465 printf(" PERFECT\n"); 6466 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 6467 printf(" PERFECT-MAC-VLAN\n"); 6468 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 6469 printf(" PERFECT-TUNNEL\n"); 6470 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 6471 printf(" SIGNATURE\n"); 6472 else 6473 printf(" DISABLE\n"); 6474 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 6475 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 6476 printf(" SUPPORTED FLOW TYPE: "); 6477 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 6478 } 6479 printf(" FLEX PAYLOAD INFO:\n"); 6480 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 6481 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 6482 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 6483 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 6484 fdir_info.flex_payload_unit, 6485 fdir_info.max_flex_payload_segment_num, 6486 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 6487 if (fdir_info.flex_conf.nb_payloads > 0) { 6488 printf(" FLEX PAYLOAD SRC OFFSET:"); 6489 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6490 } 6491 if (fdir_info.flex_conf.nb_flexmasks > 0) { 6492 printf(" FLEX MASK CFG:"); 6493 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6494 } 6495 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 6496 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 6497 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 6498 fdir_info.guarant_spc, fdir_info.best_spc); 6499 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 6500 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 6501 " add: %-10"PRIu64" remove: %"PRIu64"\n" 6502 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 6503 fdir_stat.collision, fdir_stat.free, 6504 fdir_stat.maxhash, fdir_stat.maxlen, 6505 fdir_stat.add, fdir_stat.remove, 6506 fdir_stat.f_add, fdir_stat.f_remove); 6507 printf(" %s############################%s\n", 6508 fdir_stats_border, fdir_stats_border); 6509 } 6510 6511 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 6512 6513 void 6514 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 6515 { 6516 #ifdef RTE_NET_IXGBE 6517 int diag; 6518 6519 if (is_rx) 6520 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 6521 else 6522 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 6523 6524 if (diag == 0) 6525 return; 6526 fprintf(stderr, 6527 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 6528 is_rx ? "rx" : "tx", port_id, diag); 6529 return; 6530 #endif 6531 fprintf(stderr, "VF %s setting not supported for port %d\n", 6532 is_rx ? "Rx" : "Tx", port_id); 6533 RTE_SET_USED(vf); 6534 RTE_SET_USED(on); 6535 } 6536 6537 int 6538 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate) 6539 { 6540 int diag; 6541 struct rte_eth_link link; 6542 int ret; 6543 6544 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6545 return 1; 6546 ret = eth_link_get_nowait_print_err(port_id, &link); 6547 if (ret < 0) 6548 return 1; 6549 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 6550 rate > link.link_speed) { 6551 fprintf(stderr, 6552 "Invalid rate value:%u bigger than link speed: %u\n", 6553 rate, link.link_speed); 6554 return 1; 6555 } 6556 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 6557 if (diag == 0) 6558 return diag; 6559 fprintf(stderr, 6560 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 6561 port_id, diag); 6562 return diag; 6563 } 6564 6565 int 6566 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk) 6567 { 6568 int diag = -ENOTSUP; 6569 6570 RTE_SET_USED(vf); 6571 RTE_SET_USED(rate); 6572 RTE_SET_USED(q_msk); 6573 6574 #ifdef RTE_NET_IXGBE 6575 if (diag == -ENOTSUP) 6576 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 6577 q_msk); 6578 #endif 6579 #ifdef RTE_NET_BNXT 6580 if (diag == -ENOTSUP) 6581 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 6582 #endif 6583 if (diag == 0) 6584 return diag; 6585 6586 fprintf(stderr, 6587 "%s for port_id=%d failed diag=%d\n", 6588 __func__, port_id, diag); 6589 return diag; 6590 } 6591 6592 int 6593 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh) 6594 { 6595 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6596 return -EINVAL; 6597 6598 return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh); 6599 } 6600 6601 /* 6602 * Functions to manage the set of filtered Multicast MAC addresses. 6603 * 6604 * A pool of filtered multicast MAC addresses is associated with each port. 6605 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 6606 * The address of the pool and the number of valid multicast MAC addresses 6607 * recorded in the pool are stored in the fields "mc_addr_pool" and 6608 * "mc_addr_nb" of the "rte_port" data structure. 6609 * 6610 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 6611 * to be supplied a contiguous array of multicast MAC addresses. 6612 * To comply with this constraint, the set of multicast addresses recorded 6613 * into the pool are systematically compacted at the beginning of the pool. 6614 * Hence, when a multicast address is removed from the pool, all following 6615 * addresses, if any, are copied back to keep the set contiguous. 6616 */ 6617 #define MCAST_POOL_INC 32 6618 6619 static int 6620 mcast_addr_pool_extend(struct rte_port *port) 6621 { 6622 struct rte_ether_addr *mc_pool; 6623 size_t mc_pool_size; 6624 6625 /* 6626 * If a free entry is available at the end of the pool, just 6627 * increment the number of recorded multicast addresses. 6628 */ 6629 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 6630 port->mc_addr_nb++; 6631 return 0; 6632 } 6633 6634 /* 6635 * [re]allocate a pool with MCAST_POOL_INC more entries. 6636 * The previous test guarantees that port->mc_addr_nb is a multiple 6637 * of MCAST_POOL_INC. 6638 */ 6639 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 6640 MCAST_POOL_INC); 6641 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 6642 mc_pool_size); 6643 if (mc_pool == NULL) { 6644 fprintf(stderr, 6645 "allocation of pool of %u multicast addresses failed\n", 6646 port->mc_addr_nb + MCAST_POOL_INC); 6647 return -ENOMEM; 6648 } 6649 6650 port->mc_addr_pool = mc_pool; 6651 port->mc_addr_nb++; 6652 return 0; 6653 6654 } 6655 6656 static void 6657 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 6658 { 6659 if (mcast_addr_pool_extend(port) != 0) 6660 return; 6661 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 6662 } 6663 6664 static void 6665 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 6666 { 6667 port->mc_addr_nb--; 6668 if (addr_idx == port->mc_addr_nb) { 6669 /* No need to recompact the set of multicast addresses. */ 6670 if (port->mc_addr_nb == 0) { 6671 /* free the pool of multicast addresses. */ 6672 free(port->mc_addr_pool); 6673 port->mc_addr_pool = NULL; 6674 } 6675 return; 6676 } 6677 memmove(&port->mc_addr_pool[addr_idx], 6678 &port->mc_addr_pool[addr_idx + 1], 6679 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 6680 } 6681 6682 int 6683 mcast_addr_pool_destroy(portid_t port_id) 6684 { 6685 struct rte_port *port; 6686 6687 if (port_id_is_invalid(port_id, ENABLED_WARN) || 6688 port_id == (portid_t)RTE_PORT_ALL) 6689 return -EINVAL; 6690 port = &ports[port_id]; 6691 6692 if (port->mc_addr_nb != 0) { 6693 /* free the pool of multicast addresses. */ 6694 free(port->mc_addr_pool); 6695 port->mc_addr_pool = NULL; 6696 port->mc_addr_nb = 0; 6697 } 6698 return 0; 6699 } 6700 6701 static int 6702 eth_port_multicast_addr_list_set(portid_t port_id) 6703 { 6704 struct rte_port *port; 6705 int diag; 6706 6707 port = &ports[port_id]; 6708 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 6709 port->mc_addr_nb); 6710 if (diag < 0) 6711 fprintf(stderr, 6712 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 6713 port_id, port->mc_addr_nb, diag); 6714 6715 return diag; 6716 } 6717 6718 void 6719 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6720 { 6721 struct rte_port *port; 6722 uint32_t i; 6723 6724 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6725 return; 6726 6727 port = &ports[port_id]; 6728 6729 /* 6730 * Check that the added multicast MAC address is not already recorded 6731 * in the pool of multicast addresses. 6732 */ 6733 for (i = 0; i < port->mc_addr_nb; i++) { 6734 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6735 fprintf(stderr, 6736 "multicast address already filtered by port\n"); 6737 return; 6738 } 6739 } 6740 6741 mcast_addr_pool_append(port, mc_addr); 6742 if (eth_port_multicast_addr_list_set(port_id) < 0) 6743 /* Rollback on failure, remove the address from the pool */ 6744 mcast_addr_pool_remove(port, i); 6745 } 6746 6747 void 6748 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6749 { 6750 struct rte_port *port; 6751 uint32_t i; 6752 6753 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6754 return; 6755 6756 port = &ports[port_id]; 6757 6758 /* 6759 * Search the pool of multicast MAC addresses for the removed address. 6760 */ 6761 for (i = 0; i < port->mc_addr_nb; i++) { 6762 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6763 break; 6764 } 6765 if (i == port->mc_addr_nb) { 6766 fprintf(stderr, "multicast address not filtered by port %d\n", 6767 port_id); 6768 return; 6769 } 6770 6771 mcast_addr_pool_remove(port, i); 6772 if (eth_port_multicast_addr_list_set(port_id) < 0) 6773 /* Rollback on failure, add the address back into the pool */ 6774 mcast_addr_pool_append(port, mc_addr); 6775 } 6776 6777 void 6778 port_dcb_info_display(portid_t port_id) 6779 { 6780 struct rte_eth_dcb_info dcb_info; 6781 uint16_t i; 6782 int ret; 6783 static const char *border = "================"; 6784 6785 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6786 return; 6787 6788 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6789 if (ret) { 6790 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6791 port_id); 6792 return; 6793 } 6794 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6795 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6796 printf("\n TC : "); 6797 for (i = 0; i < dcb_info.nb_tcs; i++) 6798 printf("\t%4d", i); 6799 printf("\n Priority : "); 6800 for (i = 0; i < dcb_info.nb_tcs; i++) 6801 printf("\t%4d", dcb_info.prio_tc[i]); 6802 printf("\n BW percent :"); 6803 for (i = 0; i < dcb_info.nb_tcs; i++) 6804 printf("\t%4d%%", dcb_info.tc_bws[i]); 6805 printf("\n RXQ base : "); 6806 for (i = 0; i < dcb_info.nb_tcs; i++) 6807 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6808 printf("\n RXQ number :"); 6809 for (i = 0; i < dcb_info.nb_tcs; i++) 6810 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6811 printf("\n TXQ base : "); 6812 for (i = 0; i < dcb_info.nb_tcs; i++) 6813 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6814 printf("\n TXQ number :"); 6815 for (i = 0; i < dcb_info.nb_tcs; i++) 6816 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6817 printf("\n"); 6818 } 6819 6820 uint8_t * 6821 open_file(const char *file_path, uint32_t *size) 6822 { 6823 int fd = open(file_path, O_RDONLY); 6824 off_t pkg_size; 6825 uint8_t *buf = NULL; 6826 int ret = 0; 6827 struct stat st_buf; 6828 6829 if (size) 6830 *size = 0; 6831 6832 if (fd == -1) { 6833 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6834 return buf; 6835 } 6836 6837 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6838 close(fd); 6839 fprintf(stderr, "%s: File operations failed\n", __func__); 6840 return buf; 6841 } 6842 6843 pkg_size = st_buf.st_size; 6844 if (pkg_size < 0) { 6845 close(fd); 6846 fprintf(stderr, "%s: File operations failed\n", __func__); 6847 return buf; 6848 } 6849 6850 buf = (uint8_t *)malloc(pkg_size); 6851 if (!buf) { 6852 close(fd); 6853 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6854 return buf; 6855 } 6856 6857 ret = read(fd, buf, pkg_size); 6858 if (ret < 0) { 6859 close(fd); 6860 fprintf(stderr, "%s: File read operation failed\n", __func__); 6861 close_file(buf); 6862 return NULL; 6863 } 6864 6865 if (size) 6866 *size = pkg_size; 6867 6868 close(fd); 6869 6870 return buf; 6871 } 6872 6873 int 6874 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6875 { 6876 FILE *fh = fopen(file_path, "wb"); 6877 6878 if (fh == NULL) { 6879 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6880 return -1; 6881 } 6882 6883 if (fwrite(buf, 1, size, fh) != size) { 6884 fclose(fh); 6885 fprintf(stderr, "%s: File write operation failed\n", __func__); 6886 return -1; 6887 } 6888 6889 fclose(fh); 6890 6891 return 0; 6892 } 6893 6894 int 6895 close_file(uint8_t *buf) 6896 { 6897 if (buf) { 6898 free((void *)buf); 6899 return 0; 6900 } 6901 6902 return -1; 6903 } 6904 6905 void 6906 show_macs(portid_t port_id) 6907 { 6908 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6909 struct rte_eth_dev_info dev_info; 6910 int32_t i, rc, num_macs = 0; 6911 6912 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6913 return; 6914 6915 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6916 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6917 if (rc < 0) 6918 return; 6919 6920 for (i = 0; i < rc; i++) { 6921 6922 /* skip zero address */ 6923 if (rte_is_zero_ether_addr(&addr[i])) 6924 continue; 6925 6926 num_macs++; 6927 } 6928 6929 printf("Number of MAC address added: %d\n", num_macs); 6930 6931 for (i = 0; i < rc; i++) { 6932 6933 /* skip zero address */ 6934 if (rte_is_zero_ether_addr(&addr[i])) 6935 continue; 6936 6937 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6938 printf(" %s\n", buf); 6939 } 6940 } 6941 6942 void 6943 show_mcast_macs(portid_t port_id) 6944 { 6945 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6946 struct rte_ether_addr *addr; 6947 struct rte_port *port; 6948 uint32_t i; 6949 6950 port = &ports[port_id]; 6951 6952 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6953 6954 for (i = 0; i < port->mc_addr_nb; i++) { 6955 addr = &port->mc_addr_pool[i]; 6956 6957 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6958 printf(" %s\n", buf); 6959 } 6960 } 6961