1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <ctype.h> 7 #include <stdarg.h> 8 #include <errno.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <stdint.h> 13 #include <inttypes.h> 14 15 #include <sys/queue.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <fcntl.h> 19 #include <unistd.h> 20 21 #include <rte_common.h> 22 #include <rte_byteorder.h> 23 #include <rte_debug.h> 24 #include <rte_log.h> 25 #include <rte_memory.h> 26 #include <rte_memcpy.h> 27 #include <rte_memzone.h> 28 #include <rte_launch.h> 29 #include <rte_bus.h> 30 #include <rte_eal.h> 31 #include <rte_per_lcore.h> 32 #include <rte_lcore.h> 33 #include <rte_branch_prediction.h> 34 #include <rte_mempool.h> 35 #include <rte_mbuf.h> 36 #include <rte_interrupts.h> 37 #include <rte_ether.h> 38 #include <rte_ethdev.h> 39 #include <rte_string_fns.h> 40 #include <rte_cycles.h> 41 #include <rte_flow.h> 42 #include <rte_mtr.h> 43 #include <rte_errno.h> 44 #ifdef RTE_NET_IXGBE 45 #include <rte_pmd_ixgbe.h> 46 #endif 47 #ifdef RTE_NET_I40E 48 #include <rte_pmd_i40e.h> 49 #endif 50 #ifdef RTE_NET_BNXT 51 #include <rte_pmd_bnxt.h> 52 #endif 53 #ifdef RTE_LIB_GRO 54 #include <rte_gro.h> 55 #endif 56 #include <rte_hexdump.h> 57 58 #include "testpmd.h" 59 #include "cmdline_mtr.h" 60 61 #define ETHDEV_FWVERS_LEN 32 62 63 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 65 #else 66 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 67 #endif 68 69 #define NS_PER_SEC 1E9 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 /* Group types */ 91 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 92 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 93 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 94 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 95 { "none", 0 }, 96 { "ip", RTE_ETH_RSS_IP }, 97 { "udp", RTE_ETH_RSS_UDP }, 98 { "tcp", RTE_ETH_RSS_TCP }, 99 { "sctp", RTE_ETH_RSS_SCTP }, 100 { "tunnel", RTE_ETH_RSS_TUNNEL }, 101 { "vlan", RTE_ETH_RSS_VLAN }, 102 103 /* Individual type */ 104 { "ipv4", RTE_ETH_RSS_IPV4 }, 105 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 106 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 107 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 108 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 109 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 110 { "ipv6", RTE_ETH_RSS_IPV6 }, 111 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 112 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 113 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 114 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 115 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 116 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 117 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 118 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 119 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 120 { "port", RTE_ETH_RSS_PORT }, 121 { "vxlan", RTE_ETH_RSS_VXLAN }, 122 { "geneve", RTE_ETH_RSS_GENEVE }, 123 { "nvgre", RTE_ETH_RSS_NVGRE }, 124 { "gtpu", RTE_ETH_RSS_GTPU }, 125 { "eth", RTE_ETH_RSS_ETH }, 126 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 127 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 128 { "esp", RTE_ETH_RSS_ESP }, 129 { "ah", RTE_ETH_RSS_AH }, 130 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 131 { "pfcp", RTE_ETH_RSS_PFCP }, 132 { "pppoe", RTE_ETH_RSS_PPPOE }, 133 { "ecpri", RTE_ETH_RSS_ECPRI }, 134 { "mpls", RTE_ETH_RSS_MPLS }, 135 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 136 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 137 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 138 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 139 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 140 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 141 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 142 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 143 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 144 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 145 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 146 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 147 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 148 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 149 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 150 { NULL, 0}, 151 }; 152 153 static const struct { 154 enum rte_eth_fec_mode mode; 155 const char *name; 156 } fec_mode_name[] = { 157 { 158 .mode = RTE_ETH_FEC_NOFEC, 159 .name = "off", 160 }, 161 { 162 .mode = RTE_ETH_FEC_AUTO, 163 .name = "auto", 164 }, 165 { 166 .mode = RTE_ETH_FEC_BASER, 167 .name = "baser", 168 }, 169 { 170 .mode = RTE_ETH_FEC_RS, 171 .name = "rs", 172 }, 173 { 174 .mode = RTE_ETH_FEC_LLRS, 175 .name = "llrs", 176 }, 177 }; 178 179 static const struct { 180 char str[32]; 181 uint16_t ftype; 182 } flowtype_str_table[] = { 183 {"raw", RTE_ETH_FLOW_RAW}, 184 {"ipv4", RTE_ETH_FLOW_IPV4}, 185 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 186 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 187 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 188 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 189 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 190 {"ipv6", RTE_ETH_FLOW_IPV6}, 191 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 192 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 193 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 194 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 195 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 196 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 197 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 198 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 199 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 200 {"port", RTE_ETH_FLOW_PORT}, 201 {"vxlan", RTE_ETH_FLOW_VXLAN}, 202 {"geneve", RTE_ETH_FLOW_GENEVE}, 203 {"nvgre", RTE_ETH_FLOW_NVGRE}, 204 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 205 {"gtpu", RTE_ETH_FLOW_GTPU}, 206 }; 207 208 static void 209 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 210 { 211 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 212 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 213 printf("%s%s", name, buf); 214 } 215 216 static void 217 nic_xstats_display_periodic(portid_t port_id) 218 { 219 struct xstat_display_info *xstats_info; 220 uint64_t *prev_values, *curr_values; 221 uint64_t diff_value, value_rate; 222 struct timespec cur_time; 223 uint64_t *ids_supp; 224 size_t ids_supp_sz; 225 uint64_t diff_ns; 226 unsigned int i; 227 int rc; 228 229 xstats_info = &ports[port_id].xstats_info; 230 231 ids_supp_sz = xstats_info->ids_supp_sz; 232 if (ids_supp_sz == 0) 233 return; 234 235 printf("\n"); 236 237 ids_supp = xstats_info->ids_supp; 238 prev_values = xstats_info->prev_values; 239 curr_values = xstats_info->curr_values; 240 241 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 242 ids_supp_sz); 243 if (rc != (int)ids_supp_sz) { 244 fprintf(stderr, 245 "Failed to get values of %zu xstats for port %u - return code %d\n", 246 ids_supp_sz, port_id, rc); 247 return; 248 } 249 250 diff_ns = 0; 251 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 252 uint64_t ns; 253 254 ns = cur_time.tv_sec * NS_PER_SEC; 255 ns += cur_time.tv_nsec; 256 257 if (xstats_info->prev_ns != 0) 258 diff_ns = ns - xstats_info->prev_ns; 259 xstats_info->prev_ns = ns; 260 } 261 262 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 263 for (i = 0; i < ids_supp_sz; i++) { 264 diff_value = (curr_values[i] > prev_values[i]) ? 265 (curr_values[i] - prev_values[i]) : 0; 266 prev_values[i] = curr_values[i]; 267 value_rate = diff_ns > 0 ? 268 (double)diff_value / diff_ns * NS_PER_SEC : 0; 269 270 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 271 xstats_display[i].name, curr_values[i], value_rate); 272 } 273 } 274 275 void 276 nic_stats_display(portid_t port_id) 277 { 278 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 279 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 280 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 281 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 282 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 283 struct timespec cur_time; 284 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 285 diff_ns; 286 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 287 struct rte_eth_stats stats; 288 static const char *nic_stats_border = "########################"; 289 int ret; 290 291 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 292 print_valid_ports(); 293 return; 294 } 295 ret = rte_eth_stats_get(port_id, &stats); 296 if (ret != 0) { 297 fprintf(stderr, 298 "%s: Error: failed to get stats (port %u): %d", 299 __func__, port_id, ret); 300 return; 301 } 302 printf("\n %s NIC statistics for port %-2d %s\n", 303 nic_stats_border, port_id, nic_stats_border); 304 305 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 306 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 307 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 308 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 309 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 310 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 311 312 diff_ns = 0; 313 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 314 uint64_t ns; 315 316 ns = cur_time.tv_sec * NS_PER_SEC; 317 ns += cur_time.tv_nsec; 318 319 if (prev_ns[port_id] != 0) 320 diff_ns = ns - prev_ns[port_id]; 321 prev_ns[port_id] = ns; 322 } 323 324 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 325 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 326 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 327 (stats.opackets - prev_pkts_tx[port_id]) : 0; 328 prev_pkts_rx[port_id] = stats.ipackets; 329 prev_pkts_tx[port_id] = stats.opackets; 330 mpps_rx = diff_ns > 0 ? 331 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 332 mpps_tx = diff_ns > 0 ? 333 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 334 335 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 336 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 337 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 338 (stats.obytes - prev_bytes_tx[port_id]) : 0; 339 prev_bytes_rx[port_id] = stats.ibytes; 340 prev_bytes_tx[port_id] = stats.obytes; 341 mbps_rx = diff_ns > 0 ? 342 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 343 mbps_tx = diff_ns > 0 ? 344 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 345 346 printf("\n Throughput (since last show)\n"); 347 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 348 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 349 mpps_tx, mbps_tx * 8); 350 351 if (xstats_display_num > 0) 352 nic_xstats_display_periodic(port_id); 353 354 printf(" %s############################%s\n", 355 nic_stats_border, nic_stats_border); 356 } 357 358 void 359 nic_stats_clear(portid_t port_id) 360 { 361 int ret; 362 363 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 364 print_valid_ports(); 365 return; 366 } 367 368 ret = rte_eth_stats_reset(port_id); 369 if (ret != 0) { 370 fprintf(stderr, 371 "%s: Error: failed to reset stats (port %u): %s", 372 __func__, port_id, strerror(-ret)); 373 return; 374 } 375 376 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 377 if (ret != 0) { 378 if (ret < 0) 379 ret = -ret; 380 fprintf(stderr, 381 "%s: Error: failed to get stats (port %u): %s", 382 __func__, port_id, strerror(ret)); 383 return; 384 } 385 printf("\n NIC statistics for port %d cleared\n", port_id); 386 } 387 388 void 389 nic_xstats_display(portid_t port_id) 390 { 391 struct rte_eth_xstat *xstats; 392 int cnt_xstats, idx_xstat; 393 struct rte_eth_xstat_name *xstats_names; 394 395 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 396 print_valid_ports(); 397 return; 398 } 399 printf("###### NIC extended statistics for port %-2d\n", port_id); 400 if (!rte_eth_dev_is_valid_port(port_id)) { 401 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 402 return; 403 } 404 405 /* Get count */ 406 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 407 if (cnt_xstats < 0) { 408 fprintf(stderr, "Error: Cannot get count of xstats\n"); 409 return; 410 } 411 412 /* Get id-name lookup table */ 413 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 414 if (xstats_names == NULL) { 415 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 416 return; 417 } 418 if (cnt_xstats != rte_eth_xstats_get_names( 419 port_id, xstats_names, cnt_xstats)) { 420 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 421 free(xstats_names); 422 return; 423 } 424 425 /* Get stats themselves */ 426 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 427 if (xstats == NULL) { 428 fprintf(stderr, "Cannot allocate memory for xstats\n"); 429 free(xstats_names); 430 return; 431 } 432 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 433 fprintf(stderr, "Error: Unable to get xstats\n"); 434 free(xstats_names); 435 free(xstats); 436 return; 437 } 438 439 /* Display xstats */ 440 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 441 if (xstats_hide_zero && !xstats[idx_xstat].value) 442 continue; 443 printf("%s: %"PRIu64"\n", 444 xstats_names[idx_xstat].name, 445 xstats[idx_xstat].value); 446 } 447 free(xstats_names); 448 free(xstats); 449 } 450 451 void 452 nic_xstats_clear(portid_t port_id) 453 { 454 int ret; 455 456 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 457 print_valid_ports(); 458 return; 459 } 460 461 ret = rte_eth_xstats_reset(port_id); 462 if (ret != 0) { 463 fprintf(stderr, 464 "%s: Error: failed to reset xstats (port %u): %s\n", 465 __func__, port_id, strerror(-ret)); 466 return; 467 } 468 469 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 470 if (ret != 0) { 471 if (ret < 0) 472 ret = -ret; 473 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 474 __func__, port_id, strerror(ret)); 475 return; 476 } 477 } 478 479 static const char * 480 get_queue_state_name(uint8_t queue_state) 481 { 482 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 483 return "stopped"; 484 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 485 return "started"; 486 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 487 return "hairpin"; 488 else 489 return "unknown"; 490 } 491 492 void 493 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 494 { 495 struct rte_eth_burst_mode mode; 496 struct rte_eth_rxq_info qinfo; 497 int32_t rc; 498 static const char *info_border = "*********************"; 499 500 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 501 if (rc != 0) { 502 fprintf(stderr, 503 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 504 port_id, queue_id, strerror(-rc), rc); 505 return; 506 } 507 508 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 509 info_border, port_id, queue_id, info_border); 510 511 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 512 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 513 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 514 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 515 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 516 printf("\nRX drop packets: %s", 517 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 518 printf("\nRX deferred start: %s", 519 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 520 printf("\nRX scattered packets: %s", 521 (qinfo.scattered_rx != 0) ? "on" : "off"); 522 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 523 if (qinfo.rx_buf_size != 0) 524 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 525 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 526 527 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 528 printf("\nBurst mode: %s%s", 529 mode.info, 530 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 531 " (per queue)" : ""); 532 533 printf("\n"); 534 } 535 536 void 537 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 538 { 539 struct rte_eth_burst_mode mode; 540 struct rte_eth_txq_info qinfo; 541 int32_t rc; 542 static const char *info_border = "*********************"; 543 544 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 545 if (rc != 0) { 546 fprintf(stderr, 547 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 548 port_id, queue_id, strerror(-rc), rc); 549 return; 550 } 551 552 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 553 info_border, port_id, queue_id, info_border); 554 555 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 556 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 557 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 558 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 559 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 560 printf("\nTX deferred start: %s", 561 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 562 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 563 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 564 565 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 566 printf("\nBurst mode: %s%s", 567 mode.info, 568 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 569 " (per queue)" : ""); 570 571 printf("\n"); 572 } 573 574 static int bus_match_all(const struct rte_bus *bus, const void *data) 575 { 576 RTE_SET_USED(bus); 577 RTE_SET_USED(data); 578 return 0; 579 } 580 581 static void 582 device_infos_display_speeds(uint32_t speed_capa) 583 { 584 printf("\n\tDevice speed capability:"); 585 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 586 printf(" Autonegotiate (all speeds)"); 587 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 588 printf(" Disable autonegotiate (fixed speed) "); 589 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 590 printf(" 10 Mbps half-duplex "); 591 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 592 printf(" 10 Mbps full-duplex "); 593 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 594 printf(" 100 Mbps half-duplex "); 595 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 596 printf(" 100 Mbps full-duplex "); 597 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 598 printf(" 1 Gbps "); 599 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 600 printf(" 2.5 Gbps "); 601 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 602 printf(" 5 Gbps "); 603 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 604 printf(" 10 Gbps "); 605 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 606 printf(" 20 Gbps "); 607 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 608 printf(" 25 Gbps "); 609 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 610 printf(" 40 Gbps "); 611 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 612 printf(" 50 Gbps "); 613 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 614 printf(" 56 Gbps "); 615 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 616 printf(" 100 Gbps "); 617 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 618 printf(" 200 Gbps "); 619 if (speed_capa & RTE_ETH_LINK_SPEED_400G) 620 printf(" 400 Gbps "); 621 } 622 623 void 624 device_infos_display(const char *identifier) 625 { 626 static const char *info_border = "*********************"; 627 struct rte_bus *start = NULL, *next; 628 struct rte_dev_iterator dev_iter; 629 char name[RTE_ETH_NAME_MAX_LEN]; 630 struct rte_ether_addr mac_addr; 631 struct rte_device *dev; 632 struct rte_devargs da; 633 portid_t port_id; 634 struct rte_eth_dev_info dev_info; 635 char devstr[128]; 636 637 memset(&da, 0, sizeof(da)); 638 if (!identifier) 639 goto skip_parse; 640 641 if (rte_devargs_parsef(&da, "%s", identifier)) { 642 fprintf(stderr, "cannot parse identifier\n"); 643 return; 644 } 645 646 skip_parse: 647 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 648 649 start = next; 650 if (identifier && da.bus != next) 651 continue; 652 653 snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next)); 654 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 655 656 if (rte_dev_driver(dev) == NULL) 657 continue; 658 /* Check for matching device if identifier is present */ 659 if (identifier && 660 strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev)))) 661 continue; 662 printf("\n%s Infos for device %s %s\n", 663 info_border, rte_dev_name(dev), info_border); 664 printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev))); 665 printf("\nBus information: %s", 666 rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : ""); 667 printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev))); 668 printf("\nDevargs: %s", 669 rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : ""); 670 printf("\nConnect to socket: %d", rte_dev_numa_node(dev)); 671 printf("\n"); 672 673 /* List ports with matching device name */ 674 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 675 printf("\n\tPort id: %-2d", port_id); 676 if (eth_macaddr_get_print_err(port_id, 677 &mac_addr) == 0) 678 print_ethaddr("\n\tMAC address: ", 679 &mac_addr); 680 rte_eth_dev_get_name_by_port(port_id, name); 681 printf("\n\tDevice name: %s", name); 682 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 683 device_infos_display_speeds(dev_info.speed_capa); 684 printf("\n"); 685 } 686 } 687 }; 688 rte_devargs_reset(&da); 689 } 690 691 static void 692 print_dev_capabilities(uint64_t capabilities) 693 { 694 uint64_t single_capa; 695 int begin; 696 int end; 697 int bit; 698 699 if (capabilities == 0) 700 return; 701 702 begin = __builtin_ctzll(capabilities); 703 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 704 705 single_capa = 1ULL << begin; 706 for (bit = begin; bit < end; bit++) { 707 if (capabilities & single_capa) 708 printf(" %s", 709 rte_eth_dev_capability_name(single_capa)); 710 single_capa <<= 1; 711 } 712 } 713 714 uint64_t 715 str_to_rsstypes(const char *str) 716 { 717 uint16_t i; 718 719 for (i = 0; rss_type_table[i].str != NULL; i++) { 720 if (strcmp(rss_type_table[i].str, str) == 0) 721 return rss_type_table[i].rss_type; 722 } 723 724 return 0; 725 } 726 727 const char * 728 rsstypes_to_str(uint64_t rss_type) 729 { 730 uint16_t i; 731 732 for (i = 0; rss_type_table[i].str != NULL; i++) { 733 if (rss_type_table[i].rss_type == rss_type) 734 return rss_type_table[i].str; 735 } 736 737 return NULL; 738 } 739 740 static void 741 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line) 742 { 743 uint16_t user_defined_str_len; 744 uint16_t total_len = 0; 745 uint16_t str_len = 0; 746 uint64_t rss_offload; 747 uint16_t i; 748 749 for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) { 750 rss_offload = RTE_BIT64(i); 751 if ((offload_types & rss_offload) != 0) { 752 const char *p = rsstypes_to_str(rss_offload); 753 754 user_defined_str_len = 755 strlen("user-defined-") + (i / 10 + 1); 756 str_len = p ? strlen(p) : user_defined_str_len; 757 str_len += 2; /* add two spaces */ 758 if (total_len + str_len >= char_num_per_line) { 759 total_len = 0; 760 printf("\n"); 761 } 762 763 if (p) 764 printf(" %s", p); 765 else 766 printf(" user-defined-%u", i); 767 total_len += str_len; 768 } 769 } 770 printf("\n"); 771 } 772 773 void 774 port_infos_display(portid_t port_id) 775 { 776 struct rte_port *port; 777 struct rte_ether_addr mac_addr; 778 struct rte_eth_link link; 779 struct rte_eth_dev_info dev_info; 780 int vlan_offload; 781 struct rte_mempool * mp; 782 static const char *info_border = "*********************"; 783 uint16_t mtu; 784 char name[RTE_ETH_NAME_MAX_LEN]; 785 int ret; 786 char fw_version[ETHDEV_FWVERS_LEN]; 787 788 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 789 print_valid_ports(); 790 return; 791 } 792 port = &ports[port_id]; 793 ret = eth_link_get_nowait_print_err(port_id, &link); 794 if (ret < 0) 795 return; 796 797 ret = eth_dev_info_get_print_err(port_id, &dev_info); 798 if (ret != 0) 799 return; 800 801 printf("\n%s Infos for port %-2d %s\n", 802 info_border, port_id, info_border); 803 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 804 print_ethaddr("MAC address: ", &mac_addr); 805 rte_eth_dev_get_name_by_port(port_id, name); 806 printf("\nDevice name: %s", name); 807 printf("\nDriver name: %s", dev_info.driver_name); 808 809 if (rte_eth_dev_fw_version_get(port_id, fw_version, 810 ETHDEV_FWVERS_LEN) == 0) 811 printf("\nFirmware-version: %s", fw_version); 812 else 813 printf("\nFirmware-version: %s", "not available"); 814 815 if (rte_dev_devargs(dev_info.device) && rte_dev_devargs(dev_info.device)->args) 816 printf("\nDevargs: %s", rte_dev_devargs(dev_info.device)->args); 817 printf("\nConnect to socket: %u", port->socket_id); 818 819 if (port_numa[port_id] != NUMA_NO_CONFIG) { 820 mp = mbuf_pool_find(port_numa[port_id], 0); 821 if (mp) 822 printf("\nmemory allocation on the socket: %d", 823 port_numa[port_id]); 824 } else 825 printf("\nmemory allocation on the socket: %u",port->socket_id); 826 827 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 828 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 829 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 830 ("full-duplex") : ("half-duplex")); 831 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 832 ("On") : ("Off")); 833 834 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 835 printf("MTU: %u\n", mtu); 836 837 printf("Promiscuous mode: %s\n", 838 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 839 printf("Allmulticast mode: %s\n", 840 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 841 printf("Maximum number of MAC addresses: %u\n", 842 (unsigned int)(port->dev_info.max_mac_addrs)); 843 printf("Maximum number of MAC addresses of hash filtering: %u\n", 844 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 845 846 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 847 if (vlan_offload >= 0){ 848 printf("VLAN offload: \n"); 849 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 850 printf(" strip on, "); 851 else 852 printf(" strip off, "); 853 854 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 855 printf("filter on, "); 856 else 857 printf("filter off, "); 858 859 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 860 printf("extend on, "); 861 else 862 printf("extend off, "); 863 864 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 865 printf("qinq strip on\n"); 866 else 867 printf("qinq strip off\n"); 868 } 869 870 if (dev_info.hash_key_size > 0) 871 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 872 if (dev_info.reta_size > 0) 873 printf("Redirection table size: %u\n", dev_info.reta_size); 874 if (!dev_info.flow_type_rss_offloads) 875 printf("No RSS offload flow type is supported.\n"); 876 else { 877 printf("Supported RSS offload flow types:\n"); 878 rss_offload_types_display(dev_info.flow_type_rss_offloads, 879 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 880 } 881 882 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 883 printf("Maximum configurable length of RX packet: %u\n", 884 dev_info.max_rx_pktlen); 885 printf("Maximum configurable size of LRO aggregated packet: %u\n", 886 dev_info.max_lro_pkt_size); 887 if (dev_info.max_vfs) 888 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 889 if (dev_info.max_vmdq_pools) 890 printf("Maximum number of VMDq pools: %u\n", 891 dev_info.max_vmdq_pools); 892 893 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 894 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 895 printf("Max possible number of RXDs per queue: %hu\n", 896 dev_info.rx_desc_lim.nb_max); 897 printf("Min possible number of RXDs per queue: %hu\n", 898 dev_info.rx_desc_lim.nb_min); 899 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 900 901 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 902 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 903 printf("Max possible number of TXDs per queue: %hu\n", 904 dev_info.tx_desc_lim.nb_max); 905 printf("Min possible number of TXDs per queue: %hu\n", 906 dev_info.tx_desc_lim.nb_min); 907 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 908 printf("Max segment number per packet: %hu\n", 909 dev_info.tx_desc_lim.nb_seg_max); 910 printf("Max segment number per MTU/TSO: %hu\n", 911 dev_info.tx_desc_lim.nb_mtu_seg_max); 912 913 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 914 print_dev_capabilities(dev_info.dev_capa); 915 printf(" )\n"); 916 /* Show switch info only if valid switch domain and port id is set */ 917 if (dev_info.switch_info.domain_id != 918 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 919 if (dev_info.switch_info.name) 920 printf("Switch name: %s\n", dev_info.switch_info.name); 921 922 printf("Switch domain Id: %u\n", 923 dev_info.switch_info.domain_id); 924 printf("Switch Port Id: %u\n", 925 dev_info.switch_info.port_id); 926 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 927 printf("Switch Rx domain: %u\n", 928 dev_info.switch_info.rx_domain); 929 } 930 printf("Device error handling mode: "); 931 switch (dev_info.err_handle_mode) { 932 case RTE_ETH_ERROR_HANDLE_MODE_NONE: 933 printf("none\n"); 934 break; 935 case RTE_ETH_ERROR_HANDLE_MODE_PASSIVE: 936 printf("passive\n"); 937 break; 938 case RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE: 939 printf("proactive\n"); 940 break; 941 default: 942 printf("unknown\n"); 943 break; 944 } 945 printf("Device private info:\n"); 946 ret = rte_eth_dev_priv_dump(port_id, stdout); 947 if (ret == -ENOTSUP) 948 printf(" none\n"); 949 else if (ret < 0) 950 fprintf(stderr, " Failed to dump private info with error (%d): %s\n", 951 ret, strerror(-ret)); 952 } 953 954 void 955 port_summary_header_display(void) 956 { 957 uint16_t port_number; 958 959 port_number = rte_eth_dev_count_avail(); 960 printf("Number of available ports: %i\n", port_number); 961 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 962 "Driver", "Status", "Link"); 963 } 964 965 void 966 port_summary_display(portid_t port_id) 967 { 968 struct rte_ether_addr mac_addr; 969 struct rte_eth_link link; 970 struct rte_eth_dev_info dev_info; 971 char name[RTE_ETH_NAME_MAX_LEN]; 972 int ret; 973 974 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 975 print_valid_ports(); 976 return; 977 } 978 979 ret = eth_link_get_nowait_print_err(port_id, &link); 980 if (ret < 0) 981 return; 982 983 ret = eth_dev_info_get_print_err(port_id, &dev_info); 984 if (ret != 0) 985 return; 986 987 rte_eth_dev_get_name_by_port(port_id, name); 988 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 989 if (ret != 0) 990 return; 991 992 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 993 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 994 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 995 rte_eth_link_speed_to_str(link.link_speed)); 996 } 997 998 void 999 port_eeprom_display(portid_t port_id) 1000 { 1001 struct rte_dev_eeprom_info einfo; 1002 int ret; 1003 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1004 print_valid_ports(); 1005 return; 1006 } 1007 1008 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 1009 if (len_eeprom < 0) { 1010 switch (len_eeprom) { 1011 case -ENODEV: 1012 fprintf(stderr, "port index %d invalid\n", port_id); 1013 break; 1014 case -ENOTSUP: 1015 fprintf(stderr, "operation not supported by device\n"); 1016 break; 1017 case -EIO: 1018 fprintf(stderr, "device is removed\n"); 1019 break; 1020 default: 1021 fprintf(stderr, "Unable to get EEPROM: %d\n", 1022 len_eeprom); 1023 break; 1024 } 1025 return; 1026 } 1027 1028 einfo.offset = 0; 1029 einfo.length = len_eeprom; 1030 einfo.data = calloc(1, len_eeprom); 1031 if (!einfo.data) { 1032 fprintf(stderr, 1033 "Allocation of port %u eeprom data failed\n", 1034 port_id); 1035 return; 1036 } 1037 1038 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1039 if (ret != 0) { 1040 switch (ret) { 1041 case -ENODEV: 1042 fprintf(stderr, "port index %d invalid\n", port_id); 1043 break; 1044 case -ENOTSUP: 1045 fprintf(stderr, "operation not supported by device\n"); 1046 break; 1047 case -EIO: 1048 fprintf(stderr, "device is removed\n"); 1049 break; 1050 default: 1051 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1052 break; 1053 } 1054 free(einfo.data); 1055 return; 1056 } 1057 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1058 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1059 free(einfo.data); 1060 } 1061 1062 void 1063 port_module_eeprom_display(portid_t port_id) 1064 { 1065 struct rte_eth_dev_module_info minfo; 1066 struct rte_dev_eeprom_info einfo; 1067 int ret; 1068 1069 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1070 print_valid_ports(); 1071 return; 1072 } 1073 1074 1075 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1076 if (ret != 0) { 1077 switch (ret) { 1078 case -ENODEV: 1079 fprintf(stderr, "port index %d invalid\n", port_id); 1080 break; 1081 case -ENOTSUP: 1082 fprintf(stderr, "operation not supported by device\n"); 1083 break; 1084 case -EIO: 1085 fprintf(stderr, "device is removed\n"); 1086 break; 1087 default: 1088 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1089 ret); 1090 break; 1091 } 1092 return; 1093 } 1094 1095 einfo.offset = 0; 1096 einfo.length = minfo.eeprom_len; 1097 einfo.data = calloc(1, minfo.eeprom_len); 1098 if (!einfo.data) { 1099 fprintf(stderr, 1100 "Allocation of port %u eeprom data failed\n", 1101 port_id); 1102 return; 1103 } 1104 1105 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1106 if (ret != 0) { 1107 switch (ret) { 1108 case -ENODEV: 1109 fprintf(stderr, "port index %d invalid\n", port_id); 1110 break; 1111 case -ENOTSUP: 1112 fprintf(stderr, "operation not supported by device\n"); 1113 break; 1114 case -EIO: 1115 fprintf(stderr, "device is removed\n"); 1116 break; 1117 default: 1118 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1119 ret); 1120 break; 1121 } 1122 free(einfo.data); 1123 return; 1124 } 1125 1126 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1127 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1128 free(einfo.data); 1129 } 1130 1131 int 1132 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1133 { 1134 uint16_t pid; 1135 1136 if (port_id == (portid_t)RTE_PORT_ALL) 1137 return 0; 1138 1139 RTE_ETH_FOREACH_DEV(pid) 1140 if (port_id == pid) 1141 return 0; 1142 1143 if (warning == ENABLED_WARN) 1144 fprintf(stderr, "Invalid port %d\n", port_id); 1145 1146 return 1; 1147 } 1148 1149 void print_valid_ports(void) 1150 { 1151 portid_t pid; 1152 1153 printf("The valid ports array is ["); 1154 RTE_ETH_FOREACH_DEV(pid) { 1155 printf(" %d", pid); 1156 } 1157 printf(" ]\n"); 1158 } 1159 1160 static int 1161 vlan_id_is_invalid(uint16_t vlan_id) 1162 { 1163 if (vlan_id < 4096) 1164 return 0; 1165 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1166 return 1; 1167 } 1168 1169 static uint32_t 1170 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1171 { 1172 uint32_t overhead_len; 1173 1174 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1175 overhead_len = max_rx_pktlen - max_mtu; 1176 else 1177 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1178 1179 return overhead_len; 1180 } 1181 1182 static int 1183 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1184 { 1185 struct rte_eth_dev_info dev_info; 1186 uint32_t overhead_len; 1187 uint32_t frame_size; 1188 int ret; 1189 1190 ret = rte_eth_dev_info_get(port_id, &dev_info); 1191 if (ret != 0) 1192 return ret; 1193 1194 if (mtu < dev_info.min_mtu) { 1195 fprintf(stderr, 1196 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1197 mtu, dev_info.min_mtu, port_id); 1198 return -EINVAL; 1199 } 1200 if (mtu > dev_info.max_mtu) { 1201 fprintf(stderr, 1202 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1203 mtu, dev_info.max_mtu, port_id); 1204 return -EINVAL; 1205 } 1206 1207 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1208 dev_info.max_mtu); 1209 frame_size = mtu + overhead_len; 1210 if (frame_size > dev_info.max_rx_pktlen) { 1211 fprintf(stderr, 1212 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1213 frame_size, dev_info.max_rx_pktlen, port_id); 1214 return -EINVAL; 1215 } 1216 1217 return 0; 1218 } 1219 1220 void 1221 port_mtu_set(portid_t port_id, uint16_t mtu) 1222 { 1223 struct rte_port *port = &ports[port_id]; 1224 int diag; 1225 1226 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1227 return; 1228 1229 diag = eth_dev_validate_mtu(port_id, mtu); 1230 if (diag != 0) 1231 return; 1232 1233 if (port->need_reconfig == 0) { 1234 diag = rte_eth_dev_set_mtu(port_id, mtu); 1235 if (diag != 0) { 1236 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1237 return; 1238 } 1239 } 1240 1241 port->dev_conf.rxmode.mtu = mtu; 1242 } 1243 1244 /* Generic flow management functions. */ 1245 1246 static struct port_flow_tunnel * 1247 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1248 { 1249 struct port_flow_tunnel *flow_tunnel; 1250 1251 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1252 if (flow_tunnel->id == port_tunnel_id) 1253 goto out; 1254 } 1255 flow_tunnel = NULL; 1256 1257 out: 1258 return flow_tunnel; 1259 } 1260 1261 const char * 1262 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1263 { 1264 const char *type; 1265 switch (tunnel->type) { 1266 default: 1267 type = "unknown"; 1268 break; 1269 case RTE_FLOW_ITEM_TYPE_VXLAN: 1270 type = "vxlan"; 1271 break; 1272 case RTE_FLOW_ITEM_TYPE_GRE: 1273 type = "gre"; 1274 break; 1275 case RTE_FLOW_ITEM_TYPE_NVGRE: 1276 type = "nvgre"; 1277 break; 1278 case RTE_FLOW_ITEM_TYPE_GENEVE: 1279 type = "geneve"; 1280 break; 1281 } 1282 1283 return type; 1284 } 1285 1286 struct port_flow_tunnel * 1287 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1288 { 1289 struct rte_port *port = &ports[port_id]; 1290 struct port_flow_tunnel *flow_tunnel; 1291 1292 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1293 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1294 goto out; 1295 } 1296 flow_tunnel = NULL; 1297 1298 out: 1299 return flow_tunnel; 1300 } 1301 1302 void port_flow_tunnel_list(portid_t port_id) 1303 { 1304 struct rte_port *port = &ports[port_id]; 1305 struct port_flow_tunnel *flt; 1306 1307 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1308 printf("port %u tunnel #%u type=%s", 1309 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1310 if (flt->tunnel.tun_id) 1311 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1312 printf("\n"); 1313 } 1314 } 1315 1316 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1317 { 1318 struct rte_port *port = &ports[port_id]; 1319 struct port_flow_tunnel *flt; 1320 1321 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1322 if (flt->id == tunnel_id) 1323 break; 1324 } 1325 if (flt) { 1326 LIST_REMOVE(flt, chain); 1327 free(flt); 1328 printf("port %u: flow tunnel #%u destroyed\n", 1329 port_id, tunnel_id); 1330 } 1331 } 1332 1333 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1334 { 1335 struct rte_port *port = &ports[port_id]; 1336 enum rte_flow_item_type type; 1337 struct port_flow_tunnel *flt; 1338 1339 if (!strcmp(ops->type, "vxlan")) 1340 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1341 else if (!strcmp(ops->type, "gre")) 1342 type = RTE_FLOW_ITEM_TYPE_GRE; 1343 else if (!strcmp(ops->type, "nvgre")) 1344 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1345 else if (!strcmp(ops->type, "geneve")) 1346 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1347 else { 1348 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1349 ops->type); 1350 return; 1351 } 1352 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1353 if (flt->tunnel.type == type) 1354 break; 1355 } 1356 if (!flt) { 1357 flt = calloc(1, sizeof(*flt)); 1358 if (!flt) { 1359 fprintf(stderr, "failed to allocate port flt object\n"); 1360 return; 1361 } 1362 flt->tunnel.type = type; 1363 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1364 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1365 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1366 } 1367 printf("port %d: flow tunnel #%u type %s\n", 1368 port_id, flt->id, ops->type); 1369 } 1370 1371 /** Generate a port_flow entry from attributes/pattern/actions. */ 1372 static struct port_flow * 1373 port_flow_new(const struct rte_flow_attr *attr, 1374 const struct rte_flow_item *pattern, 1375 const struct rte_flow_action *actions, 1376 struct rte_flow_error *error) 1377 { 1378 const struct rte_flow_conv_rule rule = { 1379 .attr_ro = attr, 1380 .pattern_ro = pattern, 1381 .actions_ro = actions, 1382 }; 1383 struct port_flow *pf; 1384 int ret; 1385 1386 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1387 if (ret < 0) 1388 return NULL; 1389 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1390 if (!pf) { 1391 rte_flow_error_set 1392 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1393 "calloc() failed"); 1394 return NULL; 1395 } 1396 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1397 error) >= 0) 1398 return pf; 1399 free(pf); 1400 return NULL; 1401 } 1402 1403 /** Print a message out of a flow error. */ 1404 static int 1405 port_flow_complain(struct rte_flow_error *error) 1406 { 1407 static const char *const errstrlist[] = { 1408 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1409 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1410 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1411 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1412 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1413 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1414 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1415 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1416 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1417 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1418 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1419 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1420 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1421 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1422 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1423 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1424 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1425 }; 1426 const char *errstr; 1427 char buf[32]; 1428 int err = rte_errno; 1429 1430 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1431 !errstrlist[error->type]) 1432 errstr = "unknown type"; 1433 else 1434 errstr = errstrlist[error->type]; 1435 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1436 __func__, error->type, errstr, 1437 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1438 error->cause), buf) : "", 1439 error->message ? error->message : "(no stated reason)", 1440 rte_strerror(err)); 1441 1442 switch (error->type) { 1443 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1444 fprintf(stderr, "The status suggests the use of \"transfer\" " 1445 "as the possible cause of the failure. Make " 1446 "sure that the flow in question and its " 1447 "indirect components (if any) are managed " 1448 "via \"transfer\" proxy port. Use command " 1449 "\"show port (port_id) flow transfer proxy\" " 1450 "to figure out the proxy port ID\n"); 1451 break; 1452 default: 1453 break; 1454 } 1455 1456 return -err; 1457 } 1458 1459 static void 1460 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line) 1461 { 1462 uint16_t total_len = 0; 1463 uint16_t str_len; 1464 uint16_t i; 1465 1466 if (rss_types == 0) 1467 return; 1468 1469 for (i = 0; rss_type_table[i].str; i++) { 1470 if (rss_type_table[i].rss_type == 0) 1471 continue; 1472 1473 if ((rss_types & rss_type_table[i].rss_type) == 1474 rss_type_table[i].rss_type) { 1475 /* Contain two spaces */ 1476 str_len = strlen(rss_type_table[i].str) + 2; 1477 if (total_len + str_len > char_num_per_line) { 1478 printf("\n"); 1479 total_len = 0; 1480 } 1481 printf(" %s", rss_type_table[i].str); 1482 total_len += str_len; 1483 } 1484 } 1485 printf("\n"); 1486 } 1487 1488 static void 1489 rss_config_display(struct rte_flow_action_rss *rss_conf) 1490 { 1491 uint8_t i; 1492 1493 if (rss_conf == NULL) { 1494 fprintf(stderr, "Invalid rule\n"); 1495 return; 1496 } 1497 1498 printf("RSS:\n" 1499 " queues:"); 1500 if (rss_conf->queue_num == 0) 1501 printf(" none"); 1502 for (i = 0; i < rss_conf->queue_num; i++) 1503 printf(" %d", rss_conf->queue[i]); 1504 printf("\n"); 1505 1506 printf(" function: "); 1507 switch (rss_conf->func) { 1508 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1509 printf("default\n"); 1510 break; 1511 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1512 printf("toeplitz\n"); 1513 break; 1514 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1515 printf("simple_xor\n"); 1516 break; 1517 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1518 printf("symmetric_toeplitz\n"); 1519 break; 1520 default: 1521 printf("Unknown function\n"); 1522 return; 1523 } 1524 1525 printf(" RSS key:\n"); 1526 if (rss_conf->key_len == 0) { 1527 printf(" none"); 1528 } else { 1529 printf(" key_len: %u\n", rss_conf->key_len); 1530 printf(" key: "); 1531 if (rss_conf->key == NULL) { 1532 printf("none"); 1533 } else { 1534 for (i = 0; i < rss_conf->key_len; i++) 1535 printf("%02X", rss_conf->key[i]); 1536 } 1537 } 1538 printf("\n"); 1539 1540 printf(" types:\n"); 1541 if (rss_conf->types == 0) { 1542 printf(" none\n"); 1543 return; 1544 } 1545 rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 1546 } 1547 1548 static struct port_indirect_action * 1549 action_get_by_id(portid_t port_id, uint32_t id) 1550 { 1551 struct rte_port *port; 1552 struct port_indirect_action **ppia; 1553 struct port_indirect_action *pia = NULL; 1554 1555 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1556 port_id == (portid_t)RTE_PORT_ALL) 1557 return NULL; 1558 port = &ports[port_id]; 1559 ppia = &port->actions_list; 1560 while (*ppia) { 1561 if ((*ppia)->id == id) { 1562 pia = *ppia; 1563 break; 1564 } 1565 ppia = &(*ppia)->next; 1566 } 1567 if (!pia) 1568 fprintf(stderr, 1569 "Failed to find indirect action #%u on port %u\n", 1570 id, port_id); 1571 return pia; 1572 } 1573 1574 static int 1575 action_alloc(portid_t port_id, uint32_t id, 1576 struct port_indirect_action **action) 1577 { 1578 struct rte_port *port; 1579 struct port_indirect_action **ppia; 1580 struct port_indirect_action *pia = NULL; 1581 1582 *action = NULL; 1583 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1584 port_id == (portid_t)RTE_PORT_ALL) 1585 return -EINVAL; 1586 port = &ports[port_id]; 1587 if (id == UINT32_MAX) { 1588 /* taking first available ID */ 1589 if (port->actions_list) { 1590 if (port->actions_list->id == UINT32_MAX - 1) { 1591 fprintf(stderr, 1592 "Highest indirect action ID is already assigned, delete it first\n"); 1593 return -ENOMEM; 1594 } 1595 id = port->actions_list->id + 1; 1596 } else { 1597 id = 0; 1598 } 1599 } 1600 pia = calloc(1, sizeof(*pia)); 1601 if (!pia) { 1602 fprintf(stderr, 1603 "Allocation of port %u indirect action failed\n", 1604 port_id); 1605 return -ENOMEM; 1606 } 1607 ppia = &port->actions_list; 1608 while (*ppia && (*ppia)->id > id) 1609 ppia = &(*ppia)->next; 1610 if (*ppia && (*ppia)->id == id) { 1611 fprintf(stderr, 1612 "Indirect action #%u is already assigned, delete it first\n", 1613 id); 1614 free(pia); 1615 return -EINVAL; 1616 } 1617 pia->next = *ppia; 1618 pia->id = id; 1619 *ppia = pia; 1620 *action = pia; 1621 return 0; 1622 } 1623 1624 static int 1625 template_alloc(uint32_t id, struct port_template **template, 1626 struct port_template **list) 1627 { 1628 struct port_template *lst = *list; 1629 struct port_template **ppt; 1630 struct port_template *pt = NULL; 1631 1632 *template = NULL; 1633 if (id == UINT32_MAX) { 1634 /* taking first available ID */ 1635 if (lst) { 1636 if (lst->id == UINT32_MAX - 1) { 1637 printf("Highest template ID is already" 1638 " assigned, delete it first\n"); 1639 return -ENOMEM; 1640 } 1641 id = lst->id + 1; 1642 } else { 1643 id = 0; 1644 } 1645 } 1646 pt = calloc(1, sizeof(*pt)); 1647 if (!pt) { 1648 printf("Allocation of port template failed\n"); 1649 return -ENOMEM; 1650 } 1651 ppt = list; 1652 while (*ppt && (*ppt)->id > id) 1653 ppt = &(*ppt)->next; 1654 if (*ppt && (*ppt)->id == id) { 1655 printf("Template #%u is already assigned," 1656 " delete it first\n", id); 1657 free(pt); 1658 return -EINVAL; 1659 } 1660 pt->next = *ppt; 1661 pt->id = id; 1662 *ppt = pt; 1663 *template = pt; 1664 return 0; 1665 } 1666 1667 static int 1668 table_alloc(uint32_t id, struct port_table **table, 1669 struct port_table **list) 1670 { 1671 struct port_table *lst = *list; 1672 struct port_table **ppt; 1673 struct port_table *pt = NULL; 1674 1675 *table = NULL; 1676 if (id == UINT32_MAX) { 1677 /* taking first available ID */ 1678 if (lst) { 1679 if (lst->id == UINT32_MAX - 1) { 1680 printf("Highest table ID is already" 1681 " assigned, delete it first\n"); 1682 return -ENOMEM; 1683 } 1684 id = lst->id + 1; 1685 } else { 1686 id = 0; 1687 } 1688 } 1689 pt = calloc(1, sizeof(*pt)); 1690 if (!pt) { 1691 printf("Allocation of table failed\n"); 1692 return -ENOMEM; 1693 } 1694 ppt = list; 1695 while (*ppt && (*ppt)->id > id) 1696 ppt = &(*ppt)->next; 1697 if (*ppt && (*ppt)->id == id) { 1698 printf("Table #%u is already assigned," 1699 " delete it first\n", id); 1700 free(pt); 1701 return -EINVAL; 1702 } 1703 pt->next = *ppt; 1704 pt->id = id; 1705 *ppt = pt; 1706 *table = pt; 1707 return 0; 1708 } 1709 1710 /** Get info about flow management resources. */ 1711 int 1712 port_flow_get_info(portid_t port_id) 1713 { 1714 struct rte_flow_port_info port_info; 1715 struct rte_flow_queue_info queue_info; 1716 struct rte_flow_error error; 1717 1718 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1719 port_id == (portid_t)RTE_PORT_ALL) 1720 return -EINVAL; 1721 /* Poisoning to make sure PMDs update it in case of error. */ 1722 memset(&error, 0x99, sizeof(error)); 1723 memset(&port_info, 0, sizeof(port_info)); 1724 memset(&queue_info, 0, sizeof(queue_info)); 1725 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1726 return port_flow_complain(&error); 1727 printf("Flow engine resources on port %u:\n" 1728 "Number of queues: %d\n" 1729 "Size of queues: %d\n" 1730 "Number of counters: %d\n" 1731 "Number of aging objects: %d\n" 1732 "Number of meter actions: %d\n", 1733 port_id, port_info.max_nb_queues, 1734 queue_info.max_size, 1735 port_info.max_nb_counters, 1736 port_info.max_nb_aging_objects, 1737 port_info.max_nb_meters); 1738 return 0; 1739 } 1740 1741 /** Configure flow management resources. */ 1742 int 1743 port_flow_configure(portid_t port_id, 1744 const struct rte_flow_port_attr *port_attr, 1745 uint16_t nb_queue, 1746 const struct rte_flow_queue_attr *queue_attr) 1747 { 1748 struct rte_port *port; 1749 struct rte_flow_error error; 1750 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1751 int std_queue; 1752 1753 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1754 port_id == (portid_t)RTE_PORT_ALL) 1755 return -EINVAL; 1756 port = &ports[port_id]; 1757 port->queue_nb = nb_queue; 1758 port->queue_sz = queue_attr->size; 1759 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1760 attr_list[std_queue] = queue_attr; 1761 /* Poisoning to make sure PMDs update it in case of error. */ 1762 memset(&error, 0x66, sizeof(error)); 1763 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1764 return port_flow_complain(&error); 1765 printf("Configure flows on port %u: " 1766 "number of queues %d with %d elements\n", 1767 port_id, nb_queue, queue_attr->size); 1768 return 0; 1769 } 1770 1771 static int 1772 action_handle_create(portid_t port_id, 1773 struct port_indirect_action *pia, 1774 const struct rte_flow_indir_action_conf *conf, 1775 const struct rte_flow_action *action, 1776 struct rte_flow_error *error) 1777 { 1778 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1779 struct rte_flow_action_age *age = 1780 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1781 1782 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1783 age->context = &pia->age_type; 1784 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1785 struct rte_flow_action_conntrack *ct = 1786 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1787 1788 memcpy(ct, &conntrack_context, sizeof(*ct)); 1789 } 1790 pia->type = action->type; 1791 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1792 error); 1793 return pia->handle ? 0 : -1; 1794 } 1795 1796 static int 1797 action_list_handle_create(portid_t port_id, 1798 struct port_indirect_action *pia, 1799 const struct rte_flow_indir_action_conf *conf, 1800 const struct rte_flow_action *actions, 1801 struct rte_flow_error *error) 1802 { 1803 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 1804 pia->list_handle = 1805 rte_flow_action_list_handle_create(port_id, conf, 1806 actions, error); 1807 return pia->list_handle ? 0 : -1; 1808 } 1809 /** Create indirect action */ 1810 int 1811 port_action_handle_create(portid_t port_id, uint32_t id, bool indirect_list, 1812 const struct rte_flow_indir_action_conf *conf, 1813 const struct rte_flow_action *action) 1814 { 1815 struct port_indirect_action *pia; 1816 int ret; 1817 struct rte_flow_error error; 1818 1819 ret = action_alloc(port_id, id, &pia); 1820 if (ret) 1821 return ret; 1822 /* Poisoning to make sure PMDs update it in case of error. */ 1823 memset(&error, 0x22, sizeof(error)); 1824 ret = indirect_list ? 1825 action_list_handle_create(port_id, pia, conf, action, &error) : 1826 action_handle_create(port_id, pia, conf, action, &error); 1827 if (ret) { 1828 uint32_t destroy_id = pia->id; 1829 port_action_handle_destroy(port_id, 1, &destroy_id); 1830 return port_flow_complain(&error); 1831 } 1832 printf("Indirect action #%u created\n", pia->id); 1833 return 0; 1834 } 1835 1836 /** Destroy indirect action */ 1837 int 1838 port_action_handle_destroy(portid_t port_id, 1839 uint32_t n, 1840 const uint32_t *actions) 1841 { 1842 struct rte_port *port; 1843 struct port_indirect_action **tmp; 1844 int ret = 0; 1845 1846 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1847 port_id == (portid_t)RTE_PORT_ALL) 1848 return -EINVAL; 1849 port = &ports[port_id]; 1850 tmp = &port->actions_list; 1851 while (*tmp) { 1852 uint32_t i; 1853 1854 for (i = 0; i != n; ++i) { 1855 struct rte_flow_error error; 1856 struct port_indirect_action *pia = *tmp; 1857 1858 if (actions[i] != pia->id) 1859 continue; 1860 /* 1861 * Poisoning to make sure PMDs update it in case 1862 * of error. 1863 */ 1864 memset(&error, 0x33, sizeof(error)); 1865 1866 if (pia->handle) { 1867 ret = pia->type == 1868 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1869 rte_flow_action_list_handle_destroy 1870 (port_id, pia->list_handle, &error) : 1871 rte_flow_action_handle_destroy 1872 (port_id, pia->handle, &error); 1873 if (ret) { 1874 ret = port_flow_complain(&error); 1875 continue; 1876 } 1877 } 1878 *tmp = pia->next; 1879 printf("Indirect action #%u destroyed\n", pia->id); 1880 free(pia); 1881 break; 1882 } 1883 if (i == n) 1884 tmp = &(*tmp)->next; 1885 } 1886 return ret; 1887 } 1888 1889 int 1890 port_action_handle_flush(portid_t port_id) 1891 { 1892 struct rte_port *port; 1893 struct port_indirect_action **tmp; 1894 int ret = 0; 1895 1896 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1897 port_id == (portid_t)RTE_PORT_ALL) 1898 return -EINVAL; 1899 port = &ports[port_id]; 1900 tmp = &port->actions_list; 1901 while (*tmp != NULL) { 1902 struct rte_flow_error error; 1903 struct port_indirect_action *pia = *tmp; 1904 1905 /* Poisoning to make sure PMDs update it in case of error. */ 1906 memset(&error, 0x44, sizeof(error)); 1907 if (pia->handle != NULL) { 1908 ret = pia->type == 1909 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1910 rte_flow_action_list_handle_destroy 1911 (port_id, pia->list_handle, &error) : 1912 rte_flow_action_handle_destroy 1913 (port_id, pia->handle, &error); 1914 if (ret) { 1915 printf("Indirect action #%u not destroyed\n", 1916 pia->id); 1917 ret = port_flow_complain(&error); 1918 } 1919 tmp = &pia->next; 1920 } else { 1921 *tmp = pia->next; 1922 free(pia); 1923 } 1924 } 1925 return ret; 1926 } 1927 1928 /** Get indirect action by port + id */ 1929 struct rte_flow_action_handle * 1930 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1931 { 1932 1933 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1934 1935 return (pia) ? pia->handle : NULL; 1936 } 1937 1938 /** Update indirect action */ 1939 int 1940 port_action_handle_update(portid_t port_id, uint32_t id, 1941 const struct rte_flow_action *action) 1942 { 1943 struct rte_flow_error error; 1944 struct rte_flow_action_handle *action_handle; 1945 struct port_indirect_action *pia; 1946 const void *update; 1947 1948 action_handle = port_action_handle_get_by_id(port_id, id); 1949 if (!action_handle) 1950 return -EINVAL; 1951 pia = action_get_by_id(port_id, id); 1952 if (!pia) 1953 return -EINVAL; 1954 switch (pia->type) { 1955 case RTE_FLOW_ACTION_TYPE_AGE: 1956 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1957 update = action->conf; 1958 break; 1959 default: 1960 update = action; 1961 break; 1962 } 1963 if (rte_flow_action_handle_update(port_id, action_handle, update, 1964 &error)) { 1965 return port_flow_complain(&error); 1966 } 1967 printf("Indirect action #%u updated\n", id); 1968 return 0; 1969 } 1970 1971 static void 1972 port_action_handle_query_dump(portid_t port_id, 1973 const struct port_indirect_action *pia, 1974 union port_action_query *query) 1975 { 1976 if (!pia || !query) 1977 return; 1978 switch (pia->type) { 1979 case RTE_FLOW_ACTION_TYPE_AGE: 1980 printf("Indirect AGE action:\n" 1981 " aged: %u\n" 1982 " sec_since_last_hit_valid: %u\n" 1983 " sec_since_last_hit: %" PRIu32 "\n", 1984 query->age.aged, 1985 query->age.sec_since_last_hit_valid, 1986 query->age.sec_since_last_hit); 1987 break; 1988 case RTE_FLOW_ACTION_TYPE_COUNT: 1989 printf("Indirect COUNT action:\n" 1990 " hits_set: %u\n" 1991 " bytes_set: %u\n" 1992 " hits: %" PRIu64 "\n" 1993 " bytes: %" PRIu64 "\n", 1994 query->count.hits_set, 1995 query->count.bytes_set, 1996 query->count.hits, 1997 query->count.bytes); 1998 break; 1999 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 2000 printf("Conntrack Context:\n" 2001 " Peer: %u, Flow dir: %s, Enable: %u\n" 2002 " Live: %u, SACK: %u, CACK: %u\n" 2003 " Packet dir: %s, Liberal: %u, State: %u\n" 2004 " Factor: %u, Retrans: %u, TCP flags: %u\n" 2005 " Last Seq: %u, Last ACK: %u\n" 2006 " Last Win: %u, Last End: %u\n", 2007 query->ct.peer_port, 2008 query->ct.is_original_dir ? "Original" : "Reply", 2009 query->ct.enable, query->ct.live_connection, 2010 query->ct.selective_ack, query->ct.challenge_ack_passed, 2011 query->ct.last_direction ? "Original" : "Reply", 2012 query->ct.liberal_mode, query->ct.state, 2013 query->ct.max_ack_window, query->ct.retransmission_limit, 2014 query->ct.last_index, query->ct.last_seq, 2015 query->ct.last_ack, query->ct.last_window, 2016 query->ct.last_end); 2017 printf(" Original Dir:\n" 2018 " scale: %u, fin: %u, ack seen: %u\n" 2019 " unacked data: %u\n Sent end: %u," 2020 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2021 query->ct.original_dir.scale, 2022 query->ct.original_dir.close_initiated, 2023 query->ct.original_dir.last_ack_seen, 2024 query->ct.original_dir.data_unacked, 2025 query->ct.original_dir.sent_end, 2026 query->ct.original_dir.reply_end, 2027 query->ct.original_dir.max_win, 2028 query->ct.original_dir.max_ack); 2029 printf(" Reply Dir:\n" 2030 " scale: %u, fin: %u, ack seen: %u\n" 2031 " unacked data: %u\n Sent end: %u," 2032 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2033 query->ct.reply_dir.scale, 2034 query->ct.reply_dir.close_initiated, 2035 query->ct.reply_dir.last_ack_seen, 2036 query->ct.reply_dir.data_unacked, 2037 query->ct.reply_dir.sent_end, 2038 query->ct.reply_dir.reply_end, 2039 query->ct.reply_dir.max_win, 2040 query->ct.reply_dir.max_ack); 2041 break; 2042 case RTE_FLOW_ACTION_TYPE_QUOTA: 2043 printf("Indirect QUOTA action %u\n" 2044 " unused quota: %" PRId64 "\n", 2045 pia->id, query->quota.quota); 2046 break; 2047 default: 2048 printf("port-%u: indirect action %u (type: %d) doesn't support query\n", 2049 pia->type, pia->id, port_id); 2050 break; 2051 } 2052 2053 } 2054 2055 void 2056 port_action_handle_query_update(portid_t port_id, uint32_t id, 2057 enum rte_flow_query_update_mode qu_mode, 2058 const struct rte_flow_action *action) 2059 { 2060 int ret; 2061 struct rte_flow_error error; 2062 struct port_indirect_action *pia; 2063 union port_action_query query; 2064 2065 pia = action_get_by_id(port_id, id); 2066 if (!pia || !pia->handle) 2067 return; 2068 ret = rte_flow_action_handle_query_update(port_id, pia->handle, action, 2069 &query, qu_mode, &error); 2070 if (ret) 2071 port_flow_complain(&error); 2072 else 2073 port_action_handle_query_dump(port_id, pia, &query); 2074 2075 } 2076 2077 int 2078 port_action_handle_query(portid_t port_id, uint32_t id) 2079 { 2080 struct rte_flow_error error; 2081 struct port_indirect_action *pia; 2082 union port_action_query query; 2083 2084 pia = action_get_by_id(port_id, id); 2085 if (!pia) 2086 return -EINVAL; 2087 switch (pia->type) { 2088 case RTE_FLOW_ACTION_TYPE_AGE: 2089 case RTE_FLOW_ACTION_TYPE_COUNT: 2090 case RTE_FLOW_ACTION_TYPE_QUOTA: 2091 break; 2092 default: 2093 fprintf(stderr, 2094 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2095 id, pia->type, port_id); 2096 return -ENOTSUP; 2097 } 2098 /* Poisoning to make sure PMDs update it in case of error. */ 2099 memset(&error, 0x55, sizeof(error)); 2100 memset(&query, 0, sizeof(query)); 2101 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 2102 return port_flow_complain(&error); 2103 port_action_handle_query_dump(port_id, pia, &query); 2104 return 0; 2105 } 2106 2107 static struct port_flow_tunnel * 2108 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 2109 const struct rte_flow_item *pattern, 2110 const struct rte_flow_action *actions, 2111 const struct tunnel_ops *tunnel_ops) 2112 { 2113 int ret; 2114 struct rte_port *port; 2115 struct port_flow_tunnel *pft; 2116 struct rte_flow_error error; 2117 2118 port = &ports[port_id]; 2119 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2120 if (!pft) { 2121 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2122 tunnel_ops->id); 2123 return NULL; 2124 } 2125 if (tunnel_ops->actions) { 2126 uint32_t num_actions; 2127 const struct rte_flow_action *aptr; 2128 2129 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2130 &pft->pmd_actions, 2131 &pft->num_pmd_actions, 2132 &error); 2133 if (ret) { 2134 port_flow_complain(&error); 2135 return NULL; 2136 } 2137 for (aptr = actions, num_actions = 1; 2138 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2139 aptr++, num_actions++); 2140 pft->actions = malloc( 2141 (num_actions + pft->num_pmd_actions) * 2142 sizeof(actions[0])); 2143 if (!pft->actions) { 2144 rte_flow_tunnel_action_decap_release( 2145 port_id, pft->actions, 2146 pft->num_pmd_actions, &error); 2147 return NULL; 2148 } 2149 rte_memcpy(pft->actions, pft->pmd_actions, 2150 pft->num_pmd_actions * sizeof(actions[0])); 2151 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2152 num_actions * sizeof(actions[0])); 2153 } 2154 if (tunnel_ops->items) { 2155 uint32_t num_items; 2156 const struct rte_flow_item *iptr; 2157 2158 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2159 &pft->pmd_items, 2160 &pft->num_pmd_items, 2161 &error); 2162 if (ret) { 2163 port_flow_complain(&error); 2164 return NULL; 2165 } 2166 for (iptr = pattern, num_items = 1; 2167 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2168 iptr++, num_items++); 2169 pft->items = malloc((num_items + pft->num_pmd_items) * 2170 sizeof(pattern[0])); 2171 if (!pft->items) { 2172 rte_flow_tunnel_item_release( 2173 port_id, pft->pmd_items, 2174 pft->num_pmd_items, &error); 2175 return NULL; 2176 } 2177 rte_memcpy(pft->items, pft->pmd_items, 2178 pft->num_pmd_items * sizeof(pattern[0])); 2179 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2180 num_items * sizeof(pattern[0])); 2181 } 2182 2183 return pft; 2184 } 2185 2186 static void 2187 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2188 const struct tunnel_ops *tunnel_ops, 2189 struct port_flow_tunnel *pft) 2190 { 2191 struct rte_flow_error error; 2192 2193 if (tunnel_ops->actions) { 2194 free(pft->actions); 2195 rte_flow_tunnel_action_decap_release( 2196 port_id, pft->pmd_actions, 2197 pft->num_pmd_actions, &error); 2198 pft->actions = NULL; 2199 pft->pmd_actions = NULL; 2200 } 2201 if (tunnel_ops->items) { 2202 free(pft->items); 2203 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2204 pft->num_pmd_items, 2205 &error); 2206 pft->items = NULL; 2207 pft->pmd_items = NULL; 2208 } 2209 } 2210 2211 /** Add port meter policy */ 2212 int 2213 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2214 const struct rte_flow_action *actions) 2215 { 2216 struct rte_mtr_error error; 2217 const struct rte_flow_action *act = actions; 2218 const struct rte_flow_action *start; 2219 struct rte_mtr_meter_policy_params policy; 2220 uint32_t i = 0, act_n; 2221 int ret; 2222 2223 for (i = 0; i < RTE_COLORS; i++) { 2224 for (act_n = 0, start = act; 2225 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2226 act_n++; 2227 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2228 policy.actions[i] = start; 2229 else 2230 policy.actions[i] = NULL; 2231 act++; 2232 } 2233 ret = rte_mtr_meter_policy_add(port_id, 2234 policy_id, 2235 &policy, &error); 2236 if (ret) 2237 print_mtr_err_msg(&error); 2238 return ret; 2239 } 2240 2241 struct rte_flow_meter_profile * 2242 port_meter_profile_get_by_id(portid_t port_id, uint32_t id) 2243 { 2244 struct rte_mtr_error error; 2245 struct rte_flow_meter_profile *profile; 2246 2247 profile = rte_mtr_meter_profile_get(port_id, id, &error); 2248 if (!profile) 2249 print_mtr_err_msg(&error); 2250 return profile; 2251 } 2252 struct rte_flow_meter_policy * 2253 port_meter_policy_get_by_id(portid_t port_id, uint32_t id) 2254 { 2255 struct rte_mtr_error error; 2256 struct rte_flow_meter_policy *policy; 2257 2258 policy = rte_mtr_meter_policy_get(port_id, id, &error); 2259 if (!policy) 2260 print_mtr_err_msg(&error); 2261 return policy; 2262 } 2263 2264 /** Validate flow rule. */ 2265 int 2266 port_flow_validate(portid_t port_id, 2267 const struct rte_flow_attr *attr, 2268 const struct rte_flow_item *pattern, 2269 const struct rte_flow_action *actions, 2270 const struct tunnel_ops *tunnel_ops) 2271 { 2272 struct rte_flow_error error; 2273 struct port_flow_tunnel *pft = NULL; 2274 int ret; 2275 2276 /* Poisoning to make sure PMDs update it in case of error. */ 2277 memset(&error, 0x11, sizeof(error)); 2278 if (tunnel_ops->enabled) { 2279 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2280 actions, tunnel_ops); 2281 if (!pft) 2282 return -ENOENT; 2283 if (pft->items) 2284 pattern = pft->items; 2285 if (pft->actions) 2286 actions = pft->actions; 2287 } 2288 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2289 if (tunnel_ops->enabled) 2290 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2291 if (ret) 2292 return port_flow_complain(&error); 2293 printf("Flow rule validated\n"); 2294 return 0; 2295 } 2296 2297 /** Return age action structure if exists, otherwise NULL. */ 2298 static struct rte_flow_action_age * 2299 age_action_get(const struct rte_flow_action *actions) 2300 { 2301 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2302 switch (actions->type) { 2303 case RTE_FLOW_ACTION_TYPE_AGE: 2304 return (struct rte_flow_action_age *) 2305 (uintptr_t)actions->conf; 2306 default: 2307 break; 2308 } 2309 } 2310 return NULL; 2311 } 2312 2313 /** Create pattern template */ 2314 int 2315 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2316 const struct rte_flow_pattern_template_attr *attr, 2317 const struct rte_flow_item *pattern) 2318 { 2319 struct rte_port *port; 2320 struct port_template *pit; 2321 int ret; 2322 struct rte_flow_error error; 2323 2324 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2325 port_id == (portid_t)RTE_PORT_ALL) 2326 return -EINVAL; 2327 port = &ports[port_id]; 2328 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2329 if (ret) 2330 return ret; 2331 /* Poisoning to make sure PMDs update it in case of error. */ 2332 memset(&error, 0x22, sizeof(error)); 2333 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2334 attr, pattern, &error); 2335 if (!pit->template.pattern_template) { 2336 uint32_t destroy_id = pit->id; 2337 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2338 return port_flow_complain(&error); 2339 } 2340 printf("Pattern template #%u created\n", pit->id); 2341 return 0; 2342 } 2343 2344 /** Destroy pattern template */ 2345 int 2346 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2347 const uint32_t *template) 2348 { 2349 struct rte_port *port; 2350 struct port_template **tmp; 2351 int ret = 0; 2352 2353 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2354 port_id == (portid_t)RTE_PORT_ALL) 2355 return -EINVAL; 2356 port = &ports[port_id]; 2357 tmp = &port->pattern_templ_list; 2358 while (*tmp) { 2359 uint32_t i; 2360 2361 for (i = 0; i != n; ++i) { 2362 struct rte_flow_error error; 2363 struct port_template *pit = *tmp; 2364 2365 if (template[i] != pit->id) 2366 continue; 2367 /* 2368 * Poisoning to make sure PMDs update it in case 2369 * of error. 2370 */ 2371 memset(&error, 0x33, sizeof(error)); 2372 2373 if (pit->template.pattern_template && 2374 rte_flow_pattern_template_destroy(port_id, 2375 pit->template.pattern_template, 2376 &error)) { 2377 ret = port_flow_complain(&error); 2378 continue; 2379 } 2380 *tmp = pit->next; 2381 printf("Pattern template #%u destroyed\n", pit->id); 2382 free(pit); 2383 break; 2384 } 2385 if (i == n) 2386 tmp = &(*tmp)->next; 2387 } 2388 return ret; 2389 } 2390 2391 /** Flush pattern template */ 2392 int 2393 port_flow_pattern_template_flush(portid_t port_id) 2394 { 2395 struct rte_port *port; 2396 struct port_template **tmp; 2397 int ret = 0; 2398 2399 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2400 port_id == (portid_t)RTE_PORT_ALL) 2401 return -EINVAL; 2402 port = &ports[port_id]; 2403 tmp = &port->pattern_templ_list; 2404 while (*tmp) { 2405 struct rte_flow_error error; 2406 struct port_template *pit = *tmp; 2407 2408 /* 2409 * Poisoning to make sure PMDs update it in case 2410 * of error. 2411 */ 2412 memset(&error, 0x33, sizeof(error)); 2413 if (pit->template.pattern_template && 2414 rte_flow_pattern_template_destroy(port_id, 2415 pit->template.pattern_template, &error)) { 2416 printf("Pattern template #%u not destroyed\n", pit->id); 2417 ret = port_flow_complain(&error); 2418 tmp = &pit->next; 2419 } else { 2420 *tmp = pit->next; 2421 free(pit); 2422 } 2423 } 2424 return ret; 2425 } 2426 2427 /** Create actions template */ 2428 int 2429 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2430 const struct rte_flow_actions_template_attr *attr, 2431 const struct rte_flow_action *actions, 2432 const struct rte_flow_action *masks) 2433 { 2434 struct rte_port *port; 2435 struct port_template *pat; 2436 int ret; 2437 struct rte_flow_error error; 2438 2439 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2440 port_id == (portid_t)RTE_PORT_ALL) 2441 return -EINVAL; 2442 port = &ports[port_id]; 2443 ret = template_alloc(id, &pat, &port->actions_templ_list); 2444 if (ret) 2445 return ret; 2446 /* Poisoning to make sure PMDs update it in case of error. */ 2447 memset(&error, 0x22, sizeof(error)); 2448 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2449 attr, actions, masks, &error); 2450 if (!pat->template.actions_template) { 2451 uint32_t destroy_id = pat->id; 2452 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2453 return port_flow_complain(&error); 2454 } 2455 printf("Actions template #%u created\n", pat->id); 2456 return 0; 2457 } 2458 2459 /** Destroy actions template */ 2460 int 2461 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2462 const uint32_t *template) 2463 { 2464 struct rte_port *port; 2465 struct port_template **tmp; 2466 int ret = 0; 2467 2468 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2469 port_id == (portid_t)RTE_PORT_ALL) 2470 return -EINVAL; 2471 port = &ports[port_id]; 2472 tmp = &port->actions_templ_list; 2473 while (*tmp) { 2474 uint32_t i; 2475 2476 for (i = 0; i != n; ++i) { 2477 struct rte_flow_error error; 2478 struct port_template *pat = *tmp; 2479 2480 if (template[i] != pat->id) 2481 continue; 2482 /* 2483 * Poisoning to make sure PMDs update it in case 2484 * of error. 2485 */ 2486 memset(&error, 0x33, sizeof(error)); 2487 2488 if (pat->template.actions_template && 2489 rte_flow_actions_template_destroy(port_id, 2490 pat->template.actions_template, &error)) { 2491 ret = port_flow_complain(&error); 2492 continue; 2493 } 2494 *tmp = pat->next; 2495 printf("Actions template #%u destroyed\n", pat->id); 2496 free(pat); 2497 break; 2498 } 2499 if (i == n) 2500 tmp = &(*tmp)->next; 2501 } 2502 return ret; 2503 } 2504 2505 /** Flush actions template */ 2506 int 2507 port_flow_actions_template_flush(portid_t port_id) 2508 { 2509 struct rte_port *port; 2510 struct port_template **tmp; 2511 int ret = 0; 2512 2513 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2514 port_id == (portid_t)RTE_PORT_ALL) 2515 return -EINVAL; 2516 port = &ports[port_id]; 2517 tmp = &port->actions_templ_list; 2518 while (*tmp) { 2519 struct rte_flow_error error; 2520 struct port_template *pat = *tmp; 2521 2522 /* 2523 * Poisoning to make sure PMDs update it in case 2524 * of error. 2525 */ 2526 memset(&error, 0x33, sizeof(error)); 2527 2528 if (pat->template.actions_template && 2529 rte_flow_actions_template_destroy(port_id, 2530 pat->template.actions_template, &error)) { 2531 ret = port_flow_complain(&error); 2532 printf("Actions template #%u not destroyed\n", pat->id); 2533 tmp = &pat->next; 2534 } else { 2535 *tmp = pat->next; 2536 free(pat); 2537 } 2538 } 2539 return ret; 2540 } 2541 2542 /** Create table */ 2543 int 2544 port_flow_template_table_create(portid_t port_id, uint32_t id, 2545 const struct rte_flow_template_table_attr *table_attr, 2546 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2547 uint32_t nb_actions_templates, uint32_t *actions_templates) 2548 { 2549 struct rte_port *port; 2550 struct port_table *pt; 2551 struct port_template *temp = NULL; 2552 int ret; 2553 uint32_t i; 2554 struct rte_flow_error error; 2555 struct rte_flow_pattern_template 2556 *flow_pattern_templates[nb_pattern_templates]; 2557 struct rte_flow_actions_template 2558 *flow_actions_templates[nb_actions_templates]; 2559 2560 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2561 port_id == (portid_t)RTE_PORT_ALL) 2562 return -EINVAL; 2563 port = &ports[port_id]; 2564 for (i = 0; i < nb_pattern_templates; ++i) { 2565 bool found = false; 2566 temp = port->pattern_templ_list; 2567 while (temp) { 2568 if (pattern_templates[i] == temp->id) { 2569 flow_pattern_templates[i] = 2570 temp->template.pattern_template; 2571 found = true; 2572 break; 2573 } 2574 temp = temp->next; 2575 } 2576 if (!found) { 2577 printf("Pattern template #%u is invalid\n", 2578 pattern_templates[i]); 2579 return -EINVAL; 2580 } 2581 } 2582 for (i = 0; i < nb_actions_templates; ++i) { 2583 bool found = false; 2584 temp = port->actions_templ_list; 2585 while (temp) { 2586 if (actions_templates[i] == temp->id) { 2587 flow_actions_templates[i] = 2588 temp->template.actions_template; 2589 found = true; 2590 break; 2591 } 2592 temp = temp->next; 2593 } 2594 if (!found) { 2595 printf("Actions template #%u is invalid\n", 2596 actions_templates[i]); 2597 return -EINVAL; 2598 } 2599 } 2600 ret = table_alloc(id, &pt, &port->table_list); 2601 if (ret) 2602 return ret; 2603 /* Poisoning to make sure PMDs update it in case of error. */ 2604 memset(&error, 0x22, sizeof(error)); 2605 pt->table = rte_flow_template_table_create(port_id, table_attr, 2606 flow_pattern_templates, nb_pattern_templates, 2607 flow_actions_templates, nb_actions_templates, 2608 &error); 2609 2610 if (!pt->table) { 2611 uint32_t destroy_id = pt->id; 2612 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2613 return port_flow_complain(&error); 2614 } 2615 pt->nb_pattern_templates = nb_pattern_templates; 2616 pt->nb_actions_templates = nb_actions_templates; 2617 rte_memcpy(&pt->flow_attr, &table_attr->flow_attr, 2618 sizeof(struct rte_flow_attr)); 2619 printf("Template table #%u created\n", pt->id); 2620 return 0; 2621 } 2622 2623 /** Destroy table */ 2624 int 2625 port_flow_template_table_destroy(portid_t port_id, 2626 uint32_t n, const uint32_t *table) 2627 { 2628 struct rte_port *port; 2629 struct port_table **tmp; 2630 int ret = 0; 2631 2632 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2633 port_id == (portid_t)RTE_PORT_ALL) 2634 return -EINVAL; 2635 port = &ports[port_id]; 2636 tmp = &port->table_list; 2637 while (*tmp) { 2638 uint32_t i; 2639 2640 for (i = 0; i != n; ++i) { 2641 struct rte_flow_error error; 2642 struct port_table *pt = *tmp; 2643 2644 if (table[i] != pt->id) 2645 continue; 2646 /* 2647 * Poisoning to make sure PMDs update it in case 2648 * of error. 2649 */ 2650 memset(&error, 0x33, sizeof(error)); 2651 2652 if (pt->table && 2653 rte_flow_template_table_destroy(port_id, 2654 pt->table, 2655 &error)) { 2656 ret = port_flow_complain(&error); 2657 continue; 2658 } 2659 *tmp = pt->next; 2660 printf("Template table #%u destroyed\n", pt->id); 2661 free(pt); 2662 break; 2663 } 2664 if (i == n) 2665 tmp = &(*tmp)->next; 2666 } 2667 return ret; 2668 } 2669 2670 /** Flush table */ 2671 int 2672 port_flow_template_table_flush(portid_t port_id) 2673 { 2674 struct rte_port *port; 2675 struct port_table **tmp; 2676 int ret = 0; 2677 2678 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2679 port_id == (portid_t)RTE_PORT_ALL) 2680 return -EINVAL; 2681 port = &ports[port_id]; 2682 tmp = &port->table_list; 2683 while (*tmp) { 2684 struct rte_flow_error error; 2685 struct port_table *pt = *tmp; 2686 2687 /* 2688 * Poisoning to make sure PMDs update it in case 2689 * of error. 2690 */ 2691 memset(&error, 0x33, sizeof(error)); 2692 2693 if (pt->table && 2694 rte_flow_template_table_destroy(port_id, 2695 pt->table, 2696 &error)) { 2697 ret = port_flow_complain(&error); 2698 printf("Template table #%u not destroyed\n", pt->id); 2699 tmp = &pt->next; 2700 } else { 2701 *tmp = pt->next; 2702 free(pt); 2703 } 2704 } 2705 return ret; 2706 } 2707 2708 /** Enqueue create flow rule operation. */ 2709 int 2710 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2711 bool postpone, uint32_t table_id, uint32_t rule_idx, 2712 uint32_t pattern_idx, uint32_t actions_idx, 2713 const struct rte_flow_item *pattern, 2714 const struct rte_flow_action *actions) 2715 { 2716 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2717 struct rte_flow *flow; 2718 struct rte_port *port; 2719 struct port_flow *pf; 2720 struct port_table *pt; 2721 uint32_t id = 0; 2722 bool found; 2723 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2724 struct rte_flow_action_age *age = age_action_get(actions); 2725 struct queue_job *job; 2726 2727 port = &ports[port_id]; 2728 if (port->flow_list) { 2729 if (port->flow_list->id == UINT32_MAX) { 2730 printf("Highest rule ID is already assigned," 2731 " delete it first"); 2732 return -ENOMEM; 2733 } 2734 id = port->flow_list->id + 1; 2735 } 2736 2737 if (queue_id >= port->queue_nb) { 2738 printf("Queue #%u is invalid\n", queue_id); 2739 return -EINVAL; 2740 } 2741 2742 found = false; 2743 pt = port->table_list; 2744 while (pt) { 2745 if (table_id == pt->id) { 2746 found = true; 2747 break; 2748 } 2749 pt = pt->next; 2750 } 2751 if (!found) { 2752 printf("Table #%u is invalid\n", table_id); 2753 return -EINVAL; 2754 } 2755 2756 if (pattern_idx >= pt->nb_pattern_templates) { 2757 printf("Pattern template index #%u is invalid," 2758 " %u templates present in the table\n", 2759 pattern_idx, pt->nb_pattern_templates); 2760 return -EINVAL; 2761 } 2762 if (actions_idx >= pt->nb_actions_templates) { 2763 printf("Actions template index #%u is invalid," 2764 " %u templates present in the table\n", 2765 actions_idx, pt->nb_actions_templates); 2766 return -EINVAL; 2767 } 2768 2769 job = calloc(1, sizeof(*job)); 2770 if (!job) { 2771 printf("Queue flow create job allocate failed\n"); 2772 return -ENOMEM; 2773 } 2774 job->type = QUEUE_JOB_TYPE_FLOW_CREATE; 2775 2776 pf = port_flow_new(&pt->flow_attr, pattern, actions, &error); 2777 if (!pf) { 2778 free(job); 2779 return port_flow_complain(&error); 2780 } 2781 if (age) { 2782 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2783 age->context = &pf->age_type; 2784 } 2785 /* Poisoning to make sure PMDs update it in case of error. */ 2786 memset(&error, 0x11, sizeof(error)); 2787 if (rule_idx == UINT32_MAX) 2788 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2789 pattern, pattern_idx, actions, actions_idx, job, &error); 2790 else 2791 flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table, 2792 rule_idx, actions, actions_idx, job, &error); 2793 if (!flow) { 2794 uint64_t flow_id = pf->id; 2795 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2796 free(job); 2797 return port_flow_complain(&error); 2798 } 2799 2800 pf->next = port->flow_list; 2801 pf->id = id; 2802 pf->table = pt; 2803 pf->flow = flow; 2804 job->pf = pf; 2805 port->flow_list = pf; 2806 printf("Flow rule #%"PRIu64" creation enqueued\n", pf->id); 2807 return 0; 2808 } 2809 2810 /** Enqueue number of destroy flow rules operations. */ 2811 int 2812 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2813 bool postpone, uint32_t n, const uint64_t *rule) 2814 { 2815 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2816 struct rte_port *port; 2817 struct port_flow **tmp; 2818 int ret = 0; 2819 struct queue_job *job; 2820 2821 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2822 port_id == (portid_t)RTE_PORT_ALL) 2823 return -EINVAL; 2824 port = &ports[port_id]; 2825 2826 if (queue_id >= port->queue_nb) { 2827 printf("Queue #%u is invalid\n", queue_id); 2828 return -EINVAL; 2829 } 2830 2831 tmp = &port->flow_list; 2832 while (*tmp) { 2833 uint32_t i; 2834 2835 for (i = 0; i != n; ++i) { 2836 struct rte_flow_error error; 2837 struct port_flow *pf = *tmp; 2838 2839 if (rule[i] != pf->id) 2840 continue; 2841 /* 2842 * Poisoning to make sure PMD 2843 * update it in case of error. 2844 */ 2845 memset(&error, 0x33, sizeof(error)); 2846 job = calloc(1, sizeof(*job)); 2847 if (!job) { 2848 printf("Queue flow destroy job allocate failed\n"); 2849 return -ENOMEM; 2850 } 2851 job->type = QUEUE_JOB_TYPE_FLOW_DESTROY; 2852 job->pf = pf; 2853 2854 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2855 pf->flow, job, &error)) { 2856 free(job); 2857 ret = port_flow_complain(&error); 2858 continue; 2859 } 2860 printf("Flow rule #%"PRIu64" destruction enqueued\n", 2861 pf->id); 2862 *tmp = pf->next; 2863 break; 2864 } 2865 if (i == n) 2866 tmp = &(*tmp)->next; 2867 } 2868 return ret; 2869 } 2870 2871 static void 2872 queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2873 struct port_indirect_action *pia, 2874 struct queue_job *job, 2875 const struct rte_flow_op_attr *attr, 2876 const struct rte_flow_indir_action_conf *conf, 2877 const struct rte_flow_action *action, 2878 struct rte_flow_error *error) 2879 { 2880 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2881 struct rte_flow_action_age *age = 2882 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2883 2884 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2885 age->context = &pia->age_type; 2886 } 2887 /* Poisoning to make sure PMDs update it in case of error. */ 2888 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2889 attr, conf, action, 2890 job, error); 2891 pia->type = action->type; 2892 } 2893 2894 static void 2895 queue_action_list_handle_create(portid_t port_id, uint32_t queue_id, 2896 struct port_indirect_action *pia, 2897 struct queue_job *job, 2898 const struct rte_flow_op_attr *attr, 2899 const struct rte_flow_indir_action_conf *conf, 2900 const struct rte_flow_action *action, 2901 struct rte_flow_error *error) 2902 { 2903 /* Poisoning to make sure PMDs update it in case of error. */ 2904 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 2905 pia->list_handle = rte_flow_async_action_list_handle_create 2906 (port_id, queue_id, attr, conf, action, 2907 job, error); 2908 } 2909 2910 /** Enqueue update flow rule operation. */ 2911 int 2912 port_queue_flow_update(portid_t port_id, queueid_t queue_id, 2913 bool postpone, uint32_t rule_idx, uint32_t actions_idx, 2914 const struct rte_flow_action *actions) 2915 { 2916 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2917 struct rte_port *port; 2918 struct port_flow *pf, *uf; 2919 struct port_flow **tmp; 2920 struct port_table *pt; 2921 bool found; 2922 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2923 struct rte_flow_action_age *age = age_action_get(actions); 2924 struct queue_job *job; 2925 2926 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2927 port_id == (portid_t)RTE_PORT_ALL) 2928 return -EINVAL; 2929 port = &ports[port_id]; 2930 2931 if (queue_id >= port->queue_nb) { 2932 printf("Queue #%u is invalid\n", queue_id); 2933 return -EINVAL; 2934 } 2935 2936 found = false; 2937 tmp = &port->flow_list; 2938 while (*tmp) { 2939 pf = *tmp; 2940 if (rule_idx == pf->id) { 2941 found = true; 2942 break; 2943 } 2944 tmp = &(*tmp)->next; 2945 } 2946 if (!found) { 2947 printf("Flow rule #%u is invalid\n", rule_idx); 2948 return -EINVAL; 2949 } 2950 2951 pt = pf->table; 2952 if (actions_idx >= pt->nb_actions_templates) { 2953 printf("Actions template index #%u is invalid," 2954 " %u templates present in the table\n", 2955 actions_idx, pt->nb_actions_templates); 2956 return -EINVAL; 2957 } 2958 2959 job = calloc(1, sizeof(*job)); 2960 if (!job) { 2961 printf("Queue flow create job allocate failed\n"); 2962 return -ENOMEM; 2963 } 2964 job->type = QUEUE_JOB_TYPE_FLOW_UPDATE; 2965 2966 uf = port_flow_new(&pt->flow_attr, pf->rule.pattern_ro, actions, &error); 2967 if (!uf) { 2968 free(job); 2969 return port_flow_complain(&error); 2970 } 2971 2972 if (age) { 2973 uf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2974 age->context = &uf->age_type; 2975 } 2976 2977 /* 2978 * Poisoning to make sure PMD update it in case of error. 2979 */ 2980 memset(&error, 0x44, sizeof(error)); 2981 if (rte_flow_async_actions_update(port_id, queue_id, &op_attr, pf->flow, 2982 actions, actions_idx, job, &error)) { 2983 free(uf); 2984 free(job); 2985 return port_flow_complain(&error); 2986 } 2987 uf->next = pf->next; 2988 uf->id = pf->id; 2989 uf->table = pt; 2990 uf->flow = pf->flow; 2991 *tmp = uf; 2992 job->pf = pf; 2993 2994 printf("Flow rule #%"PRIu64" update enqueued\n", pf->id); 2995 return 0; 2996 } 2997 2998 /** Enqueue indirect action create operation. */ 2999 int 3000 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 3001 bool postpone, uint32_t id, 3002 const struct rte_flow_indir_action_conf *conf, 3003 const struct rte_flow_action *action) 3004 { 3005 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3006 struct rte_port *port; 3007 struct port_indirect_action *pia; 3008 int ret; 3009 struct rte_flow_error error; 3010 struct queue_job *job; 3011 bool is_indirect_list = action[1].type != RTE_FLOW_ACTION_TYPE_END; 3012 3013 3014 ret = action_alloc(port_id, id, &pia); 3015 if (ret) 3016 return ret; 3017 3018 port = &ports[port_id]; 3019 if (queue_id >= port->queue_nb) { 3020 printf("Queue #%u is invalid\n", queue_id); 3021 return -EINVAL; 3022 } 3023 job = calloc(1, sizeof(*job)); 3024 if (!job) { 3025 printf("Queue action create job allocate failed\n"); 3026 return -ENOMEM; 3027 } 3028 job->type = QUEUE_JOB_TYPE_ACTION_CREATE; 3029 job->pia = pia; 3030 3031 /* Poisoning to make sure PMDs update it in case of error. */ 3032 memset(&error, 0x88, sizeof(error)); 3033 3034 if (is_indirect_list) 3035 queue_action_list_handle_create(port_id, queue_id, pia, job, 3036 &attr, conf, action, &error); 3037 else 3038 queue_action_handle_create(port_id, queue_id, pia, job, &attr, 3039 conf, action, &error); 3040 3041 if (!pia->handle) { 3042 uint32_t destroy_id = pia->id; 3043 port_queue_action_handle_destroy(port_id, queue_id, 3044 postpone, 1, &destroy_id); 3045 free(job); 3046 return port_flow_complain(&error); 3047 } 3048 printf("Indirect action #%u creation queued\n", pia->id); 3049 return 0; 3050 } 3051 3052 /** Enqueue indirect action destroy operation. */ 3053 int 3054 port_queue_action_handle_destroy(portid_t port_id, 3055 uint32_t queue_id, bool postpone, 3056 uint32_t n, const uint32_t *actions) 3057 { 3058 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3059 struct rte_port *port; 3060 struct port_indirect_action **tmp; 3061 int ret = 0; 3062 struct queue_job *job; 3063 3064 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3065 port_id == (portid_t)RTE_PORT_ALL) 3066 return -EINVAL; 3067 port = &ports[port_id]; 3068 3069 if (queue_id >= port->queue_nb) { 3070 printf("Queue #%u is invalid\n", queue_id); 3071 return -EINVAL; 3072 } 3073 3074 tmp = &port->actions_list; 3075 while (*tmp) { 3076 uint32_t i; 3077 3078 for (i = 0; i != n; ++i) { 3079 struct rte_flow_error error; 3080 struct port_indirect_action *pia = *tmp; 3081 3082 if (actions[i] != pia->id) 3083 continue; 3084 /* 3085 * Poisoning to make sure PMDs update it in case 3086 * of error. 3087 */ 3088 memset(&error, 0x99, sizeof(error)); 3089 job = calloc(1, sizeof(*job)); 3090 if (!job) { 3091 printf("Queue action destroy job allocate failed\n"); 3092 return -ENOMEM; 3093 } 3094 job->type = QUEUE_JOB_TYPE_ACTION_DESTROY; 3095 job->pia = pia; 3096 ret = pia->type == RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 3097 rte_flow_async_action_list_handle_destroy 3098 (port_id, queue_id, 3099 &attr, pia->list_handle, 3100 job, &error) : 3101 rte_flow_async_action_handle_destroy 3102 (port_id, queue_id, &attr, pia->handle, 3103 job, &error); 3104 if (ret) { 3105 free(job); 3106 ret = port_flow_complain(&error); 3107 continue; 3108 } 3109 *tmp = pia->next; 3110 printf("Indirect action #%u destruction queued\n", 3111 pia->id); 3112 break; 3113 } 3114 if (i == n) 3115 tmp = &(*tmp)->next; 3116 } 3117 return ret; 3118 } 3119 3120 /** Enqueue indirect action update operation. */ 3121 int 3122 port_queue_action_handle_update(portid_t port_id, 3123 uint32_t queue_id, bool postpone, uint32_t id, 3124 const struct rte_flow_action *action) 3125 { 3126 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3127 struct rte_port *port; 3128 struct rte_flow_error error; 3129 struct rte_flow_action_handle *action_handle; 3130 struct queue_job *job; 3131 struct port_indirect_action *pia; 3132 struct rte_flow_update_meter_mark mtr_update; 3133 const void *update; 3134 3135 action_handle = port_action_handle_get_by_id(port_id, id); 3136 if (!action_handle) 3137 return -EINVAL; 3138 3139 port = &ports[port_id]; 3140 if (queue_id >= port->queue_nb) { 3141 printf("Queue #%u is invalid\n", queue_id); 3142 return -EINVAL; 3143 } 3144 3145 job = calloc(1, sizeof(*job)); 3146 if (!job) { 3147 printf("Queue action update job allocate failed\n"); 3148 return -ENOMEM; 3149 } 3150 job->type = QUEUE_JOB_TYPE_ACTION_UPDATE; 3151 3152 pia = action_get_by_id(port_id, id); 3153 if (!pia) { 3154 free(job); 3155 return -EINVAL; 3156 } 3157 3158 switch (pia->type) { 3159 case RTE_FLOW_ACTION_TYPE_AGE: 3160 update = action->conf; 3161 break; 3162 case RTE_FLOW_ACTION_TYPE_METER_MARK: 3163 rte_memcpy(&mtr_update.meter_mark, action->conf, 3164 sizeof(struct rte_flow_action_meter_mark)); 3165 mtr_update.profile_valid = 1; 3166 mtr_update.policy_valid = 1; 3167 mtr_update.color_mode_valid = 1; 3168 mtr_update.init_color_valid = 1; 3169 mtr_update.state_valid = 1; 3170 update = &mtr_update; 3171 break; 3172 default: 3173 update = action; 3174 break; 3175 } 3176 3177 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 3178 action_handle, update, job, &error)) { 3179 free(job); 3180 return port_flow_complain(&error); 3181 } 3182 printf("Indirect action #%u update queued\n", id); 3183 return 0; 3184 } 3185 3186 void 3187 port_queue_action_handle_query_update(portid_t port_id, 3188 uint32_t queue_id, bool postpone, 3189 uint32_t id, 3190 enum rte_flow_query_update_mode qu_mode, 3191 const struct rte_flow_action *action) 3192 { 3193 int ret; 3194 struct rte_flow_error error; 3195 struct port_indirect_action *pia = action_get_by_id(port_id, id); 3196 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3197 struct queue_job *job; 3198 3199 if (!pia || !pia->handle) 3200 return; 3201 job = calloc(1, sizeof(*job)); 3202 if (!job) 3203 return; 3204 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3205 job->pia = pia; 3206 3207 ret = rte_flow_async_action_handle_query_update(port_id, queue_id, 3208 &attr, pia->handle, 3209 action, 3210 &job->query, 3211 qu_mode, job, 3212 &error); 3213 if (ret) { 3214 port_flow_complain(&error); 3215 free(job); 3216 } else { 3217 printf("port-%u: indirect action #%u update-and-query queued\n", 3218 port_id, id); 3219 } 3220 } 3221 3222 /** Enqueue indirect action query operation. */ 3223 int 3224 port_queue_action_handle_query(portid_t port_id, 3225 uint32_t queue_id, bool postpone, uint32_t id) 3226 { 3227 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3228 struct rte_port *port; 3229 struct rte_flow_error error; 3230 struct rte_flow_action_handle *action_handle; 3231 struct port_indirect_action *pia; 3232 struct queue_job *job; 3233 3234 pia = action_get_by_id(port_id, id); 3235 action_handle = pia ? pia->handle : NULL; 3236 if (!action_handle) 3237 return -EINVAL; 3238 3239 port = &ports[port_id]; 3240 if (queue_id >= port->queue_nb) { 3241 printf("Queue #%u is invalid\n", queue_id); 3242 return -EINVAL; 3243 } 3244 3245 job = calloc(1, sizeof(*job)); 3246 if (!job) { 3247 printf("Queue action update job allocate failed\n"); 3248 return -ENOMEM; 3249 } 3250 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3251 job->pia = pia; 3252 3253 if (rte_flow_async_action_handle_query(port_id, queue_id, &attr, 3254 action_handle, &job->query, job, &error)) { 3255 free(job); 3256 return port_flow_complain(&error); 3257 } 3258 printf("Indirect action #%u update queued\n", id); 3259 return 0; 3260 } 3261 3262 /** Push all the queue operations in the queue to the NIC. */ 3263 int 3264 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 3265 { 3266 struct rte_port *port; 3267 struct rte_flow_error error; 3268 int ret = 0; 3269 3270 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3271 port_id == (portid_t)RTE_PORT_ALL) 3272 return -EINVAL; 3273 port = &ports[port_id]; 3274 3275 if (queue_id >= port->queue_nb) { 3276 printf("Queue #%u is invalid\n", queue_id); 3277 return -EINVAL; 3278 } 3279 3280 memset(&error, 0x55, sizeof(error)); 3281 ret = rte_flow_push(port_id, queue_id, &error); 3282 if (ret < 0) { 3283 printf("Failed to push operations in the queue\n"); 3284 return -EINVAL; 3285 } 3286 printf("Queue #%u operations pushed\n", queue_id); 3287 return ret; 3288 } 3289 3290 /** Pull queue operation results from the queue. */ 3291 static int 3292 port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id, 3293 const uint64_t *rule, int nb_flows) 3294 { 3295 struct rte_port *port = &ports[port_id]; 3296 struct rte_flow_op_result *res; 3297 struct rte_flow_error error; 3298 uint32_t n = nb_flows; 3299 int ret = 0; 3300 int i; 3301 3302 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3303 if (!res) { 3304 printf("Failed to allocate memory for pulled results\n"); 3305 return -ENOMEM; 3306 } 3307 3308 memset(&error, 0x66, sizeof(error)); 3309 while (nb_flows > 0) { 3310 int success = 0; 3311 3312 if (n > port->queue_sz) 3313 n = port->queue_sz; 3314 ret = port_queue_flow_destroy(port_id, queue_id, true, n, rule); 3315 if (ret < 0) { 3316 free(res); 3317 return ret; 3318 } 3319 ret = rte_flow_push(port_id, queue_id, &error); 3320 if (ret < 0) { 3321 printf("Failed to push operations in the queue: %s\n", 3322 strerror(-ret)); 3323 free(res); 3324 return ret; 3325 } 3326 while (success < nb_flows) { 3327 ret = rte_flow_pull(port_id, queue_id, res, 3328 port->queue_sz, &error); 3329 if (ret < 0) { 3330 printf("Failed to pull a operation results: %s\n", 3331 strerror(-ret)); 3332 free(res); 3333 return ret; 3334 } 3335 3336 for (i = 0; i < ret; i++) { 3337 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3338 success++; 3339 } 3340 } 3341 rule += n; 3342 nb_flows -= n; 3343 n = nb_flows; 3344 } 3345 3346 free(res); 3347 return ret; 3348 } 3349 3350 /** List simply and destroy all aged flows per queue. */ 3351 void 3352 port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy) 3353 { 3354 void **contexts; 3355 int nb_context, total = 0, idx; 3356 uint64_t *rules = NULL; 3357 struct rte_port *port; 3358 struct rte_flow_error error; 3359 enum age_action_context_type *type; 3360 union { 3361 struct port_flow *pf; 3362 struct port_indirect_action *pia; 3363 } ctx; 3364 3365 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3366 port_id == (portid_t)RTE_PORT_ALL) 3367 return; 3368 port = &ports[port_id]; 3369 if (queue_id >= port->queue_nb) { 3370 printf("Error: queue #%u is invalid\n", queue_id); 3371 return; 3372 } 3373 total = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error); 3374 if (total < 0) { 3375 port_flow_complain(&error); 3376 return; 3377 } 3378 printf("Port %u queue %u total aged flows: %d\n", 3379 port_id, queue_id, total); 3380 if (total == 0) 3381 return; 3382 contexts = calloc(total, sizeof(void *)); 3383 if (contexts == NULL) { 3384 printf("Cannot allocate contexts for aged flow\n"); 3385 return; 3386 } 3387 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3388 nb_context = rte_flow_get_q_aged_flows(port_id, queue_id, contexts, 3389 total, &error); 3390 if (nb_context > total) { 3391 printf("Port %u queue %u get aged flows count(%d) > total(%d)\n", 3392 port_id, queue_id, nb_context, total); 3393 free(contexts); 3394 return; 3395 } 3396 if (destroy) { 3397 rules = malloc(sizeof(uint32_t) * nb_context); 3398 if (rules == NULL) 3399 printf("Cannot allocate memory for destroy aged flow\n"); 3400 } 3401 total = 0; 3402 for (idx = 0; idx < nb_context; idx++) { 3403 if (!contexts[idx]) { 3404 printf("Error: get Null context in port %u queue %u\n", 3405 port_id, queue_id); 3406 continue; 3407 } 3408 type = (enum age_action_context_type *)contexts[idx]; 3409 switch (*type) { 3410 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3411 ctx.pf = container_of(type, struct port_flow, age_type); 3412 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3413 "\t%c%c%c\t\n", 3414 "Flow", 3415 ctx.pf->id, 3416 ctx.pf->rule.attr->group, 3417 ctx.pf->rule.attr->priority, 3418 ctx.pf->rule.attr->ingress ? 'i' : '-', 3419 ctx.pf->rule.attr->egress ? 'e' : '-', 3420 ctx.pf->rule.attr->transfer ? 't' : '-'); 3421 if (rules != NULL) { 3422 rules[total] = ctx.pf->id; 3423 total++; 3424 } 3425 break; 3426 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3427 ctx.pia = container_of(type, 3428 struct port_indirect_action, 3429 age_type); 3430 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3431 ctx.pia->id); 3432 break; 3433 default: 3434 printf("Error: invalid context type %u\n", port_id); 3435 break; 3436 } 3437 } 3438 if (rules != NULL) { 3439 port_queue_aged_flow_destroy(port_id, queue_id, rules, total); 3440 free(rules); 3441 } 3442 printf("\n%d flows destroyed\n", total); 3443 free(contexts); 3444 } 3445 3446 /** Pull queue operation results from the queue. */ 3447 int 3448 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 3449 { 3450 struct rte_port *port; 3451 struct rte_flow_op_result *res; 3452 struct rte_flow_error error; 3453 int ret = 0; 3454 int success = 0; 3455 int i; 3456 struct queue_job *job; 3457 3458 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3459 port_id == (portid_t)RTE_PORT_ALL) 3460 return -EINVAL; 3461 port = &ports[port_id]; 3462 3463 if (queue_id >= port->queue_nb) { 3464 printf("Queue #%u is invalid\n", queue_id); 3465 return -EINVAL; 3466 } 3467 3468 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3469 if (!res) { 3470 printf("Failed to allocate memory for pulled results\n"); 3471 return -ENOMEM; 3472 } 3473 3474 memset(&error, 0x66, sizeof(error)); 3475 ret = rte_flow_pull(port_id, queue_id, res, 3476 port->queue_sz, &error); 3477 if (ret < 0) { 3478 printf("Failed to pull a operation results\n"); 3479 free(res); 3480 return -EINVAL; 3481 } 3482 3483 for (i = 0; i < ret; i++) { 3484 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3485 success++; 3486 job = (struct queue_job *)res[i].user_data; 3487 if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY || 3488 job->type == QUEUE_JOB_TYPE_FLOW_UPDATE) 3489 free(job->pf); 3490 else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY) 3491 free(job->pia); 3492 else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY) 3493 port_action_handle_query_dump(port_id, job->pia, 3494 &job->query); 3495 free(job); 3496 } 3497 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 3498 queue_id, ret, ret - success, success); 3499 free(res); 3500 return ret; 3501 } 3502 3503 /** Create flow rule. */ 3504 int 3505 port_flow_create(portid_t port_id, 3506 const struct rte_flow_attr *attr, 3507 const struct rte_flow_item *pattern, 3508 const struct rte_flow_action *actions, 3509 const struct tunnel_ops *tunnel_ops, 3510 uintptr_t user_id) 3511 { 3512 struct rte_flow *flow; 3513 struct rte_port *port; 3514 struct port_flow *pf; 3515 uint32_t id = 0; 3516 struct rte_flow_error error; 3517 struct port_flow_tunnel *pft = NULL; 3518 struct rte_flow_action_age *age = age_action_get(actions); 3519 3520 port = &ports[port_id]; 3521 if (port->flow_list) { 3522 if (port->flow_list->id == UINT32_MAX) { 3523 fprintf(stderr, 3524 "Highest rule ID is already assigned, delete it first"); 3525 return -ENOMEM; 3526 } 3527 id = port->flow_list->id + 1; 3528 } 3529 if (tunnel_ops->enabled) { 3530 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 3531 actions, tunnel_ops); 3532 if (!pft) 3533 return -ENOENT; 3534 if (pft->items) 3535 pattern = pft->items; 3536 if (pft->actions) 3537 actions = pft->actions; 3538 } 3539 pf = port_flow_new(attr, pattern, actions, &error); 3540 if (!pf) 3541 return port_flow_complain(&error); 3542 if (age) { 3543 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 3544 age->context = &pf->age_type; 3545 } 3546 /* Poisoning to make sure PMDs update it in case of error. */ 3547 memset(&error, 0x22, sizeof(error)); 3548 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 3549 if (!flow) { 3550 if (tunnel_ops->enabled) 3551 port_flow_tunnel_offload_cmd_release(port_id, 3552 tunnel_ops, pft); 3553 free(pf); 3554 return port_flow_complain(&error); 3555 } 3556 pf->next = port->flow_list; 3557 pf->id = id; 3558 pf->user_id = user_id; 3559 pf->flow = flow; 3560 port->flow_list = pf; 3561 if (tunnel_ops->enabled) 3562 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 3563 if (user_id) 3564 printf("Flow rule #%"PRIu64" created, user-id 0x%"PRIx64"\n", 3565 pf->id, pf->user_id); 3566 else 3567 printf("Flow rule #%"PRIu64" created\n", pf->id); 3568 return 0; 3569 } 3570 3571 /** Destroy a number of flow rules. */ 3572 int 3573 port_flow_destroy(portid_t port_id, uint32_t n, const uint64_t *rule, 3574 bool is_user_id) 3575 { 3576 struct rte_port *port; 3577 struct port_flow **tmp; 3578 int ret = 0; 3579 3580 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3581 port_id == (portid_t)RTE_PORT_ALL) 3582 return -EINVAL; 3583 port = &ports[port_id]; 3584 tmp = &port->flow_list; 3585 while (*tmp) { 3586 uint32_t i; 3587 3588 for (i = 0; i != n; ++i) { 3589 struct rte_flow_error error; 3590 struct port_flow *pf = *tmp; 3591 3592 if (rule[i] != (is_user_id ? pf->user_id : pf->id)) 3593 continue; 3594 /* 3595 * Poisoning to make sure PMDs update it in case 3596 * of error. 3597 */ 3598 memset(&error, 0x33, sizeof(error)); 3599 if (rte_flow_destroy(port_id, pf->flow, &error)) { 3600 ret = port_flow_complain(&error); 3601 continue; 3602 } 3603 if (is_user_id) 3604 printf("Flow rule #%"PRIu64" destroyed, " 3605 "user-id 0x%"PRIx64"\n", 3606 pf->id, pf->user_id); 3607 else 3608 printf("Flow rule #%"PRIu64" destroyed\n", 3609 pf->id); 3610 *tmp = pf->next; 3611 free(pf); 3612 break; 3613 } 3614 if (i == n) 3615 tmp = &(*tmp)->next; 3616 } 3617 return ret; 3618 } 3619 3620 /** Remove all flow rules. */ 3621 int 3622 port_flow_flush(portid_t port_id) 3623 { 3624 struct rte_flow_error error; 3625 struct rte_port *port; 3626 int ret = 0; 3627 3628 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3629 port_id == (portid_t)RTE_PORT_ALL) 3630 return -EINVAL; 3631 3632 port = &ports[port_id]; 3633 3634 if (port->flow_list == NULL) 3635 return ret; 3636 3637 /* Poisoning to make sure PMDs update it in case of error. */ 3638 memset(&error, 0x44, sizeof(error)); 3639 if (rte_flow_flush(port_id, &error)) { 3640 port_flow_complain(&error); 3641 } 3642 3643 while (port->flow_list) { 3644 struct port_flow *pf = port->flow_list->next; 3645 3646 free(port->flow_list); 3647 port->flow_list = pf; 3648 } 3649 return ret; 3650 } 3651 3652 /** Dump flow rules. */ 3653 int 3654 port_flow_dump(portid_t port_id, bool dump_all, uint64_t rule_id, 3655 const char *file_name, bool is_user_id) 3656 { 3657 int ret = 0; 3658 FILE *file = stdout; 3659 struct rte_flow_error error; 3660 struct rte_port *port; 3661 struct port_flow *pflow; 3662 struct rte_flow *tmpFlow = NULL; 3663 bool found = false; 3664 3665 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3666 port_id == (portid_t)RTE_PORT_ALL) 3667 return -EINVAL; 3668 3669 if (!dump_all) { 3670 port = &ports[port_id]; 3671 pflow = port->flow_list; 3672 while (pflow) { 3673 if (rule_id != 3674 (is_user_id ? pflow->user_id : pflow->id)) { 3675 pflow = pflow->next; 3676 } else { 3677 tmpFlow = pflow->flow; 3678 if (tmpFlow) 3679 found = true; 3680 break; 3681 } 3682 } 3683 if (found == false) { 3684 fprintf(stderr, "Failed to dump to flow %"PRIu64"\n", 3685 rule_id); 3686 return -EINVAL; 3687 } 3688 } 3689 3690 if (file_name && strlen(file_name)) { 3691 file = fopen(file_name, "w"); 3692 if (!file) { 3693 fprintf(stderr, "Failed to create file %s: %s\n", 3694 file_name, strerror(errno)); 3695 return -errno; 3696 } 3697 } 3698 3699 if (!dump_all) 3700 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3701 else 3702 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3703 if (ret) { 3704 port_flow_complain(&error); 3705 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3706 } else 3707 printf("Flow dump finished\n"); 3708 if (file_name && strlen(file_name)) 3709 fclose(file); 3710 return ret; 3711 } 3712 3713 /** Query a flow rule. */ 3714 int 3715 port_flow_query(portid_t port_id, uint64_t rule, 3716 const struct rte_flow_action *action, bool is_user_id) 3717 { 3718 struct rte_flow_error error; 3719 struct rte_port *port; 3720 struct port_flow *pf; 3721 const char *name; 3722 union { 3723 struct rte_flow_query_count count; 3724 struct rte_flow_action_rss rss_conf; 3725 struct rte_flow_query_age age; 3726 } query; 3727 int ret; 3728 3729 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3730 port_id == (portid_t)RTE_PORT_ALL) 3731 return -EINVAL; 3732 port = &ports[port_id]; 3733 for (pf = port->flow_list; pf; pf = pf->next) 3734 if ((is_user_id ? pf->user_id : pf->id) == rule) 3735 break; 3736 if (!pf) { 3737 fprintf(stderr, "Flow rule #%"PRIu64" not found\n", rule); 3738 return -ENOENT; 3739 } 3740 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3741 &name, sizeof(name), 3742 (void *)(uintptr_t)action->type, &error); 3743 if (ret < 0) 3744 return port_flow_complain(&error); 3745 switch (action->type) { 3746 case RTE_FLOW_ACTION_TYPE_COUNT: 3747 case RTE_FLOW_ACTION_TYPE_RSS: 3748 case RTE_FLOW_ACTION_TYPE_AGE: 3749 break; 3750 default: 3751 fprintf(stderr, "Cannot query action type %d (%s)\n", 3752 action->type, name); 3753 return -ENOTSUP; 3754 } 3755 /* Poisoning to make sure PMDs update it in case of error. */ 3756 memset(&error, 0x55, sizeof(error)); 3757 memset(&query, 0, sizeof(query)); 3758 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3759 return port_flow_complain(&error); 3760 switch (action->type) { 3761 case RTE_FLOW_ACTION_TYPE_COUNT: 3762 printf("%s:\n" 3763 " hits_set: %u\n" 3764 " bytes_set: %u\n" 3765 " hits: %" PRIu64 "\n" 3766 " bytes: %" PRIu64 "\n", 3767 name, 3768 query.count.hits_set, 3769 query.count.bytes_set, 3770 query.count.hits, 3771 query.count.bytes); 3772 break; 3773 case RTE_FLOW_ACTION_TYPE_RSS: 3774 rss_config_display(&query.rss_conf); 3775 break; 3776 case RTE_FLOW_ACTION_TYPE_AGE: 3777 printf("%s:\n" 3778 " aged: %u\n" 3779 " sec_since_last_hit_valid: %u\n" 3780 " sec_since_last_hit: %" PRIu32 "\n", 3781 name, 3782 query.age.aged, 3783 query.age.sec_since_last_hit_valid, 3784 query.age.sec_since_last_hit); 3785 break; 3786 default: 3787 fprintf(stderr, 3788 "Cannot display result for action type %d (%s)\n", 3789 action->type, name); 3790 break; 3791 } 3792 return 0; 3793 } 3794 3795 /** List simply and destroy all aged flows. */ 3796 void 3797 port_flow_aged(portid_t port_id, uint8_t destroy) 3798 { 3799 void **contexts; 3800 int nb_context, total = 0, idx; 3801 struct rte_flow_error error; 3802 enum age_action_context_type *type; 3803 union { 3804 struct port_flow *pf; 3805 struct port_indirect_action *pia; 3806 } ctx; 3807 3808 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3809 port_id == (portid_t)RTE_PORT_ALL) 3810 return; 3811 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3812 printf("Port %u total aged flows: %d\n", port_id, total); 3813 if (total < 0) { 3814 port_flow_complain(&error); 3815 return; 3816 } 3817 if (total == 0) 3818 return; 3819 contexts = malloc(sizeof(void *) * total); 3820 if (contexts == NULL) { 3821 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3822 return; 3823 } 3824 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3825 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3826 if (nb_context != total) { 3827 fprintf(stderr, 3828 "Port:%d get aged flows count(%d) != total(%d)\n", 3829 port_id, nb_context, total); 3830 free(contexts); 3831 return; 3832 } 3833 total = 0; 3834 for (idx = 0; idx < nb_context; idx++) { 3835 if (!contexts[idx]) { 3836 fprintf(stderr, "Error: get Null context in port %u\n", 3837 port_id); 3838 continue; 3839 } 3840 type = (enum age_action_context_type *)contexts[idx]; 3841 switch (*type) { 3842 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3843 ctx.pf = container_of(type, struct port_flow, age_type); 3844 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3845 "\t%c%c%c\t\n", 3846 "Flow", 3847 ctx.pf->id, 3848 ctx.pf->rule.attr->group, 3849 ctx.pf->rule.attr->priority, 3850 ctx.pf->rule.attr->ingress ? 'i' : '-', 3851 ctx.pf->rule.attr->egress ? 'e' : '-', 3852 ctx.pf->rule.attr->transfer ? 't' : '-'); 3853 if (destroy && !port_flow_destroy(port_id, 1, 3854 &ctx.pf->id, false)) 3855 total++; 3856 break; 3857 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3858 ctx.pia = container_of(type, 3859 struct port_indirect_action, age_type); 3860 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3861 ctx.pia->id); 3862 break; 3863 default: 3864 fprintf(stderr, "Error: invalid context type %u\n", 3865 port_id); 3866 break; 3867 } 3868 } 3869 printf("\n%d flows destroyed\n", total); 3870 free(contexts); 3871 } 3872 3873 /** List flow rules. */ 3874 void 3875 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3876 { 3877 struct rte_port *port; 3878 struct port_flow *pf; 3879 struct port_flow *list = NULL; 3880 uint32_t i; 3881 3882 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3883 port_id == (portid_t)RTE_PORT_ALL) 3884 return; 3885 port = &ports[port_id]; 3886 if (!port->flow_list) 3887 return; 3888 /* Sort flows by group, priority and ID. */ 3889 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3890 struct port_flow **tmp; 3891 const struct rte_flow_attr *curr = pf->rule.attr; 3892 3893 if (n) { 3894 /* Filter out unwanted groups. */ 3895 for (i = 0; i != n; ++i) 3896 if (curr->group == group[i]) 3897 break; 3898 if (i == n) 3899 continue; 3900 } 3901 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3902 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3903 3904 if (curr->group > comp->group || 3905 (curr->group == comp->group && 3906 curr->priority > comp->priority) || 3907 (curr->group == comp->group && 3908 curr->priority == comp->priority && 3909 pf->id > (*tmp)->id)) 3910 continue; 3911 break; 3912 } 3913 pf->tmp = *tmp; 3914 *tmp = pf; 3915 } 3916 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3917 for (pf = list; pf != NULL; pf = pf->tmp) { 3918 const struct rte_flow_item *item = pf->rule.pattern; 3919 const struct rte_flow_action *action = pf->rule.actions; 3920 const char *name; 3921 3922 printf("%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3923 pf->id, 3924 pf->rule.attr->group, 3925 pf->rule.attr->priority, 3926 pf->rule.attr->ingress ? 'i' : '-', 3927 pf->rule.attr->egress ? 'e' : '-', 3928 pf->rule.attr->transfer ? 't' : '-'); 3929 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3930 if ((uint32_t)item->type > INT_MAX) 3931 name = "PMD_INTERNAL"; 3932 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3933 &name, sizeof(name), 3934 (void *)(uintptr_t)item->type, 3935 NULL) <= 0) 3936 name = "[UNKNOWN]"; 3937 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3938 printf("%s ", name); 3939 ++item; 3940 } 3941 printf("=>"); 3942 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3943 if ((uint32_t)action->type > INT_MAX) 3944 name = "PMD_INTERNAL"; 3945 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3946 &name, sizeof(name), 3947 (void *)(uintptr_t)action->type, 3948 NULL) <= 0) 3949 name = "[UNKNOWN]"; 3950 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3951 printf(" %s", name); 3952 ++action; 3953 } 3954 printf("\n"); 3955 } 3956 } 3957 3958 /** Restrict ingress traffic to the defined flow rules. */ 3959 int 3960 port_flow_isolate(portid_t port_id, int set) 3961 { 3962 struct rte_flow_error error; 3963 3964 /* Poisoning to make sure PMDs update it in case of error. */ 3965 memset(&error, 0x66, sizeof(error)); 3966 if (rte_flow_isolate(port_id, set, &error)) 3967 return port_flow_complain(&error); 3968 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3969 port_id, 3970 set ? "now restricted" : "not restricted anymore"); 3971 return 0; 3972 } 3973 3974 /* 3975 * RX/TX ring descriptors display functions. 3976 */ 3977 int 3978 rx_queue_id_is_invalid(queueid_t rxq_id) 3979 { 3980 if (rxq_id < nb_rxq) 3981 return 0; 3982 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3983 rxq_id, nb_rxq); 3984 return 1; 3985 } 3986 3987 int 3988 tx_queue_id_is_invalid(queueid_t txq_id) 3989 { 3990 if (txq_id < nb_txq) 3991 return 0; 3992 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 3993 txq_id, nb_txq); 3994 return 1; 3995 } 3996 3997 static int 3998 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 3999 { 4000 struct rte_port *port = &ports[port_id]; 4001 struct rte_eth_rxq_info rx_qinfo; 4002 int ret; 4003 4004 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 4005 if (ret == 0) { 4006 *ring_size = rx_qinfo.nb_desc; 4007 return ret; 4008 } 4009 4010 if (ret != -ENOTSUP) 4011 return ret; 4012 /* 4013 * If the rte_eth_rx_queue_info_get is not support for this PMD, 4014 * ring_size stored in testpmd will be used for validity verification. 4015 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 4016 * being 0, it will use a default value provided by PMDs to setup this 4017 * rxq. If the default value is 0, it will use the 4018 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 4019 */ 4020 if (port->nb_rx_desc[rxq_id]) 4021 *ring_size = port->nb_rx_desc[rxq_id]; 4022 else if (port->dev_info.default_rxportconf.ring_size) 4023 *ring_size = port->dev_info.default_rxportconf.ring_size; 4024 else 4025 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 4026 return 0; 4027 } 4028 4029 static int 4030 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 4031 { 4032 struct rte_port *port = &ports[port_id]; 4033 struct rte_eth_txq_info tx_qinfo; 4034 int ret; 4035 4036 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 4037 if (ret == 0) { 4038 *ring_size = tx_qinfo.nb_desc; 4039 return ret; 4040 } 4041 4042 if (ret != -ENOTSUP) 4043 return ret; 4044 /* 4045 * If the rte_eth_tx_queue_info_get is not support for this PMD, 4046 * ring_size stored in testpmd will be used for validity verification. 4047 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 4048 * being 0, it will use a default value provided by PMDs to setup this 4049 * txq. If the default value is 0, it will use the 4050 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 4051 */ 4052 if (port->nb_tx_desc[txq_id]) 4053 *ring_size = port->nb_tx_desc[txq_id]; 4054 else if (port->dev_info.default_txportconf.ring_size) 4055 *ring_size = port->dev_info.default_txportconf.ring_size; 4056 else 4057 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 4058 return 0; 4059 } 4060 4061 static int 4062 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 4063 { 4064 uint16_t ring_size; 4065 int ret; 4066 4067 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 4068 if (ret) 4069 return 1; 4070 4071 if (rxdesc_id < ring_size) 4072 return 0; 4073 4074 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 4075 rxdesc_id, ring_size); 4076 return 1; 4077 } 4078 4079 static int 4080 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 4081 { 4082 uint16_t ring_size; 4083 int ret; 4084 4085 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 4086 if (ret) 4087 return 1; 4088 4089 if (txdesc_id < ring_size) 4090 return 0; 4091 4092 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 4093 txdesc_id, ring_size); 4094 return 1; 4095 } 4096 4097 static const struct rte_memzone * 4098 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 4099 { 4100 char mz_name[RTE_MEMZONE_NAMESIZE]; 4101 const struct rte_memzone *mz; 4102 4103 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 4104 port_id, q_id, ring_name); 4105 mz = rte_memzone_lookup(mz_name); 4106 if (mz == NULL) 4107 fprintf(stderr, 4108 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 4109 ring_name, port_id, q_id, mz_name); 4110 return mz; 4111 } 4112 4113 union igb_ring_dword { 4114 uint64_t dword; 4115 struct { 4116 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 4117 uint32_t lo; 4118 uint32_t hi; 4119 #else 4120 uint32_t hi; 4121 uint32_t lo; 4122 #endif 4123 } words; 4124 }; 4125 4126 struct igb_ring_desc_32_bytes { 4127 union igb_ring_dword lo_dword; 4128 union igb_ring_dword hi_dword; 4129 union igb_ring_dword resv1; 4130 union igb_ring_dword resv2; 4131 }; 4132 4133 struct igb_ring_desc_16_bytes { 4134 union igb_ring_dword lo_dword; 4135 union igb_ring_dword hi_dword; 4136 }; 4137 4138 static void 4139 ring_rxd_display_dword(union igb_ring_dword dword) 4140 { 4141 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 4142 (unsigned)dword.words.hi); 4143 } 4144 4145 static void 4146 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 4147 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4148 portid_t port_id, 4149 #else 4150 __rte_unused portid_t port_id, 4151 #endif 4152 uint16_t desc_id) 4153 { 4154 struct igb_ring_desc_16_bytes *ring = 4155 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4156 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4157 int ret; 4158 struct rte_eth_dev_info dev_info; 4159 4160 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4161 if (ret != 0) 4162 return; 4163 4164 if (strstr(dev_info.driver_name, "i40e") != NULL) { 4165 /* 32 bytes RX descriptor, i40e only */ 4166 struct igb_ring_desc_32_bytes *ring = 4167 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 4168 ring[desc_id].lo_dword.dword = 4169 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4170 ring_rxd_display_dword(ring[desc_id].lo_dword); 4171 ring[desc_id].hi_dword.dword = 4172 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4173 ring_rxd_display_dword(ring[desc_id].hi_dword); 4174 ring[desc_id].resv1.dword = 4175 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 4176 ring_rxd_display_dword(ring[desc_id].resv1); 4177 ring[desc_id].resv2.dword = 4178 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 4179 ring_rxd_display_dword(ring[desc_id].resv2); 4180 4181 return; 4182 } 4183 #endif 4184 /* 16 bytes RX descriptor */ 4185 ring[desc_id].lo_dword.dword = 4186 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4187 ring_rxd_display_dword(ring[desc_id].lo_dword); 4188 ring[desc_id].hi_dword.dword = 4189 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4190 ring_rxd_display_dword(ring[desc_id].hi_dword); 4191 } 4192 4193 static void 4194 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 4195 { 4196 struct igb_ring_desc_16_bytes *ring; 4197 struct igb_ring_desc_16_bytes txd; 4198 4199 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4200 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4201 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4202 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 4203 (unsigned)txd.lo_dword.words.lo, 4204 (unsigned)txd.lo_dword.words.hi, 4205 (unsigned)txd.hi_dword.words.lo, 4206 (unsigned)txd.hi_dword.words.hi); 4207 } 4208 4209 void 4210 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 4211 { 4212 const struct rte_memzone *rx_mz; 4213 4214 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 4215 return; 4216 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 4217 if (rx_mz == NULL) 4218 return; 4219 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 4220 } 4221 4222 void 4223 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 4224 { 4225 const struct rte_memzone *tx_mz; 4226 4227 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 4228 return; 4229 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 4230 if (tx_mz == NULL) 4231 return; 4232 ring_tx_descriptor_display(tx_mz, txd_id); 4233 } 4234 4235 void 4236 fwd_lcores_config_display(void) 4237 { 4238 lcoreid_t lc_id; 4239 4240 printf("List of forwarding lcores:"); 4241 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 4242 printf(" %2u", fwd_lcores_cpuids[lc_id]); 4243 printf("\n"); 4244 } 4245 void 4246 rxtx_config_display(void) 4247 { 4248 portid_t pid; 4249 queueid_t qid; 4250 4251 printf(" %s%s%s packet forwarding%s packets/burst=%d\n", 4252 cur_fwd_eng->fwd_mode_name, 4253 cur_fwd_eng->status ? "-" : "", 4254 cur_fwd_eng->status ? cur_fwd_eng->status : "", 4255 retry_enabled == 0 ? "" : " with retry", 4256 nb_pkt_per_burst); 4257 4258 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 4259 printf(" packet len=%u - nb packet segments=%d\n", 4260 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 4261 4262 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 4263 nb_fwd_lcores, nb_fwd_ports); 4264 4265 RTE_ETH_FOREACH_DEV(pid) { 4266 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 4267 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 4268 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 4269 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 4270 struct rte_eth_rxq_info rx_qinfo; 4271 struct rte_eth_txq_info tx_qinfo; 4272 uint16_t rx_free_thresh_tmp; 4273 uint16_t tx_free_thresh_tmp; 4274 uint16_t tx_rs_thresh_tmp; 4275 uint16_t nb_rx_desc_tmp; 4276 uint16_t nb_tx_desc_tmp; 4277 uint64_t offloads_tmp; 4278 uint8_t pthresh_tmp; 4279 uint8_t hthresh_tmp; 4280 uint8_t wthresh_tmp; 4281 int32_t rc; 4282 4283 /* per port config */ 4284 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 4285 (unsigned int)pid, nb_rxq, nb_txq); 4286 4287 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 4288 ports[pid].dev_conf.rxmode.offloads, 4289 ports[pid].dev_conf.txmode.offloads); 4290 4291 /* per rx queue config only for first queue to be less verbose */ 4292 for (qid = 0; qid < 1; qid++) { 4293 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 4294 if (rc) { 4295 nb_rx_desc_tmp = nb_rx_desc[qid]; 4296 rx_free_thresh_tmp = 4297 rx_conf[qid].rx_free_thresh; 4298 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 4299 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 4300 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 4301 offloads_tmp = rx_conf[qid].offloads; 4302 } else { 4303 nb_rx_desc_tmp = rx_qinfo.nb_desc; 4304 rx_free_thresh_tmp = 4305 rx_qinfo.conf.rx_free_thresh; 4306 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 4307 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 4308 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 4309 offloads_tmp = rx_qinfo.conf.offloads; 4310 } 4311 4312 printf(" RX queue: %d\n", qid); 4313 printf(" RX desc=%d - RX free threshold=%d\n", 4314 nb_rx_desc_tmp, rx_free_thresh_tmp); 4315 printf(" RX threshold registers: pthresh=%d hthresh=%d " 4316 " wthresh=%d\n", 4317 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4318 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 4319 if (rx_conf->share_group > 0) 4320 printf(" share_group=%u share_qid=%u", 4321 rx_conf->share_group, 4322 rx_conf->share_qid); 4323 printf("\n"); 4324 } 4325 4326 /* per tx queue config only for first queue to be less verbose */ 4327 for (qid = 0; qid < 1; qid++) { 4328 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 4329 if (rc) { 4330 nb_tx_desc_tmp = nb_tx_desc[qid]; 4331 tx_free_thresh_tmp = 4332 tx_conf[qid].tx_free_thresh; 4333 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 4334 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 4335 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 4336 offloads_tmp = tx_conf[qid].offloads; 4337 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 4338 } else { 4339 nb_tx_desc_tmp = tx_qinfo.nb_desc; 4340 tx_free_thresh_tmp = 4341 tx_qinfo.conf.tx_free_thresh; 4342 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 4343 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 4344 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 4345 offloads_tmp = tx_qinfo.conf.offloads; 4346 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 4347 } 4348 4349 printf(" TX queue: %d\n", qid); 4350 printf(" TX desc=%d - TX free threshold=%d\n", 4351 nb_tx_desc_tmp, tx_free_thresh_tmp); 4352 printf(" TX threshold registers: pthresh=%d hthresh=%d " 4353 " wthresh=%d\n", 4354 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4355 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 4356 offloads_tmp, tx_rs_thresh_tmp); 4357 } 4358 } 4359 } 4360 4361 void 4362 port_rss_reta_info(portid_t port_id, 4363 struct rte_eth_rss_reta_entry64 *reta_conf, 4364 uint16_t nb_entries) 4365 { 4366 uint16_t i, idx, shift; 4367 int ret; 4368 4369 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4370 return; 4371 4372 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 4373 if (ret != 0) { 4374 fprintf(stderr, 4375 "Failed to get RSS RETA info, return code = %d\n", 4376 ret); 4377 return; 4378 } 4379 4380 for (i = 0; i < nb_entries; i++) { 4381 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4382 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4383 if (!(reta_conf[idx].mask & (1ULL << shift))) 4384 continue; 4385 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 4386 i, reta_conf[idx].reta[shift]); 4387 } 4388 } 4389 4390 /* 4391 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 4392 * key of the port. 4393 */ 4394 void 4395 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 4396 { 4397 struct rte_eth_rss_conf rss_conf = {0}; 4398 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 4399 uint64_t rss_hf; 4400 uint8_t i; 4401 int diag; 4402 struct rte_eth_dev_info dev_info; 4403 uint8_t hash_key_size; 4404 int ret; 4405 4406 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4407 return; 4408 4409 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4410 if (ret != 0) 4411 return; 4412 4413 if (dev_info.hash_key_size > 0 && 4414 dev_info.hash_key_size <= sizeof(rss_key)) 4415 hash_key_size = dev_info.hash_key_size; 4416 else { 4417 fprintf(stderr, 4418 "dev_info did not provide a valid hash key size\n"); 4419 return; 4420 } 4421 4422 /* Get RSS hash key if asked to display it */ 4423 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 4424 rss_conf.rss_key_len = hash_key_size; 4425 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4426 if (diag != 0) { 4427 switch (diag) { 4428 case -ENODEV: 4429 fprintf(stderr, "port index %d invalid\n", port_id); 4430 break; 4431 case -ENOTSUP: 4432 fprintf(stderr, "operation not supported by device\n"); 4433 break; 4434 default: 4435 fprintf(stderr, "operation failed - diag=%d\n", diag); 4436 break; 4437 } 4438 return; 4439 } 4440 rss_hf = rss_conf.rss_hf; 4441 if (rss_hf == 0) { 4442 printf("RSS disabled\n"); 4443 return; 4444 } 4445 printf("RSS functions:\n"); 4446 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 4447 if (!show_rss_key) 4448 return; 4449 printf("RSS key:\n"); 4450 for (i = 0; i < hash_key_size; i++) 4451 printf("%02X", rss_key[i]); 4452 printf("\n"); 4453 } 4454 4455 void 4456 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 4457 uint8_t hash_key_len) 4458 { 4459 struct rte_eth_rss_conf rss_conf; 4460 int diag; 4461 4462 rss_conf.rss_key = NULL; 4463 rss_conf.rss_key_len = 0; 4464 rss_conf.rss_hf = str_to_rsstypes(rss_type); 4465 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4466 if (diag == 0) { 4467 rss_conf.rss_key = hash_key; 4468 rss_conf.rss_key_len = hash_key_len; 4469 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 4470 } 4471 if (diag == 0) 4472 return; 4473 4474 switch (diag) { 4475 case -ENODEV: 4476 fprintf(stderr, "port index %d invalid\n", port_id); 4477 break; 4478 case -ENOTSUP: 4479 fprintf(stderr, "operation not supported by device\n"); 4480 break; 4481 default: 4482 fprintf(stderr, "operation failed - diag=%d\n", diag); 4483 break; 4484 } 4485 } 4486 4487 /* 4488 * Check whether a shared rxq scheduled on other lcores. 4489 */ 4490 static bool 4491 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 4492 portid_t src_port, queueid_t src_rxq, 4493 uint32_t share_group, queueid_t share_rxq) 4494 { 4495 streamid_t sm_id; 4496 streamid_t nb_fs_per_lcore; 4497 lcoreid_t nb_fc; 4498 lcoreid_t lc_id; 4499 struct fwd_stream *fs; 4500 struct rte_port *port; 4501 struct rte_eth_dev_info *dev_info; 4502 struct rte_eth_rxconf *rxq_conf; 4503 4504 nb_fc = cur_fwd_config.nb_fwd_lcores; 4505 /* Check remaining cores. */ 4506 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 4507 sm_id = fwd_lcores[lc_id]->stream_idx; 4508 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4509 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4510 sm_id++) { 4511 fs = fwd_streams[sm_id]; 4512 port = &ports[fs->rx_port]; 4513 dev_info = &port->dev_info; 4514 rxq_conf = &port->rxq[fs->rx_queue].conf; 4515 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4516 == 0 || rxq_conf->share_group == 0) 4517 /* Not shared rxq. */ 4518 continue; 4519 if (domain_id != port->dev_info.switch_info.domain_id) 4520 continue; 4521 if (rxq_conf->share_group != share_group) 4522 continue; 4523 if (rxq_conf->share_qid != share_rxq) 4524 continue; 4525 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 4526 share_group, share_rxq); 4527 printf(" lcore %hhu Port %hu queue %hu\n", 4528 src_lc, src_port, src_rxq); 4529 printf(" lcore %hhu Port %hu queue %hu\n", 4530 lc_id, fs->rx_port, fs->rx_queue); 4531 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 4532 nb_rxq); 4533 return true; 4534 } 4535 } 4536 return false; 4537 } 4538 4539 /* 4540 * Check shared rxq configuration. 4541 * 4542 * Shared group must not being scheduled on different core. 4543 */ 4544 bool 4545 pkt_fwd_shared_rxq_check(void) 4546 { 4547 streamid_t sm_id; 4548 streamid_t nb_fs_per_lcore; 4549 lcoreid_t nb_fc; 4550 lcoreid_t lc_id; 4551 struct fwd_stream *fs; 4552 uint16_t domain_id; 4553 struct rte_port *port; 4554 struct rte_eth_dev_info *dev_info; 4555 struct rte_eth_rxconf *rxq_conf; 4556 4557 if (rxq_share == 0) 4558 return true; 4559 nb_fc = cur_fwd_config.nb_fwd_lcores; 4560 /* 4561 * Check streams on each core, make sure the same switch domain + 4562 * group + queue doesn't get scheduled on other cores. 4563 */ 4564 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 4565 sm_id = fwd_lcores[lc_id]->stream_idx; 4566 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4567 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4568 sm_id++) { 4569 fs = fwd_streams[sm_id]; 4570 /* Update lcore info stream being scheduled. */ 4571 fs->lcore = fwd_lcores[lc_id]; 4572 port = &ports[fs->rx_port]; 4573 dev_info = &port->dev_info; 4574 rxq_conf = &port->rxq[fs->rx_queue].conf; 4575 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4576 == 0 || rxq_conf->share_group == 0) 4577 /* Not shared rxq. */ 4578 continue; 4579 /* Check shared rxq not scheduled on remaining cores. */ 4580 domain_id = port->dev_info.switch_info.domain_id; 4581 if (fwd_stream_on_other_lcores(domain_id, lc_id, 4582 fs->rx_port, 4583 fs->rx_queue, 4584 rxq_conf->share_group, 4585 rxq_conf->share_qid)) 4586 return false; 4587 } 4588 } 4589 return true; 4590 } 4591 4592 /* 4593 * Setup forwarding configuration for each logical core. 4594 */ 4595 static void 4596 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 4597 { 4598 streamid_t nb_fs_per_lcore; 4599 streamid_t nb_fs; 4600 streamid_t sm_id; 4601 lcoreid_t nb_extra; 4602 lcoreid_t nb_fc; 4603 lcoreid_t nb_lc; 4604 lcoreid_t lc_id; 4605 4606 nb_fs = cfg->nb_fwd_streams; 4607 nb_fc = cfg->nb_fwd_lcores; 4608 if (nb_fs <= nb_fc) { 4609 nb_fs_per_lcore = 1; 4610 nb_extra = 0; 4611 } else { 4612 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 4613 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 4614 } 4615 4616 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 4617 sm_id = 0; 4618 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 4619 fwd_lcores[lc_id]->stream_idx = sm_id; 4620 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 4621 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4622 } 4623 4624 /* 4625 * Assign extra remaining streams, if any. 4626 */ 4627 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 4628 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 4629 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 4630 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 4631 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4632 } 4633 } 4634 4635 static portid_t 4636 fwd_topology_tx_port_get(portid_t rxp) 4637 { 4638 static int warning_once = 1; 4639 4640 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 4641 4642 switch (port_topology) { 4643 default: 4644 case PORT_TOPOLOGY_PAIRED: 4645 if ((rxp & 0x1) == 0) { 4646 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 4647 return rxp + 1; 4648 if (warning_once) { 4649 fprintf(stderr, 4650 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 4651 warning_once = 0; 4652 } 4653 return rxp; 4654 } 4655 return rxp - 1; 4656 case PORT_TOPOLOGY_CHAINED: 4657 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 4658 case PORT_TOPOLOGY_LOOP: 4659 return rxp; 4660 } 4661 } 4662 4663 static void 4664 simple_fwd_config_setup(void) 4665 { 4666 portid_t i; 4667 4668 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 4669 cur_fwd_config.nb_fwd_streams = 4670 (streamid_t) cur_fwd_config.nb_fwd_ports; 4671 4672 /* reinitialize forwarding streams */ 4673 init_fwd_streams(); 4674 4675 /* 4676 * In the simple forwarding test, the number of forwarding cores 4677 * must be lower or equal to the number of forwarding ports. 4678 */ 4679 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4680 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 4681 cur_fwd_config.nb_fwd_lcores = 4682 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 4683 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4684 4685 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 4686 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 4687 fwd_streams[i]->rx_queue = 0; 4688 fwd_streams[i]->tx_port = 4689 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 4690 fwd_streams[i]->tx_queue = 0; 4691 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 4692 fwd_streams[i]->retry_enabled = retry_enabled; 4693 } 4694 } 4695 4696 /** 4697 * For the RSS forwarding test all streams distributed over lcores. Each stream 4698 * being composed of a RX queue to poll on a RX port for input messages, 4699 * associated with a TX queue of a TX port where to send forwarded packets. 4700 */ 4701 static void 4702 rss_fwd_config_setup(void) 4703 { 4704 portid_t rxp; 4705 portid_t txp; 4706 queueid_t rxq; 4707 queueid_t nb_q; 4708 streamid_t sm_id; 4709 int start; 4710 int end; 4711 4712 nb_q = nb_rxq; 4713 if (nb_q > nb_txq) 4714 nb_q = nb_txq; 4715 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4716 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4717 cur_fwd_config.nb_fwd_streams = 4718 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4719 4720 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4721 cur_fwd_config.nb_fwd_lcores = 4722 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4723 4724 /* reinitialize forwarding streams */ 4725 init_fwd_streams(); 4726 4727 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4728 4729 if (proc_id > 0 && nb_q % num_procs != 0) 4730 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4731 4732 /** 4733 * In multi-process, All queues are allocated to different 4734 * processes based on num_procs and proc_id. For example: 4735 * if supports 4 queues(nb_q), 2 processes(num_procs), 4736 * the 0~1 queue for primary process. 4737 * the 2~3 queue for secondary process. 4738 */ 4739 start = proc_id * nb_q / num_procs; 4740 end = start + nb_q / num_procs; 4741 rxp = 0; 4742 rxq = start; 4743 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4744 struct fwd_stream *fs; 4745 4746 fs = fwd_streams[sm_id]; 4747 txp = fwd_topology_tx_port_get(rxp); 4748 fs->rx_port = fwd_ports_ids[rxp]; 4749 fs->rx_queue = rxq; 4750 fs->tx_port = fwd_ports_ids[txp]; 4751 fs->tx_queue = rxq; 4752 fs->peer_addr = fs->tx_port; 4753 fs->retry_enabled = retry_enabled; 4754 rxp++; 4755 if (rxp < nb_fwd_ports) 4756 continue; 4757 rxp = 0; 4758 rxq++; 4759 if (rxq >= end) 4760 rxq = start; 4761 } 4762 } 4763 4764 static uint16_t 4765 get_fwd_port_total_tc_num(void) 4766 { 4767 struct rte_eth_dcb_info dcb_info; 4768 uint16_t total_tc_num = 0; 4769 unsigned int i; 4770 4771 for (i = 0; i < nb_fwd_ports; i++) { 4772 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4773 total_tc_num += dcb_info.nb_tcs; 4774 } 4775 4776 return total_tc_num; 4777 } 4778 4779 /** 4780 * For the DCB forwarding test, each core is assigned on each traffic class. 4781 * 4782 * Each core is assigned a multi-stream, each stream being composed of 4783 * a RX queue to poll on a RX port for input messages, associated with 4784 * a TX queue of a TX port where to send forwarded packets. All RX and 4785 * TX queues are mapping to the same traffic class. 4786 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4787 * the same core 4788 */ 4789 static void 4790 dcb_fwd_config_setup(void) 4791 { 4792 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4793 portid_t txp, rxp = 0; 4794 queueid_t txq, rxq = 0; 4795 lcoreid_t lc_id; 4796 uint16_t nb_rx_queue, nb_tx_queue; 4797 uint16_t i, j, k, sm_id = 0; 4798 uint16_t total_tc_num; 4799 struct rte_port *port; 4800 uint8_t tc = 0; 4801 portid_t pid; 4802 int ret; 4803 4804 /* 4805 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4806 * or RTE_PORT_STOPPED. 4807 * 4808 * Re-configure ports to get updated mapping between tc and queue in 4809 * case the queue number of the port is changed. Skip for started ports 4810 * since modifying queue number and calling dev_configure need to stop 4811 * ports first. 4812 */ 4813 for (pid = 0; pid < nb_fwd_ports; pid++) { 4814 if (port_is_started(pid) == 1) 4815 continue; 4816 4817 port = &ports[pid]; 4818 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4819 &port->dev_conf); 4820 if (ret < 0) { 4821 fprintf(stderr, 4822 "Failed to re-configure port %d, ret = %d.\n", 4823 pid, ret); 4824 return; 4825 } 4826 } 4827 4828 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4829 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4830 cur_fwd_config.nb_fwd_streams = 4831 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4832 total_tc_num = get_fwd_port_total_tc_num(); 4833 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4834 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4835 4836 /* reinitialize forwarding streams */ 4837 init_fwd_streams(); 4838 sm_id = 0; 4839 txp = 1; 4840 /* get the dcb info on the first RX and TX ports */ 4841 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4842 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4843 4844 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4845 fwd_lcores[lc_id]->stream_nb = 0; 4846 fwd_lcores[lc_id]->stream_idx = sm_id; 4847 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4848 /* if the nb_queue is zero, means this tc is 4849 * not enabled on the POOL 4850 */ 4851 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4852 break; 4853 k = fwd_lcores[lc_id]->stream_nb + 4854 fwd_lcores[lc_id]->stream_idx; 4855 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4856 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4857 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4858 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4859 for (j = 0; j < nb_rx_queue; j++) { 4860 struct fwd_stream *fs; 4861 4862 fs = fwd_streams[k + j]; 4863 fs->rx_port = fwd_ports_ids[rxp]; 4864 fs->rx_queue = rxq + j; 4865 fs->tx_port = fwd_ports_ids[txp]; 4866 fs->tx_queue = txq + j % nb_tx_queue; 4867 fs->peer_addr = fs->tx_port; 4868 fs->retry_enabled = retry_enabled; 4869 } 4870 fwd_lcores[lc_id]->stream_nb += 4871 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4872 } 4873 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4874 4875 tc++; 4876 if (tc < rxp_dcb_info.nb_tcs) 4877 continue; 4878 /* Restart from TC 0 on next RX port */ 4879 tc = 0; 4880 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4881 rxp = (portid_t) 4882 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4883 else 4884 rxp++; 4885 if (rxp >= nb_fwd_ports) 4886 return; 4887 /* get the dcb information on next RX and TX ports */ 4888 if ((rxp & 0x1) == 0) 4889 txp = (portid_t) (rxp + 1); 4890 else 4891 txp = (portid_t) (rxp - 1); 4892 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4893 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4894 } 4895 } 4896 4897 static void 4898 icmp_echo_config_setup(void) 4899 { 4900 portid_t rxp; 4901 queueid_t rxq; 4902 lcoreid_t lc_id; 4903 uint16_t sm_id; 4904 4905 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4906 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4907 (nb_txq * nb_fwd_ports); 4908 else 4909 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4910 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4911 cur_fwd_config.nb_fwd_streams = 4912 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4913 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4914 cur_fwd_config.nb_fwd_lcores = 4915 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4916 if (verbose_level > 0) { 4917 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4918 __FUNCTION__, 4919 cur_fwd_config.nb_fwd_lcores, 4920 cur_fwd_config.nb_fwd_ports, 4921 cur_fwd_config.nb_fwd_streams); 4922 } 4923 4924 /* reinitialize forwarding streams */ 4925 init_fwd_streams(); 4926 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4927 rxp = 0; rxq = 0; 4928 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4929 if (verbose_level > 0) 4930 printf(" core=%d: \n", lc_id); 4931 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4932 struct fwd_stream *fs; 4933 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4934 fs->rx_port = fwd_ports_ids[rxp]; 4935 fs->rx_queue = rxq; 4936 fs->tx_port = fs->rx_port; 4937 fs->tx_queue = rxq; 4938 fs->peer_addr = fs->tx_port; 4939 fs->retry_enabled = retry_enabled; 4940 if (verbose_level > 0) 4941 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4942 sm_id, fs->rx_port, fs->rx_queue, 4943 fs->tx_queue); 4944 rxq = (queueid_t) (rxq + 1); 4945 if (rxq == nb_rxq) { 4946 rxq = 0; 4947 rxp = (portid_t) (rxp + 1); 4948 } 4949 } 4950 } 4951 } 4952 4953 void 4954 fwd_config_setup(void) 4955 { 4956 struct rte_port *port; 4957 portid_t pt_id; 4958 unsigned int i; 4959 4960 cur_fwd_config.fwd_eng = cur_fwd_eng; 4961 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4962 icmp_echo_config_setup(); 4963 return; 4964 } 4965 4966 if ((nb_rxq > 1) && (nb_txq > 1)){ 4967 if (dcb_config) { 4968 for (i = 0; i < nb_fwd_ports; i++) { 4969 pt_id = fwd_ports_ids[i]; 4970 port = &ports[pt_id]; 4971 if (!port->dcb_flag) { 4972 fprintf(stderr, 4973 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4974 return; 4975 } 4976 } 4977 if (nb_fwd_lcores == 1) { 4978 fprintf(stderr, 4979 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4980 return; 4981 } 4982 4983 dcb_fwd_config_setup(); 4984 } else 4985 rss_fwd_config_setup(); 4986 } 4987 else 4988 simple_fwd_config_setup(); 4989 } 4990 4991 static const char * 4992 mp_alloc_to_str(uint8_t mode) 4993 { 4994 switch (mode) { 4995 case MP_ALLOC_NATIVE: 4996 return "native"; 4997 case MP_ALLOC_ANON: 4998 return "anon"; 4999 case MP_ALLOC_XMEM: 5000 return "xmem"; 5001 case MP_ALLOC_XMEM_HUGE: 5002 return "xmemhuge"; 5003 case MP_ALLOC_XBUF: 5004 return "xbuf"; 5005 default: 5006 return "invalid"; 5007 } 5008 } 5009 5010 void 5011 pkt_fwd_config_display(struct fwd_config *cfg) 5012 { 5013 struct fwd_stream *fs; 5014 lcoreid_t lc_id; 5015 streamid_t sm_id; 5016 5017 printf("%s%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 5018 "NUMA support %s, MP allocation mode: %s\n", 5019 cfg->fwd_eng->fwd_mode_name, 5020 cfg->fwd_eng->status ? "-" : "", 5021 cfg->fwd_eng->status ? cfg->fwd_eng->status : "", 5022 retry_enabled == 0 ? "" : " with retry", 5023 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 5024 numa_support == 1 ? "enabled" : "disabled", 5025 mp_alloc_to_str(mp_alloc_type)); 5026 5027 if (retry_enabled) 5028 printf("TX retry num: %u, delay between TX retries: %uus\n", 5029 burst_tx_retry_num, burst_tx_delay_time); 5030 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 5031 printf("Logical Core %u (socket %u) forwards packets on " 5032 "%d streams:", 5033 fwd_lcores_cpuids[lc_id], 5034 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 5035 fwd_lcores[lc_id]->stream_nb); 5036 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 5037 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 5038 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 5039 "P=%d/Q=%d (socket %u) ", 5040 fs->rx_port, fs->rx_queue, 5041 ports[fs->rx_port].socket_id, 5042 fs->tx_port, fs->tx_queue, 5043 ports[fs->tx_port].socket_id); 5044 print_ethaddr("peer=", 5045 &peer_eth_addrs[fs->peer_addr]); 5046 } 5047 printf("\n"); 5048 } 5049 printf("\n"); 5050 } 5051 5052 void 5053 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 5054 { 5055 struct rte_ether_addr new_peer_addr; 5056 if (!rte_eth_dev_is_valid_port(port_id)) { 5057 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 5058 return; 5059 } 5060 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 5061 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 5062 peer_addr); 5063 return; 5064 } 5065 peer_eth_addrs[port_id] = new_peer_addr; 5066 } 5067 5068 int 5069 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 5070 { 5071 unsigned int i; 5072 unsigned int lcore_cpuid; 5073 int record_now; 5074 5075 record_now = 0; 5076 again: 5077 for (i = 0; i < nb_lc; i++) { 5078 lcore_cpuid = lcorelist[i]; 5079 if (! rte_lcore_is_enabled(lcore_cpuid)) { 5080 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 5081 return -1; 5082 } 5083 if (lcore_cpuid == rte_get_main_lcore()) { 5084 fprintf(stderr, 5085 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 5086 lcore_cpuid); 5087 return -1; 5088 } 5089 if (record_now) 5090 fwd_lcores_cpuids[i] = lcore_cpuid; 5091 } 5092 if (record_now == 0) { 5093 record_now = 1; 5094 goto again; 5095 } 5096 nb_cfg_lcores = (lcoreid_t) nb_lc; 5097 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 5098 printf("previous number of forwarding cores %u - changed to " 5099 "number of configured cores %u\n", 5100 (unsigned int) nb_fwd_lcores, nb_lc); 5101 nb_fwd_lcores = (lcoreid_t) nb_lc; 5102 } 5103 5104 return 0; 5105 } 5106 5107 int 5108 set_fwd_lcores_mask(uint64_t lcoremask) 5109 { 5110 unsigned int lcorelist[64]; 5111 unsigned int nb_lc; 5112 unsigned int i; 5113 5114 if (lcoremask == 0) { 5115 fprintf(stderr, "Invalid NULL mask of cores\n"); 5116 return -1; 5117 } 5118 nb_lc = 0; 5119 for (i = 0; i < 64; i++) { 5120 if (! ((uint64_t)(1ULL << i) & lcoremask)) 5121 continue; 5122 lcorelist[nb_lc++] = i; 5123 } 5124 return set_fwd_lcores_list(lcorelist, nb_lc); 5125 } 5126 5127 void 5128 set_fwd_lcores_number(uint16_t nb_lc) 5129 { 5130 if (test_done == 0) { 5131 fprintf(stderr, "Please stop forwarding first\n"); 5132 return; 5133 } 5134 if (nb_lc > nb_cfg_lcores) { 5135 fprintf(stderr, 5136 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 5137 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 5138 return; 5139 } 5140 nb_fwd_lcores = (lcoreid_t) nb_lc; 5141 printf("Number of forwarding cores set to %u\n", 5142 (unsigned int) nb_fwd_lcores); 5143 } 5144 5145 void 5146 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 5147 { 5148 unsigned int i; 5149 portid_t port_id; 5150 int record_now; 5151 5152 record_now = 0; 5153 again: 5154 for (i = 0; i < nb_pt; i++) { 5155 port_id = (portid_t) portlist[i]; 5156 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5157 return; 5158 if (record_now) 5159 fwd_ports_ids[i] = port_id; 5160 } 5161 if (record_now == 0) { 5162 record_now = 1; 5163 goto again; 5164 } 5165 nb_cfg_ports = (portid_t) nb_pt; 5166 if (nb_fwd_ports != (portid_t) nb_pt) { 5167 printf("previous number of forwarding ports %u - changed to " 5168 "number of configured ports %u\n", 5169 (unsigned int) nb_fwd_ports, nb_pt); 5170 nb_fwd_ports = (portid_t) nb_pt; 5171 } 5172 } 5173 5174 /** 5175 * Parse the user input and obtain the list of forwarding ports 5176 * 5177 * @param[in] list 5178 * String containing the user input. User can specify 5179 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 5180 * For example, if the user wants to use all the available 5181 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 5182 * If the user wants to use only the ports 1,2 then the input 5183 * is 1,2. 5184 * valid characters are '-' and ',' 5185 * @param[out] values 5186 * This array will be filled with a list of port IDs 5187 * based on the user input 5188 * Note that duplicate entries are discarded and only the first 5189 * count entries in this array are port IDs and all the rest 5190 * will contain default values 5191 * @param[in] maxsize 5192 * This parameter denotes 2 things 5193 * 1) Number of elements in the values array 5194 * 2) Maximum value of each element in the values array 5195 * @return 5196 * On success, returns total count of parsed port IDs 5197 * On failure, returns 0 5198 */ 5199 static unsigned int 5200 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 5201 { 5202 unsigned int count = 0; 5203 char *end = NULL; 5204 int min, max; 5205 int value, i; 5206 unsigned int marked[maxsize]; 5207 5208 if (list == NULL || values == NULL) 5209 return 0; 5210 5211 for (i = 0; i < (int)maxsize; i++) 5212 marked[i] = 0; 5213 5214 min = INT_MAX; 5215 5216 do { 5217 /*Remove the blank spaces if any*/ 5218 while (isblank(*list)) 5219 list++; 5220 if (*list == '\0') 5221 break; 5222 errno = 0; 5223 value = strtol(list, &end, 10); 5224 if (errno || end == NULL) 5225 return 0; 5226 if (value < 0 || value >= (int)maxsize) 5227 return 0; 5228 while (isblank(*end)) 5229 end++; 5230 if (*end == '-' && min == INT_MAX) { 5231 min = value; 5232 } else if ((*end == ',') || (*end == '\0')) { 5233 max = value; 5234 if (min == INT_MAX) 5235 min = value; 5236 for (i = min; i <= max; i++) { 5237 if (count < maxsize) { 5238 if (marked[i]) 5239 continue; 5240 values[count] = i; 5241 marked[i] = 1; 5242 count++; 5243 } 5244 } 5245 min = INT_MAX; 5246 } else 5247 return 0; 5248 list = end + 1; 5249 } while (*end != '\0'); 5250 5251 return count; 5252 } 5253 5254 void 5255 parse_fwd_portlist(const char *portlist) 5256 { 5257 unsigned int portcount; 5258 unsigned int portindex[RTE_MAX_ETHPORTS]; 5259 unsigned int i, valid_port_count = 0; 5260 5261 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 5262 if (!portcount) 5263 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 5264 5265 /* 5266 * Here we verify the validity of the ports 5267 * and thereby calculate the total number of 5268 * valid ports 5269 */ 5270 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 5271 if (rte_eth_dev_is_valid_port(portindex[i])) { 5272 portindex[valid_port_count] = portindex[i]; 5273 valid_port_count++; 5274 } 5275 } 5276 5277 set_fwd_ports_list(portindex, valid_port_count); 5278 } 5279 5280 void 5281 set_fwd_ports_mask(uint64_t portmask) 5282 { 5283 unsigned int portlist[64]; 5284 unsigned int nb_pt; 5285 unsigned int i; 5286 5287 if (portmask == 0) { 5288 fprintf(stderr, "Invalid NULL mask of ports\n"); 5289 return; 5290 } 5291 nb_pt = 0; 5292 RTE_ETH_FOREACH_DEV(i) { 5293 if (! ((uint64_t)(1ULL << i) & portmask)) 5294 continue; 5295 portlist[nb_pt++] = i; 5296 } 5297 set_fwd_ports_list(portlist, nb_pt); 5298 } 5299 5300 void 5301 set_fwd_ports_number(uint16_t nb_pt) 5302 { 5303 if (nb_pt > nb_cfg_ports) { 5304 fprintf(stderr, 5305 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 5306 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 5307 return; 5308 } 5309 nb_fwd_ports = (portid_t) nb_pt; 5310 printf("Number of forwarding ports set to %u\n", 5311 (unsigned int) nb_fwd_ports); 5312 } 5313 5314 int 5315 port_is_forwarding(portid_t port_id) 5316 { 5317 unsigned int i; 5318 5319 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5320 return -1; 5321 5322 for (i = 0; i < nb_fwd_ports; i++) { 5323 if (fwd_ports_ids[i] == port_id) 5324 return 1; 5325 } 5326 5327 return 0; 5328 } 5329 5330 void 5331 set_nb_pkt_per_burst(uint16_t nb) 5332 { 5333 if (nb > MAX_PKT_BURST) { 5334 fprintf(stderr, 5335 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 5336 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 5337 return; 5338 } 5339 nb_pkt_per_burst = nb; 5340 printf("Number of packets per burst set to %u\n", 5341 (unsigned int) nb_pkt_per_burst); 5342 } 5343 5344 static const char * 5345 tx_split_get_name(enum tx_pkt_split split) 5346 { 5347 uint32_t i; 5348 5349 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5350 if (tx_split_name[i].split == split) 5351 return tx_split_name[i].name; 5352 } 5353 return NULL; 5354 } 5355 5356 void 5357 set_tx_pkt_split(const char *name) 5358 { 5359 uint32_t i; 5360 5361 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5362 if (strcmp(tx_split_name[i].name, name) == 0) { 5363 tx_pkt_split = tx_split_name[i].split; 5364 return; 5365 } 5366 } 5367 fprintf(stderr, "unknown value: \"%s\"\n", name); 5368 } 5369 5370 int 5371 parse_fec_mode(const char *name, uint32_t *fec_capa) 5372 { 5373 uint8_t i; 5374 5375 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 5376 if (strcmp(fec_mode_name[i].name, name) == 0) { 5377 *fec_capa = 5378 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 5379 return 0; 5380 } 5381 } 5382 return -1; 5383 } 5384 5385 void 5386 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 5387 { 5388 unsigned int i, j; 5389 5390 printf("FEC capabilities:\n"); 5391 5392 for (i = 0; i < num; i++) { 5393 printf("%s : ", 5394 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 5395 5396 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 5397 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 5398 speed_fec_capa[i].capa) 5399 printf("%s ", fec_mode_name[j].name); 5400 } 5401 printf("\n"); 5402 } 5403 } 5404 5405 void 5406 show_rx_pkt_offsets(void) 5407 { 5408 uint32_t i, n; 5409 5410 n = rx_pkt_nb_offs; 5411 printf("Number of offsets: %u\n", n); 5412 if (n) { 5413 printf("Segment offsets: "); 5414 for (i = 0; i != n - 1; i++) 5415 printf("%hu,", rx_pkt_seg_offsets[i]); 5416 printf("%hu\n", rx_pkt_seg_lengths[i]); 5417 } 5418 } 5419 5420 void 5421 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 5422 { 5423 unsigned int i; 5424 5425 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 5426 printf("nb segments per RX packets=%u >= " 5427 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 5428 return; 5429 } 5430 5431 /* 5432 * No extra check here, the segment length will be checked by PMD 5433 * in the extended queue setup. 5434 */ 5435 for (i = 0; i < nb_offs; i++) { 5436 if (seg_offsets[i] >= UINT16_MAX) { 5437 printf("offset[%u]=%u > UINT16_MAX - give up\n", 5438 i, seg_offsets[i]); 5439 return; 5440 } 5441 } 5442 5443 for (i = 0; i < nb_offs; i++) 5444 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 5445 5446 rx_pkt_nb_offs = (uint8_t) nb_offs; 5447 } 5448 5449 void 5450 show_rx_pkt_segments(void) 5451 { 5452 uint32_t i, n; 5453 5454 n = rx_pkt_nb_segs; 5455 printf("Number of segments: %u\n", n); 5456 if (n) { 5457 printf("Segment sizes: "); 5458 for (i = 0; i != n - 1; i++) 5459 printf("%hu,", rx_pkt_seg_lengths[i]); 5460 printf("%hu\n", rx_pkt_seg_lengths[i]); 5461 } 5462 } 5463 5464 static const char *get_ptype_str(uint32_t ptype) 5465 { 5466 const char *str; 5467 5468 switch (ptype) { 5469 case RTE_PTYPE_L2_ETHER: 5470 str = "eth"; 5471 break; 5472 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 5473 str = "ipv4"; 5474 break; 5475 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 5476 str = "ipv6"; 5477 break; 5478 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5479 str = "ipv4-tcp"; 5480 break; 5481 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5482 str = "ipv4-udp"; 5483 break; 5484 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5485 str = "ipv4-sctp"; 5486 break; 5487 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5488 str = "ipv6-tcp"; 5489 break; 5490 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5491 str = "ipv6-udp"; 5492 break; 5493 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5494 str = "ipv6-sctp"; 5495 break; 5496 case RTE_PTYPE_TUNNEL_GRENAT: 5497 str = "grenat"; 5498 break; 5499 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER: 5500 str = "inner-eth"; 5501 break; 5502 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5503 | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 5504 str = "inner-ipv4"; 5505 break; 5506 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5507 | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 5508 str = "inner-ipv6"; 5509 break; 5510 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5511 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5512 str = "inner-ipv4-tcp"; 5513 break; 5514 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5515 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5516 str = "inner-ipv4-udp"; 5517 break; 5518 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5519 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5520 str = "inner-ipv4-sctp"; 5521 break; 5522 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5523 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5524 str = "inner-ipv6-tcp"; 5525 break; 5526 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5527 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5528 str = "inner-ipv6-udp"; 5529 break; 5530 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5531 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5532 str = "inner-ipv6-sctp"; 5533 break; 5534 default: 5535 str = "unsupported"; 5536 } 5537 5538 return str; 5539 } 5540 5541 void 5542 show_rx_pkt_hdrs(void) 5543 { 5544 uint32_t i, n; 5545 5546 n = rx_pkt_nb_segs; 5547 printf("Number of segments: %u\n", n); 5548 if (n) { 5549 printf("Packet segs: "); 5550 for (i = 0; i < n - 1; i++) 5551 printf("%s, ", get_ptype_str(rx_pkt_hdr_protos[i])); 5552 printf("payload\n"); 5553 } 5554 } 5555 5556 void 5557 set_rx_pkt_hdrs(unsigned int *seg_hdrs, unsigned int nb_segs) 5558 { 5559 unsigned int i; 5560 5561 if (nb_segs + 1 > MAX_SEGS_BUFFER_SPLIT) { 5562 printf("nb segments per RX packets=%u > " 5563 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs + 1); 5564 return; 5565 } 5566 5567 memset(rx_pkt_hdr_protos, 0, sizeof(rx_pkt_hdr_protos)); 5568 5569 for (i = 0; i < nb_segs; i++) 5570 rx_pkt_hdr_protos[i] = (uint32_t)seg_hdrs[i]; 5571 /* 5572 * We calculate the number of hdrs, but payload is not included, 5573 * so rx_pkt_nb_segs would increase 1. 5574 */ 5575 rx_pkt_nb_segs = nb_segs + 1; 5576 } 5577 5578 void 5579 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5580 { 5581 unsigned int i; 5582 5583 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 5584 printf("nb segments per RX packets=%u >= " 5585 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 5586 return; 5587 } 5588 5589 /* 5590 * No extra check here, the segment length will be checked by PMD 5591 * in the extended queue setup. 5592 */ 5593 for (i = 0; i < nb_segs; i++) { 5594 if (seg_lengths[i] >= UINT16_MAX) { 5595 printf("length[%u]=%u > UINT16_MAX - give up\n", 5596 i, seg_lengths[i]); 5597 return; 5598 } 5599 } 5600 5601 for (i = 0; i < nb_segs; i++) 5602 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5603 5604 rx_pkt_nb_segs = (uint8_t) nb_segs; 5605 } 5606 5607 void 5608 show_tx_pkt_segments(void) 5609 { 5610 uint32_t i, n; 5611 const char *split; 5612 5613 n = tx_pkt_nb_segs; 5614 split = tx_split_get_name(tx_pkt_split); 5615 5616 printf("Number of segments: %u\n", n); 5617 printf("Segment sizes: "); 5618 for (i = 0; i != n - 1; i++) 5619 printf("%hu,", tx_pkt_seg_lengths[i]); 5620 printf("%hu\n", tx_pkt_seg_lengths[i]); 5621 printf("Split packet: %s\n", split); 5622 } 5623 5624 static bool 5625 nb_segs_is_invalid(unsigned int nb_segs) 5626 { 5627 uint16_t ring_size; 5628 uint16_t queue_id; 5629 uint16_t port_id; 5630 int ret; 5631 5632 RTE_ETH_FOREACH_DEV(port_id) { 5633 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 5634 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 5635 if (ret) { 5636 /* Port may not be initialized yet, can't say 5637 * the port is invalid in this stage. 5638 */ 5639 continue; 5640 } 5641 if (ring_size < nb_segs) { 5642 printf("nb segments per TX packets=%u >= TX " 5643 "queue(%u) ring_size=%u - txpkts ignored\n", 5644 nb_segs, queue_id, ring_size); 5645 return true; 5646 } 5647 } 5648 } 5649 5650 return false; 5651 } 5652 5653 void 5654 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5655 { 5656 uint16_t tx_pkt_len; 5657 unsigned int i; 5658 5659 /* 5660 * For single segment settings failed check is ignored. 5661 * It is a very basic capability to send the single segment 5662 * packets, suppose it is always supported. 5663 */ 5664 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 5665 fprintf(stderr, 5666 "Tx segment size(%u) is not supported - txpkts ignored\n", 5667 nb_segs); 5668 return; 5669 } 5670 5671 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 5672 fprintf(stderr, 5673 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 5674 nb_segs, RTE_MAX_SEGS_PER_PKT); 5675 return; 5676 } 5677 5678 /* 5679 * Check that each segment length is greater or equal than 5680 * the mbuf data size. 5681 * Check also that the total packet length is greater or equal than the 5682 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 5683 * 20 + 8). 5684 */ 5685 tx_pkt_len = 0; 5686 for (i = 0; i < nb_segs; i++) { 5687 if (seg_lengths[i] > mbuf_data_size[0]) { 5688 fprintf(stderr, 5689 "length[%u]=%u > mbuf_data_size=%u - give up\n", 5690 i, seg_lengths[i], mbuf_data_size[0]); 5691 return; 5692 } 5693 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 5694 } 5695 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 5696 fprintf(stderr, "total packet length=%u < %d - give up\n", 5697 (unsigned) tx_pkt_len, 5698 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 5699 return; 5700 } 5701 5702 for (i = 0; i < nb_segs; i++) 5703 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5704 5705 tx_pkt_length = tx_pkt_len; 5706 tx_pkt_nb_segs = (uint8_t) nb_segs; 5707 } 5708 5709 void 5710 show_tx_pkt_times(void) 5711 { 5712 printf("Interburst gap: %u\n", tx_pkt_times_inter); 5713 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 5714 } 5715 5716 void 5717 set_tx_pkt_times(unsigned int *tx_times) 5718 { 5719 tx_pkt_times_inter = tx_times[0]; 5720 tx_pkt_times_intra = tx_times[1]; 5721 } 5722 5723 #ifdef RTE_LIB_GRO 5724 void 5725 setup_gro(const char *onoff, portid_t port_id) 5726 { 5727 if (!rte_eth_dev_is_valid_port(port_id)) { 5728 fprintf(stderr, "invalid port id %u\n", port_id); 5729 return; 5730 } 5731 if (test_done == 0) { 5732 fprintf(stderr, 5733 "Before enable/disable GRO, please stop forwarding first\n"); 5734 return; 5735 } 5736 if (strcmp(onoff, "on") == 0) { 5737 if (gro_ports[port_id].enable != 0) { 5738 fprintf(stderr, 5739 "Port %u has enabled GRO. Please disable GRO first\n", 5740 port_id); 5741 return; 5742 } 5743 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5744 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 5745 gro_ports[port_id].param.max_flow_num = 5746 GRO_DEFAULT_FLOW_NUM; 5747 gro_ports[port_id].param.max_item_per_flow = 5748 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 5749 } 5750 gro_ports[port_id].enable = 1; 5751 } else { 5752 if (gro_ports[port_id].enable == 0) { 5753 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 5754 return; 5755 } 5756 gro_ports[port_id].enable = 0; 5757 } 5758 } 5759 5760 void 5761 setup_gro_flush_cycles(uint8_t cycles) 5762 { 5763 if (test_done == 0) { 5764 fprintf(stderr, 5765 "Before change flush interval for GRO, please stop forwarding first.\n"); 5766 return; 5767 } 5768 5769 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 5770 GRO_DEFAULT_FLUSH_CYCLES) { 5771 fprintf(stderr, 5772 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 5773 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 5774 cycles = GRO_DEFAULT_FLUSH_CYCLES; 5775 } 5776 5777 gro_flush_cycles = cycles; 5778 } 5779 5780 void 5781 show_gro(portid_t port_id) 5782 { 5783 struct rte_gro_param *param; 5784 uint32_t max_pkts_num; 5785 5786 param = &gro_ports[port_id].param; 5787 5788 if (!rte_eth_dev_is_valid_port(port_id)) { 5789 fprintf(stderr, "Invalid port id %u.\n", port_id); 5790 return; 5791 } 5792 if (gro_ports[port_id].enable) { 5793 printf("GRO type: TCP/IPv4\n"); 5794 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5795 max_pkts_num = param->max_flow_num * 5796 param->max_item_per_flow; 5797 } else 5798 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 5799 printf("Max number of packets to perform GRO: %u\n", 5800 max_pkts_num); 5801 printf("Flushing cycles: %u\n", gro_flush_cycles); 5802 } else 5803 printf("Port %u doesn't enable GRO.\n", port_id); 5804 } 5805 #endif /* RTE_LIB_GRO */ 5806 5807 #ifdef RTE_LIB_GSO 5808 void 5809 setup_gso(const char *mode, portid_t port_id) 5810 { 5811 if (!rte_eth_dev_is_valid_port(port_id)) { 5812 fprintf(stderr, "invalid port id %u\n", port_id); 5813 return; 5814 } 5815 if (strcmp(mode, "on") == 0) { 5816 if (test_done == 0) { 5817 fprintf(stderr, 5818 "before enabling GSO, please stop forwarding first\n"); 5819 return; 5820 } 5821 gso_ports[port_id].enable = 1; 5822 } else if (strcmp(mode, "off") == 0) { 5823 if (test_done == 0) { 5824 fprintf(stderr, 5825 "before disabling GSO, please stop forwarding first\n"); 5826 return; 5827 } 5828 gso_ports[port_id].enable = 0; 5829 } 5830 } 5831 #endif /* RTE_LIB_GSO */ 5832 5833 char* 5834 list_pkt_forwarding_modes(void) 5835 { 5836 static char fwd_modes[128] = ""; 5837 const char *separator = "|"; 5838 struct fwd_engine *fwd_eng; 5839 unsigned i = 0; 5840 5841 if (strlen (fwd_modes) == 0) { 5842 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5843 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5844 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5845 strncat(fwd_modes, separator, 5846 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5847 } 5848 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5849 } 5850 5851 return fwd_modes; 5852 } 5853 5854 char* 5855 list_pkt_forwarding_retry_modes(void) 5856 { 5857 static char fwd_modes[128] = ""; 5858 const char *separator = "|"; 5859 struct fwd_engine *fwd_eng; 5860 unsigned i = 0; 5861 5862 if (strlen(fwd_modes) == 0) { 5863 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5864 if (fwd_eng == &rx_only_engine) 5865 continue; 5866 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5867 sizeof(fwd_modes) - 5868 strlen(fwd_modes) - 1); 5869 strncat(fwd_modes, separator, 5870 sizeof(fwd_modes) - 5871 strlen(fwd_modes) - 1); 5872 } 5873 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5874 } 5875 5876 return fwd_modes; 5877 } 5878 5879 void 5880 set_pkt_forwarding_mode(const char *fwd_mode_name) 5881 { 5882 struct fwd_engine *fwd_eng; 5883 unsigned i; 5884 5885 i = 0; 5886 while ((fwd_eng = fwd_engines[i]) != NULL) { 5887 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5888 printf("Set %s packet forwarding mode%s\n", 5889 fwd_mode_name, 5890 retry_enabled == 0 ? "" : " with retry"); 5891 cur_fwd_eng = fwd_eng; 5892 return; 5893 } 5894 i++; 5895 } 5896 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5897 } 5898 5899 void 5900 add_rx_dump_callbacks(portid_t portid) 5901 { 5902 struct rte_eth_dev_info dev_info; 5903 uint16_t queue; 5904 int ret; 5905 5906 if (port_id_is_invalid(portid, ENABLED_WARN)) 5907 return; 5908 5909 ret = eth_dev_info_get_print_err(portid, &dev_info); 5910 if (ret != 0) 5911 return; 5912 5913 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5914 if (!ports[portid].rx_dump_cb[queue]) 5915 ports[portid].rx_dump_cb[queue] = 5916 rte_eth_add_rx_callback(portid, queue, 5917 dump_rx_pkts, NULL); 5918 } 5919 5920 void 5921 add_tx_dump_callbacks(portid_t portid) 5922 { 5923 struct rte_eth_dev_info dev_info; 5924 uint16_t queue; 5925 int ret; 5926 5927 if (port_id_is_invalid(portid, ENABLED_WARN)) 5928 return; 5929 5930 ret = eth_dev_info_get_print_err(portid, &dev_info); 5931 if (ret != 0) 5932 return; 5933 5934 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5935 if (!ports[portid].tx_dump_cb[queue]) 5936 ports[portid].tx_dump_cb[queue] = 5937 rte_eth_add_tx_callback(portid, queue, 5938 dump_tx_pkts, NULL); 5939 } 5940 5941 void 5942 remove_rx_dump_callbacks(portid_t portid) 5943 { 5944 struct rte_eth_dev_info dev_info; 5945 uint16_t queue; 5946 int ret; 5947 5948 if (port_id_is_invalid(portid, ENABLED_WARN)) 5949 return; 5950 5951 ret = eth_dev_info_get_print_err(portid, &dev_info); 5952 if (ret != 0) 5953 return; 5954 5955 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5956 if (ports[portid].rx_dump_cb[queue]) { 5957 rte_eth_remove_rx_callback(portid, queue, 5958 ports[portid].rx_dump_cb[queue]); 5959 ports[portid].rx_dump_cb[queue] = NULL; 5960 } 5961 } 5962 5963 void 5964 remove_tx_dump_callbacks(portid_t portid) 5965 { 5966 struct rte_eth_dev_info dev_info; 5967 uint16_t queue; 5968 int ret; 5969 5970 if (port_id_is_invalid(portid, ENABLED_WARN)) 5971 return; 5972 5973 ret = eth_dev_info_get_print_err(portid, &dev_info); 5974 if (ret != 0) 5975 return; 5976 5977 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5978 if (ports[portid].tx_dump_cb[queue]) { 5979 rte_eth_remove_tx_callback(portid, queue, 5980 ports[portid].tx_dump_cb[queue]); 5981 ports[portid].tx_dump_cb[queue] = NULL; 5982 } 5983 } 5984 5985 void 5986 configure_rxtx_dump_callbacks(uint16_t verbose) 5987 { 5988 portid_t portid; 5989 5990 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5991 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 5992 return; 5993 #endif 5994 5995 RTE_ETH_FOREACH_DEV(portid) 5996 { 5997 if (verbose == 1 || verbose > 2) 5998 add_rx_dump_callbacks(portid); 5999 else 6000 remove_rx_dump_callbacks(portid); 6001 if (verbose >= 2) 6002 add_tx_dump_callbacks(portid); 6003 else 6004 remove_tx_dump_callbacks(portid); 6005 } 6006 } 6007 6008 void 6009 set_verbose_level(uint16_t vb_level) 6010 { 6011 printf("Change verbose level from %u to %u\n", 6012 (unsigned int) verbose_level, (unsigned int) vb_level); 6013 verbose_level = vb_level; 6014 configure_rxtx_dump_callbacks(verbose_level); 6015 } 6016 6017 void 6018 vlan_extend_set(portid_t port_id, int on) 6019 { 6020 int diag; 6021 int vlan_offload; 6022 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6023 6024 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6025 return; 6026 6027 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6028 6029 if (on) { 6030 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 6031 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6032 } else { 6033 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 6034 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6035 } 6036 6037 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6038 if (diag < 0) { 6039 fprintf(stderr, 6040 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 6041 port_id, on, diag); 6042 return; 6043 } 6044 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6045 } 6046 6047 void 6048 rx_vlan_strip_set(portid_t port_id, int on) 6049 { 6050 int diag; 6051 int vlan_offload; 6052 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6053 6054 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6055 return; 6056 6057 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6058 6059 if (on) { 6060 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 6061 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6062 } else { 6063 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 6064 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6065 } 6066 6067 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6068 if (diag < 0) { 6069 fprintf(stderr, 6070 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6071 __func__, port_id, on, diag); 6072 return; 6073 } 6074 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6075 } 6076 6077 void 6078 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 6079 { 6080 int diag; 6081 6082 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6083 return; 6084 6085 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 6086 if (diag < 0) 6087 fprintf(stderr, 6088 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 6089 __func__, port_id, queue_id, on, diag); 6090 } 6091 6092 void 6093 rx_vlan_filter_set(portid_t port_id, int on) 6094 { 6095 int diag; 6096 int vlan_offload; 6097 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6098 6099 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6100 return; 6101 6102 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6103 6104 if (on) { 6105 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 6106 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6107 } else { 6108 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 6109 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6110 } 6111 6112 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6113 if (diag < 0) { 6114 fprintf(stderr, 6115 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6116 __func__, port_id, on, diag); 6117 return; 6118 } 6119 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6120 } 6121 6122 void 6123 rx_vlan_qinq_strip_set(portid_t port_id, int on) 6124 { 6125 int diag; 6126 int vlan_offload; 6127 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6128 6129 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6130 return; 6131 6132 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6133 6134 if (on) { 6135 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 6136 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6137 } else { 6138 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 6139 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6140 } 6141 6142 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6143 if (diag < 0) { 6144 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 6145 __func__, port_id, on, diag); 6146 return; 6147 } 6148 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6149 } 6150 6151 int 6152 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 6153 { 6154 int diag; 6155 6156 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6157 return 1; 6158 if (vlan_id_is_invalid(vlan_id)) 6159 return 1; 6160 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 6161 if (diag == 0) 6162 return 0; 6163 fprintf(stderr, 6164 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 6165 port_id, vlan_id, on, diag); 6166 return -1; 6167 } 6168 6169 void 6170 rx_vlan_all_filter_set(portid_t port_id, int on) 6171 { 6172 uint16_t vlan_id; 6173 6174 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6175 return; 6176 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 6177 if (rx_vft_set(port_id, vlan_id, on)) 6178 break; 6179 } 6180 } 6181 6182 void 6183 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 6184 { 6185 int diag; 6186 6187 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6188 return; 6189 6190 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 6191 if (diag == 0) 6192 return; 6193 6194 fprintf(stderr, 6195 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 6196 port_id, vlan_type, tp_id, diag); 6197 } 6198 6199 void 6200 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 6201 { 6202 struct rte_eth_dev_info dev_info; 6203 int ret; 6204 6205 if (vlan_id_is_invalid(vlan_id)) 6206 return; 6207 6208 if (ports[port_id].dev_conf.txmode.offloads & 6209 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 6210 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 6211 return; 6212 } 6213 6214 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6215 if (ret != 0) 6216 return; 6217 6218 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 6219 fprintf(stderr, 6220 "Error: vlan insert is not supported by port %d\n", 6221 port_id); 6222 return; 6223 } 6224 6225 tx_vlan_reset(port_id); 6226 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 6227 ports[port_id].tx_vlan_id = vlan_id; 6228 } 6229 6230 void 6231 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 6232 { 6233 struct rte_eth_dev_info dev_info; 6234 int ret; 6235 6236 if (vlan_id_is_invalid(vlan_id)) 6237 return; 6238 if (vlan_id_is_invalid(vlan_id_outer)) 6239 return; 6240 6241 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6242 if (ret != 0) 6243 return; 6244 6245 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 6246 fprintf(stderr, 6247 "Error: qinq insert not supported by port %d\n", 6248 port_id); 6249 return; 6250 } 6251 6252 tx_vlan_reset(port_id); 6253 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6254 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6255 ports[port_id].tx_vlan_id = vlan_id; 6256 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 6257 } 6258 6259 void 6260 tx_vlan_reset(portid_t port_id) 6261 { 6262 ports[port_id].dev_conf.txmode.offloads &= 6263 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6264 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6265 ports[port_id].tx_vlan_id = 0; 6266 ports[port_id].tx_vlan_id_outer = 0; 6267 } 6268 6269 void 6270 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 6271 { 6272 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6273 return; 6274 6275 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 6276 } 6277 6278 void 6279 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 6280 { 6281 int ret; 6282 6283 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6284 return; 6285 6286 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 6287 return; 6288 6289 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 6290 fprintf(stderr, "map_value not in required range 0..%d\n", 6291 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 6292 return; 6293 } 6294 6295 if (!is_rx) { /* tx */ 6296 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 6297 map_value); 6298 if (ret) { 6299 fprintf(stderr, 6300 "failed to set tx queue stats mapping.\n"); 6301 return; 6302 } 6303 } else { /* rx */ 6304 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 6305 map_value); 6306 if (ret) { 6307 fprintf(stderr, 6308 "failed to set rx queue stats mapping.\n"); 6309 return; 6310 } 6311 } 6312 } 6313 6314 void 6315 set_xstats_hide_zero(uint8_t on_off) 6316 { 6317 xstats_hide_zero = on_off; 6318 } 6319 6320 void 6321 set_record_core_cycles(uint8_t on_off) 6322 { 6323 record_core_cycles = on_off; 6324 } 6325 6326 void 6327 set_record_burst_stats(uint8_t on_off) 6328 { 6329 record_burst_stats = on_off; 6330 } 6331 6332 uint16_t 6333 str_to_flowtype(const char *string) 6334 { 6335 uint8_t i; 6336 6337 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6338 if (!strcmp(flowtype_str_table[i].str, string)) 6339 return flowtype_str_table[i].ftype; 6340 } 6341 6342 if (isdigit(string[0])) { 6343 int val = atoi(string); 6344 if (val > 0 && val < 64) 6345 return (uint16_t)val; 6346 } 6347 6348 return RTE_ETH_FLOW_UNKNOWN; 6349 } 6350 6351 const char* 6352 flowtype_to_str(uint16_t flow_type) 6353 { 6354 uint8_t i; 6355 6356 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6357 if (flowtype_str_table[i].ftype == flow_type) 6358 return flowtype_str_table[i].str; 6359 } 6360 6361 return NULL; 6362 } 6363 6364 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 6365 6366 static inline void 6367 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6368 { 6369 struct rte_eth_flex_payload_cfg *cfg; 6370 uint32_t i, j; 6371 6372 for (i = 0; i < flex_conf->nb_payloads; i++) { 6373 cfg = &flex_conf->flex_set[i]; 6374 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 6375 printf("\n RAW: "); 6376 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 6377 printf("\n L2_PAYLOAD: "); 6378 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 6379 printf("\n L3_PAYLOAD: "); 6380 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 6381 printf("\n L4_PAYLOAD: "); 6382 else 6383 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 6384 for (j = 0; j < num; j++) 6385 printf(" %-5u", cfg->src_offset[j]); 6386 } 6387 printf("\n"); 6388 } 6389 6390 static inline void 6391 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6392 { 6393 struct rte_eth_fdir_flex_mask *mask; 6394 uint32_t i, j; 6395 const char *p; 6396 6397 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 6398 mask = &flex_conf->flex_mask[i]; 6399 p = flowtype_to_str(mask->flow_type); 6400 printf("\n %s:\t", p ? p : "unknown"); 6401 for (j = 0; j < num; j++) 6402 printf(" %02x", mask->mask[j]); 6403 } 6404 printf("\n"); 6405 } 6406 6407 static inline void 6408 print_fdir_flow_type(uint32_t flow_types_mask) 6409 { 6410 int i; 6411 const char *p; 6412 6413 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 6414 if (!(flow_types_mask & (1 << i))) 6415 continue; 6416 p = flowtype_to_str(i); 6417 if (p) 6418 printf(" %s", p); 6419 else 6420 printf(" unknown"); 6421 } 6422 printf("\n"); 6423 } 6424 6425 static int 6426 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 6427 struct rte_eth_fdir_stats *fdir_stat) 6428 { 6429 int ret = -ENOTSUP; 6430 6431 #ifdef RTE_NET_I40E 6432 if (ret == -ENOTSUP) { 6433 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 6434 if (!ret) 6435 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 6436 } 6437 #endif 6438 #ifdef RTE_NET_IXGBE 6439 if (ret == -ENOTSUP) { 6440 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 6441 if (!ret) 6442 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 6443 } 6444 #endif 6445 switch (ret) { 6446 case 0: 6447 break; 6448 case -ENOTSUP: 6449 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 6450 port_id); 6451 break; 6452 default: 6453 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 6454 break; 6455 } 6456 return ret; 6457 } 6458 6459 void 6460 fdir_get_infos(portid_t port_id) 6461 { 6462 struct rte_eth_fdir_stats fdir_stat; 6463 struct rte_eth_fdir_info fdir_info; 6464 6465 static const char *fdir_stats_border = "########################"; 6466 6467 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6468 return; 6469 6470 memset(&fdir_info, 0, sizeof(fdir_info)); 6471 memset(&fdir_stat, 0, sizeof(fdir_stat)); 6472 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 6473 return; 6474 6475 printf("\n %s FDIR infos for port %-2d %s\n", 6476 fdir_stats_border, port_id, fdir_stats_border); 6477 printf(" MODE: "); 6478 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 6479 printf(" PERFECT\n"); 6480 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 6481 printf(" PERFECT-MAC-VLAN\n"); 6482 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 6483 printf(" PERFECT-TUNNEL\n"); 6484 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 6485 printf(" SIGNATURE\n"); 6486 else 6487 printf(" DISABLE\n"); 6488 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 6489 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 6490 printf(" SUPPORTED FLOW TYPE: "); 6491 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 6492 } 6493 printf(" FLEX PAYLOAD INFO:\n"); 6494 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 6495 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 6496 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 6497 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 6498 fdir_info.flex_payload_unit, 6499 fdir_info.max_flex_payload_segment_num, 6500 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 6501 if (fdir_info.flex_conf.nb_payloads > 0) { 6502 printf(" FLEX PAYLOAD SRC OFFSET:"); 6503 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6504 } 6505 if (fdir_info.flex_conf.nb_flexmasks > 0) { 6506 printf(" FLEX MASK CFG:"); 6507 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6508 } 6509 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 6510 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 6511 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 6512 fdir_info.guarant_spc, fdir_info.best_spc); 6513 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 6514 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 6515 " add: %-10"PRIu64" remove: %"PRIu64"\n" 6516 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 6517 fdir_stat.collision, fdir_stat.free, 6518 fdir_stat.maxhash, fdir_stat.maxlen, 6519 fdir_stat.add, fdir_stat.remove, 6520 fdir_stat.f_add, fdir_stat.f_remove); 6521 printf(" %s############################%s\n", 6522 fdir_stats_border, fdir_stats_border); 6523 } 6524 6525 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 6526 6527 void 6528 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 6529 { 6530 #ifdef RTE_NET_IXGBE 6531 int diag; 6532 6533 if (is_rx) 6534 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 6535 else 6536 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 6537 6538 if (diag == 0) 6539 return; 6540 fprintf(stderr, 6541 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 6542 is_rx ? "rx" : "tx", port_id, diag); 6543 return; 6544 #endif 6545 fprintf(stderr, "VF %s setting not supported for port %d\n", 6546 is_rx ? "Rx" : "Tx", port_id); 6547 RTE_SET_USED(vf); 6548 RTE_SET_USED(on); 6549 } 6550 6551 int 6552 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate) 6553 { 6554 int diag; 6555 struct rte_eth_link link; 6556 int ret; 6557 6558 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6559 return 1; 6560 ret = eth_link_get_nowait_print_err(port_id, &link); 6561 if (ret < 0) 6562 return 1; 6563 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 6564 rate > link.link_speed) { 6565 fprintf(stderr, 6566 "Invalid rate value:%u bigger than link speed: %u\n", 6567 rate, link.link_speed); 6568 return 1; 6569 } 6570 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 6571 if (diag == 0) 6572 return diag; 6573 fprintf(stderr, 6574 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 6575 port_id, diag); 6576 return diag; 6577 } 6578 6579 int 6580 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk) 6581 { 6582 int diag = -ENOTSUP; 6583 6584 RTE_SET_USED(vf); 6585 RTE_SET_USED(rate); 6586 RTE_SET_USED(q_msk); 6587 6588 #ifdef RTE_NET_IXGBE 6589 if (diag == -ENOTSUP) 6590 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 6591 q_msk); 6592 #endif 6593 #ifdef RTE_NET_BNXT 6594 if (diag == -ENOTSUP) 6595 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 6596 #endif 6597 if (diag == 0) 6598 return diag; 6599 6600 fprintf(stderr, 6601 "%s for port_id=%d failed diag=%d\n", 6602 __func__, port_id, diag); 6603 return diag; 6604 } 6605 6606 int 6607 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh) 6608 { 6609 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6610 return -EINVAL; 6611 6612 return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh); 6613 } 6614 6615 /* 6616 * Functions to manage the set of filtered Multicast MAC addresses. 6617 * 6618 * A pool of filtered multicast MAC addresses is associated with each port. 6619 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 6620 * The address of the pool and the number of valid multicast MAC addresses 6621 * recorded in the pool are stored in the fields "mc_addr_pool" and 6622 * "mc_addr_nb" of the "rte_port" data structure. 6623 * 6624 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 6625 * to be supplied a contiguous array of multicast MAC addresses. 6626 * To comply with this constraint, the set of multicast addresses recorded 6627 * into the pool are systematically compacted at the beginning of the pool. 6628 * Hence, when a multicast address is removed from the pool, all following 6629 * addresses, if any, are copied back to keep the set contiguous. 6630 */ 6631 #define MCAST_POOL_INC 32 6632 6633 static int 6634 mcast_addr_pool_extend(struct rte_port *port) 6635 { 6636 struct rte_ether_addr *mc_pool; 6637 size_t mc_pool_size; 6638 6639 /* 6640 * If a free entry is available at the end of the pool, just 6641 * increment the number of recorded multicast addresses. 6642 */ 6643 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 6644 port->mc_addr_nb++; 6645 return 0; 6646 } 6647 6648 /* 6649 * [re]allocate a pool with MCAST_POOL_INC more entries. 6650 * The previous test guarantees that port->mc_addr_nb is a multiple 6651 * of MCAST_POOL_INC. 6652 */ 6653 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 6654 MCAST_POOL_INC); 6655 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 6656 mc_pool_size); 6657 if (mc_pool == NULL) { 6658 fprintf(stderr, 6659 "allocation of pool of %u multicast addresses failed\n", 6660 port->mc_addr_nb + MCAST_POOL_INC); 6661 return -ENOMEM; 6662 } 6663 6664 port->mc_addr_pool = mc_pool; 6665 port->mc_addr_nb++; 6666 return 0; 6667 6668 } 6669 6670 static void 6671 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 6672 { 6673 if (mcast_addr_pool_extend(port) != 0) 6674 return; 6675 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 6676 } 6677 6678 static void 6679 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 6680 { 6681 port->mc_addr_nb--; 6682 if (addr_idx == port->mc_addr_nb) { 6683 /* No need to recompact the set of multicast addresses. */ 6684 if (port->mc_addr_nb == 0) { 6685 /* free the pool of multicast addresses. */ 6686 free(port->mc_addr_pool); 6687 port->mc_addr_pool = NULL; 6688 } 6689 return; 6690 } 6691 memmove(&port->mc_addr_pool[addr_idx], 6692 &port->mc_addr_pool[addr_idx + 1], 6693 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 6694 } 6695 6696 int 6697 mcast_addr_pool_destroy(portid_t port_id) 6698 { 6699 struct rte_port *port; 6700 6701 if (port_id_is_invalid(port_id, ENABLED_WARN) || 6702 port_id == (portid_t)RTE_PORT_ALL) 6703 return -EINVAL; 6704 port = &ports[port_id]; 6705 6706 if (port->mc_addr_nb != 0) { 6707 /* free the pool of multicast addresses. */ 6708 free(port->mc_addr_pool); 6709 port->mc_addr_pool = NULL; 6710 port->mc_addr_nb = 0; 6711 } 6712 return 0; 6713 } 6714 6715 static int 6716 eth_port_multicast_addr_list_set(portid_t port_id) 6717 { 6718 struct rte_port *port; 6719 int diag; 6720 6721 port = &ports[port_id]; 6722 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 6723 port->mc_addr_nb); 6724 if (diag < 0) 6725 fprintf(stderr, 6726 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 6727 port_id, port->mc_addr_nb, diag); 6728 6729 return diag; 6730 } 6731 6732 void 6733 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6734 { 6735 struct rte_port *port; 6736 uint32_t i; 6737 6738 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6739 return; 6740 6741 port = &ports[port_id]; 6742 6743 /* 6744 * Check that the added multicast MAC address is not already recorded 6745 * in the pool of multicast addresses. 6746 */ 6747 for (i = 0; i < port->mc_addr_nb; i++) { 6748 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6749 fprintf(stderr, 6750 "multicast address already filtered by port\n"); 6751 return; 6752 } 6753 } 6754 6755 mcast_addr_pool_append(port, mc_addr); 6756 if (eth_port_multicast_addr_list_set(port_id) < 0) 6757 /* Rollback on failure, remove the address from the pool */ 6758 mcast_addr_pool_remove(port, i); 6759 } 6760 6761 void 6762 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6763 { 6764 struct rte_port *port; 6765 uint32_t i; 6766 6767 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6768 return; 6769 6770 port = &ports[port_id]; 6771 6772 /* 6773 * Search the pool of multicast MAC addresses for the removed address. 6774 */ 6775 for (i = 0; i < port->mc_addr_nb; i++) { 6776 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6777 break; 6778 } 6779 if (i == port->mc_addr_nb) { 6780 fprintf(stderr, "multicast address not filtered by port %d\n", 6781 port_id); 6782 return; 6783 } 6784 6785 mcast_addr_pool_remove(port, i); 6786 if (eth_port_multicast_addr_list_set(port_id) < 0) 6787 /* Rollback on failure, add the address back into the pool */ 6788 mcast_addr_pool_append(port, mc_addr); 6789 } 6790 6791 void 6792 port_dcb_info_display(portid_t port_id) 6793 { 6794 struct rte_eth_dcb_info dcb_info; 6795 uint16_t i; 6796 int ret; 6797 static const char *border = "================"; 6798 6799 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6800 return; 6801 6802 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6803 if (ret) { 6804 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6805 port_id); 6806 return; 6807 } 6808 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6809 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6810 printf("\n TC : "); 6811 for (i = 0; i < dcb_info.nb_tcs; i++) 6812 printf("\t%4d", i); 6813 printf("\n Priority : "); 6814 for (i = 0; i < dcb_info.nb_tcs; i++) 6815 printf("\t%4d", dcb_info.prio_tc[i]); 6816 printf("\n BW percent :"); 6817 for (i = 0; i < dcb_info.nb_tcs; i++) 6818 printf("\t%4d%%", dcb_info.tc_bws[i]); 6819 printf("\n RXQ base : "); 6820 for (i = 0; i < dcb_info.nb_tcs; i++) 6821 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6822 printf("\n RXQ number :"); 6823 for (i = 0; i < dcb_info.nb_tcs; i++) 6824 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6825 printf("\n TXQ base : "); 6826 for (i = 0; i < dcb_info.nb_tcs; i++) 6827 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6828 printf("\n TXQ number :"); 6829 for (i = 0; i < dcb_info.nb_tcs; i++) 6830 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6831 printf("\n"); 6832 } 6833 6834 uint8_t * 6835 open_file(const char *file_path, uint32_t *size) 6836 { 6837 int fd = open(file_path, O_RDONLY); 6838 off_t pkg_size; 6839 uint8_t *buf = NULL; 6840 int ret = 0; 6841 struct stat st_buf; 6842 6843 if (size) 6844 *size = 0; 6845 6846 if (fd == -1) { 6847 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6848 return buf; 6849 } 6850 6851 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6852 close(fd); 6853 fprintf(stderr, "%s: File operations failed\n", __func__); 6854 return buf; 6855 } 6856 6857 pkg_size = st_buf.st_size; 6858 if (pkg_size < 0) { 6859 close(fd); 6860 fprintf(stderr, "%s: File operations failed\n", __func__); 6861 return buf; 6862 } 6863 6864 buf = (uint8_t *)malloc(pkg_size); 6865 if (!buf) { 6866 close(fd); 6867 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6868 return buf; 6869 } 6870 6871 ret = read(fd, buf, pkg_size); 6872 if (ret < 0) { 6873 close(fd); 6874 fprintf(stderr, "%s: File read operation failed\n", __func__); 6875 close_file(buf); 6876 return NULL; 6877 } 6878 6879 if (size) 6880 *size = pkg_size; 6881 6882 close(fd); 6883 6884 return buf; 6885 } 6886 6887 int 6888 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6889 { 6890 FILE *fh = fopen(file_path, "wb"); 6891 6892 if (fh == NULL) { 6893 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6894 return -1; 6895 } 6896 6897 if (fwrite(buf, 1, size, fh) != size) { 6898 fclose(fh); 6899 fprintf(stderr, "%s: File write operation failed\n", __func__); 6900 return -1; 6901 } 6902 6903 fclose(fh); 6904 6905 return 0; 6906 } 6907 6908 int 6909 close_file(uint8_t *buf) 6910 { 6911 if (buf) { 6912 free((void *)buf); 6913 return 0; 6914 } 6915 6916 return -1; 6917 } 6918 6919 void 6920 show_macs(portid_t port_id) 6921 { 6922 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6923 struct rte_eth_dev_info dev_info; 6924 int32_t i, rc, num_macs = 0; 6925 6926 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6927 return; 6928 6929 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6930 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6931 if (rc < 0) 6932 return; 6933 6934 for (i = 0; i < rc; i++) { 6935 6936 /* skip zero address */ 6937 if (rte_is_zero_ether_addr(&addr[i])) 6938 continue; 6939 6940 num_macs++; 6941 } 6942 6943 printf("Number of MAC address added: %d\n", num_macs); 6944 6945 for (i = 0; i < rc; i++) { 6946 6947 /* skip zero address */ 6948 if (rte_is_zero_ether_addr(&addr[i])) 6949 continue; 6950 6951 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6952 printf(" %s\n", buf); 6953 } 6954 } 6955 6956 void 6957 show_mcast_macs(portid_t port_id) 6958 { 6959 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6960 struct rte_ether_addr *addr; 6961 struct rte_port *port; 6962 uint32_t i; 6963 6964 port = &ports[port_id]; 6965 6966 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6967 6968 for (i = 0; i < port->mc_addr_nb; i++) { 6969 addr = &port->mc_addr_pool[i]; 6970 6971 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6972 printf(" %s\n", buf); 6973 } 6974 } 6975