1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <ctype.h> 7 #include <stdarg.h> 8 #include <errno.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <stdint.h> 13 #include <inttypes.h> 14 15 #include <sys/queue.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <fcntl.h> 19 #include <unistd.h> 20 21 #include <rte_common.h> 22 #include <rte_byteorder.h> 23 #include <rte_debug.h> 24 #include <rte_log.h> 25 #include <rte_memory.h> 26 #include <rte_memcpy.h> 27 #include <rte_memzone.h> 28 #include <rte_launch.h> 29 #include <rte_bus.h> 30 #include <rte_eal.h> 31 #include <rte_per_lcore.h> 32 #include <rte_lcore.h> 33 #include <rte_branch_prediction.h> 34 #include <rte_mempool.h> 35 #include <rte_mbuf.h> 36 #include <rte_interrupts.h> 37 #include <rte_ether.h> 38 #include <rte_ethdev.h> 39 #include <rte_string_fns.h> 40 #include <rte_cycles.h> 41 #include <rte_flow.h> 42 #include <rte_mtr.h> 43 #include <rte_errno.h> 44 #ifdef RTE_NET_IXGBE 45 #include <rte_pmd_ixgbe.h> 46 #endif 47 #ifdef RTE_NET_I40E 48 #include <rte_pmd_i40e.h> 49 #endif 50 #ifdef RTE_NET_BNXT 51 #include <rte_pmd_bnxt.h> 52 #endif 53 #ifdef RTE_LIB_GRO 54 #include <rte_gro.h> 55 #endif 56 #include <rte_hexdump.h> 57 58 #include "testpmd.h" 59 #include "cmdline_mtr.h" 60 61 #define ETHDEV_FWVERS_LEN 32 62 63 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 65 #else 66 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 67 #endif 68 69 #define NS_PER_SEC 1E9 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 /* Group types */ 91 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 92 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 93 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 94 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 95 { "none", 0 }, 96 { "ip", RTE_ETH_RSS_IP }, 97 { "udp", RTE_ETH_RSS_UDP }, 98 { "tcp", RTE_ETH_RSS_TCP }, 99 { "sctp", RTE_ETH_RSS_SCTP }, 100 { "tunnel", RTE_ETH_RSS_TUNNEL }, 101 { "vlan", RTE_ETH_RSS_VLAN }, 102 103 /* Individual type */ 104 { "ipv4", RTE_ETH_RSS_IPV4 }, 105 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 106 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 107 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 108 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 109 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 110 { "ipv6", RTE_ETH_RSS_IPV6 }, 111 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 112 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 113 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 114 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 115 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 116 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 117 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 118 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 119 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 120 { "port", RTE_ETH_RSS_PORT }, 121 { "vxlan", RTE_ETH_RSS_VXLAN }, 122 { "geneve", RTE_ETH_RSS_GENEVE }, 123 { "nvgre", RTE_ETH_RSS_NVGRE }, 124 { "gtpu", RTE_ETH_RSS_GTPU }, 125 { "eth", RTE_ETH_RSS_ETH }, 126 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 127 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 128 { "esp", RTE_ETH_RSS_ESP }, 129 { "ah", RTE_ETH_RSS_AH }, 130 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 131 { "pfcp", RTE_ETH_RSS_PFCP }, 132 { "pppoe", RTE_ETH_RSS_PPPOE }, 133 { "ecpri", RTE_ETH_RSS_ECPRI }, 134 { "mpls", RTE_ETH_RSS_MPLS }, 135 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 136 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 137 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 138 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 139 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 140 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 141 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 142 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 143 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 144 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 145 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 146 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 147 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 148 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 149 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 150 { NULL, 0}, 151 }; 152 153 static const struct { 154 enum rte_eth_fec_mode mode; 155 const char *name; 156 } fec_mode_name[] = { 157 { 158 .mode = RTE_ETH_FEC_NOFEC, 159 .name = "off", 160 }, 161 { 162 .mode = RTE_ETH_FEC_AUTO, 163 .name = "auto", 164 }, 165 { 166 .mode = RTE_ETH_FEC_BASER, 167 .name = "baser", 168 }, 169 { 170 .mode = RTE_ETH_FEC_RS, 171 .name = "rs", 172 }, 173 { 174 .mode = RTE_ETH_FEC_LLRS, 175 .name = "llrs", 176 }, 177 }; 178 179 static const struct { 180 char str[32]; 181 uint16_t ftype; 182 } flowtype_str_table[] = { 183 {"raw", RTE_ETH_FLOW_RAW}, 184 {"ipv4", RTE_ETH_FLOW_IPV4}, 185 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 186 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 187 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 188 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 189 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 190 {"ipv6", RTE_ETH_FLOW_IPV6}, 191 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 192 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 193 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 194 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 195 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 196 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 197 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 198 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 199 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 200 {"port", RTE_ETH_FLOW_PORT}, 201 {"vxlan", RTE_ETH_FLOW_VXLAN}, 202 {"geneve", RTE_ETH_FLOW_GENEVE}, 203 {"nvgre", RTE_ETH_FLOW_NVGRE}, 204 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 205 {"gtpu", RTE_ETH_FLOW_GTPU}, 206 }; 207 208 static void 209 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 210 { 211 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 212 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 213 printf("%s%s", name, buf); 214 } 215 216 static void 217 nic_xstats_display_periodic(portid_t port_id) 218 { 219 struct xstat_display_info *xstats_info; 220 uint64_t *prev_values, *curr_values; 221 uint64_t diff_value, value_rate; 222 struct timespec cur_time; 223 uint64_t *ids_supp; 224 size_t ids_supp_sz; 225 uint64_t diff_ns; 226 unsigned int i; 227 int rc; 228 229 xstats_info = &ports[port_id].xstats_info; 230 231 ids_supp_sz = xstats_info->ids_supp_sz; 232 if (ids_supp_sz == 0) 233 return; 234 235 printf("\n"); 236 237 ids_supp = xstats_info->ids_supp; 238 prev_values = xstats_info->prev_values; 239 curr_values = xstats_info->curr_values; 240 241 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 242 ids_supp_sz); 243 if (rc != (int)ids_supp_sz) { 244 fprintf(stderr, 245 "Failed to get values of %zu xstats for port %u - return code %d\n", 246 ids_supp_sz, port_id, rc); 247 return; 248 } 249 250 diff_ns = 0; 251 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 252 uint64_t ns; 253 254 ns = cur_time.tv_sec * NS_PER_SEC; 255 ns += cur_time.tv_nsec; 256 257 if (xstats_info->prev_ns != 0) 258 diff_ns = ns - xstats_info->prev_ns; 259 xstats_info->prev_ns = ns; 260 } 261 262 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 263 for (i = 0; i < ids_supp_sz; i++) { 264 diff_value = (curr_values[i] > prev_values[i]) ? 265 (curr_values[i] - prev_values[i]) : 0; 266 prev_values[i] = curr_values[i]; 267 value_rate = diff_ns > 0 ? 268 (double)diff_value / diff_ns * NS_PER_SEC : 0; 269 270 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 271 xstats_display[i].name, curr_values[i], value_rate); 272 } 273 } 274 275 void 276 nic_stats_display(portid_t port_id) 277 { 278 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 279 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 280 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 281 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 282 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 283 struct timespec cur_time; 284 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 285 diff_ns; 286 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 287 struct rte_eth_stats stats; 288 static const char *nic_stats_border = "########################"; 289 int ret; 290 291 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 292 print_valid_ports(); 293 return; 294 } 295 ret = rte_eth_stats_get(port_id, &stats); 296 if (ret != 0) { 297 fprintf(stderr, 298 "%s: Error: failed to get stats (port %u): %d", 299 __func__, port_id, ret); 300 return; 301 } 302 printf("\n %s NIC statistics for port %-2d %s\n", 303 nic_stats_border, port_id, nic_stats_border); 304 305 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 306 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 307 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 308 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 309 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 310 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 311 312 diff_ns = 0; 313 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 314 uint64_t ns; 315 316 ns = cur_time.tv_sec * NS_PER_SEC; 317 ns += cur_time.tv_nsec; 318 319 if (prev_ns[port_id] != 0) 320 diff_ns = ns - prev_ns[port_id]; 321 prev_ns[port_id] = ns; 322 } 323 324 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 325 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 326 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 327 (stats.opackets - prev_pkts_tx[port_id]) : 0; 328 prev_pkts_rx[port_id] = stats.ipackets; 329 prev_pkts_tx[port_id] = stats.opackets; 330 mpps_rx = diff_ns > 0 ? 331 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 332 mpps_tx = diff_ns > 0 ? 333 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 334 335 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 336 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 337 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 338 (stats.obytes - prev_bytes_tx[port_id]) : 0; 339 prev_bytes_rx[port_id] = stats.ibytes; 340 prev_bytes_tx[port_id] = stats.obytes; 341 mbps_rx = diff_ns > 0 ? 342 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 343 mbps_tx = diff_ns > 0 ? 344 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 345 346 printf("\n Throughput (since last show)\n"); 347 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 348 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 349 mpps_tx, mbps_tx * 8); 350 351 if (xstats_display_num > 0) 352 nic_xstats_display_periodic(port_id); 353 354 printf(" %s############################%s\n", 355 nic_stats_border, nic_stats_border); 356 } 357 358 void 359 nic_stats_clear(portid_t port_id) 360 { 361 int ret; 362 363 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 364 print_valid_ports(); 365 return; 366 } 367 368 ret = rte_eth_stats_reset(port_id); 369 if (ret != 0) { 370 fprintf(stderr, 371 "%s: Error: failed to reset stats (port %u): %s", 372 __func__, port_id, strerror(-ret)); 373 return; 374 } 375 376 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 377 if (ret != 0) { 378 if (ret < 0) 379 ret = -ret; 380 fprintf(stderr, 381 "%s: Error: failed to get stats (port %u): %s", 382 __func__, port_id, strerror(ret)); 383 return; 384 } 385 printf("\n NIC statistics for port %d cleared\n", port_id); 386 } 387 388 void 389 nic_xstats_display(portid_t port_id) 390 { 391 struct rte_eth_xstat *xstats; 392 int cnt_xstats, idx_xstat; 393 struct rte_eth_xstat_name *xstats_names; 394 395 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 396 print_valid_ports(); 397 return; 398 } 399 printf("###### NIC extended statistics for port %-2d\n", port_id); 400 if (!rte_eth_dev_is_valid_port(port_id)) { 401 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 402 return; 403 } 404 405 /* Get count */ 406 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 407 if (cnt_xstats < 0) { 408 fprintf(stderr, "Error: Cannot get count of xstats\n"); 409 return; 410 } 411 412 /* Get id-name lookup table */ 413 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 414 if (xstats_names == NULL) { 415 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 416 return; 417 } 418 if (cnt_xstats != rte_eth_xstats_get_names( 419 port_id, xstats_names, cnt_xstats)) { 420 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 421 free(xstats_names); 422 return; 423 } 424 425 /* Get stats themselves */ 426 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 427 if (xstats == NULL) { 428 fprintf(stderr, "Cannot allocate memory for xstats\n"); 429 free(xstats_names); 430 return; 431 } 432 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 433 fprintf(stderr, "Error: Unable to get xstats\n"); 434 free(xstats_names); 435 free(xstats); 436 return; 437 } 438 439 /* Display xstats */ 440 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 441 if (xstats_hide_zero && !xstats[idx_xstat].value) 442 continue; 443 printf("%s: %"PRIu64"\n", 444 xstats_names[idx_xstat].name, 445 xstats[idx_xstat].value); 446 } 447 free(xstats_names); 448 free(xstats); 449 } 450 451 void 452 nic_xstats_clear(portid_t port_id) 453 { 454 int ret; 455 456 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 457 print_valid_ports(); 458 return; 459 } 460 461 ret = rte_eth_xstats_reset(port_id); 462 if (ret != 0) { 463 fprintf(stderr, 464 "%s: Error: failed to reset xstats (port %u): %s\n", 465 __func__, port_id, strerror(-ret)); 466 return; 467 } 468 469 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 470 if (ret != 0) { 471 if (ret < 0) 472 ret = -ret; 473 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 474 __func__, port_id, strerror(ret)); 475 return; 476 } 477 } 478 479 static const char * 480 get_queue_state_name(uint8_t queue_state) 481 { 482 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 483 return "stopped"; 484 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 485 return "started"; 486 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 487 return "hairpin"; 488 else 489 return "unknown"; 490 } 491 492 void 493 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 494 { 495 struct rte_eth_burst_mode mode; 496 struct rte_eth_rxq_info qinfo; 497 int32_t rc; 498 static const char *info_border = "*********************"; 499 500 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 501 if (rc != 0) { 502 fprintf(stderr, 503 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 504 port_id, queue_id, strerror(-rc), rc); 505 return; 506 } 507 508 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 509 info_border, port_id, queue_id, info_border); 510 511 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 512 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 513 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 514 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 515 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 516 printf("\nRX drop packets: %s", 517 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 518 printf("\nRX deferred start: %s", 519 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 520 printf("\nRX scattered packets: %s", 521 (qinfo.scattered_rx != 0) ? "on" : "off"); 522 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 523 if (qinfo.rx_buf_size != 0) 524 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 525 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 526 527 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 528 printf("\nBurst mode: %s%s", 529 mode.info, 530 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 531 " (per queue)" : ""); 532 533 printf("\n"); 534 } 535 536 void 537 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 538 { 539 struct rte_eth_burst_mode mode; 540 struct rte_eth_txq_info qinfo; 541 int32_t rc; 542 static const char *info_border = "*********************"; 543 544 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 545 if (rc != 0) { 546 fprintf(stderr, 547 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 548 port_id, queue_id, strerror(-rc), rc); 549 return; 550 } 551 552 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 553 info_border, port_id, queue_id, info_border); 554 555 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 556 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 557 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 558 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 559 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 560 printf("\nTX deferred start: %s", 561 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 562 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 563 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 564 565 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 566 printf("\nBurst mode: %s%s", 567 mode.info, 568 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 569 " (per queue)" : ""); 570 571 printf("\n"); 572 } 573 574 static int bus_match_all(const struct rte_bus *bus, const void *data) 575 { 576 RTE_SET_USED(bus); 577 RTE_SET_USED(data); 578 return 0; 579 } 580 581 static void 582 device_infos_display_speeds(uint32_t speed_capa) 583 { 584 printf("\n\tDevice speed capability:"); 585 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 586 printf(" Autonegotiate (all speeds)"); 587 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 588 printf(" Disable autonegotiate (fixed speed) "); 589 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 590 printf(" 10 Mbps half-duplex "); 591 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 592 printf(" 10 Mbps full-duplex "); 593 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 594 printf(" 100 Mbps half-duplex "); 595 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 596 printf(" 100 Mbps full-duplex "); 597 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 598 printf(" 1 Gbps "); 599 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 600 printf(" 2.5 Gbps "); 601 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 602 printf(" 5 Gbps "); 603 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 604 printf(" 10 Gbps "); 605 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 606 printf(" 20 Gbps "); 607 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 608 printf(" 25 Gbps "); 609 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 610 printf(" 40 Gbps "); 611 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 612 printf(" 50 Gbps "); 613 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 614 printf(" 56 Gbps "); 615 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 616 printf(" 100 Gbps "); 617 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 618 printf(" 200 Gbps "); 619 if (speed_capa & RTE_ETH_LINK_SPEED_400G) 620 printf(" 400 Gbps "); 621 } 622 623 void 624 device_infos_display(const char *identifier) 625 { 626 static const char *info_border = "*********************"; 627 struct rte_bus *start = NULL, *next; 628 struct rte_dev_iterator dev_iter; 629 char name[RTE_ETH_NAME_MAX_LEN]; 630 struct rte_ether_addr mac_addr; 631 struct rte_device *dev; 632 struct rte_devargs da; 633 portid_t port_id; 634 struct rte_eth_dev_info dev_info; 635 char devstr[128]; 636 637 memset(&da, 0, sizeof(da)); 638 if (!identifier) 639 goto skip_parse; 640 641 if (rte_devargs_parsef(&da, "%s", identifier)) { 642 fprintf(stderr, "cannot parse identifier\n"); 643 return; 644 } 645 646 skip_parse: 647 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 648 649 start = next; 650 if (identifier && da.bus != next) 651 continue; 652 653 snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next)); 654 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 655 656 if (rte_dev_driver(dev) == NULL) 657 continue; 658 /* Check for matching device if identifier is present */ 659 if (identifier && 660 strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev)))) 661 continue; 662 printf("\n%s Infos for device %s %s\n", 663 info_border, rte_dev_name(dev), info_border); 664 printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev))); 665 printf("\nBus information: %s", 666 rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : ""); 667 printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev))); 668 printf("\nDevargs: %s", 669 rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : ""); 670 printf("\nConnect to socket: %d", rte_dev_numa_node(dev)); 671 printf("\n"); 672 673 /* List ports with matching device name */ 674 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 675 printf("\n\tPort id: %-2d", port_id); 676 if (eth_macaddr_get_print_err(port_id, 677 &mac_addr) == 0) 678 print_ethaddr("\n\tMAC address: ", 679 &mac_addr); 680 rte_eth_dev_get_name_by_port(port_id, name); 681 printf("\n\tDevice name: %s", name); 682 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 683 device_infos_display_speeds(dev_info.speed_capa); 684 printf("\n"); 685 } 686 } 687 }; 688 rte_devargs_reset(&da); 689 } 690 691 static void 692 print_dev_capabilities(uint64_t capabilities) 693 { 694 uint64_t single_capa; 695 int begin; 696 int end; 697 int bit; 698 699 if (capabilities == 0) 700 return; 701 702 begin = rte_ctz64(capabilities); 703 end = sizeof(capabilities) * CHAR_BIT - rte_clz64(capabilities); 704 705 single_capa = 1ULL << begin; 706 for (bit = begin; bit < end; bit++) { 707 if (capabilities & single_capa) 708 printf(" %s", 709 rte_eth_dev_capability_name(single_capa)); 710 single_capa <<= 1; 711 } 712 } 713 714 uint64_t 715 str_to_rsstypes(const char *str) 716 { 717 uint16_t i; 718 719 for (i = 0; rss_type_table[i].str != NULL; i++) { 720 if (strcmp(rss_type_table[i].str, str) == 0) 721 return rss_type_table[i].rss_type; 722 } 723 724 return 0; 725 } 726 727 const char * 728 rsstypes_to_str(uint64_t rss_type) 729 { 730 uint16_t i; 731 732 for (i = 0; rss_type_table[i].str != NULL; i++) { 733 if (rss_type_table[i].rss_type == rss_type) 734 return rss_type_table[i].str; 735 } 736 737 return NULL; 738 } 739 740 static void 741 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line) 742 { 743 uint16_t user_defined_str_len; 744 uint16_t total_len = 0; 745 uint16_t str_len = 0; 746 uint64_t rss_offload; 747 uint16_t i; 748 749 for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) { 750 rss_offload = RTE_BIT64(i); 751 if ((offload_types & rss_offload) != 0) { 752 const char *p = rsstypes_to_str(rss_offload); 753 754 user_defined_str_len = 755 strlen("user-defined-") + (i / 10 + 1); 756 str_len = p ? strlen(p) : user_defined_str_len; 757 str_len += 2; /* add two spaces */ 758 if (total_len + str_len >= char_num_per_line) { 759 total_len = 0; 760 printf("\n"); 761 } 762 763 if (p) 764 printf(" %s", p); 765 else 766 printf(" user-defined-%u", i); 767 total_len += str_len; 768 } 769 } 770 printf("\n"); 771 } 772 773 void 774 port_infos_display(portid_t port_id) 775 { 776 struct rte_port *port; 777 struct rte_ether_addr mac_addr; 778 struct rte_eth_link link; 779 struct rte_eth_dev_info dev_info; 780 int vlan_offload; 781 struct rte_mempool * mp; 782 static const char *info_border = "*********************"; 783 uint16_t mtu; 784 char name[RTE_ETH_NAME_MAX_LEN]; 785 int ret; 786 char fw_version[ETHDEV_FWVERS_LEN]; 787 788 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 789 print_valid_ports(); 790 return; 791 } 792 port = &ports[port_id]; 793 ret = eth_link_get_nowait_print_err(port_id, &link); 794 if (ret < 0) 795 return; 796 797 ret = eth_dev_info_get_print_err(port_id, &dev_info); 798 if (ret != 0) 799 return; 800 801 printf("\n%s Infos for port %-2d %s\n", 802 info_border, port_id, info_border); 803 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 804 print_ethaddr("MAC address: ", &mac_addr); 805 rte_eth_dev_get_name_by_port(port_id, name); 806 printf("\nDevice name: %s", name); 807 printf("\nDriver name: %s", dev_info.driver_name); 808 809 if (rte_eth_dev_fw_version_get(port_id, fw_version, 810 ETHDEV_FWVERS_LEN) == 0) 811 printf("\nFirmware-version: %s", fw_version); 812 else 813 printf("\nFirmware-version: %s", "not available"); 814 815 if (rte_dev_devargs(dev_info.device) && rte_dev_devargs(dev_info.device)->args) 816 printf("\nDevargs: %s", rte_dev_devargs(dev_info.device)->args); 817 printf("\nConnect to socket: %u", port->socket_id); 818 819 if (port_numa[port_id] != NUMA_NO_CONFIG) { 820 mp = mbuf_pool_find(port_numa[port_id], 0); 821 if (mp) 822 printf("\nmemory allocation on the socket: %d", 823 port_numa[port_id]); 824 } else 825 printf("\nmemory allocation on the socket: %u",port->socket_id); 826 827 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 828 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 829 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 830 ("full-duplex") : ("half-duplex")); 831 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 832 ("On") : ("Off")); 833 834 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 835 printf("MTU: %u\n", mtu); 836 837 printf("Promiscuous mode: %s\n", 838 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 839 printf("Allmulticast mode: %s\n", 840 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 841 printf("Maximum number of MAC addresses: %u\n", 842 (unsigned int)(port->dev_info.max_mac_addrs)); 843 printf("Maximum number of MAC addresses of hash filtering: %u\n", 844 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 845 846 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 847 if (vlan_offload >= 0){ 848 printf("VLAN offload: \n"); 849 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 850 printf(" strip on, "); 851 else 852 printf(" strip off, "); 853 854 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 855 printf("filter on, "); 856 else 857 printf("filter off, "); 858 859 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 860 printf("extend on, "); 861 else 862 printf("extend off, "); 863 864 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 865 printf("qinq strip on\n"); 866 else 867 printf("qinq strip off\n"); 868 } 869 870 if (dev_info.hash_key_size > 0) 871 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 872 if (dev_info.reta_size > 0) 873 printf("Redirection table size: %u\n", dev_info.reta_size); 874 if (!dev_info.flow_type_rss_offloads) 875 printf("No RSS offload flow type is supported.\n"); 876 else { 877 printf("Supported RSS offload flow types:\n"); 878 rss_offload_types_display(dev_info.flow_type_rss_offloads, 879 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 880 } 881 882 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 883 printf("Maximum configurable length of RX packet: %u\n", 884 dev_info.max_rx_pktlen); 885 printf("Maximum configurable size of LRO aggregated packet: %u\n", 886 dev_info.max_lro_pkt_size); 887 if (dev_info.max_vfs) 888 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 889 if (dev_info.max_vmdq_pools) 890 printf("Maximum number of VMDq pools: %u\n", 891 dev_info.max_vmdq_pools); 892 893 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 894 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 895 printf("Max possible number of RXDs per queue: %hu\n", 896 dev_info.rx_desc_lim.nb_max); 897 printf("Min possible number of RXDs per queue: %hu\n", 898 dev_info.rx_desc_lim.nb_min); 899 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 900 901 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 902 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 903 printf("Max possible number of TXDs per queue: %hu\n", 904 dev_info.tx_desc_lim.nb_max); 905 printf("Min possible number of TXDs per queue: %hu\n", 906 dev_info.tx_desc_lim.nb_min); 907 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 908 printf("Max segment number per packet: %hu\n", 909 dev_info.tx_desc_lim.nb_seg_max); 910 printf("Max segment number per MTU/TSO: %hu\n", 911 dev_info.tx_desc_lim.nb_mtu_seg_max); 912 913 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 914 print_dev_capabilities(dev_info.dev_capa); 915 printf(" )\n"); 916 /* Show switch info only if valid switch domain and port id is set */ 917 if (dev_info.switch_info.domain_id != 918 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 919 if (dev_info.switch_info.name) 920 printf("Switch name: %s\n", dev_info.switch_info.name); 921 922 printf("Switch domain Id: %u\n", 923 dev_info.switch_info.domain_id); 924 printf("Switch Port Id: %u\n", 925 dev_info.switch_info.port_id); 926 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 927 printf("Switch Rx domain: %u\n", 928 dev_info.switch_info.rx_domain); 929 } 930 printf("Device error handling mode: "); 931 switch (dev_info.err_handle_mode) { 932 case RTE_ETH_ERROR_HANDLE_MODE_NONE: 933 printf("none\n"); 934 break; 935 case RTE_ETH_ERROR_HANDLE_MODE_PASSIVE: 936 printf("passive\n"); 937 break; 938 case RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE: 939 printf("proactive\n"); 940 break; 941 default: 942 printf("unknown\n"); 943 break; 944 } 945 printf("Device private info:\n"); 946 ret = rte_eth_dev_priv_dump(port_id, stdout); 947 if (ret == -ENOTSUP) 948 printf(" none\n"); 949 else if (ret < 0) 950 fprintf(stderr, " Failed to dump private info with error (%d): %s\n", 951 ret, strerror(-ret)); 952 } 953 954 void 955 port_summary_header_display(void) 956 { 957 uint16_t port_number; 958 959 port_number = rte_eth_dev_count_avail(); 960 printf("Number of available ports: %i\n", port_number); 961 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 962 "Driver", "Status", "Link"); 963 } 964 965 void 966 port_summary_display(portid_t port_id) 967 { 968 struct rte_ether_addr mac_addr; 969 struct rte_eth_link link; 970 struct rte_eth_dev_info dev_info; 971 char name[RTE_ETH_NAME_MAX_LEN]; 972 int ret; 973 974 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 975 print_valid_ports(); 976 return; 977 } 978 979 ret = eth_link_get_nowait_print_err(port_id, &link); 980 if (ret < 0) 981 return; 982 983 ret = eth_dev_info_get_print_err(port_id, &dev_info); 984 if (ret != 0) 985 return; 986 987 rte_eth_dev_get_name_by_port(port_id, name); 988 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 989 if (ret != 0) 990 return; 991 992 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 993 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 994 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 995 rte_eth_link_speed_to_str(link.link_speed)); 996 } 997 998 void 999 port_eeprom_display(portid_t port_id) 1000 { 1001 struct rte_dev_eeprom_info einfo; 1002 int ret; 1003 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1004 print_valid_ports(); 1005 return; 1006 } 1007 1008 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 1009 if (len_eeprom < 0) { 1010 switch (len_eeprom) { 1011 case -ENODEV: 1012 fprintf(stderr, "port index %d invalid\n", port_id); 1013 break; 1014 case -ENOTSUP: 1015 fprintf(stderr, "operation not supported by device\n"); 1016 break; 1017 case -EIO: 1018 fprintf(stderr, "device is removed\n"); 1019 break; 1020 default: 1021 fprintf(stderr, "Unable to get EEPROM: %d\n", 1022 len_eeprom); 1023 break; 1024 } 1025 return; 1026 } 1027 1028 einfo.offset = 0; 1029 einfo.length = len_eeprom; 1030 einfo.data = calloc(1, len_eeprom); 1031 if (!einfo.data) { 1032 fprintf(stderr, 1033 "Allocation of port %u eeprom data failed\n", 1034 port_id); 1035 return; 1036 } 1037 1038 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1039 if (ret != 0) { 1040 switch (ret) { 1041 case -ENODEV: 1042 fprintf(stderr, "port index %d invalid\n", port_id); 1043 break; 1044 case -ENOTSUP: 1045 fprintf(stderr, "operation not supported by device\n"); 1046 break; 1047 case -EIO: 1048 fprintf(stderr, "device is removed\n"); 1049 break; 1050 default: 1051 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1052 break; 1053 } 1054 free(einfo.data); 1055 return; 1056 } 1057 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1058 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1059 free(einfo.data); 1060 } 1061 1062 void 1063 port_module_eeprom_display(portid_t port_id) 1064 { 1065 struct rte_eth_dev_module_info minfo; 1066 struct rte_dev_eeprom_info einfo; 1067 int ret; 1068 1069 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1070 print_valid_ports(); 1071 return; 1072 } 1073 1074 1075 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1076 if (ret != 0) { 1077 switch (ret) { 1078 case -ENODEV: 1079 fprintf(stderr, "port index %d invalid\n", port_id); 1080 break; 1081 case -ENOTSUP: 1082 fprintf(stderr, "operation not supported by device\n"); 1083 break; 1084 case -EIO: 1085 fprintf(stderr, "device is removed\n"); 1086 break; 1087 default: 1088 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1089 ret); 1090 break; 1091 } 1092 return; 1093 } 1094 1095 einfo.offset = 0; 1096 einfo.length = minfo.eeprom_len; 1097 einfo.data = calloc(1, minfo.eeprom_len); 1098 if (!einfo.data) { 1099 fprintf(stderr, 1100 "Allocation of port %u eeprom data failed\n", 1101 port_id); 1102 return; 1103 } 1104 1105 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1106 if (ret != 0) { 1107 switch (ret) { 1108 case -ENODEV: 1109 fprintf(stderr, "port index %d invalid\n", port_id); 1110 break; 1111 case -ENOTSUP: 1112 fprintf(stderr, "operation not supported by device\n"); 1113 break; 1114 case -EIO: 1115 fprintf(stderr, "device is removed\n"); 1116 break; 1117 default: 1118 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1119 ret); 1120 break; 1121 } 1122 free(einfo.data); 1123 return; 1124 } 1125 1126 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1127 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1128 free(einfo.data); 1129 } 1130 1131 int 1132 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1133 { 1134 uint16_t pid; 1135 1136 if (port_id == (portid_t)RTE_PORT_ALL) 1137 return 0; 1138 1139 RTE_ETH_FOREACH_DEV(pid) 1140 if (port_id == pid) 1141 return 0; 1142 1143 if (warning == ENABLED_WARN) 1144 fprintf(stderr, "Invalid port %d\n", port_id); 1145 1146 return 1; 1147 } 1148 1149 void print_valid_ports(void) 1150 { 1151 portid_t pid; 1152 1153 printf("The valid ports array is ["); 1154 RTE_ETH_FOREACH_DEV(pid) { 1155 printf(" %d", pid); 1156 } 1157 printf(" ]\n"); 1158 } 1159 1160 static int 1161 vlan_id_is_invalid(uint16_t vlan_id) 1162 { 1163 if (vlan_id < 4096) 1164 return 0; 1165 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1166 return 1; 1167 } 1168 1169 static uint32_t 1170 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1171 { 1172 uint32_t overhead_len; 1173 1174 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1175 overhead_len = max_rx_pktlen - max_mtu; 1176 else 1177 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1178 1179 return overhead_len; 1180 } 1181 1182 static int 1183 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1184 { 1185 struct rte_eth_dev_info dev_info; 1186 uint32_t overhead_len; 1187 uint32_t frame_size; 1188 int ret; 1189 1190 ret = rte_eth_dev_info_get(port_id, &dev_info); 1191 if (ret != 0) 1192 return ret; 1193 1194 if (mtu < dev_info.min_mtu) { 1195 fprintf(stderr, 1196 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1197 mtu, dev_info.min_mtu, port_id); 1198 return -EINVAL; 1199 } 1200 if (mtu > dev_info.max_mtu) { 1201 fprintf(stderr, 1202 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1203 mtu, dev_info.max_mtu, port_id); 1204 return -EINVAL; 1205 } 1206 1207 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1208 dev_info.max_mtu); 1209 frame_size = mtu + overhead_len; 1210 if (frame_size > dev_info.max_rx_pktlen) { 1211 fprintf(stderr, 1212 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1213 frame_size, dev_info.max_rx_pktlen, port_id); 1214 return -EINVAL; 1215 } 1216 1217 return 0; 1218 } 1219 1220 void 1221 port_mtu_set(portid_t port_id, uint16_t mtu) 1222 { 1223 struct rte_port *port = &ports[port_id]; 1224 int diag; 1225 1226 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1227 return; 1228 1229 diag = eth_dev_validate_mtu(port_id, mtu); 1230 if (diag != 0) 1231 return; 1232 1233 if (port->need_reconfig == 0) { 1234 diag = rte_eth_dev_set_mtu(port_id, mtu); 1235 if (diag != 0) { 1236 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1237 return; 1238 } 1239 } 1240 1241 port->dev_conf.rxmode.mtu = mtu; 1242 } 1243 1244 /* Generic flow management functions. */ 1245 1246 static struct port_flow_tunnel * 1247 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1248 { 1249 struct port_flow_tunnel *flow_tunnel; 1250 1251 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1252 if (flow_tunnel->id == port_tunnel_id) 1253 goto out; 1254 } 1255 flow_tunnel = NULL; 1256 1257 out: 1258 return flow_tunnel; 1259 } 1260 1261 const char * 1262 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1263 { 1264 const char *type; 1265 switch (tunnel->type) { 1266 default: 1267 type = "unknown"; 1268 break; 1269 case RTE_FLOW_ITEM_TYPE_VXLAN: 1270 type = "vxlan"; 1271 break; 1272 case RTE_FLOW_ITEM_TYPE_GRE: 1273 type = "gre"; 1274 break; 1275 case RTE_FLOW_ITEM_TYPE_NVGRE: 1276 type = "nvgre"; 1277 break; 1278 case RTE_FLOW_ITEM_TYPE_GENEVE: 1279 type = "geneve"; 1280 break; 1281 } 1282 1283 return type; 1284 } 1285 1286 struct port_flow_tunnel * 1287 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1288 { 1289 struct rte_port *port = &ports[port_id]; 1290 struct port_flow_tunnel *flow_tunnel; 1291 1292 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1293 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1294 goto out; 1295 } 1296 flow_tunnel = NULL; 1297 1298 out: 1299 return flow_tunnel; 1300 } 1301 1302 void port_flow_tunnel_list(portid_t port_id) 1303 { 1304 struct rte_port *port = &ports[port_id]; 1305 struct port_flow_tunnel *flt; 1306 1307 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1308 printf("port %u tunnel #%u type=%s", 1309 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1310 if (flt->tunnel.tun_id) 1311 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1312 printf("\n"); 1313 } 1314 } 1315 1316 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1317 { 1318 struct rte_port *port = &ports[port_id]; 1319 struct port_flow_tunnel *flt; 1320 1321 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1322 if (flt->id == tunnel_id) 1323 break; 1324 } 1325 if (flt) { 1326 LIST_REMOVE(flt, chain); 1327 free(flt); 1328 printf("port %u: flow tunnel #%u destroyed\n", 1329 port_id, tunnel_id); 1330 } 1331 } 1332 1333 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1334 { 1335 struct rte_port *port = &ports[port_id]; 1336 enum rte_flow_item_type type; 1337 struct port_flow_tunnel *flt; 1338 1339 if (!strcmp(ops->type, "vxlan")) 1340 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1341 else if (!strcmp(ops->type, "gre")) 1342 type = RTE_FLOW_ITEM_TYPE_GRE; 1343 else if (!strcmp(ops->type, "nvgre")) 1344 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1345 else if (!strcmp(ops->type, "geneve")) 1346 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1347 else { 1348 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1349 ops->type); 1350 return; 1351 } 1352 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1353 if (flt->tunnel.type == type) 1354 break; 1355 } 1356 if (!flt) { 1357 flt = calloc(1, sizeof(*flt)); 1358 if (!flt) { 1359 fprintf(stderr, "failed to allocate port flt object\n"); 1360 return; 1361 } 1362 flt->tunnel.type = type; 1363 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1364 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1365 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1366 } 1367 printf("port %d: flow tunnel #%u type %s\n", 1368 port_id, flt->id, ops->type); 1369 } 1370 1371 /** Generate a port_flow entry from attributes/pattern/actions. */ 1372 static struct port_flow * 1373 port_flow_new(const struct rte_flow_attr *attr, 1374 const struct rte_flow_item *pattern, 1375 const struct rte_flow_action *actions, 1376 struct rte_flow_error *error) 1377 { 1378 const struct rte_flow_conv_rule rule = { 1379 .attr_ro = attr, 1380 .pattern_ro = pattern, 1381 .actions_ro = actions, 1382 }; 1383 struct port_flow *pf; 1384 int ret; 1385 1386 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1387 if (ret < 0) 1388 return NULL; 1389 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1390 if (!pf) { 1391 rte_flow_error_set 1392 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1393 "calloc() failed"); 1394 return NULL; 1395 } 1396 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1397 error) >= 0) 1398 return pf; 1399 free(pf); 1400 return NULL; 1401 } 1402 1403 /** Print a message out of a flow error. */ 1404 static int 1405 port_flow_complain(struct rte_flow_error *error) 1406 { 1407 static const char *const errstrlist[] = { 1408 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1409 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1410 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1411 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1412 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1413 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1414 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1415 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1416 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1417 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1418 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1419 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1420 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1421 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1422 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1423 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1424 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1425 }; 1426 const char *errstr; 1427 char buf[32]; 1428 int err = rte_errno; 1429 1430 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1431 !errstrlist[error->type]) 1432 errstr = "unknown type"; 1433 else 1434 errstr = errstrlist[error->type]; 1435 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1436 __func__, error->type, errstr, 1437 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1438 error->cause), buf) : "", 1439 error->message ? error->message : "(no stated reason)", 1440 rte_strerror(err)); 1441 1442 switch (error->type) { 1443 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1444 fprintf(stderr, "The status suggests the use of \"transfer\" " 1445 "as the possible cause of the failure. Make " 1446 "sure that the flow in question and its " 1447 "indirect components (if any) are managed " 1448 "via \"transfer\" proxy port. Use command " 1449 "\"show port (port_id) flow transfer proxy\" " 1450 "to figure out the proxy port ID\n"); 1451 break; 1452 default: 1453 break; 1454 } 1455 1456 return -err; 1457 } 1458 1459 static void 1460 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line) 1461 { 1462 uint16_t total_len = 0; 1463 uint16_t str_len; 1464 uint16_t i; 1465 1466 if (rss_types == 0) 1467 return; 1468 1469 for (i = 0; rss_type_table[i].str; i++) { 1470 if (rss_type_table[i].rss_type == 0) 1471 continue; 1472 1473 if ((rss_types & rss_type_table[i].rss_type) == 1474 rss_type_table[i].rss_type) { 1475 /* Contain two spaces */ 1476 str_len = strlen(rss_type_table[i].str) + 2; 1477 if (total_len + str_len > char_num_per_line) { 1478 printf("\n"); 1479 total_len = 0; 1480 } 1481 printf(" %s", rss_type_table[i].str); 1482 total_len += str_len; 1483 } 1484 } 1485 printf("\n"); 1486 } 1487 1488 static void 1489 rss_config_display(struct rte_flow_action_rss *rss_conf) 1490 { 1491 uint8_t i; 1492 1493 if (rss_conf == NULL) { 1494 fprintf(stderr, "Invalid rule\n"); 1495 return; 1496 } 1497 1498 printf("RSS:\n" 1499 " queues:"); 1500 if (rss_conf->queue_num == 0) 1501 printf(" none"); 1502 for (i = 0; i < rss_conf->queue_num; i++) 1503 printf(" %d", rss_conf->queue[i]); 1504 printf("\n"); 1505 1506 printf(" function: "); 1507 switch (rss_conf->func) { 1508 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1509 printf("default\n"); 1510 break; 1511 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1512 printf("toeplitz\n"); 1513 break; 1514 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1515 printf("simple_xor\n"); 1516 break; 1517 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1518 printf("symmetric_toeplitz\n"); 1519 break; 1520 default: 1521 printf("Unknown function\n"); 1522 return; 1523 } 1524 1525 printf(" RSS key:\n"); 1526 if (rss_conf->key_len == 0) { 1527 printf(" none"); 1528 } else { 1529 printf(" key_len: %u\n", rss_conf->key_len); 1530 printf(" key: "); 1531 if (rss_conf->key == NULL) { 1532 printf("none"); 1533 } else { 1534 for (i = 0; i < rss_conf->key_len; i++) 1535 printf("%02X", rss_conf->key[i]); 1536 } 1537 } 1538 printf("\n"); 1539 1540 printf(" types:\n"); 1541 if (rss_conf->types == 0) { 1542 printf(" none\n"); 1543 return; 1544 } 1545 rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 1546 } 1547 1548 static struct port_indirect_action * 1549 action_get_by_id(portid_t port_id, uint32_t id) 1550 { 1551 struct rte_port *port; 1552 struct port_indirect_action **ppia; 1553 struct port_indirect_action *pia = NULL; 1554 1555 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1556 port_id == (portid_t)RTE_PORT_ALL) 1557 return NULL; 1558 port = &ports[port_id]; 1559 ppia = &port->actions_list; 1560 while (*ppia) { 1561 if ((*ppia)->id == id) { 1562 pia = *ppia; 1563 break; 1564 } 1565 ppia = &(*ppia)->next; 1566 } 1567 if (!pia) 1568 fprintf(stderr, 1569 "Failed to find indirect action #%u on port %u\n", 1570 id, port_id); 1571 return pia; 1572 } 1573 1574 static int 1575 action_alloc(portid_t port_id, uint32_t id, 1576 struct port_indirect_action **action) 1577 { 1578 struct rte_port *port; 1579 struct port_indirect_action **ppia; 1580 struct port_indirect_action *pia = NULL; 1581 1582 *action = NULL; 1583 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1584 port_id == (portid_t)RTE_PORT_ALL) 1585 return -EINVAL; 1586 port = &ports[port_id]; 1587 if (id == UINT32_MAX) { 1588 /* taking first available ID */ 1589 if (port->actions_list) { 1590 if (port->actions_list->id == UINT32_MAX - 1) { 1591 fprintf(stderr, 1592 "Highest indirect action ID is already assigned, delete it first\n"); 1593 return -ENOMEM; 1594 } 1595 id = port->actions_list->id + 1; 1596 } else { 1597 id = 0; 1598 } 1599 } 1600 pia = calloc(1, sizeof(*pia)); 1601 if (!pia) { 1602 fprintf(stderr, 1603 "Allocation of port %u indirect action failed\n", 1604 port_id); 1605 return -ENOMEM; 1606 } 1607 ppia = &port->actions_list; 1608 while (*ppia && (*ppia)->id > id) 1609 ppia = &(*ppia)->next; 1610 if (*ppia && (*ppia)->id == id) { 1611 fprintf(stderr, 1612 "Indirect action #%u is already assigned, delete it first\n", 1613 id); 1614 free(pia); 1615 return -EINVAL; 1616 } 1617 pia->next = *ppia; 1618 pia->id = id; 1619 *ppia = pia; 1620 *action = pia; 1621 return 0; 1622 } 1623 1624 static int 1625 template_alloc(uint32_t id, struct port_template **template, 1626 struct port_template **list) 1627 { 1628 struct port_template *lst = *list; 1629 struct port_template **ppt; 1630 struct port_template *pt = NULL; 1631 1632 *template = NULL; 1633 if (id == UINT32_MAX) { 1634 /* taking first available ID */ 1635 if (lst) { 1636 if (lst->id == UINT32_MAX - 1) { 1637 printf("Highest template ID is already" 1638 " assigned, delete it first\n"); 1639 return -ENOMEM; 1640 } 1641 id = lst->id + 1; 1642 } else { 1643 id = 0; 1644 } 1645 } 1646 pt = calloc(1, sizeof(*pt)); 1647 if (!pt) { 1648 printf("Allocation of port template failed\n"); 1649 return -ENOMEM; 1650 } 1651 ppt = list; 1652 while (*ppt && (*ppt)->id > id) 1653 ppt = &(*ppt)->next; 1654 if (*ppt && (*ppt)->id == id) { 1655 printf("Template #%u is already assigned," 1656 " delete it first\n", id); 1657 free(pt); 1658 return -EINVAL; 1659 } 1660 pt->next = *ppt; 1661 pt->id = id; 1662 *ppt = pt; 1663 *template = pt; 1664 return 0; 1665 } 1666 1667 static int 1668 table_alloc(uint32_t id, struct port_table **table, 1669 struct port_table **list) 1670 { 1671 struct port_table *lst = *list; 1672 struct port_table **ppt; 1673 struct port_table *pt = NULL; 1674 1675 *table = NULL; 1676 if (id == UINT32_MAX) { 1677 /* taking first available ID */ 1678 if (lst) { 1679 if (lst->id == UINT32_MAX - 1) { 1680 printf("Highest table ID is already" 1681 " assigned, delete it first\n"); 1682 return -ENOMEM; 1683 } 1684 id = lst->id + 1; 1685 } else { 1686 id = 0; 1687 } 1688 } 1689 pt = calloc(1, sizeof(*pt)); 1690 if (!pt) { 1691 printf("Allocation of table failed\n"); 1692 return -ENOMEM; 1693 } 1694 ppt = list; 1695 while (*ppt && (*ppt)->id > id) 1696 ppt = &(*ppt)->next; 1697 if (*ppt && (*ppt)->id == id) { 1698 printf("Table #%u is already assigned," 1699 " delete it first\n", id); 1700 free(pt); 1701 return -EINVAL; 1702 } 1703 pt->next = *ppt; 1704 pt->id = id; 1705 *ppt = pt; 1706 *table = pt; 1707 return 0; 1708 } 1709 1710 /** Get info about flow management resources. */ 1711 int 1712 port_flow_get_info(portid_t port_id) 1713 { 1714 struct rte_flow_port_info port_info; 1715 struct rte_flow_queue_info queue_info; 1716 struct rte_flow_error error; 1717 1718 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1719 port_id == (portid_t)RTE_PORT_ALL) 1720 return -EINVAL; 1721 /* Poisoning to make sure PMDs update it in case of error. */ 1722 memset(&error, 0x99, sizeof(error)); 1723 memset(&port_info, 0, sizeof(port_info)); 1724 memset(&queue_info, 0, sizeof(queue_info)); 1725 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1726 return port_flow_complain(&error); 1727 printf("Flow engine resources on port %u:\n" 1728 "Number of queues: %d\n" 1729 "Size of queues: %d\n" 1730 "Number of counters: %d\n" 1731 "Number of aging objects: %d\n" 1732 "Number of meter actions: %d\n", 1733 port_id, port_info.max_nb_queues, 1734 queue_info.max_size, 1735 port_info.max_nb_counters, 1736 port_info.max_nb_aging_objects, 1737 port_info.max_nb_meters); 1738 return 0; 1739 } 1740 1741 /** Configure flow management resources. */ 1742 int 1743 port_flow_configure(portid_t port_id, 1744 const struct rte_flow_port_attr *port_attr, 1745 uint16_t nb_queue, 1746 const struct rte_flow_queue_attr *queue_attr) 1747 { 1748 struct rte_port *port; 1749 struct rte_flow_error error; 1750 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1751 int std_queue; 1752 1753 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1754 port_id == (portid_t)RTE_PORT_ALL) 1755 return -EINVAL; 1756 port = &ports[port_id]; 1757 port->queue_nb = nb_queue; 1758 port->queue_sz = queue_attr->size; 1759 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1760 attr_list[std_queue] = queue_attr; 1761 /* Poisoning to make sure PMDs update it in case of error. */ 1762 memset(&error, 0x66, sizeof(error)); 1763 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1764 return port_flow_complain(&error); 1765 printf("Configure flows on port %u: " 1766 "number of queues %d with %d elements\n", 1767 port_id, nb_queue, queue_attr->size); 1768 return 0; 1769 } 1770 1771 static int 1772 action_handle_create(portid_t port_id, 1773 struct port_indirect_action *pia, 1774 const struct rte_flow_indir_action_conf *conf, 1775 const struct rte_flow_action *action, 1776 struct rte_flow_error *error) 1777 { 1778 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1779 struct rte_flow_action_age *age = 1780 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1781 1782 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1783 age->context = &pia->age_type; 1784 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1785 struct rte_flow_action_conntrack *ct = 1786 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1787 1788 memcpy(ct, &conntrack_context, sizeof(*ct)); 1789 } 1790 pia->type = action->type; 1791 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1792 error); 1793 return pia->handle ? 0 : -1; 1794 } 1795 1796 static int 1797 action_list_handle_create(portid_t port_id, 1798 struct port_indirect_action *pia, 1799 const struct rte_flow_indir_action_conf *conf, 1800 const struct rte_flow_action *actions, 1801 struct rte_flow_error *error) 1802 { 1803 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 1804 pia->list_handle = 1805 rte_flow_action_list_handle_create(port_id, conf, 1806 actions, error); 1807 return pia->list_handle ? 0 : -1; 1808 } 1809 /** Create indirect action */ 1810 int 1811 port_action_handle_create(portid_t port_id, uint32_t id, bool indirect_list, 1812 const struct rte_flow_indir_action_conf *conf, 1813 const struct rte_flow_action *action) 1814 { 1815 struct port_indirect_action *pia; 1816 int ret; 1817 struct rte_flow_error error; 1818 1819 ret = action_alloc(port_id, id, &pia); 1820 if (ret) 1821 return ret; 1822 /* Poisoning to make sure PMDs update it in case of error. */ 1823 memset(&error, 0x22, sizeof(error)); 1824 ret = indirect_list ? 1825 action_list_handle_create(port_id, pia, conf, action, &error) : 1826 action_handle_create(port_id, pia, conf, action, &error); 1827 if (ret) { 1828 uint32_t destroy_id = pia->id; 1829 port_action_handle_destroy(port_id, 1, &destroy_id); 1830 return port_flow_complain(&error); 1831 } 1832 printf("Indirect action #%u created\n", pia->id); 1833 return 0; 1834 } 1835 1836 /** Destroy indirect action */ 1837 int 1838 port_action_handle_destroy(portid_t port_id, 1839 uint32_t n, 1840 const uint32_t *actions) 1841 { 1842 struct rte_port *port; 1843 struct port_indirect_action **tmp; 1844 int ret = 0; 1845 1846 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1847 port_id == (portid_t)RTE_PORT_ALL) 1848 return -EINVAL; 1849 port = &ports[port_id]; 1850 tmp = &port->actions_list; 1851 while (*tmp) { 1852 uint32_t i; 1853 1854 for (i = 0; i != n; ++i) { 1855 struct rte_flow_error error; 1856 struct port_indirect_action *pia = *tmp; 1857 1858 if (actions[i] != pia->id) 1859 continue; 1860 /* 1861 * Poisoning to make sure PMDs update it in case 1862 * of error. 1863 */ 1864 memset(&error, 0x33, sizeof(error)); 1865 1866 if (pia->handle) { 1867 ret = pia->type == 1868 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1869 rte_flow_action_list_handle_destroy 1870 (port_id, pia->list_handle, &error) : 1871 rte_flow_action_handle_destroy 1872 (port_id, pia->handle, &error); 1873 if (ret) { 1874 ret = port_flow_complain(&error); 1875 continue; 1876 } 1877 } 1878 *tmp = pia->next; 1879 printf("Indirect action #%u destroyed\n", pia->id); 1880 free(pia); 1881 break; 1882 } 1883 if (i == n) 1884 tmp = &(*tmp)->next; 1885 } 1886 return ret; 1887 } 1888 1889 int 1890 port_action_handle_flush(portid_t port_id) 1891 { 1892 struct rte_port *port; 1893 struct port_indirect_action **tmp; 1894 int ret = 0; 1895 1896 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1897 port_id == (portid_t)RTE_PORT_ALL) 1898 return -EINVAL; 1899 port = &ports[port_id]; 1900 tmp = &port->actions_list; 1901 while (*tmp != NULL) { 1902 struct rte_flow_error error; 1903 struct port_indirect_action *pia = *tmp; 1904 1905 /* Poisoning to make sure PMDs update it in case of error. */ 1906 memset(&error, 0x44, sizeof(error)); 1907 if (pia->handle != NULL) { 1908 ret = pia->type == 1909 RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 1910 rte_flow_action_list_handle_destroy 1911 (port_id, pia->list_handle, &error) : 1912 rte_flow_action_handle_destroy 1913 (port_id, pia->handle, &error); 1914 if (ret) { 1915 printf("Indirect action #%u not destroyed\n", 1916 pia->id); 1917 ret = port_flow_complain(&error); 1918 } 1919 tmp = &pia->next; 1920 } else { 1921 *tmp = pia->next; 1922 free(pia); 1923 } 1924 } 1925 return ret; 1926 } 1927 1928 /** Get indirect action by port + id */ 1929 struct rte_flow_action_handle * 1930 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1931 { 1932 1933 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1934 1935 return (pia) ? pia->handle : NULL; 1936 } 1937 1938 /** Update indirect action */ 1939 int 1940 port_action_handle_update(portid_t port_id, uint32_t id, 1941 const struct rte_flow_action *action) 1942 { 1943 struct rte_flow_error error; 1944 struct rte_flow_action_handle *action_handle; 1945 struct port_indirect_action *pia; 1946 struct rte_flow_update_meter_mark mtr_update; 1947 const void *update; 1948 1949 action_handle = port_action_handle_get_by_id(port_id, id); 1950 if (!action_handle) 1951 return -EINVAL; 1952 pia = action_get_by_id(port_id, id); 1953 if (!pia) 1954 return -EINVAL; 1955 switch (pia->type) { 1956 case RTE_FLOW_ACTION_TYPE_AGE: 1957 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1958 update = action->conf; 1959 break; 1960 case RTE_FLOW_ACTION_TYPE_METER_MARK: 1961 memcpy(&mtr_update.meter_mark, action->conf, 1962 sizeof(struct rte_flow_action_meter_mark)); 1963 if (mtr_update.meter_mark.profile) 1964 mtr_update.profile_valid = 1; 1965 if (mtr_update.meter_mark.policy) 1966 mtr_update.policy_valid = 1; 1967 mtr_update.color_mode_valid = 1; 1968 mtr_update.state_valid = 1; 1969 update = &mtr_update; 1970 break; 1971 default: 1972 update = action; 1973 break; 1974 } 1975 if (rte_flow_action_handle_update(port_id, action_handle, update, 1976 &error)) { 1977 return port_flow_complain(&error); 1978 } 1979 printf("Indirect action #%u updated\n", id); 1980 return 0; 1981 } 1982 1983 static void 1984 port_action_handle_query_dump(portid_t port_id, 1985 const struct port_indirect_action *pia, 1986 union port_action_query *query) 1987 { 1988 if (!pia || !query) 1989 return; 1990 switch (pia->type) { 1991 case RTE_FLOW_ACTION_TYPE_AGE: 1992 printf("Indirect AGE action:\n" 1993 " aged: %u\n" 1994 " sec_since_last_hit_valid: %u\n" 1995 " sec_since_last_hit: %" PRIu32 "\n", 1996 query->age.aged, 1997 query->age.sec_since_last_hit_valid, 1998 query->age.sec_since_last_hit); 1999 break; 2000 case RTE_FLOW_ACTION_TYPE_COUNT: 2001 printf("Indirect COUNT action:\n" 2002 " hits_set: %u\n" 2003 " bytes_set: %u\n" 2004 " hits: %" PRIu64 "\n" 2005 " bytes: %" PRIu64 "\n", 2006 query->count.hits_set, 2007 query->count.bytes_set, 2008 query->count.hits, 2009 query->count.bytes); 2010 break; 2011 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 2012 printf("Conntrack Context:\n" 2013 " Peer: %u, Flow dir: %s, Enable: %u\n" 2014 " Live: %u, SACK: %u, CACK: %u\n" 2015 " Packet dir: %s, Liberal: %u, State: %u\n" 2016 " Factor: %u, Retrans: %u, TCP flags: %u\n" 2017 " Last Seq: %u, Last ACK: %u\n" 2018 " Last Win: %u, Last End: %u\n", 2019 query->ct.peer_port, 2020 query->ct.is_original_dir ? "Original" : "Reply", 2021 query->ct.enable, query->ct.live_connection, 2022 query->ct.selective_ack, query->ct.challenge_ack_passed, 2023 query->ct.last_direction ? "Original" : "Reply", 2024 query->ct.liberal_mode, query->ct.state, 2025 query->ct.max_ack_window, query->ct.retransmission_limit, 2026 query->ct.last_index, query->ct.last_seq, 2027 query->ct.last_ack, query->ct.last_window, 2028 query->ct.last_end); 2029 printf(" Original Dir:\n" 2030 " scale: %u, fin: %u, ack seen: %u\n" 2031 " unacked data: %u\n Sent end: %u," 2032 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2033 query->ct.original_dir.scale, 2034 query->ct.original_dir.close_initiated, 2035 query->ct.original_dir.last_ack_seen, 2036 query->ct.original_dir.data_unacked, 2037 query->ct.original_dir.sent_end, 2038 query->ct.original_dir.reply_end, 2039 query->ct.original_dir.max_win, 2040 query->ct.original_dir.max_ack); 2041 printf(" Reply Dir:\n" 2042 " scale: %u, fin: %u, ack seen: %u\n" 2043 " unacked data: %u\n Sent end: %u," 2044 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2045 query->ct.reply_dir.scale, 2046 query->ct.reply_dir.close_initiated, 2047 query->ct.reply_dir.last_ack_seen, 2048 query->ct.reply_dir.data_unacked, 2049 query->ct.reply_dir.sent_end, 2050 query->ct.reply_dir.reply_end, 2051 query->ct.reply_dir.max_win, 2052 query->ct.reply_dir.max_ack); 2053 break; 2054 case RTE_FLOW_ACTION_TYPE_QUOTA: 2055 printf("Indirect QUOTA action %u\n" 2056 " unused quota: %" PRId64 "\n", 2057 pia->id, query->quota.quota); 2058 break; 2059 default: 2060 printf("port-%u: indirect action %u (type: %d) doesn't support query\n", 2061 pia->type, pia->id, port_id); 2062 break; 2063 } 2064 2065 } 2066 2067 void 2068 port_action_handle_query_update(portid_t port_id, uint32_t id, 2069 enum rte_flow_query_update_mode qu_mode, 2070 const struct rte_flow_action *action) 2071 { 2072 int ret; 2073 struct rte_flow_error error; 2074 struct port_indirect_action *pia; 2075 union port_action_query query; 2076 2077 pia = action_get_by_id(port_id, id); 2078 if (!pia || !pia->handle) 2079 return; 2080 ret = rte_flow_action_handle_query_update(port_id, pia->handle, action, 2081 &query, qu_mode, &error); 2082 if (ret) 2083 port_flow_complain(&error); 2084 else 2085 port_action_handle_query_dump(port_id, pia, &query); 2086 2087 } 2088 2089 int 2090 port_action_handle_query(portid_t port_id, uint32_t id) 2091 { 2092 struct rte_flow_error error; 2093 struct port_indirect_action *pia; 2094 union port_action_query query; 2095 2096 pia = action_get_by_id(port_id, id); 2097 if (!pia) 2098 return -EINVAL; 2099 switch (pia->type) { 2100 case RTE_FLOW_ACTION_TYPE_AGE: 2101 case RTE_FLOW_ACTION_TYPE_COUNT: 2102 case RTE_FLOW_ACTION_TYPE_QUOTA: 2103 break; 2104 default: 2105 fprintf(stderr, 2106 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2107 id, pia->type, port_id); 2108 return -ENOTSUP; 2109 } 2110 /* Poisoning to make sure PMDs update it in case of error. */ 2111 memset(&error, 0x55, sizeof(error)); 2112 memset(&query, 0, sizeof(query)); 2113 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 2114 return port_flow_complain(&error); 2115 port_action_handle_query_dump(port_id, pia, &query); 2116 return 0; 2117 } 2118 2119 static struct port_flow_tunnel * 2120 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 2121 const struct rte_flow_item *pattern, 2122 const struct rte_flow_action *actions, 2123 const struct tunnel_ops *tunnel_ops) 2124 { 2125 int ret; 2126 struct rte_port *port; 2127 struct port_flow_tunnel *pft; 2128 struct rte_flow_error error; 2129 2130 port = &ports[port_id]; 2131 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2132 if (!pft) { 2133 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2134 tunnel_ops->id); 2135 return NULL; 2136 } 2137 if (tunnel_ops->actions) { 2138 uint32_t num_actions; 2139 const struct rte_flow_action *aptr; 2140 2141 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2142 &pft->pmd_actions, 2143 &pft->num_pmd_actions, 2144 &error); 2145 if (ret) { 2146 port_flow_complain(&error); 2147 return NULL; 2148 } 2149 for (aptr = actions, num_actions = 1; 2150 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2151 aptr++, num_actions++); 2152 pft->actions = malloc( 2153 (num_actions + pft->num_pmd_actions) * 2154 sizeof(actions[0])); 2155 if (!pft->actions) { 2156 rte_flow_tunnel_action_decap_release( 2157 port_id, pft->actions, 2158 pft->num_pmd_actions, &error); 2159 return NULL; 2160 } 2161 rte_memcpy(pft->actions, pft->pmd_actions, 2162 pft->num_pmd_actions * sizeof(actions[0])); 2163 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2164 num_actions * sizeof(actions[0])); 2165 } 2166 if (tunnel_ops->items) { 2167 uint32_t num_items; 2168 const struct rte_flow_item *iptr; 2169 2170 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2171 &pft->pmd_items, 2172 &pft->num_pmd_items, 2173 &error); 2174 if (ret) { 2175 port_flow_complain(&error); 2176 return NULL; 2177 } 2178 for (iptr = pattern, num_items = 1; 2179 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2180 iptr++, num_items++); 2181 pft->items = malloc((num_items + pft->num_pmd_items) * 2182 sizeof(pattern[0])); 2183 if (!pft->items) { 2184 rte_flow_tunnel_item_release( 2185 port_id, pft->pmd_items, 2186 pft->num_pmd_items, &error); 2187 return NULL; 2188 } 2189 rte_memcpy(pft->items, pft->pmd_items, 2190 pft->num_pmd_items * sizeof(pattern[0])); 2191 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2192 num_items * sizeof(pattern[0])); 2193 } 2194 2195 return pft; 2196 } 2197 2198 static void 2199 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2200 const struct tunnel_ops *tunnel_ops, 2201 struct port_flow_tunnel *pft) 2202 { 2203 struct rte_flow_error error; 2204 2205 if (tunnel_ops->actions) { 2206 free(pft->actions); 2207 rte_flow_tunnel_action_decap_release( 2208 port_id, pft->pmd_actions, 2209 pft->num_pmd_actions, &error); 2210 pft->actions = NULL; 2211 pft->pmd_actions = NULL; 2212 } 2213 if (tunnel_ops->items) { 2214 free(pft->items); 2215 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2216 pft->num_pmd_items, 2217 &error); 2218 pft->items = NULL; 2219 pft->pmd_items = NULL; 2220 } 2221 } 2222 2223 /** Add port meter policy */ 2224 int 2225 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2226 const struct rte_flow_action *actions) 2227 { 2228 struct rte_mtr_error error; 2229 const struct rte_flow_action *act = actions; 2230 const struct rte_flow_action *start; 2231 struct rte_mtr_meter_policy_params policy; 2232 uint32_t i = 0, act_n; 2233 int ret; 2234 2235 for (i = 0; i < RTE_COLORS; i++) { 2236 for (act_n = 0, start = act; 2237 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2238 act_n++; 2239 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2240 policy.actions[i] = start; 2241 else 2242 policy.actions[i] = NULL; 2243 act++; 2244 } 2245 ret = rte_mtr_meter_policy_add(port_id, 2246 policy_id, 2247 &policy, &error); 2248 if (ret) 2249 print_mtr_err_msg(&error); 2250 return ret; 2251 } 2252 2253 struct rte_flow_meter_profile * 2254 port_meter_profile_get_by_id(portid_t port_id, uint32_t id) 2255 { 2256 struct rte_mtr_error error; 2257 struct rte_flow_meter_profile *profile; 2258 2259 profile = rte_mtr_meter_profile_get(port_id, id, &error); 2260 if (!profile) 2261 print_mtr_err_msg(&error); 2262 return profile; 2263 } 2264 struct rte_flow_meter_policy * 2265 port_meter_policy_get_by_id(portid_t port_id, uint32_t id) 2266 { 2267 struct rte_mtr_error error; 2268 struct rte_flow_meter_policy *policy; 2269 2270 policy = rte_mtr_meter_policy_get(port_id, id, &error); 2271 if (!policy) 2272 print_mtr_err_msg(&error); 2273 return policy; 2274 } 2275 2276 /** Validate flow rule. */ 2277 int 2278 port_flow_validate(portid_t port_id, 2279 const struct rte_flow_attr *attr, 2280 const struct rte_flow_item *pattern, 2281 const struct rte_flow_action *actions, 2282 const struct tunnel_ops *tunnel_ops) 2283 { 2284 struct rte_flow_error error; 2285 struct port_flow_tunnel *pft = NULL; 2286 int ret; 2287 2288 /* Poisoning to make sure PMDs update it in case of error. */ 2289 memset(&error, 0x11, sizeof(error)); 2290 if (tunnel_ops->enabled) { 2291 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2292 actions, tunnel_ops); 2293 if (!pft) 2294 return -ENOENT; 2295 if (pft->items) 2296 pattern = pft->items; 2297 if (pft->actions) 2298 actions = pft->actions; 2299 } 2300 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2301 if (tunnel_ops->enabled) 2302 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2303 if (ret) 2304 return port_flow_complain(&error); 2305 printf("Flow rule validated\n"); 2306 return 0; 2307 } 2308 2309 /** Return age action structure if exists, otherwise NULL. */ 2310 static struct rte_flow_action_age * 2311 age_action_get(const struct rte_flow_action *actions) 2312 { 2313 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2314 switch (actions->type) { 2315 case RTE_FLOW_ACTION_TYPE_AGE: 2316 return (struct rte_flow_action_age *) 2317 (uintptr_t)actions->conf; 2318 default: 2319 break; 2320 } 2321 } 2322 return NULL; 2323 } 2324 2325 /** Create pattern template */ 2326 int 2327 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2328 const struct rte_flow_pattern_template_attr *attr, 2329 const struct rte_flow_item *pattern) 2330 { 2331 struct rte_port *port; 2332 struct port_template *pit; 2333 int ret; 2334 struct rte_flow_error error; 2335 2336 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2337 port_id == (portid_t)RTE_PORT_ALL) 2338 return -EINVAL; 2339 port = &ports[port_id]; 2340 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2341 if (ret) 2342 return ret; 2343 /* Poisoning to make sure PMDs update it in case of error. */ 2344 memset(&error, 0x22, sizeof(error)); 2345 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2346 attr, pattern, &error); 2347 if (!pit->template.pattern_template) { 2348 uint32_t destroy_id = pit->id; 2349 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2350 return port_flow_complain(&error); 2351 } 2352 printf("Pattern template #%u created\n", pit->id); 2353 return 0; 2354 } 2355 2356 /** Destroy pattern template */ 2357 int 2358 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2359 const uint32_t *template) 2360 { 2361 struct rte_port *port; 2362 struct port_template **tmp; 2363 int ret = 0; 2364 2365 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2366 port_id == (portid_t)RTE_PORT_ALL) 2367 return -EINVAL; 2368 port = &ports[port_id]; 2369 tmp = &port->pattern_templ_list; 2370 while (*tmp) { 2371 uint32_t i; 2372 2373 for (i = 0; i != n; ++i) { 2374 struct rte_flow_error error; 2375 struct port_template *pit = *tmp; 2376 2377 if (template[i] != pit->id) 2378 continue; 2379 /* 2380 * Poisoning to make sure PMDs update it in case 2381 * of error. 2382 */ 2383 memset(&error, 0x33, sizeof(error)); 2384 2385 if (pit->template.pattern_template && 2386 rte_flow_pattern_template_destroy(port_id, 2387 pit->template.pattern_template, 2388 &error)) { 2389 ret = port_flow_complain(&error); 2390 continue; 2391 } 2392 *tmp = pit->next; 2393 printf("Pattern template #%u destroyed\n", pit->id); 2394 free(pit); 2395 break; 2396 } 2397 if (i == n) 2398 tmp = &(*tmp)->next; 2399 } 2400 return ret; 2401 } 2402 2403 /** Flush pattern template */ 2404 int 2405 port_flow_pattern_template_flush(portid_t port_id) 2406 { 2407 struct rte_port *port; 2408 struct port_template **tmp; 2409 int ret = 0; 2410 2411 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2412 port_id == (portid_t)RTE_PORT_ALL) 2413 return -EINVAL; 2414 port = &ports[port_id]; 2415 tmp = &port->pattern_templ_list; 2416 while (*tmp) { 2417 struct rte_flow_error error; 2418 struct port_template *pit = *tmp; 2419 2420 /* 2421 * Poisoning to make sure PMDs update it in case 2422 * of error. 2423 */ 2424 memset(&error, 0x33, sizeof(error)); 2425 if (pit->template.pattern_template && 2426 rte_flow_pattern_template_destroy(port_id, 2427 pit->template.pattern_template, &error)) { 2428 printf("Pattern template #%u not destroyed\n", pit->id); 2429 ret = port_flow_complain(&error); 2430 tmp = &pit->next; 2431 } else { 2432 *tmp = pit->next; 2433 free(pit); 2434 } 2435 } 2436 return ret; 2437 } 2438 2439 /** Create actions template */ 2440 int 2441 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2442 const struct rte_flow_actions_template_attr *attr, 2443 const struct rte_flow_action *actions, 2444 const struct rte_flow_action *masks) 2445 { 2446 struct rte_port *port; 2447 struct port_template *pat; 2448 int ret; 2449 struct rte_flow_error error; 2450 2451 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2452 port_id == (portid_t)RTE_PORT_ALL) 2453 return -EINVAL; 2454 port = &ports[port_id]; 2455 ret = template_alloc(id, &pat, &port->actions_templ_list); 2456 if (ret) 2457 return ret; 2458 /* Poisoning to make sure PMDs update it in case of error. */ 2459 memset(&error, 0x22, sizeof(error)); 2460 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2461 attr, actions, masks, &error); 2462 if (!pat->template.actions_template) { 2463 uint32_t destroy_id = pat->id; 2464 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2465 return port_flow_complain(&error); 2466 } 2467 printf("Actions template #%u created\n", pat->id); 2468 return 0; 2469 } 2470 2471 /** Destroy actions template */ 2472 int 2473 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2474 const uint32_t *template) 2475 { 2476 struct rte_port *port; 2477 struct port_template **tmp; 2478 int ret = 0; 2479 2480 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2481 port_id == (portid_t)RTE_PORT_ALL) 2482 return -EINVAL; 2483 port = &ports[port_id]; 2484 tmp = &port->actions_templ_list; 2485 while (*tmp) { 2486 uint32_t i; 2487 2488 for (i = 0; i != n; ++i) { 2489 struct rte_flow_error error; 2490 struct port_template *pat = *tmp; 2491 2492 if (template[i] != pat->id) 2493 continue; 2494 /* 2495 * Poisoning to make sure PMDs update it in case 2496 * of error. 2497 */ 2498 memset(&error, 0x33, sizeof(error)); 2499 2500 if (pat->template.actions_template && 2501 rte_flow_actions_template_destroy(port_id, 2502 pat->template.actions_template, &error)) { 2503 ret = port_flow_complain(&error); 2504 continue; 2505 } 2506 *tmp = pat->next; 2507 printf("Actions template #%u destroyed\n", pat->id); 2508 free(pat); 2509 break; 2510 } 2511 if (i == n) 2512 tmp = &(*tmp)->next; 2513 } 2514 return ret; 2515 } 2516 2517 /** Flush actions template */ 2518 int 2519 port_flow_actions_template_flush(portid_t port_id) 2520 { 2521 struct rte_port *port; 2522 struct port_template **tmp; 2523 int ret = 0; 2524 2525 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2526 port_id == (portid_t)RTE_PORT_ALL) 2527 return -EINVAL; 2528 port = &ports[port_id]; 2529 tmp = &port->actions_templ_list; 2530 while (*tmp) { 2531 struct rte_flow_error error; 2532 struct port_template *pat = *tmp; 2533 2534 /* 2535 * Poisoning to make sure PMDs update it in case 2536 * of error. 2537 */ 2538 memset(&error, 0x33, sizeof(error)); 2539 2540 if (pat->template.actions_template && 2541 rte_flow_actions_template_destroy(port_id, 2542 pat->template.actions_template, &error)) { 2543 ret = port_flow_complain(&error); 2544 printf("Actions template #%u not destroyed\n", pat->id); 2545 tmp = &pat->next; 2546 } else { 2547 *tmp = pat->next; 2548 free(pat); 2549 } 2550 } 2551 return ret; 2552 } 2553 2554 /** Create table */ 2555 int 2556 port_flow_template_table_create(portid_t port_id, uint32_t id, 2557 const struct rte_flow_template_table_attr *table_attr, 2558 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2559 uint32_t nb_actions_templates, uint32_t *actions_templates) 2560 { 2561 struct rte_port *port; 2562 struct port_table *pt; 2563 struct port_template *temp = NULL; 2564 int ret; 2565 uint32_t i; 2566 struct rte_flow_error error; 2567 struct rte_flow_pattern_template 2568 *flow_pattern_templates[nb_pattern_templates]; 2569 struct rte_flow_actions_template 2570 *flow_actions_templates[nb_actions_templates]; 2571 2572 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2573 port_id == (portid_t)RTE_PORT_ALL) 2574 return -EINVAL; 2575 port = &ports[port_id]; 2576 for (i = 0; i < nb_pattern_templates; ++i) { 2577 bool found = false; 2578 temp = port->pattern_templ_list; 2579 while (temp) { 2580 if (pattern_templates[i] == temp->id) { 2581 flow_pattern_templates[i] = 2582 temp->template.pattern_template; 2583 found = true; 2584 break; 2585 } 2586 temp = temp->next; 2587 } 2588 if (!found) { 2589 printf("Pattern template #%u is invalid\n", 2590 pattern_templates[i]); 2591 return -EINVAL; 2592 } 2593 } 2594 for (i = 0; i < nb_actions_templates; ++i) { 2595 bool found = false; 2596 temp = port->actions_templ_list; 2597 while (temp) { 2598 if (actions_templates[i] == temp->id) { 2599 flow_actions_templates[i] = 2600 temp->template.actions_template; 2601 found = true; 2602 break; 2603 } 2604 temp = temp->next; 2605 } 2606 if (!found) { 2607 printf("Actions template #%u is invalid\n", 2608 actions_templates[i]); 2609 return -EINVAL; 2610 } 2611 } 2612 ret = table_alloc(id, &pt, &port->table_list); 2613 if (ret) 2614 return ret; 2615 /* Poisoning to make sure PMDs update it in case of error. */ 2616 memset(&error, 0x22, sizeof(error)); 2617 pt->table = rte_flow_template_table_create(port_id, table_attr, 2618 flow_pattern_templates, nb_pattern_templates, 2619 flow_actions_templates, nb_actions_templates, 2620 &error); 2621 2622 if (!pt->table) { 2623 uint32_t destroy_id = pt->id; 2624 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2625 return port_flow_complain(&error); 2626 } 2627 pt->nb_pattern_templates = nb_pattern_templates; 2628 pt->nb_actions_templates = nb_actions_templates; 2629 rte_memcpy(&pt->flow_attr, &table_attr->flow_attr, 2630 sizeof(struct rte_flow_attr)); 2631 printf("Template table #%u created\n", pt->id); 2632 return 0; 2633 } 2634 2635 /** Destroy table */ 2636 int 2637 port_flow_template_table_destroy(portid_t port_id, 2638 uint32_t n, const uint32_t *table) 2639 { 2640 struct rte_port *port; 2641 struct port_table **tmp; 2642 int ret = 0; 2643 2644 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2645 port_id == (portid_t)RTE_PORT_ALL) 2646 return -EINVAL; 2647 port = &ports[port_id]; 2648 tmp = &port->table_list; 2649 while (*tmp) { 2650 uint32_t i; 2651 2652 for (i = 0; i != n; ++i) { 2653 struct rte_flow_error error; 2654 struct port_table *pt = *tmp; 2655 2656 if (table[i] != pt->id) 2657 continue; 2658 /* 2659 * Poisoning to make sure PMDs update it in case 2660 * of error. 2661 */ 2662 memset(&error, 0x33, sizeof(error)); 2663 2664 if (pt->table && 2665 rte_flow_template_table_destroy(port_id, 2666 pt->table, 2667 &error)) { 2668 ret = port_flow_complain(&error); 2669 continue; 2670 } 2671 *tmp = pt->next; 2672 printf("Template table #%u destroyed\n", pt->id); 2673 free(pt); 2674 break; 2675 } 2676 if (i == n) 2677 tmp = &(*tmp)->next; 2678 } 2679 return ret; 2680 } 2681 2682 /** Flush table */ 2683 int 2684 port_flow_template_table_flush(portid_t port_id) 2685 { 2686 struct rte_port *port; 2687 struct port_table **tmp; 2688 int ret = 0; 2689 2690 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2691 port_id == (portid_t)RTE_PORT_ALL) 2692 return -EINVAL; 2693 port = &ports[port_id]; 2694 tmp = &port->table_list; 2695 while (*tmp) { 2696 struct rte_flow_error error; 2697 struct port_table *pt = *tmp; 2698 2699 /* 2700 * Poisoning to make sure PMDs update it in case 2701 * of error. 2702 */ 2703 memset(&error, 0x33, sizeof(error)); 2704 2705 if (pt->table && 2706 rte_flow_template_table_destroy(port_id, 2707 pt->table, 2708 &error)) { 2709 ret = port_flow_complain(&error); 2710 printf("Template table #%u not destroyed\n", pt->id); 2711 tmp = &pt->next; 2712 } else { 2713 *tmp = pt->next; 2714 free(pt); 2715 } 2716 } 2717 return ret; 2718 } 2719 2720 /** Enqueue create flow rule operation. */ 2721 int 2722 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2723 bool postpone, uint32_t table_id, uint32_t rule_idx, 2724 uint32_t pattern_idx, uint32_t actions_idx, 2725 const struct rte_flow_item *pattern, 2726 const struct rte_flow_action *actions) 2727 { 2728 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2729 struct rte_flow *flow; 2730 struct rte_port *port; 2731 struct port_flow *pf; 2732 struct port_table *pt; 2733 uint32_t id = 0; 2734 bool found; 2735 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2736 struct rte_flow_action_age *age = age_action_get(actions); 2737 struct queue_job *job; 2738 2739 port = &ports[port_id]; 2740 if (port->flow_list) { 2741 if (port->flow_list->id == UINT32_MAX) { 2742 printf("Highest rule ID is already assigned," 2743 " delete it first"); 2744 return -ENOMEM; 2745 } 2746 id = port->flow_list->id + 1; 2747 } 2748 2749 if (queue_id >= port->queue_nb) { 2750 printf("Queue #%u is invalid\n", queue_id); 2751 return -EINVAL; 2752 } 2753 2754 found = false; 2755 pt = port->table_list; 2756 while (pt) { 2757 if (table_id == pt->id) { 2758 found = true; 2759 break; 2760 } 2761 pt = pt->next; 2762 } 2763 if (!found) { 2764 printf("Table #%u is invalid\n", table_id); 2765 return -EINVAL; 2766 } 2767 2768 if (pattern_idx >= pt->nb_pattern_templates) { 2769 printf("Pattern template index #%u is invalid," 2770 " %u templates present in the table\n", 2771 pattern_idx, pt->nb_pattern_templates); 2772 return -EINVAL; 2773 } 2774 if (actions_idx >= pt->nb_actions_templates) { 2775 printf("Actions template index #%u is invalid," 2776 " %u templates present in the table\n", 2777 actions_idx, pt->nb_actions_templates); 2778 return -EINVAL; 2779 } 2780 2781 job = calloc(1, sizeof(*job)); 2782 if (!job) { 2783 printf("Queue flow create job allocate failed\n"); 2784 return -ENOMEM; 2785 } 2786 job->type = QUEUE_JOB_TYPE_FLOW_CREATE; 2787 2788 pf = port_flow_new(&pt->flow_attr, pattern, actions, &error); 2789 if (!pf) { 2790 free(job); 2791 return port_flow_complain(&error); 2792 } 2793 if (age) { 2794 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2795 age->context = &pf->age_type; 2796 } 2797 /* Poisoning to make sure PMDs update it in case of error. */ 2798 memset(&error, 0x11, sizeof(error)); 2799 if (rule_idx == UINT32_MAX) 2800 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2801 pattern, pattern_idx, actions, actions_idx, job, &error); 2802 else 2803 flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table, 2804 rule_idx, actions, actions_idx, job, &error); 2805 if (!flow) { 2806 uint64_t flow_id = pf->id; 2807 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2808 free(job); 2809 return port_flow_complain(&error); 2810 } 2811 2812 pf->next = port->flow_list; 2813 pf->id = id; 2814 pf->table = pt; 2815 pf->flow = flow; 2816 job->pf = pf; 2817 port->flow_list = pf; 2818 printf("Flow rule #%"PRIu64" creation enqueued\n", pf->id); 2819 return 0; 2820 } 2821 2822 /** Enqueue number of destroy flow rules operations. */ 2823 int 2824 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2825 bool postpone, uint32_t n, const uint64_t *rule) 2826 { 2827 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2828 struct rte_port *port; 2829 struct port_flow **tmp; 2830 int ret = 0; 2831 struct queue_job *job; 2832 2833 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2834 port_id == (portid_t)RTE_PORT_ALL) 2835 return -EINVAL; 2836 port = &ports[port_id]; 2837 2838 if (queue_id >= port->queue_nb) { 2839 printf("Queue #%u is invalid\n", queue_id); 2840 return -EINVAL; 2841 } 2842 2843 tmp = &port->flow_list; 2844 while (*tmp) { 2845 uint32_t i; 2846 2847 for (i = 0; i != n; ++i) { 2848 struct rte_flow_error error; 2849 struct port_flow *pf = *tmp; 2850 2851 if (rule[i] != pf->id) 2852 continue; 2853 /* 2854 * Poisoning to make sure PMD 2855 * update it in case of error. 2856 */ 2857 memset(&error, 0x33, sizeof(error)); 2858 job = calloc(1, sizeof(*job)); 2859 if (!job) { 2860 printf("Queue flow destroy job allocate failed\n"); 2861 return -ENOMEM; 2862 } 2863 job->type = QUEUE_JOB_TYPE_FLOW_DESTROY; 2864 job->pf = pf; 2865 2866 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2867 pf->flow, job, &error)) { 2868 free(job); 2869 ret = port_flow_complain(&error); 2870 continue; 2871 } 2872 printf("Flow rule #%"PRIu64" destruction enqueued\n", 2873 pf->id); 2874 *tmp = pf->next; 2875 break; 2876 } 2877 if (i == n) 2878 tmp = &(*tmp)->next; 2879 } 2880 return ret; 2881 } 2882 2883 static void 2884 queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2885 struct port_indirect_action *pia, 2886 struct queue_job *job, 2887 const struct rte_flow_op_attr *attr, 2888 const struct rte_flow_indir_action_conf *conf, 2889 const struct rte_flow_action *action, 2890 struct rte_flow_error *error) 2891 { 2892 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2893 struct rte_flow_action_age *age = 2894 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2895 2896 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2897 age->context = &pia->age_type; 2898 } 2899 /* Poisoning to make sure PMDs update it in case of error. */ 2900 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2901 attr, conf, action, 2902 job, error); 2903 pia->type = action->type; 2904 } 2905 2906 static void 2907 queue_action_list_handle_create(portid_t port_id, uint32_t queue_id, 2908 struct port_indirect_action *pia, 2909 struct queue_job *job, 2910 const struct rte_flow_op_attr *attr, 2911 const struct rte_flow_indir_action_conf *conf, 2912 const struct rte_flow_action *action, 2913 struct rte_flow_error *error) 2914 { 2915 /* Poisoning to make sure PMDs update it in case of error. */ 2916 pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST; 2917 pia->list_handle = rte_flow_async_action_list_handle_create 2918 (port_id, queue_id, attr, conf, action, 2919 job, error); 2920 } 2921 2922 /** Enqueue update flow rule operation. */ 2923 int 2924 port_queue_flow_update(portid_t port_id, queueid_t queue_id, 2925 bool postpone, uint32_t rule_idx, uint32_t actions_idx, 2926 const struct rte_flow_action *actions) 2927 { 2928 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2929 struct rte_port *port; 2930 struct port_flow *pf, *uf; 2931 struct port_flow **tmp; 2932 struct port_table *pt; 2933 bool found; 2934 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2935 struct rte_flow_action_age *age = age_action_get(actions); 2936 struct queue_job *job; 2937 2938 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2939 port_id == (portid_t)RTE_PORT_ALL) 2940 return -EINVAL; 2941 port = &ports[port_id]; 2942 2943 if (queue_id >= port->queue_nb) { 2944 printf("Queue #%u is invalid\n", queue_id); 2945 return -EINVAL; 2946 } 2947 2948 found = false; 2949 tmp = &port->flow_list; 2950 while (*tmp) { 2951 pf = *tmp; 2952 if (rule_idx == pf->id) { 2953 found = true; 2954 break; 2955 } 2956 tmp = &(*tmp)->next; 2957 } 2958 if (!found) { 2959 printf("Flow rule #%u is invalid\n", rule_idx); 2960 return -EINVAL; 2961 } 2962 2963 pt = pf->table; 2964 if (actions_idx >= pt->nb_actions_templates) { 2965 printf("Actions template index #%u is invalid," 2966 " %u templates present in the table\n", 2967 actions_idx, pt->nb_actions_templates); 2968 return -EINVAL; 2969 } 2970 2971 job = calloc(1, sizeof(*job)); 2972 if (!job) { 2973 printf("Queue flow create job allocate failed\n"); 2974 return -ENOMEM; 2975 } 2976 job->type = QUEUE_JOB_TYPE_FLOW_UPDATE; 2977 2978 uf = port_flow_new(&pt->flow_attr, pf->rule.pattern_ro, actions, &error); 2979 if (!uf) { 2980 free(job); 2981 return port_flow_complain(&error); 2982 } 2983 2984 if (age) { 2985 uf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2986 age->context = &uf->age_type; 2987 } 2988 2989 /* 2990 * Poisoning to make sure PMD update it in case of error. 2991 */ 2992 memset(&error, 0x44, sizeof(error)); 2993 if (rte_flow_async_actions_update(port_id, queue_id, &op_attr, pf->flow, 2994 actions, actions_idx, job, &error)) { 2995 free(uf); 2996 free(job); 2997 return port_flow_complain(&error); 2998 } 2999 uf->next = pf->next; 3000 uf->id = pf->id; 3001 uf->table = pt; 3002 uf->flow = pf->flow; 3003 *tmp = uf; 3004 job->pf = pf; 3005 3006 printf("Flow rule #%"PRIu64" update enqueued\n", pf->id); 3007 return 0; 3008 } 3009 3010 /** Enqueue indirect action create operation. */ 3011 int 3012 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 3013 bool postpone, uint32_t id, 3014 const struct rte_flow_indir_action_conf *conf, 3015 const struct rte_flow_action *action) 3016 { 3017 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3018 struct rte_port *port; 3019 struct port_indirect_action *pia; 3020 int ret; 3021 struct rte_flow_error error; 3022 struct queue_job *job; 3023 bool is_indirect_list = action[1].type != RTE_FLOW_ACTION_TYPE_END; 3024 3025 3026 ret = action_alloc(port_id, id, &pia); 3027 if (ret) 3028 return ret; 3029 3030 port = &ports[port_id]; 3031 if (queue_id >= port->queue_nb) { 3032 printf("Queue #%u is invalid\n", queue_id); 3033 return -EINVAL; 3034 } 3035 job = calloc(1, sizeof(*job)); 3036 if (!job) { 3037 printf("Queue action create job allocate failed\n"); 3038 return -ENOMEM; 3039 } 3040 job->type = QUEUE_JOB_TYPE_ACTION_CREATE; 3041 job->pia = pia; 3042 3043 /* Poisoning to make sure PMDs update it in case of error. */ 3044 memset(&error, 0x88, sizeof(error)); 3045 3046 if (is_indirect_list) 3047 queue_action_list_handle_create(port_id, queue_id, pia, job, 3048 &attr, conf, action, &error); 3049 else 3050 queue_action_handle_create(port_id, queue_id, pia, job, &attr, 3051 conf, action, &error); 3052 3053 if (!pia->handle) { 3054 uint32_t destroy_id = pia->id; 3055 port_queue_action_handle_destroy(port_id, queue_id, 3056 postpone, 1, &destroy_id); 3057 free(job); 3058 return port_flow_complain(&error); 3059 } 3060 printf("Indirect action #%u creation queued\n", pia->id); 3061 return 0; 3062 } 3063 3064 /** Enqueue indirect action destroy operation. */ 3065 int 3066 port_queue_action_handle_destroy(portid_t port_id, 3067 uint32_t queue_id, bool postpone, 3068 uint32_t n, const uint32_t *actions) 3069 { 3070 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3071 struct rte_port *port; 3072 struct port_indirect_action **tmp; 3073 int ret = 0; 3074 struct queue_job *job; 3075 3076 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3077 port_id == (portid_t)RTE_PORT_ALL) 3078 return -EINVAL; 3079 port = &ports[port_id]; 3080 3081 if (queue_id >= port->queue_nb) { 3082 printf("Queue #%u is invalid\n", queue_id); 3083 return -EINVAL; 3084 } 3085 3086 tmp = &port->actions_list; 3087 while (*tmp) { 3088 uint32_t i; 3089 3090 for (i = 0; i != n; ++i) { 3091 struct rte_flow_error error; 3092 struct port_indirect_action *pia = *tmp; 3093 3094 if (actions[i] != pia->id) 3095 continue; 3096 /* 3097 * Poisoning to make sure PMDs update it in case 3098 * of error. 3099 */ 3100 memset(&error, 0x99, sizeof(error)); 3101 job = calloc(1, sizeof(*job)); 3102 if (!job) { 3103 printf("Queue action destroy job allocate failed\n"); 3104 return -ENOMEM; 3105 } 3106 job->type = QUEUE_JOB_TYPE_ACTION_DESTROY; 3107 job->pia = pia; 3108 ret = pia->type == RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ? 3109 rte_flow_async_action_list_handle_destroy 3110 (port_id, queue_id, 3111 &attr, pia->list_handle, 3112 job, &error) : 3113 rte_flow_async_action_handle_destroy 3114 (port_id, queue_id, &attr, pia->handle, 3115 job, &error); 3116 if (ret) { 3117 free(job); 3118 ret = port_flow_complain(&error); 3119 continue; 3120 } 3121 *tmp = pia->next; 3122 printf("Indirect action #%u destruction queued\n", 3123 pia->id); 3124 break; 3125 } 3126 if (i == n) 3127 tmp = &(*tmp)->next; 3128 } 3129 return ret; 3130 } 3131 3132 /** Enqueue indirect action update operation. */ 3133 int 3134 port_queue_action_handle_update(portid_t port_id, 3135 uint32_t queue_id, bool postpone, uint32_t id, 3136 const struct rte_flow_action *action) 3137 { 3138 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3139 struct rte_port *port; 3140 struct rte_flow_error error; 3141 struct rte_flow_action_handle *action_handle; 3142 struct queue_job *job; 3143 struct port_indirect_action *pia; 3144 struct rte_flow_update_meter_mark mtr_update; 3145 const void *update; 3146 3147 action_handle = port_action_handle_get_by_id(port_id, id); 3148 if (!action_handle) 3149 return -EINVAL; 3150 3151 port = &ports[port_id]; 3152 if (queue_id >= port->queue_nb) { 3153 printf("Queue #%u is invalid\n", queue_id); 3154 return -EINVAL; 3155 } 3156 3157 job = calloc(1, sizeof(*job)); 3158 if (!job) { 3159 printf("Queue action update job allocate failed\n"); 3160 return -ENOMEM; 3161 } 3162 job->type = QUEUE_JOB_TYPE_ACTION_UPDATE; 3163 3164 pia = action_get_by_id(port_id, id); 3165 if (!pia) { 3166 free(job); 3167 return -EINVAL; 3168 } 3169 3170 switch (pia->type) { 3171 case RTE_FLOW_ACTION_TYPE_AGE: 3172 update = action->conf; 3173 break; 3174 case RTE_FLOW_ACTION_TYPE_METER_MARK: 3175 rte_memcpy(&mtr_update.meter_mark, action->conf, 3176 sizeof(struct rte_flow_action_meter_mark)); 3177 if (mtr_update.meter_mark.profile) 3178 mtr_update.profile_valid = 1; 3179 if (mtr_update.meter_mark.policy) 3180 mtr_update.policy_valid = 1; 3181 mtr_update.color_mode_valid = 1; 3182 mtr_update.init_color_valid = 1; 3183 mtr_update.state_valid = 1; 3184 update = &mtr_update; 3185 break; 3186 default: 3187 update = action; 3188 break; 3189 } 3190 3191 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 3192 action_handle, update, job, &error)) { 3193 free(job); 3194 return port_flow_complain(&error); 3195 } 3196 printf("Indirect action #%u update queued\n", id); 3197 return 0; 3198 } 3199 3200 void 3201 port_queue_action_handle_query_update(portid_t port_id, 3202 uint32_t queue_id, bool postpone, 3203 uint32_t id, 3204 enum rte_flow_query_update_mode qu_mode, 3205 const struct rte_flow_action *action) 3206 { 3207 int ret; 3208 struct rte_flow_error error; 3209 struct port_indirect_action *pia = action_get_by_id(port_id, id); 3210 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3211 struct queue_job *job; 3212 3213 if (!pia || !pia->handle) 3214 return; 3215 job = calloc(1, sizeof(*job)); 3216 if (!job) 3217 return; 3218 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3219 job->pia = pia; 3220 3221 ret = rte_flow_async_action_handle_query_update(port_id, queue_id, 3222 &attr, pia->handle, 3223 action, 3224 &job->query, 3225 qu_mode, job, 3226 &error); 3227 if (ret) { 3228 port_flow_complain(&error); 3229 free(job); 3230 } else { 3231 printf("port-%u: indirect action #%u update-and-query queued\n", 3232 port_id, id); 3233 } 3234 } 3235 3236 /** Enqueue indirect action query operation. */ 3237 int 3238 port_queue_action_handle_query(portid_t port_id, 3239 uint32_t queue_id, bool postpone, uint32_t id) 3240 { 3241 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3242 struct rte_port *port; 3243 struct rte_flow_error error; 3244 struct rte_flow_action_handle *action_handle; 3245 struct port_indirect_action *pia; 3246 struct queue_job *job; 3247 3248 pia = action_get_by_id(port_id, id); 3249 action_handle = pia ? pia->handle : NULL; 3250 if (!action_handle) 3251 return -EINVAL; 3252 3253 port = &ports[port_id]; 3254 if (queue_id >= port->queue_nb) { 3255 printf("Queue #%u is invalid\n", queue_id); 3256 return -EINVAL; 3257 } 3258 3259 job = calloc(1, sizeof(*job)); 3260 if (!job) { 3261 printf("Queue action update job allocate failed\n"); 3262 return -ENOMEM; 3263 } 3264 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3265 job->pia = pia; 3266 3267 if (rte_flow_async_action_handle_query(port_id, queue_id, &attr, 3268 action_handle, &job->query, job, &error)) { 3269 free(job); 3270 return port_flow_complain(&error); 3271 } 3272 printf("Indirect action #%u update queued\n", id); 3273 return 0; 3274 } 3275 3276 /** Push all the queue operations in the queue to the NIC. */ 3277 int 3278 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 3279 { 3280 struct rte_port *port; 3281 struct rte_flow_error error; 3282 int ret = 0; 3283 3284 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3285 port_id == (portid_t)RTE_PORT_ALL) 3286 return -EINVAL; 3287 port = &ports[port_id]; 3288 3289 if (queue_id >= port->queue_nb) { 3290 printf("Queue #%u is invalid\n", queue_id); 3291 return -EINVAL; 3292 } 3293 3294 memset(&error, 0x55, sizeof(error)); 3295 ret = rte_flow_push(port_id, queue_id, &error); 3296 if (ret < 0) { 3297 printf("Failed to push operations in the queue\n"); 3298 return -EINVAL; 3299 } 3300 printf("Queue #%u operations pushed\n", queue_id); 3301 return ret; 3302 } 3303 3304 /** Pull queue operation results from the queue. */ 3305 static int 3306 port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id, 3307 const uint64_t *rule, int nb_flows) 3308 { 3309 struct rte_port *port = &ports[port_id]; 3310 struct rte_flow_op_result *res; 3311 struct rte_flow_error error; 3312 uint32_t n = nb_flows; 3313 int ret = 0; 3314 int i; 3315 3316 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3317 if (!res) { 3318 printf("Failed to allocate memory for pulled results\n"); 3319 return -ENOMEM; 3320 } 3321 3322 memset(&error, 0x66, sizeof(error)); 3323 while (nb_flows > 0) { 3324 int success = 0; 3325 3326 if (n > port->queue_sz) 3327 n = port->queue_sz; 3328 ret = port_queue_flow_destroy(port_id, queue_id, true, n, rule); 3329 if (ret < 0) { 3330 free(res); 3331 return ret; 3332 } 3333 ret = rte_flow_push(port_id, queue_id, &error); 3334 if (ret < 0) { 3335 printf("Failed to push operations in the queue: %s\n", 3336 strerror(-ret)); 3337 free(res); 3338 return ret; 3339 } 3340 while (success < nb_flows) { 3341 ret = rte_flow_pull(port_id, queue_id, res, 3342 port->queue_sz, &error); 3343 if (ret < 0) { 3344 printf("Failed to pull a operation results: %s\n", 3345 strerror(-ret)); 3346 free(res); 3347 return ret; 3348 } 3349 3350 for (i = 0; i < ret; i++) { 3351 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3352 success++; 3353 } 3354 } 3355 rule += n; 3356 nb_flows -= n; 3357 n = nb_flows; 3358 } 3359 3360 free(res); 3361 return ret; 3362 } 3363 3364 /** List simply and destroy all aged flows per queue. */ 3365 void 3366 port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy) 3367 { 3368 void **contexts; 3369 int nb_context, total = 0, idx; 3370 uint64_t *rules = NULL; 3371 struct rte_port *port; 3372 struct rte_flow_error error; 3373 enum age_action_context_type *type; 3374 union { 3375 struct port_flow *pf; 3376 struct port_indirect_action *pia; 3377 } ctx; 3378 3379 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3380 port_id == (portid_t)RTE_PORT_ALL) 3381 return; 3382 port = &ports[port_id]; 3383 if (queue_id >= port->queue_nb) { 3384 printf("Error: queue #%u is invalid\n", queue_id); 3385 return; 3386 } 3387 total = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error); 3388 if (total < 0) { 3389 port_flow_complain(&error); 3390 return; 3391 } 3392 printf("Port %u queue %u total aged flows: %d\n", 3393 port_id, queue_id, total); 3394 if (total == 0) 3395 return; 3396 contexts = calloc(total, sizeof(void *)); 3397 if (contexts == NULL) { 3398 printf("Cannot allocate contexts for aged flow\n"); 3399 return; 3400 } 3401 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3402 nb_context = rte_flow_get_q_aged_flows(port_id, queue_id, contexts, 3403 total, &error); 3404 if (nb_context > total) { 3405 printf("Port %u queue %u get aged flows count(%d) > total(%d)\n", 3406 port_id, queue_id, nb_context, total); 3407 free(contexts); 3408 return; 3409 } 3410 if (destroy) { 3411 rules = malloc(sizeof(uint32_t) * nb_context); 3412 if (rules == NULL) 3413 printf("Cannot allocate memory for destroy aged flow\n"); 3414 } 3415 total = 0; 3416 for (idx = 0; idx < nb_context; idx++) { 3417 if (!contexts[idx]) { 3418 printf("Error: get Null context in port %u queue %u\n", 3419 port_id, queue_id); 3420 continue; 3421 } 3422 type = (enum age_action_context_type *)contexts[idx]; 3423 switch (*type) { 3424 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3425 ctx.pf = container_of(type, struct port_flow, age_type); 3426 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3427 "\t%c%c%c\t\n", 3428 "Flow", 3429 ctx.pf->id, 3430 ctx.pf->rule.attr->group, 3431 ctx.pf->rule.attr->priority, 3432 ctx.pf->rule.attr->ingress ? 'i' : '-', 3433 ctx.pf->rule.attr->egress ? 'e' : '-', 3434 ctx.pf->rule.attr->transfer ? 't' : '-'); 3435 if (rules != NULL) { 3436 rules[total] = ctx.pf->id; 3437 total++; 3438 } 3439 break; 3440 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3441 ctx.pia = container_of(type, 3442 struct port_indirect_action, 3443 age_type); 3444 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3445 ctx.pia->id); 3446 break; 3447 default: 3448 printf("Error: invalid context type %u\n", port_id); 3449 break; 3450 } 3451 } 3452 if (rules != NULL) { 3453 port_queue_aged_flow_destroy(port_id, queue_id, rules, total); 3454 free(rules); 3455 } 3456 printf("\n%d flows destroyed\n", total); 3457 free(contexts); 3458 } 3459 3460 /** Pull queue operation results from the queue. */ 3461 int 3462 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 3463 { 3464 struct rte_port *port; 3465 struct rte_flow_op_result *res; 3466 struct rte_flow_error error; 3467 int ret = 0; 3468 int success = 0; 3469 int i; 3470 struct queue_job *job; 3471 3472 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3473 port_id == (portid_t)RTE_PORT_ALL) 3474 return -EINVAL; 3475 port = &ports[port_id]; 3476 3477 if (queue_id >= port->queue_nb) { 3478 printf("Queue #%u is invalid\n", queue_id); 3479 return -EINVAL; 3480 } 3481 3482 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3483 if (!res) { 3484 printf("Failed to allocate memory for pulled results\n"); 3485 return -ENOMEM; 3486 } 3487 3488 memset(&error, 0x66, sizeof(error)); 3489 ret = rte_flow_pull(port_id, queue_id, res, 3490 port->queue_sz, &error); 3491 if (ret < 0) { 3492 printf("Failed to pull a operation results\n"); 3493 free(res); 3494 return -EINVAL; 3495 } 3496 3497 for (i = 0; i < ret; i++) { 3498 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3499 success++; 3500 job = (struct queue_job *)res[i].user_data; 3501 if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY || 3502 job->type == QUEUE_JOB_TYPE_FLOW_UPDATE) 3503 free(job->pf); 3504 else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY) 3505 free(job->pia); 3506 else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY) 3507 port_action_handle_query_dump(port_id, job->pia, 3508 &job->query); 3509 free(job); 3510 } 3511 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 3512 queue_id, ret, ret - success, success); 3513 free(res); 3514 return ret; 3515 } 3516 3517 /** Create flow rule. */ 3518 int 3519 port_flow_create(portid_t port_id, 3520 const struct rte_flow_attr *attr, 3521 const struct rte_flow_item *pattern, 3522 const struct rte_flow_action *actions, 3523 const struct tunnel_ops *tunnel_ops, 3524 uintptr_t user_id) 3525 { 3526 struct rte_flow *flow; 3527 struct rte_port *port; 3528 struct port_flow *pf; 3529 uint32_t id = 0; 3530 struct rte_flow_error error; 3531 struct port_flow_tunnel *pft = NULL; 3532 struct rte_flow_action_age *age = age_action_get(actions); 3533 3534 port = &ports[port_id]; 3535 if (port->flow_list) { 3536 if (port->flow_list->id == UINT32_MAX) { 3537 fprintf(stderr, 3538 "Highest rule ID is already assigned, delete it first"); 3539 return -ENOMEM; 3540 } 3541 id = port->flow_list->id + 1; 3542 } 3543 if (tunnel_ops->enabled) { 3544 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 3545 actions, tunnel_ops); 3546 if (!pft) 3547 return -ENOENT; 3548 if (pft->items) 3549 pattern = pft->items; 3550 if (pft->actions) 3551 actions = pft->actions; 3552 } 3553 pf = port_flow_new(attr, pattern, actions, &error); 3554 if (!pf) 3555 return port_flow_complain(&error); 3556 if (age) { 3557 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 3558 age->context = &pf->age_type; 3559 } 3560 /* Poisoning to make sure PMDs update it in case of error. */ 3561 memset(&error, 0x22, sizeof(error)); 3562 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 3563 if (!flow) { 3564 if (tunnel_ops->enabled) 3565 port_flow_tunnel_offload_cmd_release(port_id, 3566 tunnel_ops, pft); 3567 free(pf); 3568 return port_flow_complain(&error); 3569 } 3570 pf->next = port->flow_list; 3571 pf->id = id; 3572 pf->user_id = user_id; 3573 pf->flow = flow; 3574 port->flow_list = pf; 3575 if (tunnel_ops->enabled) 3576 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 3577 if (user_id) 3578 printf("Flow rule #%"PRIu64" created, user-id 0x%"PRIx64"\n", 3579 pf->id, pf->user_id); 3580 else 3581 printf("Flow rule #%"PRIu64" created\n", pf->id); 3582 return 0; 3583 } 3584 3585 /** Destroy a number of flow rules. */ 3586 int 3587 port_flow_destroy(portid_t port_id, uint32_t n, const uint64_t *rule, 3588 bool is_user_id) 3589 { 3590 struct rte_port *port; 3591 struct port_flow **tmp; 3592 int ret = 0; 3593 3594 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3595 port_id == (portid_t)RTE_PORT_ALL) 3596 return -EINVAL; 3597 port = &ports[port_id]; 3598 tmp = &port->flow_list; 3599 while (*tmp) { 3600 uint32_t i; 3601 3602 for (i = 0; i != n; ++i) { 3603 struct rte_flow_error error; 3604 struct port_flow *pf = *tmp; 3605 3606 if (rule[i] != (is_user_id ? pf->user_id : pf->id)) 3607 continue; 3608 /* 3609 * Poisoning to make sure PMDs update it in case 3610 * of error. 3611 */ 3612 memset(&error, 0x33, sizeof(error)); 3613 if (rte_flow_destroy(port_id, pf->flow, &error)) { 3614 ret = port_flow_complain(&error); 3615 continue; 3616 } 3617 if (is_user_id) 3618 printf("Flow rule #%"PRIu64" destroyed, " 3619 "user-id 0x%"PRIx64"\n", 3620 pf->id, pf->user_id); 3621 else 3622 printf("Flow rule #%"PRIu64" destroyed\n", 3623 pf->id); 3624 *tmp = pf->next; 3625 free(pf); 3626 break; 3627 } 3628 if (i == n) 3629 tmp = &(*tmp)->next; 3630 } 3631 return ret; 3632 } 3633 3634 /** Remove all flow rules. */ 3635 int 3636 port_flow_flush(portid_t port_id) 3637 { 3638 struct rte_flow_error error; 3639 struct rte_port *port; 3640 int ret = 0; 3641 3642 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3643 port_id == (portid_t)RTE_PORT_ALL) 3644 return -EINVAL; 3645 3646 port = &ports[port_id]; 3647 3648 if (port->flow_list == NULL) 3649 return ret; 3650 3651 /* Poisoning to make sure PMDs update it in case of error. */ 3652 memset(&error, 0x44, sizeof(error)); 3653 if (rte_flow_flush(port_id, &error)) { 3654 port_flow_complain(&error); 3655 } 3656 3657 while (port->flow_list) { 3658 struct port_flow *pf = port->flow_list->next; 3659 3660 free(port->flow_list); 3661 port->flow_list = pf; 3662 } 3663 return ret; 3664 } 3665 3666 /** Dump flow rules. */ 3667 int 3668 port_flow_dump(portid_t port_id, bool dump_all, uint64_t rule_id, 3669 const char *file_name, bool is_user_id) 3670 { 3671 int ret = 0; 3672 FILE *file = stdout; 3673 struct rte_flow_error error; 3674 struct rte_port *port; 3675 struct port_flow *pflow; 3676 struct rte_flow *tmpFlow = NULL; 3677 bool found = false; 3678 3679 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3680 port_id == (portid_t)RTE_PORT_ALL) 3681 return -EINVAL; 3682 3683 if (!dump_all) { 3684 port = &ports[port_id]; 3685 pflow = port->flow_list; 3686 while (pflow) { 3687 if (rule_id != 3688 (is_user_id ? pflow->user_id : pflow->id)) { 3689 pflow = pflow->next; 3690 } else { 3691 tmpFlow = pflow->flow; 3692 if (tmpFlow) 3693 found = true; 3694 break; 3695 } 3696 } 3697 if (found == false) { 3698 fprintf(stderr, "Failed to dump to flow %"PRIu64"\n", 3699 rule_id); 3700 return -EINVAL; 3701 } 3702 } 3703 3704 if (file_name && strlen(file_name)) { 3705 file = fopen(file_name, "w"); 3706 if (!file) { 3707 fprintf(stderr, "Failed to create file %s: %s\n", 3708 file_name, strerror(errno)); 3709 return -errno; 3710 } 3711 } 3712 3713 if (!dump_all) 3714 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3715 else 3716 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3717 if (ret) { 3718 port_flow_complain(&error); 3719 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3720 } else 3721 printf("Flow dump finished\n"); 3722 if (file_name && strlen(file_name)) 3723 fclose(file); 3724 return ret; 3725 } 3726 3727 /** Query a flow rule. */ 3728 int 3729 port_flow_query(portid_t port_id, uint64_t rule, 3730 const struct rte_flow_action *action, bool is_user_id) 3731 { 3732 struct rte_flow_error error; 3733 struct rte_port *port; 3734 struct port_flow *pf; 3735 const char *name; 3736 union { 3737 struct rte_flow_query_count count; 3738 struct rte_flow_action_rss rss_conf; 3739 struct rte_flow_query_age age; 3740 } query; 3741 int ret; 3742 3743 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3744 port_id == (portid_t)RTE_PORT_ALL) 3745 return -EINVAL; 3746 port = &ports[port_id]; 3747 for (pf = port->flow_list; pf; pf = pf->next) 3748 if ((is_user_id ? pf->user_id : pf->id) == rule) 3749 break; 3750 if (!pf) { 3751 fprintf(stderr, "Flow rule #%"PRIu64" not found\n", rule); 3752 return -ENOENT; 3753 } 3754 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3755 &name, sizeof(name), 3756 (void *)(uintptr_t)action->type, &error); 3757 if (ret < 0) 3758 return port_flow_complain(&error); 3759 switch (action->type) { 3760 case RTE_FLOW_ACTION_TYPE_COUNT: 3761 case RTE_FLOW_ACTION_TYPE_RSS: 3762 case RTE_FLOW_ACTION_TYPE_AGE: 3763 break; 3764 default: 3765 fprintf(stderr, "Cannot query action type %d (%s)\n", 3766 action->type, name); 3767 return -ENOTSUP; 3768 } 3769 /* Poisoning to make sure PMDs update it in case of error. */ 3770 memset(&error, 0x55, sizeof(error)); 3771 memset(&query, 0, sizeof(query)); 3772 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3773 return port_flow_complain(&error); 3774 switch (action->type) { 3775 case RTE_FLOW_ACTION_TYPE_COUNT: 3776 printf("%s:\n" 3777 " hits_set: %u\n" 3778 " bytes_set: %u\n" 3779 " hits: %" PRIu64 "\n" 3780 " bytes: %" PRIu64 "\n", 3781 name, 3782 query.count.hits_set, 3783 query.count.bytes_set, 3784 query.count.hits, 3785 query.count.bytes); 3786 break; 3787 case RTE_FLOW_ACTION_TYPE_RSS: 3788 rss_config_display(&query.rss_conf); 3789 break; 3790 case RTE_FLOW_ACTION_TYPE_AGE: 3791 printf("%s:\n" 3792 " aged: %u\n" 3793 " sec_since_last_hit_valid: %u\n" 3794 " sec_since_last_hit: %" PRIu32 "\n", 3795 name, 3796 query.age.aged, 3797 query.age.sec_since_last_hit_valid, 3798 query.age.sec_since_last_hit); 3799 break; 3800 default: 3801 fprintf(stderr, 3802 "Cannot display result for action type %d (%s)\n", 3803 action->type, name); 3804 break; 3805 } 3806 return 0; 3807 } 3808 3809 /** List simply and destroy all aged flows. */ 3810 void 3811 port_flow_aged(portid_t port_id, uint8_t destroy) 3812 { 3813 void **contexts; 3814 int nb_context, total = 0, idx; 3815 struct rte_flow_error error; 3816 enum age_action_context_type *type; 3817 union { 3818 struct port_flow *pf; 3819 struct port_indirect_action *pia; 3820 } ctx; 3821 3822 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3823 port_id == (portid_t)RTE_PORT_ALL) 3824 return; 3825 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3826 printf("Port %u total aged flows: %d\n", port_id, total); 3827 if (total < 0) { 3828 port_flow_complain(&error); 3829 return; 3830 } 3831 if (total == 0) 3832 return; 3833 contexts = malloc(sizeof(void *) * total); 3834 if (contexts == NULL) { 3835 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3836 return; 3837 } 3838 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3839 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3840 if (nb_context != total) { 3841 fprintf(stderr, 3842 "Port:%d get aged flows count(%d) != total(%d)\n", 3843 port_id, nb_context, total); 3844 free(contexts); 3845 return; 3846 } 3847 total = 0; 3848 for (idx = 0; idx < nb_context; idx++) { 3849 if (!contexts[idx]) { 3850 fprintf(stderr, "Error: get Null context in port %u\n", 3851 port_id); 3852 continue; 3853 } 3854 type = (enum age_action_context_type *)contexts[idx]; 3855 switch (*type) { 3856 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3857 ctx.pf = container_of(type, struct port_flow, age_type); 3858 printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 3859 "\t%c%c%c\t\n", 3860 "Flow", 3861 ctx.pf->id, 3862 ctx.pf->rule.attr->group, 3863 ctx.pf->rule.attr->priority, 3864 ctx.pf->rule.attr->ingress ? 'i' : '-', 3865 ctx.pf->rule.attr->egress ? 'e' : '-', 3866 ctx.pf->rule.attr->transfer ? 't' : '-'); 3867 if (destroy && !port_flow_destroy(port_id, 1, 3868 &ctx.pf->id, false)) 3869 total++; 3870 break; 3871 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3872 ctx.pia = container_of(type, 3873 struct port_indirect_action, age_type); 3874 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3875 ctx.pia->id); 3876 break; 3877 default: 3878 fprintf(stderr, "Error: invalid context type %u\n", 3879 port_id); 3880 break; 3881 } 3882 } 3883 printf("\n%d flows destroyed\n", total); 3884 free(contexts); 3885 } 3886 3887 /** List flow rules. */ 3888 void 3889 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3890 { 3891 struct rte_port *port; 3892 struct port_flow *pf; 3893 struct port_flow *list = NULL; 3894 uint32_t i; 3895 3896 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3897 port_id == (portid_t)RTE_PORT_ALL) 3898 return; 3899 port = &ports[port_id]; 3900 if (!port->flow_list) 3901 return; 3902 /* Sort flows by group, priority and ID. */ 3903 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3904 struct port_flow **tmp; 3905 const struct rte_flow_attr *curr = pf->rule.attr; 3906 3907 if (n) { 3908 /* Filter out unwanted groups. */ 3909 for (i = 0; i != n; ++i) 3910 if (curr->group == group[i]) 3911 break; 3912 if (i == n) 3913 continue; 3914 } 3915 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3916 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3917 3918 if (curr->group > comp->group || 3919 (curr->group == comp->group && 3920 curr->priority > comp->priority) || 3921 (curr->group == comp->group && 3922 curr->priority == comp->priority && 3923 pf->id > (*tmp)->id)) 3924 continue; 3925 break; 3926 } 3927 pf->tmp = *tmp; 3928 *tmp = pf; 3929 } 3930 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3931 for (pf = list; pf != NULL; pf = pf->tmp) { 3932 const struct rte_flow_item *item = pf->rule.pattern; 3933 const struct rte_flow_action *action = pf->rule.actions; 3934 const char *name; 3935 3936 printf("%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3937 pf->id, 3938 pf->rule.attr->group, 3939 pf->rule.attr->priority, 3940 pf->rule.attr->ingress ? 'i' : '-', 3941 pf->rule.attr->egress ? 'e' : '-', 3942 pf->rule.attr->transfer ? 't' : '-'); 3943 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3944 if ((uint32_t)item->type > INT_MAX) 3945 name = "PMD_INTERNAL"; 3946 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3947 &name, sizeof(name), 3948 (void *)(uintptr_t)item->type, 3949 NULL) <= 0) 3950 name = "[UNKNOWN]"; 3951 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3952 printf("%s ", name); 3953 ++item; 3954 } 3955 printf("=>"); 3956 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3957 if ((uint32_t)action->type > INT_MAX) 3958 name = "PMD_INTERNAL"; 3959 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3960 &name, sizeof(name), 3961 (void *)(uintptr_t)action->type, 3962 NULL) <= 0) 3963 name = "[UNKNOWN]"; 3964 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3965 printf(" %s", name); 3966 ++action; 3967 } 3968 printf("\n"); 3969 } 3970 } 3971 3972 /** Restrict ingress traffic to the defined flow rules. */ 3973 int 3974 port_flow_isolate(portid_t port_id, int set) 3975 { 3976 struct rte_flow_error error; 3977 3978 /* Poisoning to make sure PMDs update it in case of error. */ 3979 memset(&error, 0x66, sizeof(error)); 3980 if (rte_flow_isolate(port_id, set, &error)) 3981 return port_flow_complain(&error); 3982 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3983 port_id, 3984 set ? "now restricted" : "not restricted anymore"); 3985 return 0; 3986 } 3987 3988 /* 3989 * RX/TX ring descriptors display functions. 3990 */ 3991 int 3992 rx_queue_id_is_invalid(queueid_t rxq_id) 3993 { 3994 if (rxq_id < nb_rxq) 3995 return 0; 3996 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3997 rxq_id, nb_rxq); 3998 return 1; 3999 } 4000 4001 int 4002 tx_queue_id_is_invalid(queueid_t txq_id) 4003 { 4004 if (txq_id < nb_txq) 4005 return 0; 4006 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 4007 txq_id, nb_txq); 4008 return 1; 4009 } 4010 4011 static int 4012 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 4013 { 4014 struct rte_port *port = &ports[port_id]; 4015 struct rte_eth_rxq_info rx_qinfo; 4016 int ret; 4017 4018 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 4019 if (ret == 0) { 4020 *ring_size = rx_qinfo.nb_desc; 4021 return ret; 4022 } 4023 4024 if (ret != -ENOTSUP) 4025 return ret; 4026 /* 4027 * If the rte_eth_rx_queue_info_get is not support for this PMD, 4028 * ring_size stored in testpmd will be used for validity verification. 4029 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 4030 * being 0, it will use a default value provided by PMDs to setup this 4031 * rxq. If the default value is 0, it will use the 4032 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 4033 */ 4034 if (port->nb_rx_desc[rxq_id]) 4035 *ring_size = port->nb_rx_desc[rxq_id]; 4036 else if (port->dev_info.default_rxportconf.ring_size) 4037 *ring_size = port->dev_info.default_rxportconf.ring_size; 4038 else 4039 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 4040 return 0; 4041 } 4042 4043 static int 4044 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 4045 { 4046 struct rte_port *port = &ports[port_id]; 4047 struct rte_eth_txq_info tx_qinfo; 4048 int ret; 4049 4050 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 4051 if (ret == 0) { 4052 *ring_size = tx_qinfo.nb_desc; 4053 return ret; 4054 } 4055 4056 if (ret != -ENOTSUP) 4057 return ret; 4058 /* 4059 * If the rte_eth_tx_queue_info_get is not support for this PMD, 4060 * ring_size stored in testpmd will be used for validity verification. 4061 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 4062 * being 0, it will use a default value provided by PMDs to setup this 4063 * txq. If the default value is 0, it will use the 4064 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 4065 */ 4066 if (port->nb_tx_desc[txq_id]) 4067 *ring_size = port->nb_tx_desc[txq_id]; 4068 else if (port->dev_info.default_txportconf.ring_size) 4069 *ring_size = port->dev_info.default_txportconf.ring_size; 4070 else 4071 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 4072 return 0; 4073 } 4074 4075 static int 4076 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 4077 { 4078 uint16_t ring_size; 4079 int ret; 4080 4081 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 4082 if (ret) 4083 return 1; 4084 4085 if (rxdesc_id < ring_size) 4086 return 0; 4087 4088 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 4089 rxdesc_id, ring_size); 4090 return 1; 4091 } 4092 4093 static int 4094 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 4095 { 4096 uint16_t ring_size; 4097 int ret; 4098 4099 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 4100 if (ret) 4101 return 1; 4102 4103 if (txdesc_id < ring_size) 4104 return 0; 4105 4106 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 4107 txdesc_id, ring_size); 4108 return 1; 4109 } 4110 4111 static const struct rte_memzone * 4112 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 4113 { 4114 char mz_name[RTE_MEMZONE_NAMESIZE]; 4115 const struct rte_memzone *mz; 4116 4117 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 4118 port_id, q_id, ring_name); 4119 mz = rte_memzone_lookup(mz_name); 4120 if (mz == NULL) 4121 fprintf(stderr, 4122 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 4123 ring_name, port_id, q_id, mz_name); 4124 return mz; 4125 } 4126 4127 union igb_ring_dword { 4128 uint64_t dword; 4129 struct { 4130 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 4131 uint32_t lo; 4132 uint32_t hi; 4133 #else 4134 uint32_t hi; 4135 uint32_t lo; 4136 #endif 4137 } words; 4138 }; 4139 4140 struct igb_ring_desc_32_bytes { 4141 union igb_ring_dword lo_dword; 4142 union igb_ring_dword hi_dword; 4143 union igb_ring_dword resv1; 4144 union igb_ring_dword resv2; 4145 }; 4146 4147 struct igb_ring_desc_16_bytes { 4148 union igb_ring_dword lo_dword; 4149 union igb_ring_dword hi_dword; 4150 }; 4151 4152 static void 4153 ring_rxd_display_dword(union igb_ring_dword dword) 4154 { 4155 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 4156 (unsigned)dword.words.hi); 4157 } 4158 4159 static void 4160 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 4161 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4162 portid_t port_id, 4163 #else 4164 __rte_unused portid_t port_id, 4165 #endif 4166 uint16_t desc_id) 4167 { 4168 struct igb_ring_desc_16_bytes *ring = 4169 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4170 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 4171 int ret; 4172 struct rte_eth_dev_info dev_info; 4173 4174 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4175 if (ret != 0) 4176 return; 4177 4178 if (strstr(dev_info.driver_name, "i40e") != NULL) { 4179 /* 32 bytes RX descriptor, i40e only */ 4180 struct igb_ring_desc_32_bytes *ring = 4181 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 4182 ring[desc_id].lo_dword.dword = 4183 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4184 ring_rxd_display_dword(ring[desc_id].lo_dword); 4185 ring[desc_id].hi_dword.dword = 4186 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4187 ring_rxd_display_dword(ring[desc_id].hi_dword); 4188 ring[desc_id].resv1.dword = 4189 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 4190 ring_rxd_display_dword(ring[desc_id].resv1); 4191 ring[desc_id].resv2.dword = 4192 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 4193 ring_rxd_display_dword(ring[desc_id].resv2); 4194 4195 return; 4196 } 4197 #endif 4198 /* 16 bytes RX descriptor */ 4199 ring[desc_id].lo_dword.dword = 4200 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4201 ring_rxd_display_dword(ring[desc_id].lo_dword); 4202 ring[desc_id].hi_dword.dword = 4203 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4204 ring_rxd_display_dword(ring[desc_id].hi_dword); 4205 } 4206 4207 static void 4208 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 4209 { 4210 struct igb_ring_desc_16_bytes *ring; 4211 struct igb_ring_desc_16_bytes txd; 4212 4213 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 4214 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 4215 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 4216 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 4217 (unsigned)txd.lo_dword.words.lo, 4218 (unsigned)txd.lo_dword.words.hi, 4219 (unsigned)txd.hi_dword.words.lo, 4220 (unsigned)txd.hi_dword.words.hi); 4221 } 4222 4223 void 4224 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 4225 { 4226 const struct rte_memzone *rx_mz; 4227 4228 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 4229 return; 4230 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 4231 if (rx_mz == NULL) 4232 return; 4233 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 4234 } 4235 4236 void 4237 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 4238 { 4239 const struct rte_memzone *tx_mz; 4240 4241 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 4242 return; 4243 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 4244 if (tx_mz == NULL) 4245 return; 4246 ring_tx_descriptor_display(tx_mz, txd_id); 4247 } 4248 4249 void 4250 fwd_lcores_config_display(void) 4251 { 4252 lcoreid_t lc_id; 4253 4254 printf("List of forwarding lcores:"); 4255 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 4256 printf(" %2u", fwd_lcores_cpuids[lc_id]); 4257 printf("\n"); 4258 } 4259 void 4260 rxtx_config_display(void) 4261 { 4262 portid_t pid; 4263 queueid_t qid; 4264 4265 printf(" %s%s%s packet forwarding%s packets/burst=%d\n", 4266 cur_fwd_eng->fwd_mode_name, 4267 cur_fwd_eng->status ? "-" : "", 4268 cur_fwd_eng->status ? cur_fwd_eng->status : "", 4269 retry_enabled == 0 ? "" : " with retry", 4270 nb_pkt_per_burst); 4271 4272 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 4273 printf(" packet len=%u - nb packet segments=%d\n", 4274 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 4275 4276 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 4277 nb_fwd_lcores, nb_fwd_ports); 4278 4279 RTE_ETH_FOREACH_DEV(pid) { 4280 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 4281 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 4282 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 4283 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 4284 struct rte_eth_rxq_info rx_qinfo; 4285 struct rte_eth_txq_info tx_qinfo; 4286 uint16_t rx_free_thresh_tmp; 4287 uint16_t tx_free_thresh_tmp; 4288 uint16_t tx_rs_thresh_tmp; 4289 uint16_t nb_rx_desc_tmp; 4290 uint16_t nb_tx_desc_tmp; 4291 uint64_t offloads_tmp; 4292 uint8_t pthresh_tmp; 4293 uint8_t hthresh_tmp; 4294 uint8_t wthresh_tmp; 4295 int32_t rc; 4296 4297 /* per port config */ 4298 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 4299 (unsigned int)pid, nb_rxq, nb_txq); 4300 4301 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 4302 ports[pid].dev_conf.rxmode.offloads, 4303 ports[pid].dev_conf.txmode.offloads); 4304 4305 /* per rx queue config only for first queue to be less verbose */ 4306 for (qid = 0; qid < 1; qid++) { 4307 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 4308 if (rc) { 4309 nb_rx_desc_tmp = nb_rx_desc[qid]; 4310 rx_free_thresh_tmp = 4311 rx_conf[qid].rx_free_thresh; 4312 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 4313 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 4314 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 4315 offloads_tmp = rx_conf[qid].offloads; 4316 } else { 4317 nb_rx_desc_tmp = rx_qinfo.nb_desc; 4318 rx_free_thresh_tmp = 4319 rx_qinfo.conf.rx_free_thresh; 4320 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 4321 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 4322 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 4323 offloads_tmp = rx_qinfo.conf.offloads; 4324 } 4325 4326 printf(" RX queue: %d\n", qid); 4327 printf(" RX desc=%d - RX free threshold=%d\n", 4328 nb_rx_desc_tmp, rx_free_thresh_tmp); 4329 printf(" RX threshold registers: pthresh=%d hthresh=%d " 4330 " wthresh=%d\n", 4331 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4332 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 4333 if (rx_conf->share_group > 0) 4334 printf(" share_group=%u share_qid=%u", 4335 rx_conf->share_group, 4336 rx_conf->share_qid); 4337 printf("\n"); 4338 } 4339 4340 /* per tx queue config only for first queue to be less verbose */ 4341 for (qid = 0; qid < 1; qid++) { 4342 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 4343 if (rc) { 4344 nb_tx_desc_tmp = nb_tx_desc[qid]; 4345 tx_free_thresh_tmp = 4346 tx_conf[qid].tx_free_thresh; 4347 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 4348 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 4349 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 4350 offloads_tmp = tx_conf[qid].offloads; 4351 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 4352 } else { 4353 nb_tx_desc_tmp = tx_qinfo.nb_desc; 4354 tx_free_thresh_tmp = 4355 tx_qinfo.conf.tx_free_thresh; 4356 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 4357 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 4358 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 4359 offloads_tmp = tx_qinfo.conf.offloads; 4360 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 4361 } 4362 4363 printf(" TX queue: %d\n", qid); 4364 printf(" TX desc=%d - TX free threshold=%d\n", 4365 nb_tx_desc_tmp, tx_free_thresh_tmp); 4366 printf(" TX threshold registers: pthresh=%d hthresh=%d " 4367 " wthresh=%d\n", 4368 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4369 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 4370 offloads_tmp, tx_rs_thresh_tmp); 4371 } 4372 } 4373 } 4374 4375 void 4376 port_rss_reta_info(portid_t port_id, 4377 struct rte_eth_rss_reta_entry64 *reta_conf, 4378 uint16_t nb_entries) 4379 { 4380 uint16_t i, idx, shift; 4381 int ret; 4382 4383 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4384 return; 4385 4386 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 4387 if (ret != 0) { 4388 fprintf(stderr, 4389 "Failed to get RSS RETA info, return code = %d\n", 4390 ret); 4391 return; 4392 } 4393 4394 for (i = 0; i < nb_entries; i++) { 4395 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4396 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4397 if (!(reta_conf[idx].mask & (1ULL << shift))) 4398 continue; 4399 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 4400 i, reta_conf[idx].reta[shift]); 4401 } 4402 } 4403 4404 /* 4405 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 4406 * key of the port. 4407 */ 4408 void 4409 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 4410 { 4411 struct rte_eth_rss_conf rss_conf = {0}; 4412 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 4413 uint64_t rss_hf; 4414 uint8_t i; 4415 int diag; 4416 struct rte_eth_dev_info dev_info; 4417 uint8_t hash_key_size; 4418 int ret; 4419 4420 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4421 return; 4422 4423 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4424 if (ret != 0) 4425 return; 4426 4427 if (dev_info.hash_key_size > 0 && 4428 dev_info.hash_key_size <= sizeof(rss_key)) 4429 hash_key_size = dev_info.hash_key_size; 4430 else { 4431 fprintf(stderr, 4432 "dev_info did not provide a valid hash key size\n"); 4433 return; 4434 } 4435 4436 /* Get RSS hash key if asked to display it */ 4437 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 4438 rss_conf.rss_key_len = hash_key_size; 4439 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4440 if (diag != 0) { 4441 switch (diag) { 4442 case -ENODEV: 4443 fprintf(stderr, "port index %d invalid\n", port_id); 4444 break; 4445 case -ENOTSUP: 4446 fprintf(stderr, "operation not supported by device\n"); 4447 break; 4448 default: 4449 fprintf(stderr, "operation failed - diag=%d\n", diag); 4450 break; 4451 } 4452 return; 4453 } 4454 rss_hf = rss_conf.rss_hf; 4455 if (rss_hf == 0) { 4456 printf("RSS disabled\n"); 4457 return; 4458 } 4459 printf("RSS functions:\n"); 4460 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 4461 if (!show_rss_key) 4462 return; 4463 printf("RSS key:\n"); 4464 for (i = 0; i < hash_key_size; i++) 4465 printf("%02X", rss_key[i]); 4466 printf("\n"); 4467 } 4468 4469 void 4470 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 4471 uint8_t hash_key_len) 4472 { 4473 struct rte_eth_rss_conf rss_conf; 4474 int diag; 4475 4476 rss_conf.rss_key = NULL; 4477 rss_conf.rss_key_len = 0; 4478 rss_conf.rss_hf = str_to_rsstypes(rss_type); 4479 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4480 if (diag == 0) { 4481 rss_conf.rss_key = hash_key; 4482 rss_conf.rss_key_len = hash_key_len; 4483 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 4484 } 4485 if (diag == 0) 4486 return; 4487 4488 switch (diag) { 4489 case -ENODEV: 4490 fprintf(stderr, "port index %d invalid\n", port_id); 4491 break; 4492 case -ENOTSUP: 4493 fprintf(stderr, "operation not supported by device\n"); 4494 break; 4495 default: 4496 fprintf(stderr, "operation failed - diag=%d\n", diag); 4497 break; 4498 } 4499 } 4500 4501 /* 4502 * Check whether a shared rxq scheduled on other lcores. 4503 */ 4504 static bool 4505 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 4506 portid_t src_port, queueid_t src_rxq, 4507 uint32_t share_group, queueid_t share_rxq) 4508 { 4509 streamid_t sm_id; 4510 streamid_t nb_fs_per_lcore; 4511 lcoreid_t nb_fc; 4512 lcoreid_t lc_id; 4513 struct fwd_stream *fs; 4514 struct rte_port *port; 4515 struct rte_eth_dev_info *dev_info; 4516 struct rte_eth_rxconf *rxq_conf; 4517 4518 nb_fc = cur_fwd_config.nb_fwd_lcores; 4519 /* Check remaining cores. */ 4520 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 4521 sm_id = fwd_lcores[lc_id]->stream_idx; 4522 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4523 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4524 sm_id++) { 4525 fs = fwd_streams[sm_id]; 4526 port = &ports[fs->rx_port]; 4527 dev_info = &port->dev_info; 4528 rxq_conf = &port->rxq[fs->rx_queue].conf; 4529 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4530 == 0 || rxq_conf->share_group == 0) 4531 /* Not shared rxq. */ 4532 continue; 4533 if (domain_id != port->dev_info.switch_info.domain_id) 4534 continue; 4535 if (rxq_conf->share_group != share_group) 4536 continue; 4537 if (rxq_conf->share_qid != share_rxq) 4538 continue; 4539 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 4540 share_group, share_rxq); 4541 printf(" lcore %hhu Port %hu queue %hu\n", 4542 src_lc, src_port, src_rxq); 4543 printf(" lcore %hhu Port %hu queue %hu\n", 4544 lc_id, fs->rx_port, fs->rx_queue); 4545 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 4546 nb_rxq); 4547 return true; 4548 } 4549 } 4550 return false; 4551 } 4552 4553 /* 4554 * Check shared rxq configuration. 4555 * 4556 * Shared group must not being scheduled on different core. 4557 */ 4558 bool 4559 pkt_fwd_shared_rxq_check(void) 4560 { 4561 streamid_t sm_id; 4562 streamid_t nb_fs_per_lcore; 4563 lcoreid_t nb_fc; 4564 lcoreid_t lc_id; 4565 struct fwd_stream *fs; 4566 uint16_t domain_id; 4567 struct rte_port *port; 4568 struct rte_eth_dev_info *dev_info; 4569 struct rte_eth_rxconf *rxq_conf; 4570 4571 if (rxq_share == 0) 4572 return true; 4573 nb_fc = cur_fwd_config.nb_fwd_lcores; 4574 /* 4575 * Check streams on each core, make sure the same switch domain + 4576 * group + queue doesn't get scheduled on other cores. 4577 */ 4578 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 4579 sm_id = fwd_lcores[lc_id]->stream_idx; 4580 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4581 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4582 sm_id++) { 4583 fs = fwd_streams[sm_id]; 4584 /* Update lcore info stream being scheduled. */ 4585 fs->lcore = fwd_lcores[lc_id]; 4586 port = &ports[fs->rx_port]; 4587 dev_info = &port->dev_info; 4588 rxq_conf = &port->rxq[fs->rx_queue].conf; 4589 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4590 == 0 || rxq_conf->share_group == 0) 4591 /* Not shared rxq. */ 4592 continue; 4593 /* Check shared rxq not scheduled on remaining cores. */ 4594 domain_id = port->dev_info.switch_info.domain_id; 4595 if (fwd_stream_on_other_lcores(domain_id, lc_id, 4596 fs->rx_port, 4597 fs->rx_queue, 4598 rxq_conf->share_group, 4599 rxq_conf->share_qid)) 4600 return false; 4601 } 4602 } 4603 return true; 4604 } 4605 4606 /* 4607 * Setup forwarding configuration for each logical core. 4608 */ 4609 static void 4610 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 4611 { 4612 streamid_t nb_fs_per_lcore; 4613 streamid_t nb_fs; 4614 streamid_t sm_id; 4615 lcoreid_t nb_extra; 4616 lcoreid_t nb_fc; 4617 lcoreid_t nb_lc; 4618 lcoreid_t lc_id; 4619 4620 nb_fs = cfg->nb_fwd_streams; 4621 nb_fc = cfg->nb_fwd_lcores; 4622 if (nb_fs <= nb_fc) { 4623 nb_fs_per_lcore = 1; 4624 nb_extra = 0; 4625 } else { 4626 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 4627 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 4628 } 4629 4630 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 4631 sm_id = 0; 4632 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 4633 fwd_lcores[lc_id]->stream_idx = sm_id; 4634 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 4635 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4636 } 4637 4638 /* 4639 * Assign extra remaining streams, if any. 4640 */ 4641 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 4642 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 4643 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 4644 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 4645 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4646 } 4647 } 4648 4649 static portid_t 4650 fwd_topology_tx_port_get(portid_t rxp) 4651 { 4652 static int warning_once = 1; 4653 4654 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 4655 4656 switch (port_topology) { 4657 default: 4658 case PORT_TOPOLOGY_PAIRED: 4659 if ((rxp & 0x1) == 0) { 4660 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 4661 return rxp + 1; 4662 if (warning_once) { 4663 fprintf(stderr, 4664 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 4665 warning_once = 0; 4666 } 4667 return rxp; 4668 } 4669 return rxp - 1; 4670 case PORT_TOPOLOGY_CHAINED: 4671 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 4672 case PORT_TOPOLOGY_LOOP: 4673 return rxp; 4674 } 4675 } 4676 4677 static void 4678 simple_fwd_config_setup(void) 4679 { 4680 portid_t i; 4681 4682 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 4683 cur_fwd_config.nb_fwd_streams = 4684 (streamid_t) cur_fwd_config.nb_fwd_ports; 4685 4686 /* reinitialize forwarding streams */ 4687 init_fwd_streams(); 4688 4689 /* 4690 * In the simple forwarding test, the number of forwarding cores 4691 * must be lower or equal to the number of forwarding ports. 4692 */ 4693 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4694 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 4695 cur_fwd_config.nb_fwd_lcores = 4696 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 4697 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4698 4699 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 4700 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 4701 fwd_streams[i]->rx_queue = 0; 4702 fwd_streams[i]->tx_port = 4703 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 4704 fwd_streams[i]->tx_queue = 0; 4705 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 4706 fwd_streams[i]->retry_enabled = retry_enabled; 4707 } 4708 } 4709 4710 /** 4711 * For the RSS forwarding test all streams distributed over lcores. Each stream 4712 * being composed of a RX queue to poll on a RX port for input messages, 4713 * associated with a TX queue of a TX port where to send forwarded packets. 4714 */ 4715 static void 4716 rss_fwd_config_setup(void) 4717 { 4718 portid_t rxp; 4719 portid_t txp; 4720 queueid_t rxq; 4721 queueid_t nb_q; 4722 streamid_t sm_id; 4723 int start; 4724 int end; 4725 4726 nb_q = nb_rxq; 4727 if (nb_q > nb_txq) 4728 nb_q = nb_txq; 4729 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4730 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4731 cur_fwd_config.nb_fwd_streams = 4732 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4733 4734 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4735 cur_fwd_config.nb_fwd_lcores = 4736 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4737 4738 /* reinitialize forwarding streams */ 4739 init_fwd_streams(); 4740 4741 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4742 4743 if (proc_id > 0 && nb_q % num_procs != 0) 4744 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4745 4746 /** 4747 * In multi-process, All queues are allocated to different 4748 * processes based on num_procs and proc_id. For example: 4749 * if supports 4 queues(nb_q), 2 processes(num_procs), 4750 * the 0~1 queue for primary process. 4751 * the 2~3 queue for secondary process. 4752 */ 4753 start = proc_id * nb_q / num_procs; 4754 end = start + nb_q / num_procs; 4755 rxp = 0; 4756 rxq = start; 4757 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4758 struct fwd_stream *fs; 4759 4760 fs = fwd_streams[sm_id]; 4761 txp = fwd_topology_tx_port_get(rxp); 4762 fs->rx_port = fwd_ports_ids[rxp]; 4763 fs->rx_queue = rxq; 4764 fs->tx_port = fwd_ports_ids[txp]; 4765 fs->tx_queue = rxq; 4766 fs->peer_addr = fs->tx_port; 4767 fs->retry_enabled = retry_enabled; 4768 rxp++; 4769 if (rxp < nb_fwd_ports) 4770 continue; 4771 rxp = 0; 4772 rxq++; 4773 if (rxq >= end) 4774 rxq = start; 4775 } 4776 } 4777 4778 static uint16_t 4779 get_fwd_port_total_tc_num(void) 4780 { 4781 struct rte_eth_dcb_info dcb_info; 4782 uint16_t total_tc_num = 0; 4783 unsigned int i; 4784 4785 for (i = 0; i < nb_fwd_ports; i++) { 4786 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4787 total_tc_num += dcb_info.nb_tcs; 4788 } 4789 4790 return total_tc_num; 4791 } 4792 4793 /** 4794 * For the DCB forwarding test, each core is assigned on each traffic class. 4795 * 4796 * Each core is assigned a multi-stream, each stream being composed of 4797 * a RX queue to poll on a RX port for input messages, associated with 4798 * a TX queue of a TX port where to send forwarded packets. All RX and 4799 * TX queues are mapping to the same traffic class. 4800 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4801 * the same core 4802 */ 4803 static void 4804 dcb_fwd_config_setup(void) 4805 { 4806 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4807 portid_t txp, rxp = 0; 4808 queueid_t txq, rxq = 0; 4809 lcoreid_t lc_id; 4810 uint16_t nb_rx_queue, nb_tx_queue; 4811 uint16_t i, j, k, sm_id = 0; 4812 uint16_t total_tc_num; 4813 struct rte_port *port; 4814 uint8_t tc = 0; 4815 portid_t pid; 4816 int ret; 4817 4818 /* 4819 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4820 * or RTE_PORT_STOPPED. 4821 * 4822 * Re-configure ports to get updated mapping between tc and queue in 4823 * case the queue number of the port is changed. Skip for started ports 4824 * since modifying queue number and calling dev_configure need to stop 4825 * ports first. 4826 */ 4827 for (pid = 0; pid < nb_fwd_ports; pid++) { 4828 if (port_is_started(pid) == 1) 4829 continue; 4830 4831 port = &ports[pid]; 4832 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4833 &port->dev_conf); 4834 if (ret < 0) { 4835 fprintf(stderr, 4836 "Failed to re-configure port %d, ret = %d.\n", 4837 pid, ret); 4838 return; 4839 } 4840 } 4841 4842 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4843 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4844 cur_fwd_config.nb_fwd_streams = 4845 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4846 total_tc_num = get_fwd_port_total_tc_num(); 4847 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4848 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4849 4850 /* reinitialize forwarding streams */ 4851 init_fwd_streams(); 4852 sm_id = 0; 4853 txp = 1; 4854 /* get the dcb info on the first RX and TX ports */ 4855 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4856 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4857 4858 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4859 fwd_lcores[lc_id]->stream_nb = 0; 4860 fwd_lcores[lc_id]->stream_idx = sm_id; 4861 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4862 /* if the nb_queue is zero, means this tc is 4863 * not enabled on the POOL 4864 */ 4865 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4866 break; 4867 k = fwd_lcores[lc_id]->stream_nb + 4868 fwd_lcores[lc_id]->stream_idx; 4869 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4870 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4871 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4872 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4873 for (j = 0; j < nb_rx_queue; j++) { 4874 struct fwd_stream *fs; 4875 4876 fs = fwd_streams[k + j]; 4877 fs->rx_port = fwd_ports_ids[rxp]; 4878 fs->rx_queue = rxq + j; 4879 fs->tx_port = fwd_ports_ids[txp]; 4880 fs->tx_queue = txq + j % nb_tx_queue; 4881 fs->peer_addr = fs->tx_port; 4882 fs->retry_enabled = retry_enabled; 4883 } 4884 fwd_lcores[lc_id]->stream_nb += 4885 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4886 } 4887 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4888 4889 tc++; 4890 if (tc < rxp_dcb_info.nb_tcs) 4891 continue; 4892 /* Restart from TC 0 on next RX port */ 4893 tc = 0; 4894 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4895 rxp = (portid_t) 4896 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4897 else 4898 rxp++; 4899 if (rxp >= nb_fwd_ports) 4900 return; 4901 /* get the dcb information on next RX and TX ports */ 4902 if ((rxp & 0x1) == 0) 4903 txp = (portid_t) (rxp + 1); 4904 else 4905 txp = (portid_t) (rxp - 1); 4906 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4907 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4908 } 4909 } 4910 4911 static void 4912 icmp_echo_config_setup(void) 4913 { 4914 portid_t rxp; 4915 queueid_t rxq; 4916 lcoreid_t lc_id; 4917 uint16_t sm_id; 4918 4919 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4920 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4921 (nb_txq * nb_fwd_ports); 4922 else 4923 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4924 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4925 cur_fwd_config.nb_fwd_streams = 4926 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4927 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4928 cur_fwd_config.nb_fwd_lcores = 4929 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4930 if (verbose_level > 0) { 4931 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4932 __FUNCTION__, 4933 cur_fwd_config.nb_fwd_lcores, 4934 cur_fwd_config.nb_fwd_ports, 4935 cur_fwd_config.nb_fwd_streams); 4936 } 4937 4938 /* reinitialize forwarding streams */ 4939 init_fwd_streams(); 4940 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4941 rxp = 0; rxq = 0; 4942 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4943 if (verbose_level > 0) 4944 printf(" core=%d: \n", lc_id); 4945 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4946 struct fwd_stream *fs; 4947 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4948 fs->rx_port = fwd_ports_ids[rxp]; 4949 fs->rx_queue = rxq; 4950 fs->tx_port = fs->rx_port; 4951 fs->tx_queue = rxq; 4952 fs->peer_addr = fs->tx_port; 4953 fs->retry_enabled = retry_enabled; 4954 if (verbose_level > 0) 4955 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4956 sm_id, fs->rx_port, fs->rx_queue, 4957 fs->tx_queue); 4958 rxq = (queueid_t) (rxq + 1); 4959 if (rxq == nb_rxq) { 4960 rxq = 0; 4961 rxp = (portid_t) (rxp + 1); 4962 } 4963 } 4964 } 4965 } 4966 4967 void 4968 fwd_config_setup(void) 4969 { 4970 struct rte_port *port; 4971 portid_t pt_id; 4972 unsigned int i; 4973 4974 cur_fwd_config.fwd_eng = cur_fwd_eng; 4975 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4976 icmp_echo_config_setup(); 4977 return; 4978 } 4979 4980 if ((nb_rxq > 1) && (nb_txq > 1)){ 4981 if (dcb_config) { 4982 for (i = 0; i < nb_fwd_ports; i++) { 4983 pt_id = fwd_ports_ids[i]; 4984 port = &ports[pt_id]; 4985 if (!port->dcb_flag) { 4986 fprintf(stderr, 4987 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4988 return; 4989 } 4990 } 4991 if (nb_fwd_lcores == 1) { 4992 fprintf(stderr, 4993 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4994 return; 4995 } 4996 4997 dcb_fwd_config_setup(); 4998 } else 4999 rss_fwd_config_setup(); 5000 } 5001 else 5002 simple_fwd_config_setup(); 5003 } 5004 5005 static const char * 5006 mp_alloc_to_str(uint8_t mode) 5007 { 5008 switch (mode) { 5009 case MP_ALLOC_NATIVE: 5010 return "native"; 5011 case MP_ALLOC_ANON: 5012 return "anon"; 5013 case MP_ALLOC_XMEM: 5014 return "xmem"; 5015 case MP_ALLOC_XMEM_HUGE: 5016 return "xmemhuge"; 5017 case MP_ALLOC_XBUF: 5018 return "xbuf"; 5019 default: 5020 return "invalid"; 5021 } 5022 } 5023 5024 void 5025 pkt_fwd_config_display(struct fwd_config *cfg) 5026 { 5027 struct fwd_stream *fs; 5028 lcoreid_t lc_id; 5029 streamid_t sm_id; 5030 5031 printf("%s%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 5032 "NUMA support %s, MP allocation mode: %s\n", 5033 cfg->fwd_eng->fwd_mode_name, 5034 cfg->fwd_eng->status ? "-" : "", 5035 cfg->fwd_eng->status ? cfg->fwd_eng->status : "", 5036 retry_enabled == 0 ? "" : " with retry", 5037 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 5038 numa_support == 1 ? "enabled" : "disabled", 5039 mp_alloc_to_str(mp_alloc_type)); 5040 5041 if (retry_enabled) 5042 printf("TX retry num: %u, delay between TX retries: %uus\n", 5043 burst_tx_retry_num, burst_tx_delay_time); 5044 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 5045 printf("Logical Core %u (socket %u) forwards packets on " 5046 "%d streams:", 5047 fwd_lcores_cpuids[lc_id], 5048 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 5049 fwd_lcores[lc_id]->stream_nb); 5050 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 5051 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 5052 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 5053 "P=%d/Q=%d (socket %u) ", 5054 fs->rx_port, fs->rx_queue, 5055 ports[fs->rx_port].socket_id, 5056 fs->tx_port, fs->tx_queue, 5057 ports[fs->tx_port].socket_id); 5058 print_ethaddr("peer=", 5059 &peer_eth_addrs[fs->peer_addr]); 5060 } 5061 printf("\n"); 5062 } 5063 printf("\n"); 5064 } 5065 5066 void 5067 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 5068 { 5069 struct rte_ether_addr new_peer_addr; 5070 if (!rte_eth_dev_is_valid_port(port_id)) { 5071 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 5072 return; 5073 } 5074 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 5075 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 5076 peer_addr); 5077 return; 5078 } 5079 peer_eth_addrs[port_id] = new_peer_addr; 5080 } 5081 5082 int 5083 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 5084 { 5085 unsigned int i; 5086 unsigned int lcore_cpuid; 5087 int record_now; 5088 5089 record_now = 0; 5090 again: 5091 for (i = 0; i < nb_lc; i++) { 5092 lcore_cpuid = lcorelist[i]; 5093 if (! rte_lcore_is_enabled(lcore_cpuid)) { 5094 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 5095 return -1; 5096 } 5097 if (lcore_cpuid == rte_get_main_lcore()) { 5098 fprintf(stderr, 5099 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 5100 lcore_cpuid); 5101 return -1; 5102 } 5103 if (record_now) 5104 fwd_lcores_cpuids[i] = lcore_cpuid; 5105 } 5106 if (record_now == 0) { 5107 record_now = 1; 5108 goto again; 5109 } 5110 nb_cfg_lcores = (lcoreid_t) nb_lc; 5111 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 5112 printf("previous number of forwarding cores %u - changed to " 5113 "number of configured cores %u\n", 5114 (unsigned int) nb_fwd_lcores, nb_lc); 5115 nb_fwd_lcores = (lcoreid_t) nb_lc; 5116 } 5117 5118 return 0; 5119 } 5120 5121 int 5122 set_fwd_lcores_mask(uint64_t lcoremask) 5123 { 5124 unsigned int lcorelist[64]; 5125 unsigned int nb_lc; 5126 unsigned int i; 5127 5128 if (lcoremask == 0) { 5129 fprintf(stderr, "Invalid NULL mask of cores\n"); 5130 return -1; 5131 } 5132 nb_lc = 0; 5133 for (i = 0; i < 64; i++) { 5134 if (! ((uint64_t)(1ULL << i) & lcoremask)) 5135 continue; 5136 lcorelist[nb_lc++] = i; 5137 } 5138 return set_fwd_lcores_list(lcorelist, nb_lc); 5139 } 5140 5141 void 5142 set_fwd_lcores_number(uint16_t nb_lc) 5143 { 5144 if (test_done == 0) { 5145 fprintf(stderr, "Please stop forwarding first\n"); 5146 return; 5147 } 5148 if (nb_lc > nb_cfg_lcores) { 5149 fprintf(stderr, 5150 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 5151 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 5152 return; 5153 } 5154 nb_fwd_lcores = (lcoreid_t) nb_lc; 5155 printf("Number of forwarding cores set to %u\n", 5156 (unsigned int) nb_fwd_lcores); 5157 } 5158 5159 void 5160 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 5161 { 5162 unsigned int i; 5163 portid_t port_id; 5164 int record_now; 5165 5166 record_now = 0; 5167 again: 5168 for (i = 0; i < nb_pt; i++) { 5169 port_id = (portid_t) portlist[i]; 5170 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5171 return; 5172 if (record_now) 5173 fwd_ports_ids[i] = port_id; 5174 } 5175 if (record_now == 0) { 5176 record_now = 1; 5177 goto again; 5178 } 5179 nb_cfg_ports = (portid_t) nb_pt; 5180 if (nb_fwd_ports != (portid_t) nb_pt) { 5181 printf("previous number of forwarding ports %u - changed to " 5182 "number of configured ports %u\n", 5183 (unsigned int) nb_fwd_ports, nb_pt); 5184 nb_fwd_ports = (portid_t) nb_pt; 5185 } 5186 } 5187 5188 /** 5189 * Parse the user input and obtain the list of forwarding ports 5190 * 5191 * @param[in] list 5192 * String containing the user input. User can specify 5193 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 5194 * For example, if the user wants to use all the available 5195 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 5196 * If the user wants to use only the ports 1,2 then the input 5197 * is 1,2. 5198 * valid characters are '-' and ',' 5199 * @param[out] values 5200 * This array will be filled with a list of port IDs 5201 * based on the user input 5202 * Note that duplicate entries are discarded and only the first 5203 * count entries in this array are port IDs and all the rest 5204 * will contain default values 5205 * @param[in] maxsize 5206 * This parameter denotes 2 things 5207 * 1) Number of elements in the values array 5208 * 2) Maximum value of each element in the values array 5209 * @return 5210 * On success, returns total count of parsed port IDs 5211 * On failure, returns 0 5212 */ 5213 static unsigned int 5214 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 5215 { 5216 unsigned int count = 0; 5217 char *end = NULL; 5218 int min, max; 5219 int value, i; 5220 unsigned int marked[maxsize]; 5221 5222 if (list == NULL || values == NULL) 5223 return 0; 5224 5225 for (i = 0; i < (int)maxsize; i++) 5226 marked[i] = 0; 5227 5228 min = INT_MAX; 5229 5230 do { 5231 /*Remove the blank spaces if any*/ 5232 while (isblank(*list)) 5233 list++; 5234 if (*list == '\0') 5235 break; 5236 errno = 0; 5237 value = strtol(list, &end, 10); 5238 if (errno || end == NULL) 5239 return 0; 5240 if (value < 0 || value >= (int)maxsize) 5241 return 0; 5242 while (isblank(*end)) 5243 end++; 5244 if (*end == '-' && min == INT_MAX) { 5245 min = value; 5246 } else if ((*end == ',') || (*end == '\0')) { 5247 max = value; 5248 if (min == INT_MAX) 5249 min = value; 5250 for (i = min; i <= max; i++) { 5251 if (count < maxsize) { 5252 if (marked[i]) 5253 continue; 5254 values[count] = i; 5255 marked[i] = 1; 5256 count++; 5257 } 5258 } 5259 min = INT_MAX; 5260 } else 5261 return 0; 5262 list = end + 1; 5263 } while (*end != '\0'); 5264 5265 return count; 5266 } 5267 5268 void 5269 parse_fwd_portlist(const char *portlist) 5270 { 5271 unsigned int portcount; 5272 unsigned int portindex[RTE_MAX_ETHPORTS]; 5273 unsigned int i, valid_port_count = 0; 5274 5275 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 5276 if (!portcount) 5277 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 5278 5279 /* 5280 * Here we verify the validity of the ports 5281 * and thereby calculate the total number of 5282 * valid ports 5283 */ 5284 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 5285 if (rte_eth_dev_is_valid_port(portindex[i])) { 5286 portindex[valid_port_count] = portindex[i]; 5287 valid_port_count++; 5288 } 5289 } 5290 5291 set_fwd_ports_list(portindex, valid_port_count); 5292 } 5293 5294 void 5295 set_fwd_ports_mask(uint64_t portmask) 5296 { 5297 unsigned int portlist[64]; 5298 unsigned int nb_pt; 5299 unsigned int i; 5300 5301 if (portmask == 0) { 5302 fprintf(stderr, "Invalid NULL mask of ports\n"); 5303 return; 5304 } 5305 nb_pt = 0; 5306 RTE_ETH_FOREACH_DEV(i) { 5307 if (! ((uint64_t)(1ULL << i) & portmask)) 5308 continue; 5309 portlist[nb_pt++] = i; 5310 } 5311 set_fwd_ports_list(portlist, nb_pt); 5312 } 5313 5314 void 5315 set_fwd_ports_number(uint16_t nb_pt) 5316 { 5317 if (nb_pt > nb_cfg_ports) { 5318 fprintf(stderr, 5319 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 5320 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 5321 return; 5322 } 5323 nb_fwd_ports = (portid_t) nb_pt; 5324 printf("Number of forwarding ports set to %u\n", 5325 (unsigned int) nb_fwd_ports); 5326 } 5327 5328 int 5329 port_is_forwarding(portid_t port_id) 5330 { 5331 unsigned int i; 5332 5333 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5334 return -1; 5335 5336 for (i = 0; i < nb_fwd_ports; i++) { 5337 if (fwd_ports_ids[i] == port_id) 5338 return 1; 5339 } 5340 5341 return 0; 5342 } 5343 5344 void 5345 set_nb_pkt_per_burst(uint16_t nb) 5346 { 5347 if (nb > MAX_PKT_BURST) { 5348 fprintf(stderr, 5349 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 5350 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 5351 return; 5352 } 5353 nb_pkt_per_burst = nb; 5354 printf("Number of packets per burst set to %u\n", 5355 (unsigned int) nb_pkt_per_burst); 5356 } 5357 5358 static const char * 5359 tx_split_get_name(enum tx_pkt_split split) 5360 { 5361 uint32_t i; 5362 5363 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5364 if (tx_split_name[i].split == split) 5365 return tx_split_name[i].name; 5366 } 5367 return NULL; 5368 } 5369 5370 void 5371 set_tx_pkt_split(const char *name) 5372 { 5373 uint32_t i; 5374 5375 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5376 if (strcmp(tx_split_name[i].name, name) == 0) { 5377 tx_pkt_split = tx_split_name[i].split; 5378 return; 5379 } 5380 } 5381 fprintf(stderr, "unknown value: \"%s\"\n", name); 5382 } 5383 5384 int 5385 parse_fec_mode(const char *name, uint32_t *fec_capa) 5386 { 5387 uint8_t i; 5388 5389 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 5390 if (strcmp(fec_mode_name[i].name, name) == 0) { 5391 *fec_capa = 5392 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 5393 return 0; 5394 } 5395 } 5396 return -1; 5397 } 5398 5399 void 5400 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 5401 { 5402 unsigned int i, j; 5403 5404 printf("FEC capabilities:\n"); 5405 5406 for (i = 0; i < num; i++) { 5407 printf("%s : ", 5408 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 5409 5410 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 5411 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 5412 speed_fec_capa[i].capa) 5413 printf("%s ", fec_mode_name[j].name); 5414 } 5415 printf("\n"); 5416 } 5417 } 5418 5419 void 5420 show_rx_pkt_offsets(void) 5421 { 5422 uint32_t i, n; 5423 5424 n = rx_pkt_nb_offs; 5425 printf("Number of offsets: %u\n", n); 5426 if (n) { 5427 printf("Segment offsets: "); 5428 for (i = 0; i != n - 1; i++) 5429 printf("%hu,", rx_pkt_seg_offsets[i]); 5430 printf("%hu\n", rx_pkt_seg_lengths[i]); 5431 } 5432 } 5433 5434 void 5435 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 5436 { 5437 unsigned int i; 5438 5439 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 5440 printf("nb segments per RX packets=%u >= " 5441 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 5442 return; 5443 } 5444 5445 /* 5446 * No extra check here, the segment length will be checked by PMD 5447 * in the extended queue setup. 5448 */ 5449 for (i = 0; i < nb_offs; i++) { 5450 if (seg_offsets[i] >= UINT16_MAX) { 5451 printf("offset[%u]=%u > UINT16_MAX - give up\n", 5452 i, seg_offsets[i]); 5453 return; 5454 } 5455 } 5456 5457 for (i = 0; i < nb_offs; i++) 5458 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 5459 5460 rx_pkt_nb_offs = (uint8_t) nb_offs; 5461 } 5462 5463 void 5464 show_rx_pkt_segments(void) 5465 { 5466 uint32_t i, n; 5467 5468 n = rx_pkt_nb_segs; 5469 printf("Number of segments: %u\n", n); 5470 if (n) { 5471 printf("Segment sizes: "); 5472 for (i = 0; i != n - 1; i++) 5473 printf("%hu,", rx_pkt_seg_lengths[i]); 5474 printf("%hu\n", rx_pkt_seg_lengths[i]); 5475 } 5476 } 5477 5478 static const char *get_ptype_str(uint32_t ptype) 5479 { 5480 const char *str; 5481 5482 switch (ptype) { 5483 case RTE_PTYPE_L2_ETHER: 5484 str = "eth"; 5485 break; 5486 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 5487 str = "ipv4"; 5488 break; 5489 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 5490 str = "ipv6"; 5491 break; 5492 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5493 str = "ipv4-tcp"; 5494 break; 5495 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5496 str = "ipv4-udp"; 5497 break; 5498 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5499 str = "ipv4-sctp"; 5500 break; 5501 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5502 str = "ipv6-tcp"; 5503 break; 5504 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5505 str = "ipv6-udp"; 5506 break; 5507 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5508 str = "ipv6-sctp"; 5509 break; 5510 case RTE_PTYPE_TUNNEL_GRENAT: 5511 str = "grenat"; 5512 break; 5513 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER: 5514 str = "inner-eth"; 5515 break; 5516 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5517 | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 5518 str = "inner-ipv4"; 5519 break; 5520 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5521 | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 5522 str = "inner-ipv6"; 5523 break; 5524 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5525 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5526 str = "inner-ipv4-tcp"; 5527 break; 5528 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5529 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5530 str = "inner-ipv4-udp"; 5531 break; 5532 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5533 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5534 str = "inner-ipv4-sctp"; 5535 break; 5536 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5537 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5538 str = "inner-ipv6-tcp"; 5539 break; 5540 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5541 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5542 str = "inner-ipv6-udp"; 5543 break; 5544 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5545 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5546 str = "inner-ipv6-sctp"; 5547 break; 5548 default: 5549 str = "unsupported"; 5550 } 5551 5552 return str; 5553 } 5554 5555 void 5556 show_rx_pkt_hdrs(void) 5557 { 5558 uint32_t i, n; 5559 5560 n = rx_pkt_nb_segs; 5561 printf("Number of segments: %u\n", n); 5562 if (n) { 5563 printf("Packet segs: "); 5564 for (i = 0; i < n - 1; i++) 5565 printf("%s, ", get_ptype_str(rx_pkt_hdr_protos[i])); 5566 printf("payload\n"); 5567 } 5568 } 5569 5570 void 5571 set_rx_pkt_hdrs(unsigned int *seg_hdrs, unsigned int nb_segs) 5572 { 5573 unsigned int i; 5574 5575 if (nb_segs + 1 > MAX_SEGS_BUFFER_SPLIT) { 5576 printf("nb segments per RX packets=%u > " 5577 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs + 1); 5578 return; 5579 } 5580 5581 memset(rx_pkt_hdr_protos, 0, sizeof(rx_pkt_hdr_protos)); 5582 5583 for (i = 0; i < nb_segs; i++) 5584 rx_pkt_hdr_protos[i] = (uint32_t)seg_hdrs[i]; 5585 /* 5586 * We calculate the number of hdrs, but payload is not included, 5587 * so rx_pkt_nb_segs would increase 1. 5588 */ 5589 rx_pkt_nb_segs = nb_segs + 1; 5590 } 5591 5592 void 5593 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5594 { 5595 unsigned int i; 5596 5597 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 5598 printf("nb segments per RX packets=%u >= " 5599 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 5600 return; 5601 } 5602 5603 /* 5604 * No extra check here, the segment length will be checked by PMD 5605 * in the extended queue setup. 5606 */ 5607 for (i = 0; i < nb_segs; i++) { 5608 if (seg_lengths[i] >= UINT16_MAX) { 5609 printf("length[%u]=%u > UINT16_MAX - give up\n", 5610 i, seg_lengths[i]); 5611 return; 5612 } 5613 } 5614 5615 for (i = 0; i < nb_segs; i++) 5616 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5617 5618 rx_pkt_nb_segs = (uint8_t) nb_segs; 5619 } 5620 5621 void 5622 show_tx_pkt_segments(void) 5623 { 5624 uint32_t i, n; 5625 const char *split; 5626 5627 n = tx_pkt_nb_segs; 5628 split = tx_split_get_name(tx_pkt_split); 5629 5630 printf("Number of segments: %u\n", n); 5631 printf("Segment sizes: "); 5632 for (i = 0; i != n - 1; i++) 5633 printf("%hu,", tx_pkt_seg_lengths[i]); 5634 printf("%hu\n", tx_pkt_seg_lengths[i]); 5635 printf("Split packet: %s\n", split); 5636 } 5637 5638 static bool 5639 nb_segs_is_invalid(unsigned int nb_segs) 5640 { 5641 uint16_t ring_size; 5642 uint16_t queue_id; 5643 uint16_t port_id; 5644 int ret; 5645 5646 RTE_ETH_FOREACH_DEV(port_id) { 5647 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 5648 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 5649 if (ret) { 5650 /* Port may not be initialized yet, can't say 5651 * the port is invalid in this stage. 5652 */ 5653 continue; 5654 } 5655 if (ring_size < nb_segs) { 5656 printf("nb segments per TX packets=%u >= TX " 5657 "queue(%u) ring_size=%u - txpkts ignored\n", 5658 nb_segs, queue_id, ring_size); 5659 return true; 5660 } 5661 } 5662 } 5663 5664 return false; 5665 } 5666 5667 void 5668 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5669 { 5670 uint16_t tx_pkt_len; 5671 unsigned int i; 5672 5673 /* 5674 * For single segment settings failed check is ignored. 5675 * It is a very basic capability to send the single segment 5676 * packets, suppose it is always supported. 5677 */ 5678 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 5679 fprintf(stderr, 5680 "Tx segment size(%u) is not supported - txpkts ignored\n", 5681 nb_segs); 5682 return; 5683 } 5684 5685 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 5686 fprintf(stderr, 5687 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 5688 nb_segs, RTE_MAX_SEGS_PER_PKT); 5689 return; 5690 } 5691 5692 /* 5693 * Check that each segment length is greater or equal than 5694 * the mbuf data size. 5695 * Check also that the total packet length is greater or equal than the 5696 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 5697 * 20 + 8). 5698 */ 5699 tx_pkt_len = 0; 5700 for (i = 0; i < nb_segs; i++) { 5701 if (seg_lengths[i] > mbuf_data_size[0]) { 5702 fprintf(stderr, 5703 "length[%u]=%u > mbuf_data_size=%u - give up\n", 5704 i, seg_lengths[i], mbuf_data_size[0]); 5705 return; 5706 } 5707 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 5708 } 5709 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 5710 fprintf(stderr, "total packet length=%u < %d - give up\n", 5711 (unsigned) tx_pkt_len, 5712 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 5713 return; 5714 } 5715 5716 for (i = 0; i < nb_segs; i++) 5717 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5718 5719 tx_pkt_length = tx_pkt_len; 5720 tx_pkt_nb_segs = (uint8_t) nb_segs; 5721 } 5722 5723 void 5724 show_tx_pkt_times(void) 5725 { 5726 printf("Interburst gap: %u\n", tx_pkt_times_inter); 5727 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 5728 } 5729 5730 void 5731 set_tx_pkt_times(unsigned int *tx_times) 5732 { 5733 tx_pkt_times_inter = tx_times[0]; 5734 tx_pkt_times_intra = tx_times[1]; 5735 } 5736 5737 #ifdef RTE_LIB_GRO 5738 void 5739 setup_gro(const char *onoff, portid_t port_id) 5740 { 5741 if (!rte_eth_dev_is_valid_port(port_id)) { 5742 fprintf(stderr, "invalid port id %u\n", port_id); 5743 return; 5744 } 5745 if (test_done == 0) { 5746 fprintf(stderr, 5747 "Before enable/disable GRO, please stop forwarding first\n"); 5748 return; 5749 } 5750 if (strcmp(onoff, "on") == 0) { 5751 if (gro_ports[port_id].enable != 0) { 5752 fprintf(stderr, 5753 "Port %u has enabled GRO. Please disable GRO first\n", 5754 port_id); 5755 return; 5756 } 5757 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5758 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 5759 gro_ports[port_id].param.max_flow_num = 5760 GRO_DEFAULT_FLOW_NUM; 5761 gro_ports[port_id].param.max_item_per_flow = 5762 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 5763 } 5764 gro_ports[port_id].enable = 1; 5765 } else { 5766 if (gro_ports[port_id].enable == 0) { 5767 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 5768 return; 5769 } 5770 gro_ports[port_id].enable = 0; 5771 } 5772 } 5773 5774 void 5775 setup_gro_flush_cycles(uint8_t cycles) 5776 { 5777 if (test_done == 0) { 5778 fprintf(stderr, 5779 "Before change flush interval for GRO, please stop forwarding first.\n"); 5780 return; 5781 } 5782 5783 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 5784 GRO_DEFAULT_FLUSH_CYCLES) { 5785 fprintf(stderr, 5786 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 5787 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 5788 cycles = GRO_DEFAULT_FLUSH_CYCLES; 5789 } 5790 5791 gro_flush_cycles = cycles; 5792 } 5793 5794 void 5795 show_gro(portid_t port_id) 5796 { 5797 struct rte_gro_param *param; 5798 uint32_t max_pkts_num; 5799 5800 param = &gro_ports[port_id].param; 5801 5802 if (!rte_eth_dev_is_valid_port(port_id)) { 5803 fprintf(stderr, "Invalid port id %u.\n", port_id); 5804 return; 5805 } 5806 if (gro_ports[port_id].enable) { 5807 printf("GRO type: TCP/IPv4\n"); 5808 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5809 max_pkts_num = param->max_flow_num * 5810 param->max_item_per_flow; 5811 } else 5812 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 5813 printf("Max number of packets to perform GRO: %u\n", 5814 max_pkts_num); 5815 printf("Flushing cycles: %u\n", gro_flush_cycles); 5816 } else 5817 printf("Port %u doesn't enable GRO.\n", port_id); 5818 } 5819 #endif /* RTE_LIB_GRO */ 5820 5821 #ifdef RTE_LIB_GSO 5822 void 5823 setup_gso(const char *mode, portid_t port_id) 5824 { 5825 if (!rte_eth_dev_is_valid_port(port_id)) { 5826 fprintf(stderr, "invalid port id %u\n", port_id); 5827 return; 5828 } 5829 if (strcmp(mode, "on") == 0) { 5830 if (test_done == 0) { 5831 fprintf(stderr, 5832 "before enabling GSO, please stop forwarding first\n"); 5833 return; 5834 } 5835 gso_ports[port_id].enable = 1; 5836 } else if (strcmp(mode, "off") == 0) { 5837 if (test_done == 0) { 5838 fprintf(stderr, 5839 "before disabling GSO, please stop forwarding first\n"); 5840 return; 5841 } 5842 gso_ports[port_id].enable = 0; 5843 } 5844 } 5845 #endif /* RTE_LIB_GSO */ 5846 5847 char* 5848 list_pkt_forwarding_modes(void) 5849 { 5850 static char fwd_modes[128] = ""; 5851 const char *separator = "|"; 5852 struct fwd_engine *fwd_eng; 5853 unsigned i = 0; 5854 5855 if (strlen (fwd_modes) == 0) { 5856 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5857 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5858 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5859 strncat(fwd_modes, separator, 5860 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5861 } 5862 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5863 } 5864 5865 return fwd_modes; 5866 } 5867 5868 char* 5869 list_pkt_forwarding_retry_modes(void) 5870 { 5871 static char fwd_modes[128] = ""; 5872 const char *separator = "|"; 5873 struct fwd_engine *fwd_eng; 5874 unsigned i = 0; 5875 5876 if (strlen(fwd_modes) == 0) { 5877 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5878 if (fwd_eng == &rx_only_engine) 5879 continue; 5880 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5881 sizeof(fwd_modes) - 5882 strlen(fwd_modes) - 1); 5883 strncat(fwd_modes, separator, 5884 sizeof(fwd_modes) - 5885 strlen(fwd_modes) - 1); 5886 } 5887 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5888 } 5889 5890 return fwd_modes; 5891 } 5892 5893 void 5894 set_pkt_forwarding_mode(const char *fwd_mode_name) 5895 { 5896 struct fwd_engine *fwd_eng; 5897 unsigned i; 5898 5899 i = 0; 5900 while ((fwd_eng = fwd_engines[i]) != NULL) { 5901 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5902 printf("Set %s packet forwarding mode%s\n", 5903 fwd_mode_name, 5904 retry_enabled == 0 ? "" : " with retry"); 5905 cur_fwd_eng = fwd_eng; 5906 return; 5907 } 5908 i++; 5909 } 5910 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5911 } 5912 5913 void 5914 add_rx_dump_callbacks(portid_t portid) 5915 { 5916 struct rte_eth_dev_info dev_info; 5917 uint16_t queue; 5918 int ret; 5919 5920 if (port_id_is_invalid(portid, ENABLED_WARN)) 5921 return; 5922 5923 ret = eth_dev_info_get_print_err(portid, &dev_info); 5924 if (ret != 0) 5925 return; 5926 5927 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5928 if (!ports[portid].rx_dump_cb[queue]) 5929 ports[portid].rx_dump_cb[queue] = 5930 rte_eth_add_rx_callback(portid, queue, 5931 dump_rx_pkts, NULL); 5932 } 5933 5934 void 5935 add_tx_dump_callbacks(portid_t portid) 5936 { 5937 struct rte_eth_dev_info dev_info; 5938 uint16_t queue; 5939 int ret; 5940 5941 if (port_id_is_invalid(portid, ENABLED_WARN)) 5942 return; 5943 5944 ret = eth_dev_info_get_print_err(portid, &dev_info); 5945 if (ret != 0) 5946 return; 5947 5948 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5949 if (!ports[portid].tx_dump_cb[queue]) 5950 ports[portid].tx_dump_cb[queue] = 5951 rte_eth_add_tx_callback(portid, queue, 5952 dump_tx_pkts, NULL); 5953 } 5954 5955 void 5956 remove_rx_dump_callbacks(portid_t portid) 5957 { 5958 struct rte_eth_dev_info dev_info; 5959 uint16_t queue; 5960 int ret; 5961 5962 if (port_id_is_invalid(portid, ENABLED_WARN)) 5963 return; 5964 5965 ret = eth_dev_info_get_print_err(portid, &dev_info); 5966 if (ret != 0) 5967 return; 5968 5969 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5970 if (ports[portid].rx_dump_cb[queue]) { 5971 rte_eth_remove_rx_callback(portid, queue, 5972 ports[portid].rx_dump_cb[queue]); 5973 ports[portid].rx_dump_cb[queue] = NULL; 5974 } 5975 } 5976 5977 void 5978 remove_tx_dump_callbacks(portid_t portid) 5979 { 5980 struct rte_eth_dev_info dev_info; 5981 uint16_t queue; 5982 int ret; 5983 5984 if (port_id_is_invalid(portid, ENABLED_WARN)) 5985 return; 5986 5987 ret = eth_dev_info_get_print_err(portid, &dev_info); 5988 if (ret != 0) 5989 return; 5990 5991 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5992 if (ports[portid].tx_dump_cb[queue]) { 5993 rte_eth_remove_tx_callback(portid, queue, 5994 ports[portid].tx_dump_cb[queue]); 5995 ports[portid].tx_dump_cb[queue] = NULL; 5996 } 5997 } 5998 5999 void 6000 configure_rxtx_dump_callbacks(uint16_t verbose) 6001 { 6002 portid_t portid; 6003 6004 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 6005 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 6006 return; 6007 #endif 6008 6009 RTE_ETH_FOREACH_DEV(portid) 6010 { 6011 if (verbose == 1 || verbose > 2) 6012 add_rx_dump_callbacks(portid); 6013 else 6014 remove_rx_dump_callbacks(portid); 6015 if (verbose >= 2) 6016 add_tx_dump_callbacks(portid); 6017 else 6018 remove_tx_dump_callbacks(portid); 6019 } 6020 } 6021 6022 void 6023 set_verbose_level(uint16_t vb_level) 6024 { 6025 printf("Change verbose level from %u to %u\n", 6026 (unsigned int) verbose_level, (unsigned int) vb_level); 6027 verbose_level = vb_level; 6028 configure_rxtx_dump_callbacks(verbose_level); 6029 } 6030 6031 void 6032 vlan_extend_set(portid_t port_id, int on) 6033 { 6034 int diag; 6035 int vlan_offload; 6036 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6037 6038 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6039 return; 6040 6041 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6042 6043 if (on) { 6044 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 6045 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6046 } else { 6047 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 6048 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 6049 } 6050 6051 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6052 if (diag < 0) { 6053 fprintf(stderr, 6054 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 6055 port_id, on, diag); 6056 return; 6057 } 6058 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6059 } 6060 6061 void 6062 rx_vlan_strip_set(portid_t port_id, int on) 6063 { 6064 int diag; 6065 int vlan_offload; 6066 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6067 6068 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6069 return; 6070 6071 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6072 6073 if (on) { 6074 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 6075 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6076 } else { 6077 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 6078 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 6079 } 6080 6081 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6082 if (diag < 0) { 6083 fprintf(stderr, 6084 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6085 __func__, port_id, on, diag); 6086 return; 6087 } 6088 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6089 } 6090 6091 void 6092 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 6093 { 6094 int diag; 6095 6096 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6097 return; 6098 6099 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 6100 if (diag < 0) 6101 fprintf(stderr, 6102 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 6103 __func__, port_id, queue_id, on, diag); 6104 } 6105 6106 void 6107 rx_vlan_filter_set(portid_t port_id, int on) 6108 { 6109 int diag; 6110 int vlan_offload; 6111 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6112 6113 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6114 return; 6115 6116 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6117 6118 if (on) { 6119 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 6120 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6121 } else { 6122 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 6123 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 6124 } 6125 6126 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6127 if (diag < 0) { 6128 fprintf(stderr, 6129 "%s(port_pi=%d, on=%d) failed diag=%d\n", 6130 __func__, port_id, on, diag); 6131 return; 6132 } 6133 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6134 } 6135 6136 void 6137 rx_vlan_qinq_strip_set(portid_t port_id, int on) 6138 { 6139 int diag; 6140 int vlan_offload; 6141 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 6142 6143 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6144 return; 6145 6146 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 6147 6148 if (on) { 6149 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 6150 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6151 } else { 6152 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 6153 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 6154 } 6155 6156 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 6157 if (diag < 0) { 6158 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 6159 __func__, port_id, on, diag); 6160 return; 6161 } 6162 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 6163 } 6164 6165 int 6166 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 6167 { 6168 int diag; 6169 6170 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6171 return 1; 6172 if (vlan_id_is_invalid(vlan_id)) 6173 return 1; 6174 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 6175 if (diag == 0) 6176 return 0; 6177 fprintf(stderr, 6178 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 6179 port_id, vlan_id, on, diag); 6180 return -1; 6181 } 6182 6183 void 6184 rx_vlan_all_filter_set(portid_t port_id, int on) 6185 { 6186 uint16_t vlan_id; 6187 6188 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6189 return; 6190 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 6191 if (rx_vft_set(port_id, vlan_id, on)) 6192 break; 6193 } 6194 } 6195 6196 void 6197 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 6198 { 6199 int diag; 6200 6201 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6202 return; 6203 6204 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 6205 if (diag == 0) 6206 return; 6207 6208 fprintf(stderr, 6209 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 6210 port_id, vlan_type, tp_id, diag); 6211 } 6212 6213 void 6214 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 6215 { 6216 struct rte_eth_dev_info dev_info; 6217 int ret; 6218 6219 if (vlan_id_is_invalid(vlan_id)) 6220 return; 6221 6222 if (ports[port_id].dev_conf.txmode.offloads & 6223 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 6224 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 6225 return; 6226 } 6227 6228 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6229 if (ret != 0) 6230 return; 6231 6232 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 6233 fprintf(stderr, 6234 "Error: vlan insert is not supported by port %d\n", 6235 port_id); 6236 return; 6237 } 6238 6239 tx_vlan_reset(port_id); 6240 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 6241 ports[port_id].tx_vlan_id = vlan_id; 6242 } 6243 6244 void 6245 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 6246 { 6247 struct rte_eth_dev_info dev_info; 6248 int ret; 6249 6250 if (vlan_id_is_invalid(vlan_id)) 6251 return; 6252 if (vlan_id_is_invalid(vlan_id_outer)) 6253 return; 6254 6255 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6256 if (ret != 0) 6257 return; 6258 6259 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 6260 fprintf(stderr, 6261 "Error: qinq insert not supported by port %d\n", 6262 port_id); 6263 return; 6264 } 6265 6266 tx_vlan_reset(port_id); 6267 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6268 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6269 ports[port_id].tx_vlan_id = vlan_id; 6270 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 6271 } 6272 6273 void 6274 tx_vlan_reset(portid_t port_id) 6275 { 6276 ports[port_id].dev_conf.txmode.offloads &= 6277 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6278 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6279 ports[port_id].tx_vlan_id = 0; 6280 ports[port_id].tx_vlan_id_outer = 0; 6281 } 6282 6283 void 6284 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 6285 { 6286 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6287 return; 6288 6289 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 6290 } 6291 6292 void 6293 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 6294 { 6295 int ret; 6296 6297 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6298 return; 6299 6300 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 6301 return; 6302 6303 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 6304 fprintf(stderr, "map_value not in required range 0..%d\n", 6305 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 6306 return; 6307 } 6308 6309 if (!is_rx) { /* tx */ 6310 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 6311 map_value); 6312 if (ret) { 6313 fprintf(stderr, 6314 "failed to set tx queue stats mapping.\n"); 6315 return; 6316 } 6317 } else { /* rx */ 6318 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 6319 map_value); 6320 if (ret) { 6321 fprintf(stderr, 6322 "failed to set rx queue stats mapping.\n"); 6323 return; 6324 } 6325 } 6326 } 6327 6328 void 6329 set_xstats_hide_zero(uint8_t on_off) 6330 { 6331 xstats_hide_zero = on_off; 6332 } 6333 6334 void 6335 set_record_core_cycles(uint8_t on_off) 6336 { 6337 record_core_cycles = on_off; 6338 } 6339 6340 void 6341 set_record_burst_stats(uint8_t on_off) 6342 { 6343 record_burst_stats = on_off; 6344 } 6345 6346 uint16_t 6347 str_to_flowtype(const char *string) 6348 { 6349 uint8_t i; 6350 6351 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6352 if (!strcmp(flowtype_str_table[i].str, string)) 6353 return flowtype_str_table[i].ftype; 6354 } 6355 6356 if (isdigit(string[0])) { 6357 int val = atoi(string); 6358 if (val > 0 && val < 64) 6359 return (uint16_t)val; 6360 } 6361 6362 return RTE_ETH_FLOW_UNKNOWN; 6363 } 6364 6365 const char* 6366 flowtype_to_str(uint16_t flow_type) 6367 { 6368 uint8_t i; 6369 6370 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6371 if (flowtype_str_table[i].ftype == flow_type) 6372 return flowtype_str_table[i].str; 6373 } 6374 6375 return NULL; 6376 } 6377 6378 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 6379 6380 static inline void 6381 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6382 { 6383 struct rte_eth_flex_payload_cfg *cfg; 6384 uint32_t i, j; 6385 6386 for (i = 0; i < flex_conf->nb_payloads; i++) { 6387 cfg = &flex_conf->flex_set[i]; 6388 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 6389 printf("\n RAW: "); 6390 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 6391 printf("\n L2_PAYLOAD: "); 6392 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 6393 printf("\n L3_PAYLOAD: "); 6394 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 6395 printf("\n L4_PAYLOAD: "); 6396 else 6397 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 6398 for (j = 0; j < num; j++) 6399 printf(" %-5u", cfg->src_offset[j]); 6400 } 6401 printf("\n"); 6402 } 6403 6404 static inline void 6405 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6406 { 6407 struct rte_eth_fdir_flex_mask *mask; 6408 uint32_t i, j; 6409 const char *p; 6410 6411 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 6412 mask = &flex_conf->flex_mask[i]; 6413 p = flowtype_to_str(mask->flow_type); 6414 printf("\n %s:\t", p ? p : "unknown"); 6415 for (j = 0; j < num; j++) 6416 printf(" %02x", mask->mask[j]); 6417 } 6418 printf("\n"); 6419 } 6420 6421 static inline void 6422 print_fdir_flow_type(uint32_t flow_types_mask) 6423 { 6424 int i; 6425 const char *p; 6426 6427 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 6428 if (!(flow_types_mask & (1 << i))) 6429 continue; 6430 p = flowtype_to_str(i); 6431 if (p) 6432 printf(" %s", p); 6433 else 6434 printf(" unknown"); 6435 } 6436 printf("\n"); 6437 } 6438 6439 static int 6440 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 6441 struct rte_eth_fdir_stats *fdir_stat) 6442 { 6443 int ret = -ENOTSUP; 6444 6445 #ifdef RTE_NET_I40E 6446 if (ret == -ENOTSUP) { 6447 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 6448 if (!ret) 6449 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 6450 } 6451 #endif 6452 #ifdef RTE_NET_IXGBE 6453 if (ret == -ENOTSUP) { 6454 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 6455 if (!ret) 6456 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 6457 } 6458 #endif 6459 switch (ret) { 6460 case 0: 6461 break; 6462 case -ENOTSUP: 6463 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 6464 port_id); 6465 break; 6466 default: 6467 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 6468 break; 6469 } 6470 return ret; 6471 } 6472 6473 void 6474 fdir_get_infos(portid_t port_id) 6475 { 6476 struct rte_eth_fdir_stats fdir_stat; 6477 struct rte_eth_fdir_info fdir_info; 6478 6479 static const char *fdir_stats_border = "########################"; 6480 6481 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6482 return; 6483 6484 memset(&fdir_info, 0, sizeof(fdir_info)); 6485 memset(&fdir_stat, 0, sizeof(fdir_stat)); 6486 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 6487 return; 6488 6489 printf("\n %s FDIR infos for port %-2d %s\n", 6490 fdir_stats_border, port_id, fdir_stats_border); 6491 printf(" MODE: "); 6492 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 6493 printf(" PERFECT\n"); 6494 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 6495 printf(" PERFECT-MAC-VLAN\n"); 6496 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 6497 printf(" PERFECT-TUNNEL\n"); 6498 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 6499 printf(" SIGNATURE\n"); 6500 else 6501 printf(" DISABLE\n"); 6502 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 6503 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 6504 printf(" SUPPORTED FLOW TYPE: "); 6505 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 6506 } 6507 printf(" FLEX PAYLOAD INFO:\n"); 6508 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 6509 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 6510 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 6511 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 6512 fdir_info.flex_payload_unit, 6513 fdir_info.max_flex_payload_segment_num, 6514 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 6515 if (fdir_info.flex_conf.nb_payloads > 0) { 6516 printf(" FLEX PAYLOAD SRC OFFSET:"); 6517 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6518 } 6519 if (fdir_info.flex_conf.nb_flexmasks > 0) { 6520 printf(" FLEX MASK CFG:"); 6521 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6522 } 6523 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 6524 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 6525 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 6526 fdir_info.guarant_spc, fdir_info.best_spc); 6527 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 6528 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 6529 " add: %-10"PRIu64" remove: %"PRIu64"\n" 6530 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 6531 fdir_stat.collision, fdir_stat.free, 6532 fdir_stat.maxhash, fdir_stat.maxlen, 6533 fdir_stat.add, fdir_stat.remove, 6534 fdir_stat.f_add, fdir_stat.f_remove); 6535 printf(" %s############################%s\n", 6536 fdir_stats_border, fdir_stats_border); 6537 } 6538 6539 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 6540 6541 void 6542 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 6543 { 6544 #ifdef RTE_NET_IXGBE 6545 int diag; 6546 6547 if (is_rx) 6548 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 6549 else 6550 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 6551 6552 if (diag == 0) 6553 return; 6554 fprintf(stderr, 6555 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 6556 is_rx ? "rx" : "tx", port_id, diag); 6557 return; 6558 #endif 6559 fprintf(stderr, "VF %s setting not supported for port %d\n", 6560 is_rx ? "Rx" : "Tx", port_id); 6561 RTE_SET_USED(vf); 6562 RTE_SET_USED(on); 6563 } 6564 6565 int 6566 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate) 6567 { 6568 int diag; 6569 struct rte_eth_link link; 6570 int ret; 6571 6572 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6573 return 1; 6574 ret = eth_link_get_nowait_print_err(port_id, &link); 6575 if (ret < 0) 6576 return 1; 6577 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 6578 rate > link.link_speed) { 6579 fprintf(stderr, 6580 "Invalid rate value:%u bigger than link speed: %u\n", 6581 rate, link.link_speed); 6582 return 1; 6583 } 6584 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 6585 if (diag == 0) 6586 return diag; 6587 fprintf(stderr, 6588 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 6589 port_id, diag); 6590 return diag; 6591 } 6592 6593 int 6594 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk) 6595 { 6596 int diag = -ENOTSUP; 6597 6598 RTE_SET_USED(vf); 6599 RTE_SET_USED(rate); 6600 RTE_SET_USED(q_msk); 6601 6602 #ifdef RTE_NET_IXGBE 6603 if (diag == -ENOTSUP) 6604 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 6605 q_msk); 6606 #endif 6607 #ifdef RTE_NET_BNXT 6608 if (diag == -ENOTSUP) 6609 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 6610 #endif 6611 if (diag == 0) 6612 return diag; 6613 6614 fprintf(stderr, 6615 "%s for port_id=%d failed diag=%d\n", 6616 __func__, port_id, diag); 6617 return diag; 6618 } 6619 6620 int 6621 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh) 6622 { 6623 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6624 return -EINVAL; 6625 6626 return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh); 6627 } 6628 6629 /* 6630 * Functions to manage the set of filtered Multicast MAC addresses. 6631 * 6632 * A pool of filtered multicast MAC addresses is associated with each port. 6633 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 6634 * The address of the pool and the number of valid multicast MAC addresses 6635 * recorded in the pool are stored in the fields "mc_addr_pool" and 6636 * "mc_addr_nb" of the "rte_port" data structure. 6637 * 6638 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 6639 * to be supplied a contiguous array of multicast MAC addresses. 6640 * To comply with this constraint, the set of multicast addresses recorded 6641 * into the pool are systematically compacted at the beginning of the pool. 6642 * Hence, when a multicast address is removed from the pool, all following 6643 * addresses, if any, are copied back to keep the set contiguous. 6644 */ 6645 #define MCAST_POOL_INC 32 6646 6647 static int 6648 mcast_addr_pool_extend(struct rte_port *port) 6649 { 6650 struct rte_ether_addr *mc_pool; 6651 size_t mc_pool_size; 6652 6653 /* 6654 * If a free entry is available at the end of the pool, just 6655 * increment the number of recorded multicast addresses. 6656 */ 6657 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 6658 port->mc_addr_nb++; 6659 return 0; 6660 } 6661 6662 /* 6663 * [re]allocate a pool with MCAST_POOL_INC more entries. 6664 * The previous test guarantees that port->mc_addr_nb is a multiple 6665 * of MCAST_POOL_INC. 6666 */ 6667 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 6668 MCAST_POOL_INC); 6669 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 6670 mc_pool_size); 6671 if (mc_pool == NULL) { 6672 fprintf(stderr, 6673 "allocation of pool of %u multicast addresses failed\n", 6674 port->mc_addr_nb + MCAST_POOL_INC); 6675 return -ENOMEM; 6676 } 6677 6678 port->mc_addr_pool = mc_pool; 6679 port->mc_addr_nb++; 6680 return 0; 6681 6682 } 6683 6684 static void 6685 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 6686 { 6687 if (mcast_addr_pool_extend(port) != 0) 6688 return; 6689 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 6690 } 6691 6692 static void 6693 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 6694 { 6695 port->mc_addr_nb--; 6696 if (addr_idx == port->mc_addr_nb) { 6697 /* No need to recompact the set of multicast addresses. */ 6698 if (port->mc_addr_nb == 0) { 6699 /* free the pool of multicast addresses. */ 6700 free(port->mc_addr_pool); 6701 port->mc_addr_pool = NULL; 6702 } 6703 return; 6704 } 6705 memmove(&port->mc_addr_pool[addr_idx], 6706 &port->mc_addr_pool[addr_idx + 1], 6707 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 6708 } 6709 6710 int 6711 mcast_addr_pool_destroy(portid_t port_id) 6712 { 6713 struct rte_port *port; 6714 6715 if (port_id_is_invalid(port_id, ENABLED_WARN) || 6716 port_id == (portid_t)RTE_PORT_ALL) 6717 return -EINVAL; 6718 port = &ports[port_id]; 6719 6720 if (port->mc_addr_nb != 0) { 6721 /* free the pool of multicast addresses. */ 6722 free(port->mc_addr_pool); 6723 port->mc_addr_pool = NULL; 6724 port->mc_addr_nb = 0; 6725 } 6726 return 0; 6727 } 6728 6729 static int 6730 eth_port_multicast_addr_list_set(portid_t port_id) 6731 { 6732 struct rte_port *port; 6733 int diag; 6734 6735 port = &ports[port_id]; 6736 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 6737 port->mc_addr_nb); 6738 if (diag < 0) 6739 fprintf(stderr, 6740 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 6741 port_id, port->mc_addr_nb, diag); 6742 6743 return diag; 6744 } 6745 6746 void 6747 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6748 { 6749 struct rte_port *port; 6750 uint32_t i; 6751 6752 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6753 return; 6754 6755 port = &ports[port_id]; 6756 6757 /* 6758 * Check that the added multicast MAC address is not already recorded 6759 * in the pool of multicast addresses. 6760 */ 6761 for (i = 0; i < port->mc_addr_nb; i++) { 6762 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6763 fprintf(stderr, 6764 "multicast address already filtered by port\n"); 6765 return; 6766 } 6767 } 6768 6769 mcast_addr_pool_append(port, mc_addr); 6770 if (eth_port_multicast_addr_list_set(port_id) < 0) 6771 /* Rollback on failure, remove the address from the pool */ 6772 mcast_addr_pool_remove(port, i); 6773 } 6774 6775 void 6776 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6777 { 6778 struct rte_port *port; 6779 uint32_t i; 6780 6781 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6782 return; 6783 6784 port = &ports[port_id]; 6785 6786 /* 6787 * Search the pool of multicast MAC addresses for the removed address. 6788 */ 6789 for (i = 0; i < port->mc_addr_nb; i++) { 6790 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6791 break; 6792 } 6793 if (i == port->mc_addr_nb) { 6794 fprintf(stderr, "multicast address not filtered by port %d\n", 6795 port_id); 6796 return; 6797 } 6798 6799 mcast_addr_pool_remove(port, i); 6800 if (eth_port_multicast_addr_list_set(port_id) < 0) 6801 /* Rollback on failure, add the address back into the pool */ 6802 mcast_addr_pool_append(port, mc_addr); 6803 } 6804 6805 void 6806 port_dcb_info_display(portid_t port_id) 6807 { 6808 struct rte_eth_dcb_info dcb_info; 6809 uint16_t i; 6810 int ret; 6811 static const char *border = "================"; 6812 6813 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6814 return; 6815 6816 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6817 if (ret) { 6818 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6819 port_id); 6820 return; 6821 } 6822 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6823 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6824 printf("\n TC : "); 6825 for (i = 0; i < dcb_info.nb_tcs; i++) 6826 printf("\t%4d", i); 6827 printf("\n Priority : "); 6828 for (i = 0; i < dcb_info.nb_tcs; i++) 6829 printf("\t%4d", dcb_info.prio_tc[i]); 6830 printf("\n BW percent :"); 6831 for (i = 0; i < dcb_info.nb_tcs; i++) 6832 printf("\t%4d%%", dcb_info.tc_bws[i]); 6833 printf("\n RXQ base : "); 6834 for (i = 0; i < dcb_info.nb_tcs; i++) 6835 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6836 printf("\n RXQ number :"); 6837 for (i = 0; i < dcb_info.nb_tcs; i++) 6838 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6839 printf("\n TXQ base : "); 6840 for (i = 0; i < dcb_info.nb_tcs; i++) 6841 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6842 printf("\n TXQ number :"); 6843 for (i = 0; i < dcb_info.nb_tcs; i++) 6844 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6845 printf("\n"); 6846 } 6847 6848 uint8_t * 6849 open_file(const char *file_path, uint32_t *size) 6850 { 6851 int fd = open(file_path, O_RDONLY); 6852 off_t pkg_size; 6853 uint8_t *buf = NULL; 6854 int ret = 0; 6855 struct stat st_buf; 6856 6857 if (size) 6858 *size = 0; 6859 6860 if (fd == -1) { 6861 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6862 return buf; 6863 } 6864 6865 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6866 close(fd); 6867 fprintf(stderr, "%s: File operations failed\n", __func__); 6868 return buf; 6869 } 6870 6871 pkg_size = st_buf.st_size; 6872 if (pkg_size < 0) { 6873 close(fd); 6874 fprintf(stderr, "%s: File operations failed\n", __func__); 6875 return buf; 6876 } 6877 6878 buf = (uint8_t *)malloc(pkg_size); 6879 if (!buf) { 6880 close(fd); 6881 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6882 return buf; 6883 } 6884 6885 ret = read(fd, buf, pkg_size); 6886 if (ret < 0) { 6887 close(fd); 6888 fprintf(stderr, "%s: File read operation failed\n", __func__); 6889 close_file(buf); 6890 return NULL; 6891 } 6892 6893 if (size) 6894 *size = pkg_size; 6895 6896 close(fd); 6897 6898 return buf; 6899 } 6900 6901 int 6902 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6903 { 6904 FILE *fh = fopen(file_path, "wb"); 6905 6906 if (fh == NULL) { 6907 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6908 return -1; 6909 } 6910 6911 if (fwrite(buf, 1, size, fh) != size) { 6912 fclose(fh); 6913 fprintf(stderr, "%s: File write operation failed\n", __func__); 6914 return -1; 6915 } 6916 6917 fclose(fh); 6918 6919 return 0; 6920 } 6921 6922 int 6923 close_file(uint8_t *buf) 6924 { 6925 if (buf) { 6926 free((void *)buf); 6927 return 0; 6928 } 6929 6930 return -1; 6931 } 6932 6933 void 6934 show_macs(portid_t port_id) 6935 { 6936 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6937 struct rte_eth_dev_info dev_info; 6938 int32_t i, rc, num_macs = 0; 6939 6940 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6941 return; 6942 6943 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6944 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6945 if (rc < 0) 6946 return; 6947 6948 for (i = 0; i < rc; i++) { 6949 6950 /* skip zero address */ 6951 if (rte_is_zero_ether_addr(&addr[i])) 6952 continue; 6953 6954 num_macs++; 6955 } 6956 6957 printf("Number of MAC address added: %d\n", num_macs); 6958 6959 for (i = 0; i < rc; i++) { 6960 6961 /* skip zero address */ 6962 if (rte_is_zero_ether_addr(&addr[i])) 6963 continue; 6964 6965 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6966 printf(" %s\n", buf); 6967 } 6968 } 6969 6970 void 6971 show_mcast_macs(portid_t port_id) 6972 { 6973 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6974 struct rte_ether_addr *addr; 6975 struct rte_port *port; 6976 uint32_t i; 6977 6978 port = &ports[port_id]; 6979 6980 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6981 6982 for (i = 0; i < port->mc_addr_nb; i++) { 6983 addr = &port->mc_addr_pool[i]; 6984 6985 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6986 printf(" %s\n", buf); 6987 } 6988 } 6989