1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <ctype.h> 7 #include <stdarg.h> 8 #include <errno.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <stdint.h> 13 #include <inttypes.h> 14 15 #include <sys/queue.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <fcntl.h> 19 #include <unistd.h> 20 21 #include <rte_common.h> 22 #include <rte_byteorder.h> 23 #include <rte_debug.h> 24 #include <rte_log.h> 25 #include <rte_memory.h> 26 #include <rte_memcpy.h> 27 #include <rte_memzone.h> 28 #include <rte_launch.h> 29 #include <rte_bus.h> 30 #include <rte_eal.h> 31 #include <rte_per_lcore.h> 32 #include <rte_lcore.h> 33 #include <rte_branch_prediction.h> 34 #include <rte_mempool.h> 35 #include <rte_mbuf.h> 36 #include <rte_interrupts.h> 37 #include <rte_ether.h> 38 #include <rte_ethdev.h> 39 #include <rte_string_fns.h> 40 #include <rte_cycles.h> 41 #include <rte_flow.h> 42 #include <rte_mtr.h> 43 #include <rte_errno.h> 44 #ifdef RTE_NET_IXGBE 45 #include <rte_pmd_ixgbe.h> 46 #endif 47 #ifdef RTE_NET_I40E 48 #include <rte_pmd_i40e.h> 49 #endif 50 #ifdef RTE_NET_BNXT 51 #include <rte_pmd_bnxt.h> 52 #endif 53 #ifdef RTE_LIB_GRO 54 #include <rte_gro.h> 55 #endif 56 #include <rte_hexdump.h> 57 58 #include "testpmd.h" 59 #include "cmdline_mtr.h" 60 61 #define ETHDEV_FWVERS_LEN 32 62 63 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 65 #else 66 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 67 #endif 68 69 #define NS_PER_SEC 1E9 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 /* Group types */ 91 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 92 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 93 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 94 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 95 { "none", 0 }, 96 { "ip", RTE_ETH_RSS_IP }, 97 { "udp", RTE_ETH_RSS_UDP }, 98 { "tcp", RTE_ETH_RSS_TCP }, 99 { "sctp", RTE_ETH_RSS_SCTP }, 100 { "tunnel", RTE_ETH_RSS_TUNNEL }, 101 { "vlan", RTE_ETH_RSS_VLAN }, 102 103 /* Individual type */ 104 { "ipv4", RTE_ETH_RSS_IPV4 }, 105 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 106 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 107 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 108 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 109 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 110 { "ipv6", RTE_ETH_RSS_IPV6 }, 111 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 112 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 113 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 114 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 115 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 116 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 117 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 118 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 119 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 120 { "port", RTE_ETH_RSS_PORT }, 121 { "vxlan", RTE_ETH_RSS_VXLAN }, 122 { "geneve", RTE_ETH_RSS_GENEVE }, 123 { "nvgre", RTE_ETH_RSS_NVGRE }, 124 { "gtpu", RTE_ETH_RSS_GTPU }, 125 { "eth", RTE_ETH_RSS_ETH }, 126 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 127 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 128 { "esp", RTE_ETH_RSS_ESP }, 129 { "ah", RTE_ETH_RSS_AH }, 130 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 131 { "pfcp", RTE_ETH_RSS_PFCP }, 132 { "pppoe", RTE_ETH_RSS_PPPOE }, 133 { "ecpri", RTE_ETH_RSS_ECPRI }, 134 { "mpls", RTE_ETH_RSS_MPLS }, 135 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 136 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 137 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 138 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 139 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 140 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 141 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 142 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 143 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 144 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 145 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 146 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 147 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 148 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 149 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 150 { NULL, 0}, 151 }; 152 153 static const struct { 154 enum rte_eth_fec_mode mode; 155 const char *name; 156 } fec_mode_name[] = { 157 { 158 .mode = RTE_ETH_FEC_NOFEC, 159 .name = "off", 160 }, 161 { 162 .mode = RTE_ETH_FEC_AUTO, 163 .name = "auto", 164 }, 165 { 166 .mode = RTE_ETH_FEC_BASER, 167 .name = "baser", 168 }, 169 { 170 .mode = RTE_ETH_FEC_RS, 171 .name = "rs", 172 }, 173 }; 174 175 static const struct { 176 char str[32]; 177 uint16_t ftype; 178 } flowtype_str_table[] = { 179 {"raw", RTE_ETH_FLOW_RAW}, 180 {"ipv4", RTE_ETH_FLOW_IPV4}, 181 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 182 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 183 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 184 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 185 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 186 {"ipv6", RTE_ETH_FLOW_IPV6}, 187 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 188 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 189 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 190 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 191 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 192 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 193 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 194 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 195 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 196 {"port", RTE_ETH_FLOW_PORT}, 197 {"vxlan", RTE_ETH_FLOW_VXLAN}, 198 {"geneve", RTE_ETH_FLOW_GENEVE}, 199 {"nvgre", RTE_ETH_FLOW_NVGRE}, 200 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 201 {"gtpu", RTE_ETH_FLOW_GTPU}, 202 }; 203 204 static void 205 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 206 { 207 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 208 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 209 printf("%s%s", name, buf); 210 } 211 212 static void 213 nic_xstats_display_periodic(portid_t port_id) 214 { 215 struct xstat_display_info *xstats_info; 216 uint64_t *prev_values, *curr_values; 217 uint64_t diff_value, value_rate; 218 struct timespec cur_time; 219 uint64_t *ids_supp; 220 size_t ids_supp_sz; 221 uint64_t diff_ns; 222 unsigned int i; 223 int rc; 224 225 xstats_info = &ports[port_id].xstats_info; 226 227 ids_supp_sz = xstats_info->ids_supp_sz; 228 if (ids_supp_sz == 0) 229 return; 230 231 printf("\n"); 232 233 ids_supp = xstats_info->ids_supp; 234 prev_values = xstats_info->prev_values; 235 curr_values = xstats_info->curr_values; 236 237 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 238 ids_supp_sz); 239 if (rc != (int)ids_supp_sz) { 240 fprintf(stderr, 241 "Failed to get values of %zu xstats for port %u - return code %d\n", 242 ids_supp_sz, port_id, rc); 243 return; 244 } 245 246 diff_ns = 0; 247 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 248 uint64_t ns; 249 250 ns = cur_time.tv_sec * NS_PER_SEC; 251 ns += cur_time.tv_nsec; 252 253 if (xstats_info->prev_ns != 0) 254 diff_ns = ns - xstats_info->prev_ns; 255 xstats_info->prev_ns = ns; 256 } 257 258 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 259 for (i = 0; i < ids_supp_sz; i++) { 260 diff_value = (curr_values[i] > prev_values[i]) ? 261 (curr_values[i] - prev_values[i]) : 0; 262 prev_values[i] = curr_values[i]; 263 value_rate = diff_ns > 0 ? 264 (double)diff_value / diff_ns * NS_PER_SEC : 0; 265 266 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 267 xstats_display[i].name, curr_values[i], value_rate); 268 } 269 } 270 271 void 272 nic_stats_display(portid_t port_id) 273 { 274 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 275 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 276 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 277 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 278 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 279 struct timespec cur_time; 280 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 281 diff_ns; 282 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 283 struct rte_eth_stats stats; 284 static const char *nic_stats_border = "########################"; 285 int ret; 286 287 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 288 print_valid_ports(); 289 return; 290 } 291 ret = rte_eth_stats_get(port_id, &stats); 292 if (ret != 0) { 293 fprintf(stderr, 294 "%s: Error: failed to get stats (port %u): %d", 295 __func__, port_id, ret); 296 return; 297 } 298 printf("\n %s NIC statistics for port %-2d %s\n", 299 nic_stats_border, port_id, nic_stats_border); 300 301 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 302 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 303 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 304 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 305 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 306 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 307 308 diff_ns = 0; 309 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 310 uint64_t ns; 311 312 ns = cur_time.tv_sec * NS_PER_SEC; 313 ns += cur_time.tv_nsec; 314 315 if (prev_ns[port_id] != 0) 316 diff_ns = ns - prev_ns[port_id]; 317 prev_ns[port_id] = ns; 318 } 319 320 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 321 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 322 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 323 (stats.opackets - prev_pkts_tx[port_id]) : 0; 324 prev_pkts_rx[port_id] = stats.ipackets; 325 prev_pkts_tx[port_id] = stats.opackets; 326 mpps_rx = diff_ns > 0 ? 327 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 328 mpps_tx = diff_ns > 0 ? 329 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 330 331 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 332 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 333 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 334 (stats.obytes - prev_bytes_tx[port_id]) : 0; 335 prev_bytes_rx[port_id] = stats.ibytes; 336 prev_bytes_tx[port_id] = stats.obytes; 337 mbps_rx = diff_ns > 0 ? 338 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 339 mbps_tx = diff_ns > 0 ? 340 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 341 342 printf("\n Throughput (since last show)\n"); 343 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 344 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 345 mpps_tx, mbps_tx * 8); 346 347 if (xstats_display_num > 0) 348 nic_xstats_display_periodic(port_id); 349 350 printf(" %s############################%s\n", 351 nic_stats_border, nic_stats_border); 352 } 353 354 void 355 nic_stats_clear(portid_t port_id) 356 { 357 int ret; 358 359 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 360 print_valid_ports(); 361 return; 362 } 363 364 ret = rte_eth_stats_reset(port_id); 365 if (ret != 0) { 366 fprintf(stderr, 367 "%s: Error: failed to reset stats (port %u): %s", 368 __func__, port_id, strerror(-ret)); 369 return; 370 } 371 372 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 373 if (ret != 0) { 374 if (ret < 0) 375 ret = -ret; 376 fprintf(stderr, 377 "%s: Error: failed to get stats (port %u): %s", 378 __func__, port_id, strerror(ret)); 379 return; 380 } 381 printf("\n NIC statistics for port %d cleared\n", port_id); 382 } 383 384 void 385 nic_xstats_display(portid_t port_id) 386 { 387 struct rte_eth_xstat *xstats; 388 int cnt_xstats, idx_xstat; 389 struct rte_eth_xstat_name *xstats_names; 390 391 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 392 print_valid_ports(); 393 return; 394 } 395 printf("###### NIC extended statistics for port %-2d\n", port_id); 396 if (!rte_eth_dev_is_valid_port(port_id)) { 397 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 398 return; 399 } 400 401 /* Get count */ 402 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 403 if (cnt_xstats < 0) { 404 fprintf(stderr, "Error: Cannot get count of xstats\n"); 405 return; 406 } 407 408 /* Get id-name lookup table */ 409 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 410 if (xstats_names == NULL) { 411 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 412 return; 413 } 414 if (cnt_xstats != rte_eth_xstats_get_names( 415 port_id, xstats_names, cnt_xstats)) { 416 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 417 free(xstats_names); 418 return; 419 } 420 421 /* Get stats themselves */ 422 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 423 if (xstats == NULL) { 424 fprintf(stderr, "Cannot allocate memory for xstats\n"); 425 free(xstats_names); 426 return; 427 } 428 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 429 fprintf(stderr, "Error: Unable to get xstats\n"); 430 free(xstats_names); 431 free(xstats); 432 return; 433 } 434 435 /* Display xstats */ 436 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 437 if (xstats_hide_zero && !xstats[idx_xstat].value) 438 continue; 439 printf("%s: %"PRIu64"\n", 440 xstats_names[idx_xstat].name, 441 xstats[idx_xstat].value); 442 } 443 free(xstats_names); 444 free(xstats); 445 } 446 447 void 448 nic_xstats_clear(portid_t port_id) 449 { 450 int ret; 451 452 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 453 print_valid_ports(); 454 return; 455 } 456 457 ret = rte_eth_xstats_reset(port_id); 458 if (ret != 0) { 459 fprintf(stderr, 460 "%s: Error: failed to reset xstats (port %u): %s\n", 461 __func__, port_id, strerror(-ret)); 462 return; 463 } 464 465 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 466 if (ret != 0) { 467 if (ret < 0) 468 ret = -ret; 469 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 470 __func__, port_id, strerror(ret)); 471 return; 472 } 473 } 474 475 static const char * 476 get_queue_state_name(uint8_t queue_state) 477 { 478 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 479 return "stopped"; 480 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 481 return "started"; 482 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 483 return "hairpin"; 484 else 485 return "unknown"; 486 } 487 488 void 489 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 490 { 491 struct rte_eth_burst_mode mode; 492 struct rte_eth_rxq_info qinfo; 493 int32_t rc; 494 static const char *info_border = "*********************"; 495 496 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 497 if (rc != 0) { 498 fprintf(stderr, 499 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 500 port_id, queue_id, strerror(-rc), rc); 501 return; 502 } 503 504 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 505 info_border, port_id, queue_id, info_border); 506 507 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 508 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 509 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 510 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 511 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 512 printf("\nRX drop packets: %s", 513 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 514 printf("\nRX deferred start: %s", 515 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 516 printf("\nRX scattered packets: %s", 517 (qinfo.scattered_rx != 0) ? "on" : "off"); 518 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 519 if (qinfo.rx_buf_size != 0) 520 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 521 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 522 523 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 524 printf("\nBurst mode: %s%s", 525 mode.info, 526 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 527 " (per queue)" : ""); 528 529 printf("\n"); 530 } 531 532 void 533 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 534 { 535 struct rte_eth_burst_mode mode; 536 struct rte_eth_txq_info qinfo; 537 int32_t rc; 538 static const char *info_border = "*********************"; 539 540 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 541 if (rc != 0) { 542 fprintf(stderr, 543 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 544 port_id, queue_id, strerror(-rc), rc); 545 return; 546 } 547 548 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 549 info_border, port_id, queue_id, info_border); 550 551 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 552 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 553 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 554 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 555 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 556 printf("\nTX deferred start: %s", 557 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 558 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 559 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 560 561 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 562 printf("\nBurst mode: %s%s", 563 mode.info, 564 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 565 " (per queue)" : ""); 566 567 printf("\n"); 568 } 569 570 static int bus_match_all(const struct rte_bus *bus, const void *data) 571 { 572 RTE_SET_USED(bus); 573 RTE_SET_USED(data); 574 return 0; 575 } 576 577 static void 578 device_infos_display_speeds(uint32_t speed_capa) 579 { 580 printf("\n\tDevice speed capability:"); 581 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 582 printf(" Autonegotiate (all speeds)"); 583 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 584 printf(" Disable autonegotiate (fixed speed) "); 585 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 586 printf(" 10 Mbps half-duplex "); 587 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 588 printf(" 10 Mbps full-duplex "); 589 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 590 printf(" 100 Mbps half-duplex "); 591 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 592 printf(" 100 Mbps full-duplex "); 593 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 594 printf(" 1 Gbps "); 595 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 596 printf(" 2.5 Gbps "); 597 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 598 printf(" 5 Gbps "); 599 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 600 printf(" 10 Gbps "); 601 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 602 printf(" 20 Gbps "); 603 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 604 printf(" 25 Gbps "); 605 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 606 printf(" 40 Gbps "); 607 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 608 printf(" 50 Gbps "); 609 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 610 printf(" 56 Gbps "); 611 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 612 printf(" 100 Gbps "); 613 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 614 printf(" 200 Gbps "); 615 } 616 617 void 618 device_infos_display(const char *identifier) 619 { 620 static const char *info_border = "*********************"; 621 struct rte_bus *start = NULL, *next; 622 struct rte_dev_iterator dev_iter; 623 char name[RTE_ETH_NAME_MAX_LEN]; 624 struct rte_ether_addr mac_addr; 625 struct rte_device *dev; 626 struct rte_devargs da; 627 portid_t port_id; 628 struct rte_eth_dev_info dev_info; 629 char devstr[128]; 630 631 memset(&da, 0, sizeof(da)); 632 if (!identifier) 633 goto skip_parse; 634 635 if (rte_devargs_parsef(&da, "%s", identifier)) { 636 fprintf(stderr, "cannot parse identifier\n"); 637 return; 638 } 639 640 skip_parse: 641 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 642 643 start = next; 644 if (identifier && da.bus != next) 645 continue; 646 647 snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next)); 648 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 649 650 if (!dev->driver) 651 continue; 652 /* Check for matching device if identifier is present */ 653 if (identifier && 654 strncmp(da.name, dev->name, strlen(dev->name))) 655 continue; 656 printf("\n%s Infos for device %s %s\n", 657 info_border, dev->name, info_border); 658 printf("Bus name: %s", rte_bus_name(dev->bus)); 659 printf("\nDriver name: %s", dev->driver->name); 660 printf("\nDevargs: %s", 661 dev->devargs ? dev->devargs->args : ""); 662 printf("\nConnect to socket: %d", dev->numa_node); 663 printf("\n"); 664 665 /* List ports with matching device name */ 666 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 667 printf("\n\tPort id: %-2d", port_id); 668 if (eth_macaddr_get_print_err(port_id, 669 &mac_addr) == 0) 670 print_ethaddr("\n\tMAC address: ", 671 &mac_addr); 672 rte_eth_dev_get_name_by_port(port_id, name); 673 printf("\n\tDevice name: %s", name); 674 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 675 device_infos_display_speeds(dev_info.speed_capa); 676 printf("\n"); 677 } 678 } 679 }; 680 rte_devargs_reset(&da); 681 } 682 683 static void 684 print_dev_capabilities(uint64_t capabilities) 685 { 686 uint64_t single_capa; 687 int begin; 688 int end; 689 int bit; 690 691 if (capabilities == 0) 692 return; 693 694 begin = __builtin_ctzll(capabilities); 695 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 696 697 single_capa = 1ULL << begin; 698 for (bit = begin; bit < end; bit++) { 699 if (capabilities & single_capa) 700 printf(" %s", 701 rte_eth_dev_capability_name(single_capa)); 702 single_capa <<= 1; 703 } 704 } 705 706 uint64_t 707 str_to_rsstypes(const char *str) 708 { 709 uint16_t i; 710 711 for (i = 0; rss_type_table[i].str != NULL; i++) { 712 if (strcmp(rss_type_table[i].str, str) == 0) 713 return rss_type_table[i].rss_type; 714 } 715 716 return 0; 717 } 718 719 const char * 720 rsstypes_to_str(uint64_t rss_type) 721 { 722 uint16_t i; 723 724 for (i = 0; rss_type_table[i].str != NULL; i++) { 725 if (rss_type_table[i].rss_type == rss_type) 726 return rss_type_table[i].str; 727 } 728 729 return NULL; 730 } 731 732 static void 733 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line) 734 { 735 uint16_t user_defined_str_len; 736 uint16_t total_len = 0; 737 uint16_t str_len = 0; 738 uint64_t rss_offload; 739 uint16_t i; 740 741 for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) { 742 rss_offload = RTE_BIT64(i); 743 if ((offload_types & rss_offload) != 0) { 744 const char *p = rsstypes_to_str(rss_offload); 745 746 user_defined_str_len = 747 strlen("user-defined-") + (i / 10 + 1); 748 str_len = p ? strlen(p) : user_defined_str_len; 749 str_len += 2; /* add two spaces */ 750 if (total_len + str_len >= char_num_per_line) { 751 total_len = 0; 752 printf("\n"); 753 } 754 755 if (p) 756 printf(" %s", p); 757 else 758 printf(" user-defined-%u", i); 759 total_len += str_len; 760 } 761 } 762 printf("\n"); 763 } 764 765 void 766 port_infos_display(portid_t port_id) 767 { 768 struct rte_port *port; 769 struct rte_ether_addr mac_addr; 770 struct rte_eth_link link; 771 struct rte_eth_dev_info dev_info; 772 int vlan_offload; 773 struct rte_mempool * mp; 774 static const char *info_border = "*********************"; 775 uint16_t mtu; 776 char name[RTE_ETH_NAME_MAX_LEN]; 777 int ret; 778 char fw_version[ETHDEV_FWVERS_LEN]; 779 780 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 781 print_valid_ports(); 782 return; 783 } 784 port = &ports[port_id]; 785 ret = eth_link_get_nowait_print_err(port_id, &link); 786 if (ret < 0) 787 return; 788 789 ret = eth_dev_info_get_print_err(port_id, &dev_info); 790 if (ret != 0) 791 return; 792 793 printf("\n%s Infos for port %-2d %s\n", 794 info_border, port_id, info_border); 795 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 796 print_ethaddr("MAC address: ", &mac_addr); 797 rte_eth_dev_get_name_by_port(port_id, name); 798 printf("\nDevice name: %s", name); 799 printf("\nDriver name: %s", dev_info.driver_name); 800 801 if (rte_eth_dev_fw_version_get(port_id, fw_version, 802 ETHDEV_FWVERS_LEN) == 0) 803 printf("\nFirmware-version: %s", fw_version); 804 else 805 printf("\nFirmware-version: %s", "not available"); 806 807 if (dev_info.device->devargs && dev_info.device->devargs->args) 808 printf("\nDevargs: %s", dev_info.device->devargs->args); 809 printf("\nConnect to socket: %u", port->socket_id); 810 811 if (port_numa[port_id] != NUMA_NO_CONFIG) { 812 mp = mbuf_pool_find(port_numa[port_id], 0); 813 if (mp) 814 printf("\nmemory allocation on the socket: %d", 815 port_numa[port_id]); 816 } else 817 printf("\nmemory allocation on the socket: %u",port->socket_id); 818 819 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 820 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 821 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 822 ("full-duplex") : ("half-duplex")); 823 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 824 ("On") : ("Off")); 825 826 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 827 printf("MTU: %u\n", mtu); 828 829 printf("Promiscuous mode: %s\n", 830 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 831 printf("Allmulticast mode: %s\n", 832 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 833 printf("Maximum number of MAC addresses: %u\n", 834 (unsigned int)(port->dev_info.max_mac_addrs)); 835 printf("Maximum number of MAC addresses of hash filtering: %u\n", 836 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 837 838 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 839 if (vlan_offload >= 0){ 840 printf("VLAN offload: \n"); 841 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 842 printf(" strip on, "); 843 else 844 printf(" strip off, "); 845 846 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 847 printf("filter on, "); 848 else 849 printf("filter off, "); 850 851 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 852 printf("extend on, "); 853 else 854 printf("extend off, "); 855 856 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 857 printf("qinq strip on\n"); 858 else 859 printf("qinq strip off\n"); 860 } 861 862 if (dev_info.hash_key_size > 0) 863 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 864 if (dev_info.reta_size > 0) 865 printf("Redirection table size: %u\n", dev_info.reta_size); 866 if (!dev_info.flow_type_rss_offloads) 867 printf("No RSS offload flow type is supported.\n"); 868 else { 869 printf("Supported RSS offload flow types:\n"); 870 rss_offload_types_display(dev_info.flow_type_rss_offloads, 871 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 872 } 873 874 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 875 printf("Maximum configurable length of RX packet: %u\n", 876 dev_info.max_rx_pktlen); 877 printf("Maximum configurable size of LRO aggregated packet: %u\n", 878 dev_info.max_lro_pkt_size); 879 if (dev_info.max_vfs) 880 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 881 if (dev_info.max_vmdq_pools) 882 printf("Maximum number of VMDq pools: %u\n", 883 dev_info.max_vmdq_pools); 884 885 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 886 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 887 printf("Max possible number of RXDs per queue: %hu\n", 888 dev_info.rx_desc_lim.nb_max); 889 printf("Min possible number of RXDs per queue: %hu\n", 890 dev_info.rx_desc_lim.nb_min); 891 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 892 893 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 894 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 895 printf("Max possible number of TXDs per queue: %hu\n", 896 dev_info.tx_desc_lim.nb_max); 897 printf("Min possible number of TXDs per queue: %hu\n", 898 dev_info.tx_desc_lim.nb_min); 899 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 900 printf("Max segment number per packet: %hu\n", 901 dev_info.tx_desc_lim.nb_seg_max); 902 printf("Max segment number per MTU/TSO: %hu\n", 903 dev_info.tx_desc_lim.nb_mtu_seg_max); 904 905 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 906 print_dev_capabilities(dev_info.dev_capa); 907 printf(" )\n"); 908 /* Show switch info only if valid switch domain and port id is set */ 909 if (dev_info.switch_info.domain_id != 910 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 911 if (dev_info.switch_info.name) 912 printf("Switch name: %s\n", dev_info.switch_info.name); 913 914 printf("Switch domain Id: %u\n", 915 dev_info.switch_info.domain_id); 916 printf("Switch Port Id: %u\n", 917 dev_info.switch_info.port_id); 918 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 919 printf("Switch Rx domain: %u\n", 920 dev_info.switch_info.rx_domain); 921 } 922 } 923 924 void 925 port_summary_header_display(void) 926 { 927 uint16_t port_number; 928 929 port_number = rte_eth_dev_count_avail(); 930 printf("Number of available ports: %i\n", port_number); 931 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 932 "Driver", "Status", "Link"); 933 } 934 935 void 936 port_summary_display(portid_t port_id) 937 { 938 struct rte_ether_addr mac_addr; 939 struct rte_eth_link link; 940 struct rte_eth_dev_info dev_info; 941 char name[RTE_ETH_NAME_MAX_LEN]; 942 int ret; 943 944 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 945 print_valid_ports(); 946 return; 947 } 948 949 ret = eth_link_get_nowait_print_err(port_id, &link); 950 if (ret < 0) 951 return; 952 953 ret = eth_dev_info_get_print_err(port_id, &dev_info); 954 if (ret != 0) 955 return; 956 957 rte_eth_dev_get_name_by_port(port_id, name); 958 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 959 if (ret != 0) 960 return; 961 962 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 963 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 964 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 965 rte_eth_link_speed_to_str(link.link_speed)); 966 } 967 968 void 969 port_eeprom_display(portid_t port_id) 970 { 971 struct rte_dev_eeprom_info einfo; 972 int ret; 973 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 974 print_valid_ports(); 975 return; 976 } 977 978 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 979 if (len_eeprom < 0) { 980 switch (len_eeprom) { 981 case -ENODEV: 982 fprintf(stderr, "port index %d invalid\n", port_id); 983 break; 984 case -ENOTSUP: 985 fprintf(stderr, "operation not supported by device\n"); 986 break; 987 case -EIO: 988 fprintf(stderr, "device is removed\n"); 989 break; 990 default: 991 fprintf(stderr, "Unable to get EEPROM: %d\n", 992 len_eeprom); 993 break; 994 } 995 return; 996 } 997 998 einfo.offset = 0; 999 einfo.length = len_eeprom; 1000 einfo.data = calloc(1, len_eeprom); 1001 if (!einfo.data) { 1002 fprintf(stderr, 1003 "Allocation of port %u eeprom data failed\n", 1004 port_id); 1005 return; 1006 } 1007 1008 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1009 if (ret != 0) { 1010 switch (ret) { 1011 case -ENODEV: 1012 fprintf(stderr, "port index %d invalid\n", port_id); 1013 break; 1014 case -ENOTSUP: 1015 fprintf(stderr, "operation not supported by device\n"); 1016 break; 1017 case -EIO: 1018 fprintf(stderr, "device is removed\n"); 1019 break; 1020 default: 1021 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1022 break; 1023 } 1024 free(einfo.data); 1025 return; 1026 } 1027 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1028 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1029 free(einfo.data); 1030 } 1031 1032 void 1033 port_module_eeprom_display(portid_t port_id) 1034 { 1035 struct rte_eth_dev_module_info minfo; 1036 struct rte_dev_eeprom_info einfo; 1037 int ret; 1038 1039 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1040 print_valid_ports(); 1041 return; 1042 } 1043 1044 1045 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1046 if (ret != 0) { 1047 switch (ret) { 1048 case -ENODEV: 1049 fprintf(stderr, "port index %d invalid\n", port_id); 1050 break; 1051 case -ENOTSUP: 1052 fprintf(stderr, "operation not supported by device\n"); 1053 break; 1054 case -EIO: 1055 fprintf(stderr, "device is removed\n"); 1056 break; 1057 default: 1058 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1059 ret); 1060 break; 1061 } 1062 return; 1063 } 1064 1065 einfo.offset = 0; 1066 einfo.length = minfo.eeprom_len; 1067 einfo.data = calloc(1, minfo.eeprom_len); 1068 if (!einfo.data) { 1069 fprintf(stderr, 1070 "Allocation of port %u eeprom data failed\n", 1071 port_id); 1072 return; 1073 } 1074 1075 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1076 if (ret != 0) { 1077 switch (ret) { 1078 case -ENODEV: 1079 fprintf(stderr, "port index %d invalid\n", port_id); 1080 break; 1081 case -ENOTSUP: 1082 fprintf(stderr, "operation not supported by device\n"); 1083 break; 1084 case -EIO: 1085 fprintf(stderr, "device is removed\n"); 1086 break; 1087 default: 1088 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1089 ret); 1090 break; 1091 } 1092 free(einfo.data); 1093 return; 1094 } 1095 1096 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1097 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1098 free(einfo.data); 1099 } 1100 1101 int 1102 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1103 { 1104 uint16_t pid; 1105 1106 if (port_id == (portid_t)RTE_PORT_ALL) 1107 return 0; 1108 1109 RTE_ETH_FOREACH_DEV(pid) 1110 if (port_id == pid) 1111 return 0; 1112 1113 if (warning == ENABLED_WARN) 1114 fprintf(stderr, "Invalid port %d\n", port_id); 1115 1116 return 1; 1117 } 1118 1119 void print_valid_ports(void) 1120 { 1121 portid_t pid; 1122 1123 printf("The valid ports array is ["); 1124 RTE_ETH_FOREACH_DEV(pid) { 1125 printf(" %d", pid); 1126 } 1127 printf(" ]\n"); 1128 } 1129 1130 static int 1131 vlan_id_is_invalid(uint16_t vlan_id) 1132 { 1133 if (vlan_id < 4096) 1134 return 0; 1135 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1136 return 1; 1137 } 1138 1139 static uint32_t 1140 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1141 { 1142 uint32_t overhead_len; 1143 1144 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1145 overhead_len = max_rx_pktlen - max_mtu; 1146 else 1147 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1148 1149 return overhead_len; 1150 } 1151 1152 static int 1153 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1154 { 1155 struct rte_eth_dev_info dev_info; 1156 uint32_t overhead_len; 1157 uint32_t frame_size; 1158 int ret; 1159 1160 ret = rte_eth_dev_info_get(port_id, &dev_info); 1161 if (ret != 0) 1162 return ret; 1163 1164 if (mtu < dev_info.min_mtu) { 1165 fprintf(stderr, 1166 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1167 mtu, dev_info.min_mtu, port_id); 1168 return -EINVAL; 1169 } 1170 if (mtu > dev_info.max_mtu) { 1171 fprintf(stderr, 1172 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1173 mtu, dev_info.max_mtu, port_id); 1174 return -EINVAL; 1175 } 1176 1177 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1178 dev_info.max_mtu); 1179 frame_size = mtu + overhead_len; 1180 if (frame_size > dev_info.max_rx_pktlen) { 1181 fprintf(stderr, 1182 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1183 frame_size, dev_info.max_rx_pktlen, port_id); 1184 return -EINVAL; 1185 } 1186 1187 return 0; 1188 } 1189 1190 void 1191 port_mtu_set(portid_t port_id, uint16_t mtu) 1192 { 1193 struct rte_port *port = &ports[port_id]; 1194 int diag; 1195 1196 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1197 return; 1198 1199 diag = eth_dev_validate_mtu(port_id, mtu); 1200 if (diag != 0) 1201 return; 1202 1203 if (port->need_reconfig == 0) { 1204 diag = rte_eth_dev_set_mtu(port_id, mtu); 1205 if (diag != 0) { 1206 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1207 return; 1208 } 1209 } 1210 1211 port->dev_conf.rxmode.mtu = mtu; 1212 } 1213 1214 /* Generic flow management functions. */ 1215 1216 static struct port_flow_tunnel * 1217 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1218 { 1219 struct port_flow_tunnel *flow_tunnel; 1220 1221 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1222 if (flow_tunnel->id == port_tunnel_id) 1223 goto out; 1224 } 1225 flow_tunnel = NULL; 1226 1227 out: 1228 return flow_tunnel; 1229 } 1230 1231 const char * 1232 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1233 { 1234 const char *type; 1235 switch (tunnel->type) { 1236 default: 1237 type = "unknown"; 1238 break; 1239 case RTE_FLOW_ITEM_TYPE_VXLAN: 1240 type = "vxlan"; 1241 break; 1242 case RTE_FLOW_ITEM_TYPE_GRE: 1243 type = "gre"; 1244 break; 1245 case RTE_FLOW_ITEM_TYPE_NVGRE: 1246 type = "nvgre"; 1247 break; 1248 case RTE_FLOW_ITEM_TYPE_GENEVE: 1249 type = "geneve"; 1250 break; 1251 } 1252 1253 return type; 1254 } 1255 1256 struct port_flow_tunnel * 1257 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1258 { 1259 struct rte_port *port = &ports[port_id]; 1260 struct port_flow_tunnel *flow_tunnel; 1261 1262 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1263 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1264 goto out; 1265 } 1266 flow_tunnel = NULL; 1267 1268 out: 1269 return flow_tunnel; 1270 } 1271 1272 void port_flow_tunnel_list(portid_t port_id) 1273 { 1274 struct rte_port *port = &ports[port_id]; 1275 struct port_flow_tunnel *flt; 1276 1277 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1278 printf("port %u tunnel #%u type=%s", 1279 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1280 if (flt->tunnel.tun_id) 1281 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1282 printf("\n"); 1283 } 1284 } 1285 1286 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1287 { 1288 struct rte_port *port = &ports[port_id]; 1289 struct port_flow_tunnel *flt; 1290 1291 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1292 if (flt->id == tunnel_id) 1293 break; 1294 } 1295 if (flt) { 1296 LIST_REMOVE(flt, chain); 1297 free(flt); 1298 printf("port %u: flow tunnel #%u destroyed\n", 1299 port_id, tunnel_id); 1300 } 1301 } 1302 1303 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1304 { 1305 struct rte_port *port = &ports[port_id]; 1306 enum rte_flow_item_type type; 1307 struct port_flow_tunnel *flt; 1308 1309 if (!strcmp(ops->type, "vxlan")) 1310 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1311 else if (!strcmp(ops->type, "gre")) 1312 type = RTE_FLOW_ITEM_TYPE_GRE; 1313 else if (!strcmp(ops->type, "nvgre")) 1314 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1315 else if (!strcmp(ops->type, "geneve")) 1316 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1317 else { 1318 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1319 ops->type); 1320 return; 1321 } 1322 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1323 if (flt->tunnel.type == type) 1324 break; 1325 } 1326 if (!flt) { 1327 flt = calloc(1, sizeof(*flt)); 1328 if (!flt) { 1329 fprintf(stderr, "failed to allocate port flt object\n"); 1330 return; 1331 } 1332 flt->tunnel.type = type; 1333 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1334 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1335 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1336 } 1337 printf("port %d: flow tunnel #%u type %s\n", 1338 port_id, flt->id, ops->type); 1339 } 1340 1341 /** Generate a port_flow entry from attributes/pattern/actions. */ 1342 static struct port_flow * 1343 port_flow_new(const struct rte_flow_attr *attr, 1344 const struct rte_flow_item *pattern, 1345 const struct rte_flow_action *actions, 1346 struct rte_flow_error *error) 1347 { 1348 const struct rte_flow_conv_rule rule = { 1349 .attr_ro = attr, 1350 .pattern_ro = pattern, 1351 .actions_ro = actions, 1352 }; 1353 struct port_flow *pf; 1354 int ret; 1355 1356 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1357 if (ret < 0) 1358 return NULL; 1359 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1360 if (!pf) { 1361 rte_flow_error_set 1362 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1363 "calloc() failed"); 1364 return NULL; 1365 } 1366 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1367 error) >= 0) 1368 return pf; 1369 free(pf); 1370 return NULL; 1371 } 1372 1373 /** Print a message out of a flow error. */ 1374 static int 1375 port_flow_complain(struct rte_flow_error *error) 1376 { 1377 static const char *const errstrlist[] = { 1378 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1379 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1380 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1381 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1382 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1383 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1384 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1385 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1386 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1387 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1388 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1389 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1390 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1391 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1392 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1393 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1394 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1395 }; 1396 const char *errstr; 1397 char buf[32]; 1398 int err = rte_errno; 1399 1400 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1401 !errstrlist[error->type]) 1402 errstr = "unknown type"; 1403 else 1404 errstr = errstrlist[error->type]; 1405 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1406 __func__, error->type, errstr, 1407 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1408 error->cause), buf) : "", 1409 error->message ? error->message : "(no stated reason)", 1410 rte_strerror(err)); 1411 1412 switch (error->type) { 1413 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1414 fprintf(stderr, "The status suggests the use of \"transfer\" " 1415 "as the possible cause of the failure. Make " 1416 "sure that the flow in question and its " 1417 "indirect components (if any) are managed " 1418 "via \"transfer\" proxy port. Use command " 1419 "\"show port (port_id) flow transfer proxy\" " 1420 "to figure out the proxy port ID\n"); 1421 break; 1422 default: 1423 break; 1424 } 1425 1426 return -err; 1427 } 1428 1429 static void 1430 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line) 1431 { 1432 uint16_t total_len = 0; 1433 uint16_t str_len; 1434 uint16_t i; 1435 1436 if (rss_types == 0) 1437 return; 1438 1439 for (i = 0; rss_type_table[i].str; i++) { 1440 if (rss_type_table[i].rss_type == 0) 1441 continue; 1442 1443 if ((rss_types & rss_type_table[i].rss_type) == 1444 rss_type_table[i].rss_type) { 1445 /* Contain two spaces */ 1446 str_len = strlen(rss_type_table[i].str) + 2; 1447 if (total_len + str_len > char_num_per_line) { 1448 printf("\n"); 1449 total_len = 0; 1450 } 1451 printf(" %s", rss_type_table[i].str); 1452 total_len += str_len; 1453 } 1454 } 1455 printf("\n"); 1456 } 1457 1458 static void 1459 rss_config_display(struct rte_flow_action_rss *rss_conf) 1460 { 1461 uint8_t i; 1462 1463 if (rss_conf == NULL) { 1464 fprintf(stderr, "Invalid rule\n"); 1465 return; 1466 } 1467 1468 printf("RSS:\n" 1469 " queues:"); 1470 if (rss_conf->queue_num == 0) 1471 printf(" none"); 1472 for (i = 0; i < rss_conf->queue_num; i++) 1473 printf(" %d", rss_conf->queue[i]); 1474 printf("\n"); 1475 1476 printf(" function: "); 1477 switch (rss_conf->func) { 1478 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1479 printf("default\n"); 1480 break; 1481 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1482 printf("toeplitz\n"); 1483 break; 1484 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1485 printf("simple_xor\n"); 1486 break; 1487 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1488 printf("symmetric_toeplitz\n"); 1489 break; 1490 default: 1491 printf("Unknown function\n"); 1492 return; 1493 } 1494 1495 printf(" types:\n"); 1496 if (rss_conf->types == 0) { 1497 printf(" none\n"); 1498 return; 1499 } 1500 rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 1501 } 1502 1503 static struct port_indirect_action * 1504 action_get_by_id(portid_t port_id, uint32_t id) 1505 { 1506 struct rte_port *port; 1507 struct port_indirect_action **ppia; 1508 struct port_indirect_action *pia = NULL; 1509 1510 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1511 port_id == (portid_t)RTE_PORT_ALL) 1512 return NULL; 1513 port = &ports[port_id]; 1514 ppia = &port->actions_list; 1515 while (*ppia) { 1516 if ((*ppia)->id == id) { 1517 pia = *ppia; 1518 break; 1519 } 1520 ppia = &(*ppia)->next; 1521 } 1522 if (!pia) 1523 fprintf(stderr, 1524 "Failed to find indirect action #%u on port %u\n", 1525 id, port_id); 1526 return pia; 1527 } 1528 1529 static int 1530 action_alloc(portid_t port_id, uint32_t id, 1531 struct port_indirect_action **action) 1532 { 1533 struct rte_port *port; 1534 struct port_indirect_action **ppia; 1535 struct port_indirect_action *pia = NULL; 1536 1537 *action = NULL; 1538 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1539 port_id == (portid_t)RTE_PORT_ALL) 1540 return -EINVAL; 1541 port = &ports[port_id]; 1542 if (id == UINT32_MAX) { 1543 /* taking first available ID */ 1544 if (port->actions_list) { 1545 if (port->actions_list->id == UINT32_MAX - 1) { 1546 fprintf(stderr, 1547 "Highest indirect action ID is already assigned, delete it first\n"); 1548 return -ENOMEM; 1549 } 1550 id = port->actions_list->id + 1; 1551 } else { 1552 id = 0; 1553 } 1554 } 1555 pia = calloc(1, sizeof(*pia)); 1556 if (!pia) { 1557 fprintf(stderr, 1558 "Allocation of port %u indirect action failed\n", 1559 port_id); 1560 return -ENOMEM; 1561 } 1562 ppia = &port->actions_list; 1563 while (*ppia && (*ppia)->id > id) 1564 ppia = &(*ppia)->next; 1565 if (*ppia && (*ppia)->id == id) { 1566 fprintf(stderr, 1567 "Indirect action #%u is already assigned, delete it first\n", 1568 id); 1569 free(pia); 1570 return -EINVAL; 1571 } 1572 pia->next = *ppia; 1573 pia->id = id; 1574 *ppia = pia; 1575 *action = pia; 1576 return 0; 1577 } 1578 1579 static int 1580 template_alloc(uint32_t id, struct port_template **template, 1581 struct port_template **list) 1582 { 1583 struct port_template *lst = *list; 1584 struct port_template **ppt; 1585 struct port_template *pt = NULL; 1586 1587 *template = NULL; 1588 if (id == UINT32_MAX) { 1589 /* taking first available ID */ 1590 if (lst) { 1591 if (lst->id == UINT32_MAX - 1) { 1592 printf("Highest template ID is already" 1593 " assigned, delete it first\n"); 1594 return -ENOMEM; 1595 } 1596 id = lst->id + 1; 1597 } else { 1598 id = 0; 1599 } 1600 } 1601 pt = calloc(1, sizeof(*pt)); 1602 if (!pt) { 1603 printf("Allocation of port template failed\n"); 1604 return -ENOMEM; 1605 } 1606 ppt = list; 1607 while (*ppt && (*ppt)->id > id) 1608 ppt = &(*ppt)->next; 1609 if (*ppt && (*ppt)->id == id) { 1610 printf("Template #%u is already assigned," 1611 " delete it first\n", id); 1612 free(pt); 1613 return -EINVAL; 1614 } 1615 pt->next = *ppt; 1616 pt->id = id; 1617 *ppt = pt; 1618 *template = pt; 1619 return 0; 1620 } 1621 1622 static int 1623 table_alloc(uint32_t id, struct port_table **table, 1624 struct port_table **list) 1625 { 1626 struct port_table *lst = *list; 1627 struct port_table **ppt; 1628 struct port_table *pt = NULL; 1629 1630 *table = NULL; 1631 if (id == UINT32_MAX) { 1632 /* taking first available ID */ 1633 if (lst) { 1634 if (lst->id == UINT32_MAX - 1) { 1635 printf("Highest table ID is already" 1636 " assigned, delete it first\n"); 1637 return -ENOMEM; 1638 } 1639 id = lst->id + 1; 1640 } else { 1641 id = 0; 1642 } 1643 } 1644 pt = calloc(1, sizeof(*pt)); 1645 if (!pt) { 1646 printf("Allocation of table failed\n"); 1647 return -ENOMEM; 1648 } 1649 ppt = list; 1650 while (*ppt && (*ppt)->id > id) 1651 ppt = &(*ppt)->next; 1652 if (*ppt && (*ppt)->id == id) { 1653 printf("Table #%u is already assigned," 1654 " delete it first\n", id); 1655 free(pt); 1656 return -EINVAL; 1657 } 1658 pt->next = *ppt; 1659 pt->id = id; 1660 *ppt = pt; 1661 *table = pt; 1662 return 0; 1663 } 1664 1665 /** Get info about flow management resources. */ 1666 int 1667 port_flow_get_info(portid_t port_id) 1668 { 1669 struct rte_flow_port_info port_info; 1670 struct rte_flow_queue_info queue_info; 1671 struct rte_flow_error error; 1672 1673 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1674 port_id == (portid_t)RTE_PORT_ALL) 1675 return -EINVAL; 1676 /* Poisoning to make sure PMDs update it in case of error. */ 1677 memset(&error, 0x99, sizeof(error)); 1678 memset(&port_info, 0, sizeof(port_info)); 1679 memset(&queue_info, 0, sizeof(queue_info)); 1680 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1681 return port_flow_complain(&error); 1682 printf("Flow engine resources on port %u:\n" 1683 "Number of queues: %d\n" 1684 "Size of queues: %d\n" 1685 "Number of counters: %d\n" 1686 "Number of aging objects: %d\n" 1687 "Number of meter actions: %d\n", 1688 port_id, port_info.max_nb_queues, 1689 queue_info.max_size, 1690 port_info.max_nb_counters, 1691 port_info.max_nb_aging_objects, 1692 port_info.max_nb_meters); 1693 return 0; 1694 } 1695 1696 /** Configure flow management resources. */ 1697 int 1698 port_flow_configure(portid_t port_id, 1699 const struct rte_flow_port_attr *port_attr, 1700 uint16_t nb_queue, 1701 const struct rte_flow_queue_attr *queue_attr) 1702 { 1703 struct rte_port *port; 1704 struct rte_flow_error error; 1705 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1706 int std_queue; 1707 1708 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1709 port_id == (portid_t)RTE_PORT_ALL) 1710 return -EINVAL; 1711 port = &ports[port_id]; 1712 port->queue_nb = nb_queue; 1713 port->queue_sz = queue_attr->size; 1714 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1715 attr_list[std_queue] = queue_attr; 1716 /* Poisoning to make sure PMDs update it in case of error. */ 1717 memset(&error, 0x66, sizeof(error)); 1718 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1719 return port_flow_complain(&error); 1720 printf("Configure flows on port %u: " 1721 "number of queues %d with %d elements\n", 1722 port_id, nb_queue, queue_attr->size); 1723 return 0; 1724 } 1725 1726 /** Create indirect action */ 1727 int 1728 port_action_handle_create(portid_t port_id, uint32_t id, 1729 const struct rte_flow_indir_action_conf *conf, 1730 const struct rte_flow_action *action) 1731 { 1732 struct port_indirect_action *pia; 1733 int ret; 1734 struct rte_flow_error error; 1735 1736 ret = action_alloc(port_id, id, &pia); 1737 if (ret) 1738 return ret; 1739 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1740 struct rte_flow_action_age *age = 1741 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1742 1743 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1744 age->context = &pia->age_type; 1745 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1746 struct rte_flow_action_conntrack *ct = 1747 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1748 1749 memcpy(ct, &conntrack_context, sizeof(*ct)); 1750 } 1751 /* Poisoning to make sure PMDs update it in case of error. */ 1752 memset(&error, 0x22, sizeof(error)); 1753 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1754 &error); 1755 if (!pia->handle) { 1756 uint32_t destroy_id = pia->id; 1757 port_action_handle_destroy(port_id, 1, &destroy_id); 1758 return port_flow_complain(&error); 1759 } 1760 pia->type = action->type; 1761 printf("Indirect action #%u created\n", pia->id); 1762 return 0; 1763 } 1764 1765 /** Destroy indirect action */ 1766 int 1767 port_action_handle_destroy(portid_t port_id, 1768 uint32_t n, 1769 const uint32_t *actions) 1770 { 1771 struct rte_port *port; 1772 struct port_indirect_action **tmp; 1773 uint32_t c = 0; 1774 int ret = 0; 1775 1776 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1777 port_id == (portid_t)RTE_PORT_ALL) 1778 return -EINVAL; 1779 port = &ports[port_id]; 1780 tmp = &port->actions_list; 1781 while (*tmp) { 1782 uint32_t i; 1783 1784 for (i = 0; i != n; ++i) { 1785 struct rte_flow_error error; 1786 struct port_indirect_action *pia = *tmp; 1787 1788 if (actions[i] != pia->id) 1789 continue; 1790 /* 1791 * Poisoning to make sure PMDs update it in case 1792 * of error. 1793 */ 1794 memset(&error, 0x33, sizeof(error)); 1795 1796 if (pia->handle && rte_flow_action_handle_destroy( 1797 port_id, pia->handle, &error)) { 1798 ret = port_flow_complain(&error); 1799 continue; 1800 } 1801 *tmp = pia->next; 1802 printf("Indirect action #%u destroyed\n", pia->id); 1803 free(pia); 1804 break; 1805 } 1806 if (i == n) 1807 tmp = &(*tmp)->next; 1808 ++c; 1809 } 1810 return ret; 1811 } 1812 1813 int 1814 port_action_handle_flush(portid_t port_id) 1815 { 1816 struct rte_port *port; 1817 struct port_indirect_action **tmp; 1818 int ret = 0; 1819 1820 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1821 port_id == (portid_t)RTE_PORT_ALL) 1822 return -EINVAL; 1823 port = &ports[port_id]; 1824 tmp = &port->actions_list; 1825 while (*tmp != NULL) { 1826 struct rte_flow_error error; 1827 struct port_indirect_action *pia = *tmp; 1828 1829 /* Poisoning to make sure PMDs update it in case of error. */ 1830 memset(&error, 0x44, sizeof(error)); 1831 if (pia->handle != NULL && 1832 rte_flow_action_handle_destroy 1833 (port_id, pia->handle, &error) != 0) { 1834 printf("Indirect action #%u not destroyed\n", pia->id); 1835 ret = port_flow_complain(&error); 1836 tmp = &pia->next; 1837 } else { 1838 *tmp = pia->next; 1839 free(pia); 1840 } 1841 } 1842 return ret; 1843 } 1844 1845 /** Get indirect action by port + id */ 1846 struct rte_flow_action_handle * 1847 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1848 { 1849 1850 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1851 1852 return (pia) ? pia->handle : NULL; 1853 } 1854 1855 /** Update indirect action */ 1856 int 1857 port_action_handle_update(portid_t port_id, uint32_t id, 1858 const struct rte_flow_action *action) 1859 { 1860 struct rte_flow_error error; 1861 struct rte_flow_action_handle *action_handle; 1862 struct port_indirect_action *pia; 1863 const void *update; 1864 1865 action_handle = port_action_handle_get_by_id(port_id, id); 1866 if (!action_handle) 1867 return -EINVAL; 1868 pia = action_get_by_id(port_id, id); 1869 if (!pia) 1870 return -EINVAL; 1871 switch (pia->type) { 1872 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1873 update = action->conf; 1874 break; 1875 default: 1876 update = action; 1877 break; 1878 } 1879 if (rte_flow_action_handle_update(port_id, action_handle, update, 1880 &error)) { 1881 return port_flow_complain(&error); 1882 } 1883 printf("Indirect action #%u updated\n", id); 1884 return 0; 1885 } 1886 1887 int 1888 port_action_handle_query(portid_t port_id, uint32_t id) 1889 { 1890 struct rte_flow_error error; 1891 struct port_indirect_action *pia; 1892 union { 1893 struct rte_flow_query_count count; 1894 struct rte_flow_query_age age; 1895 struct rte_flow_action_conntrack ct; 1896 } query; 1897 1898 pia = action_get_by_id(port_id, id); 1899 if (!pia) 1900 return -EINVAL; 1901 switch (pia->type) { 1902 case RTE_FLOW_ACTION_TYPE_AGE: 1903 case RTE_FLOW_ACTION_TYPE_COUNT: 1904 break; 1905 default: 1906 fprintf(stderr, 1907 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1908 id, pia->type, port_id); 1909 return -ENOTSUP; 1910 } 1911 /* Poisoning to make sure PMDs update it in case of error. */ 1912 memset(&error, 0x55, sizeof(error)); 1913 memset(&query, 0, sizeof(query)); 1914 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 1915 return port_flow_complain(&error); 1916 switch (pia->type) { 1917 case RTE_FLOW_ACTION_TYPE_AGE: 1918 printf("Indirect AGE action:\n" 1919 " aged: %u\n" 1920 " sec_since_last_hit_valid: %u\n" 1921 " sec_since_last_hit: %" PRIu32 "\n", 1922 query.age.aged, 1923 query.age.sec_since_last_hit_valid, 1924 query.age.sec_since_last_hit); 1925 break; 1926 case RTE_FLOW_ACTION_TYPE_COUNT: 1927 printf("Indirect COUNT action:\n" 1928 " hits_set: %u\n" 1929 " bytes_set: %u\n" 1930 " hits: %" PRIu64 "\n" 1931 " bytes: %" PRIu64 "\n", 1932 query.count.hits_set, 1933 query.count.bytes_set, 1934 query.count.hits, 1935 query.count.bytes); 1936 break; 1937 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1938 printf("Conntrack Context:\n" 1939 " Peer: %u, Flow dir: %s, Enable: %u\n" 1940 " Live: %u, SACK: %u, CACK: %u\n" 1941 " Packet dir: %s, Liberal: %u, State: %u\n" 1942 " Factor: %u, Retrans: %u, TCP flags: %u\n" 1943 " Last Seq: %u, Last ACK: %u\n" 1944 " Last Win: %u, Last End: %u\n", 1945 query.ct.peer_port, 1946 query.ct.is_original_dir ? "Original" : "Reply", 1947 query.ct.enable, query.ct.live_connection, 1948 query.ct.selective_ack, query.ct.challenge_ack_passed, 1949 query.ct.last_direction ? "Original" : "Reply", 1950 query.ct.liberal_mode, query.ct.state, 1951 query.ct.max_ack_window, query.ct.retransmission_limit, 1952 query.ct.last_index, query.ct.last_seq, 1953 query.ct.last_ack, query.ct.last_window, 1954 query.ct.last_end); 1955 printf(" Original Dir:\n" 1956 " scale: %u, fin: %u, ack seen: %u\n" 1957 " unacked data: %u\n Sent end: %u," 1958 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1959 query.ct.original_dir.scale, 1960 query.ct.original_dir.close_initiated, 1961 query.ct.original_dir.last_ack_seen, 1962 query.ct.original_dir.data_unacked, 1963 query.ct.original_dir.sent_end, 1964 query.ct.original_dir.reply_end, 1965 query.ct.original_dir.max_win, 1966 query.ct.original_dir.max_ack); 1967 printf(" Reply Dir:\n" 1968 " scale: %u, fin: %u, ack seen: %u\n" 1969 " unacked data: %u\n Sent end: %u," 1970 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1971 query.ct.reply_dir.scale, 1972 query.ct.reply_dir.close_initiated, 1973 query.ct.reply_dir.last_ack_seen, 1974 query.ct.reply_dir.data_unacked, 1975 query.ct.reply_dir.sent_end, 1976 query.ct.reply_dir.reply_end, 1977 query.ct.reply_dir.max_win, 1978 query.ct.reply_dir.max_ack); 1979 break; 1980 default: 1981 fprintf(stderr, 1982 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1983 id, pia->type, port_id); 1984 break; 1985 } 1986 return 0; 1987 } 1988 1989 static struct port_flow_tunnel * 1990 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1991 const struct rte_flow_item *pattern, 1992 const struct rte_flow_action *actions, 1993 const struct tunnel_ops *tunnel_ops) 1994 { 1995 int ret; 1996 struct rte_port *port; 1997 struct port_flow_tunnel *pft; 1998 struct rte_flow_error error; 1999 2000 port = &ports[port_id]; 2001 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2002 if (!pft) { 2003 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2004 tunnel_ops->id); 2005 return NULL; 2006 } 2007 if (tunnel_ops->actions) { 2008 uint32_t num_actions; 2009 const struct rte_flow_action *aptr; 2010 2011 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2012 &pft->pmd_actions, 2013 &pft->num_pmd_actions, 2014 &error); 2015 if (ret) { 2016 port_flow_complain(&error); 2017 return NULL; 2018 } 2019 for (aptr = actions, num_actions = 1; 2020 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2021 aptr++, num_actions++); 2022 pft->actions = malloc( 2023 (num_actions + pft->num_pmd_actions) * 2024 sizeof(actions[0])); 2025 if (!pft->actions) { 2026 rte_flow_tunnel_action_decap_release( 2027 port_id, pft->actions, 2028 pft->num_pmd_actions, &error); 2029 return NULL; 2030 } 2031 rte_memcpy(pft->actions, pft->pmd_actions, 2032 pft->num_pmd_actions * sizeof(actions[0])); 2033 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2034 num_actions * sizeof(actions[0])); 2035 } 2036 if (tunnel_ops->items) { 2037 uint32_t num_items; 2038 const struct rte_flow_item *iptr; 2039 2040 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2041 &pft->pmd_items, 2042 &pft->num_pmd_items, 2043 &error); 2044 if (ret) { 2045 port_flow_complain(&error); 2046 return NULL; 2047 } 2048 for (iptr = pattern, num_items = 1; 2049 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2050 iptr++, num_items++); 2051 pft->items = malloc((num_items + pft->num_pmd_items) * 2052 sizeof(pattern[0])); 2053 if (!pft->items) { 2054 rte_flow_tunnel_item_release( 2055 port_id, pft->pmd_items, 2056 pft->num_pmd_items, &error); 2057 return NULL; 2058 } 2059 rte_memcpy(pft->items, pft->pmd_items, 2060 pft->num_pmd_items * sizeof(pattern[0])); 2061 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2062 num_items * sizeof(pattern[0])); 2063 } 2064 2065 return pft; 2066 } 2067 2068 static void 2069 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2070 const struct tunnel_ops *tunnel_ops, 2071 struct port_flow_tunnel *pft) 2072 { 2073 struct rte_flow_error error; 2074 2075 if (tunnel_ops->actions) { 2076 free(pft->actions); 2077 rte_flow_tunnel_action_decap_release( 2078 port_id, pft->pmd_actions, 2079 pft->num_pmd_actions, &error); 2080 pft->actions = NULL; 2081 pft->pmd_actions = NULL; 2082 } 2083 if (tunnel_ops->items) { 2084 free(pft->items); 2085 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2086 pft->num_pmd_items, 2087 &error); 2088 pft->items = NULL; 2089 pft->pmd_items = NULL; 2090 } 2091 } 2092 2093 /** Add port meter policy */ 2094 int 2095 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2096 const struct rte_flow_action *actions) 2097 { 2098 struct rte_mtr_error error; 2099 const struct rte_flow_action *act = actions; 2100 const struct rte_flow_action *start; 2101 struct rte_mtr_meter_policy_params policy; 2102 uint32_t i = 0, act_n; 2103 int ret; 2104 2105 for (i = 0; i < RTE_COLORS; i++) { 2106 for (act_n = 0, start = act; 2107 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2108 act_n++; 2109 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2110 policy.actions[i] = start; 2111 else 2112 policy.actions[i] = NULL; 2113 act++; 2114 } 2115 ret = rte_mtr_meter_policy_add(port_id, 2116 policy_id, 2117 &policy, &error); 2118 if (ret) 2119 print_mtr_err_msg(&error); 2120 return ret; 2121 } 2122 2123 /** Validate flow rule. */ 2124 int 2125 port_flow_validate(portid_t port_id, 2126 const struct rte_flow_attr *attr, 2127 const struct rte_flow_item *pattern, 2128 const struct rte_flow_action *actions, 2129 const struct tunnel_ops *tunnel_ops) 2130 { 2131 struct rte_flow_error error; 2132 struct port_flow_tunnel *pft = NULL; 2133 int ret; 2134 2135 /* Poisoning to make sure PMDs update it in case of error. */ 2136 memset(&error, 0x11, sizeof(error)); 2137 if (tunnel_ops->enabled) { 2138 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2139 actions, tunnel_ops); 2140 if (!pft) 2141 return -ENOENT; 2142 if (pft->items) 2143 pattern = pft->items; 2144 if (pft->actions) 2145 actions = pft->actions; 2146 } 2147 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2148 if (tunnel_ops->enabled) 2149 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2150 if (ret) 2151 return port_flow_complain(&error); 2152 printf("Flow rule validated\n"); 2153 return 0; 2154 } 2155 2156 /** Return age action structure if exists, otherwise NULL. */ 2157 static struct rte_flow_action_age * 2158 age_action_get(const struct rte_flow_action *actions) 2159 { 2160 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2161 switch (actions->type) { 2162 case RTE_FLOW_ACTION_TYPE_AGE: 2163 return (struct rte_flow_action_age *) 2164 (uintptr_t)actions->conf; 2165 default: 2166 break; 2167 } 2168 } 2169 return NULL; 2170 } 2171 2172 /** Create pattern template */ 2173 int 2174 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2175 const struct rte_flow_pattern_template_attr *attr, 2176 const struct rte_flow_item *pattern) 2177 { 2178 struct rte_port *port; 2179 struct port_template *pit; 2180 int ret; 2181 struct rte_flow_error error; 2182 2183 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2184 port_id == (portid_t)RTE_PORT_ALL) 2185 return -EINVAL; 2186 port = &ports[port_id]; 2187 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2188 if (ret) 2189 return ret; 2190 /* Poisoning to make sure PMDs update it in case of error. */ 2191 memset(&error, 0x22, sizeof(error)); 2192 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2193 attr, pattern, &error); 2194 if (!pit->template.pattern_template) { 2195 uint32_t destroy_id = pit->id; 2196 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2197 return port_flow_complain(&error); 2198 } 2199 printf("Pattern template #%u created\n", pit->id); 2200 return 0; 2201 } 2202 2203 /** Destroy pattern template */ 2204 int 2205 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2206 const uint32_t *template) 2207 { 2208 struct rte_port *port; 2209 struct port_template **tmp; 2210 uint32_t c = 0; 2211 int ret = 0; 2212 2213 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2214 port_id == (portid_t)RTE_PORT_ALL) 2215 return -EINVAL; 2216 port = &ports[port_id]; 2217 tmp = &port->pattern_templ_list; 2218 while (*tmp) { 2219 uint32_t i; 2220 2221 for (i = 0; i != n; ++i) { 2222 struct rte_flow_error error; 2223 struct port_template *pit = *tmp; 2224 2225 if (template[i] != pit->id) 2226 continue; 2227 /* 2228 * Poisoning to make sure PMDs update it in case 2229 * of error. 2230 */ 2231 memset(&error, 0x33, sizeof(error)); 2232 2233 if (pit->template.pattern_template && 2234 rte_flow_pattern_template_destroy(port_id, 2235 pit->template.pattern_template, 2236 &error)) { 2237 ret = port_flow_complain(&error); 2238 continue; 2239 } 2240 *tmp = pit->next; 2241 printf("Pattern template #%u destroyed\n", pit->id); 2242 free(pit); 2243 break; 2244 } 2245 if (i == n) 2246 tmp = &(*tmp)->next; 2247 ++c; 2248 } 2249 return ret; 2250 } 2251 2252 /** Create actions template */ 2253 int 2254 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2255 const struct rte_flow_actions_template_attr *attr, 2256 const struct rte_flow_action *actions, 2257 const struct rte_flow_action *masks) 2258 { 2259 struct rte_port *port; 2260 struct port_template *pat; 2261 int ret; 2262 struct rte_flow_error error; 2263 2264 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2265 port_id == (portid_t)RTE_PORT_ALL) 2266 return -EINVAL; 2267 port = &ports[port_id]; 2268 ret = template_alloc(id, &pat, &port->actions_templ_list); 2269 if (ret) 2270 return ret; 2271 /* Poisoning to make sure PMDs update it in case of error. */ 2272 memset(&error, 0x22, sizeof(error)); 2273 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2274 attr, actions, masks, &error); 2275 if (!pat->template.actions_template) { 2276 uint32_t destroy_id = pat->id; 2277 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2278 return port_flow_complain(&error); 2279 } 2280 printf("Actions template #%u created\n", pat->id); 2281 return 0; 2282 } 2283 2284 /** Destroy actions template */ 2285 int 2286 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2287 const uint32_t *template) 2288 { 2289 struct rte_port *port; 2290 struct port_template **tmp; 2291 uint32_t c = 0; 2292 int ret = 0; 2293 2294 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2295 port_id == (portid_t)RTE_PORT_ALL) 2296 return -EINVAL; 2297 port = &ports[port_id]; 2298 tmp = &port->actions_templ_list; 2299 while (*tmp) { 2300 uint32_t i; 2301 2302 for (i = 0; i != n; ++i) { 2303 struct rte_flow_error error; 2304 struct port_template *pat = *tmp; 2305 2306 if (template[i] != pat->id) 2307 continue; 2308 /* 2309 * Poisoning to make sure PMDs update it in case 2310 * of error. 2311 */ 2312 memset(&error, 0x33, sizeof(error)); 2313 2314 if (pat->template.actions_template && 2315 rte_flow_actions_template_destroy(port_id, 2316 pat->template.actions_template, &error)) { 2317 ret = port_flow_complain(&error); 2318 continue; 2319 } 2320 *tmp = pat->next; 2321 printf("Actions template #%u destroyed\n", pat->id); 2322 free(pat); 2323 break; 2324 } 2325 if (i == n) 2326 tmp = &(*tmp)->next; 2327 ++c; 2328 } 2329 return ret; 2330 } 2331 2332 /** Create table */ 2333 int 2334 port_flow_template_table_create(portid_t port_id, uint32_t id, 2335 const struct rte_flow_template_table_attr *table_attr, 2336 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2337 uint32_t nb_actions_templates, uint32_t *actions_templates) 2338 { 2339 struct rte_port *port; 2340 struct port_table *pt; 2341 struct port_template *temp = NULL; 2342 int ret; 2343 uint32_t i; 2344 struct rte_flow_error error; 2345 struct rte_flow_pattern_template 2346 *flow_pattern_templates[nb_pattern_templates]; 2347 struct rte_flow_actions_template 2348 *flow_actions_templates[nb_actions_templates]; 2349 2350 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2351 port_id == (portid_t)RTE_PORT_ALL) 2352 return -EINVAL; 2353 port = &ports[port_id]; 2354 for (i = 0; i < nb_pattern_templates; ++i) { 2355 bool found = false; 2356 temp = port->pattern_templ_list; 2357 while (temp) { 2358 if (pattern_templates[i] == temp->id) { 2359 flow_pattern_templates[i] = 2360 temp->template.pattern_template; 2361 found = true; 2362 break; 2363 } 2364 temp = temp->next; 2365 } 2366 if (!found) { 2367 printf("Pattern template #%u is invalid\n", 2368 pattern_templates[i]); 2369 return -EINVAL; 2370 } 2371 } 2372 for (i = 0; i < nb_actions_templates; ++i) { 2373 bool found = false; 2374 temp = port->actions_templ_list; 2375 while (temp) { 2376 if (actions_templates[i] == temp->id) { 2377 flow_actions_templates[i] = 2378 temp->template.actions_template; 2379 found = true; 2380 break; 2381 } 2382 temp = temp->next; 2383 } 2384 if (!found) { 2385 printf("Actions template #%u is invalid\n", 2386 actions_templates[i]); 2387 return -EINVAL; 2388 } 2389 } 2390 ret = table_alloc(id, &pt, &port->table_list); 2391 if (ret) 2392 return ret; 2393 /* Poisoning to make sure PMDs update it in case of error. */ 2394 memset(&error, 0x22, sizeof(error)); 2395 pt->table = rte_flow_template_table_create(port_id, table_attr, 2396 flow_pattern_templates, nb_pattern_templates, 2397 flow_actions_templates, nb_actions_templates, 2398 &error); 2399 2400 if (!pt->table) { 2401 uint32_t destroy_id = pt->id; 2402 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2403 return port_flow_complain(&error); 2404 } 2405 pt->nb_pattern_templates = nb_pattern_templates; 2406 pt->nb_actions_templates = nb_actions_templates; 2407 printf("Template table #%u created\n", pt->id); 2408 return 0; 2409 } 2410 2411 /** Destroy table */ 2412 int 2413 port_flow_template_table_destroy(portid_t port_id, 2414 uint32_t n, const uint32_t *table) 2415 { 2416 struct rte_port *port; 2417 struct port_table **tmp; 2418 uint32_t c = 0; 2419 int ret = 0; 2420 2421 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2422 port_id == (portid_t)RTE_PORT_ALL) 2423 return -EINVAL; 2424 port = &ports[port_id]; 2425 tmp = &port->table_list; 2426 while (*tmp) { 2427 uint32_t i; 2428 2429 for (i = 0; i != n; ++i) { 2430 struct rte_flow_error error; 2431 struct port_table *pt = *tmp; 2432 2433 if (table[i] != pt->id) 2434 continue; 2435 /* 2436 * Poisoning to make sure PMDs update it in case 2437 * of error. 2438 */ 2439 memset(&error, 0x33, sizeof(error)); 2440 2441 if (pt->table && 2442 rte_flow_template_table_destroy(port_id, 2443 pt->table, 2444 &error)) { 2445 ret = port_flow_complain(&error); 2446 continue; 2447 } 2448 *tmp = pt->next; 2449 printf("Template table #%u destroyed\n", pt->id); 2450 free(pt); 2451 break; 2452 } 2453 if (i == n) 2454 tmp = &(*tmp)->next; 2455 ++c; 2456 } 2457 return ret; 2458 } 2459 2460 /** Enqueue create flow rule operation. */ 2461 int 2462 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2463 bool postpone, uint32_t table_id, 2464 uint32_t pattern_idx, uint32_t actions_idx, 2465 const struct rte_flow_item *pattern, 2466 const struct rte_flow_action *actions) 2467 { 2468 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2469 struct rte_flow *flow; 2470 struct rte_port *port; 2471 struct port_flow *pf; 2472 struct port_table *pt; 2473 uint32_t id = 0; 2474 bool found; 2475 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2476 struct rte_flow_action_age *age = age_action_get(actions); 2477 2478 port = &ports[port_id]; 2479 if (port->flow_list) { 2480 if (port->flow_list->id == UINT32_MAX) { 2481 printf("Highest rule ID is already assigned," 2482 " delete it first"); 2483 return -ENOMEM; 2484 } 2485 id = port->flow_list->id + 1; 2486 } 2487 2488 if (queue_id >= port->queue_nb) { 2489 printf("Queue #%u is invalid\n", queue_id); 2490 return -EINVAL; 2491 } 2492 2493 found = false; 2494 pt = port->table_list; 2495 while (pt) { 2496 if (table_id == pt->id) { 2497 found = true; 2498 break; 2499 } 2500 pt = pt->next; 2501 } 2502 if (!found) { 2503 printf("Table #%u is invalid\n", table_id); 2504 return -EINVAL; 2505 } 2506 2507 if (pattern_idx >= pt->nb_pattern_templates) { 2508 printf("Pattern template index #%u is invalid," 2509 " %u templates present in the table\n", 2510 pattern_idx, pt->nb_pattern_templates); 2511 return -EINVAL; 2512 } 2513 if (actions_idx >= pt->nb_actions_templates) { 2514 printf("Actions template index #%u is invalid," 2515 " %u templates present in the table\n", 2516 actions_idx, pt->nb_actions_templates); 2517 return -EINVAL; 2518 } 2519 2520 pf = port_flow_new(NULL, pattern, actions, &error); 2521 if (!pf) 2522 return port_flow_complain(&error); 2523 if (age) { 2524 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2525 age->context = &pf->age_type; 2526 } 2527 /* Poisoning to make sure PMDs update it in case of error. */ 2528 memset(&error, 0x11, sizeof(error)); 2529 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2530 pattern, pattern_idx, actions, actions_idx, NULL, &error); 2531 if (!flow) { 2532 uint32_t flow_id = pf->id; 2533 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2534 return port_flow_complain(&error); 2535 } 2536 2537 pf->next = port->flow_list; 2538 pf->id = id; 2539 pf->flow = flow; 2540 port->flow_list = pf; 2541 printf("Flow rule #%u creation enqueued\n", pf->id); 2542 return 0; 2543 } 2544 2545 /** Enqueue number of destroy flow rules operations. */ 2546 int 2547 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2548 bool postpone, uint32_t n, const uint32_t *rule) 2549 { 2550 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2551 struct rte_port *port; 2552 struct port_flow **tmp; 2553 uint32_t c = 0; 2554 int ret = 0; 2555 2556 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2557 port_id == (portid_t)RTE_PORT_ALL) 2558 return -EINVAL; 2559 port = &ports[port_id]; 2560 2561 if (queue_id >= port->queue_nb) { 2562 printf("Queue #%u is invalid\n", queue_id); 2563 return -EINVAL; 2564 } 2565 2566 tmp = &port->flow_list; 2567 while (*tmp) { 2568 uint32_t i; 2569 2570 for (i = 0; i != n; ++i) { 2571 struct rte_flow_error error; 2572 struct port_flow *pf = *tmp; 2573 2574 if (rule[i] != pf->id) 2575 continue; 2576 /* 2577 * Poisoning to make sure PMD 2578 * update it in case of error. 2579 */ 2580 memset(&error, 0x33, sizeof(error)); 2581 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2582 pf->flow, NULL, &error)) { 2583 ret = port_flow_complain(&error); 2584 continue; 2585 } 2586 printf("Flow rule #%u destruction enqueued\n", pf->id); 2587 *tmp = pf->next; 2588 free(pf); 2589 break; 2590 } 2591 if (i == n) 2592 tmp = &(*tmp)->next; 2593 ++c; 2594 } 2595 return ret; 2596 } 2597 2598 /** Enqueue indirect action create operation. */ 2599 int 2600 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2601 bool postpone, uint32_t id, 2602 const struct rte_flow_indir_action_conf *conf, 2603 const struct rte_flow_action *action) 2604 { 2605 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2606 struct rte_port *port; 2607 struct port_indirect_action *pia; 2608 int ret; 2609 struct rte_flow_error error; 2610 2611 ret = action_alloc(port_id, id, &pia); 2612 if (ret) 2613 return ret; 2614 2615 port = &ports[port_id]; 2616 if (queue_id >= port->queue_nb) { 2617 printf("Queue #%u is invalid\n", queue_id); 2618 return -EINVAL; 2619 } 2620 2621 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2622 struct rte_flow_action_age *age = 2623 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2624 2625 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2626 age->context = &pia->age_type; 2627 } 2628 /* Poisoning to make sure PMDs update it in case of error. */ 2629 memset(&error, 0x88, sizeof(error)); 2630 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2631 &attr, conf, action, NULL, &error); 2632 if (!pia->handle) { 2633 uint32_t destroy_id = pia->id; 2634 port_queue_action_handle_destroy(port_id, queue_id, 2635 postpone, 1, &destroy_id); 2636 return port_flow_complain(&error); 2637 } 2638 pia->type = action->type; 2639 printf("Indirect action #%u creation queued\n", pia->id); 2640 return 0; 2641 } 2642 2643 /** Enqueue indirect action destroy operation. */ 2644 int 2645 port_queue_action_handle_destroy(portid_t port_id, 2646 uint32_t queue_id, bool postpone, 2647 uint32_t n, const uint32_t *actions) 2648 { 2649 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2650 struct rte_port *port; 2651 struct port_indirect_action **tmp; 2652 uint32_t c = 0; 2653 int ret = 0; 2654 2655 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2656 port_id == (portid_t)RTE_PORT_ALL) 2657 return -EINVAL; 2658 port = &ports[port_id]; 2659 2660 if (queue_id >= port->queue_nb) { 2661 printf("Queue #%u is invalid\n", queue_id); 2662 return -EINVAL; 2663 } 2664 2665 tmp = &port->actions_list; 2666 while (*tmp) { 2667 uint32_t i; 2668 2669 for (i = 0; i != n; ++i) { 2670 struct rte_flow_error error; 2671 struct port_indirect_action *pia = *tmp; 2672 2673 if (actions[i] != pia->id) 2674 continue; 2675 /* 2676 * Poisoning to make sure PMDs update it in case 2677 * of error. 2678 */ 2679 memset(&error, 0x99, sizeof(error)); 2680 2681 if (pia->handle && 2682 rte_flow_async_action_handle_destroy(port_id, 2683 queue_id, &attr, pia->handle, NULL, &error)) { 2684 ret = port_flow_complain(&error); 2685 continue; 2686 } 2687 *tmp = pia->next; 2688 printf("Indirect action #%u destruction queued\n", 2689 pia->id); 2690 free(pia); 2691 break; 2692 } 2693 if (i == n) 2694 tmp = &(*tmp)->next; 2695 ++c; 2696 } 2697 return ret; 2698 } 2699 2700 /** Enqueue indirect action update operation. */ 2701 int 2702 port_queue_action_handle_update(portid_t port_id, 2703 uint32_t queue_id, bool postpone, uint32_t id, 2704 const struct rte_flow_action *action) 2705 { 2706 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2707 struct rte_port *port; 2708 struct rte_flow_error error; 2709 struct rte_flow_action_handle *action_handle; 2710 2711 action_handle = port_action_handle_get_by_id(port_id, id); 2712 if (!action_handle) 2713 return -EINVAL; 2714 2715 port = &ports[port_id]; 2716 if (queue_id >= port->queue_nb) { 2717 printf("Queue #%u is invalid\n", queue_id); 2718 return -EINVAL; 2719 } 2720 2721 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 2722 action_handle, action, NULL, &error)) { 2723 return port_flow_complain(&error); 2724 } 2725 printf("Indirect action #%u update queued\n", id); 2726 return 0; 2727 } 2728 2729 /** Push all the queue operations in the queue to the NIC. */ 2730 int 2731 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 2732 { 2733 struct rte_port *port; 2734 struct rte_flow_error error; 2735 int ret = 0; 2736 2737 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2738 port_id == (portid_t)RTE_PORT_ALL) 2739 return -EINVAL; 2740 port = &ports[port_id]; 2741 2742 if (queue_id >= port->queue_nb) { 2743 printf("Queue #%u is invalid\n", queue_id); 2744 return -EINVAL; 2745 } 2746 2747 memset(&error, 0x55, sizeof(error)); 2748 ret = rte_flow_push(port_id, queue_id, &error); 2749 if (ret < 0) { 2750 printf("Failed to push operations in the queue\n"); 2751 return -EINVAL; 2752 } 2753 printf("Queue #%u operations pushed\n", queue_id); 2754 return ret; 2755 } 2756 2757 /** Pull queue operation results from the queue. */ 2758 int 2759 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 2760 { 2761 struct rte_port *port; 2762 struct rte_flow_op_result *res; 2763 struct rte_flow_error error; 2764 int ret = 0; 2765 int success = 0; 2766 int i; 2767 2768 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2769 port_id == (portid_t)RTE_PORT_ALL) 2770 return -EINVAL; 2771 port = &ports[port_id]; 2772 2773 if (queue_id >= port->queue_nb) { 2774 printf("Queue #%u is invalid\n", queue_id); 2775 return -EINVAL; 2776 } 2777 2778 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 2779 if (!res) { 2780 printf("Failed to allocate memory for pulled results\n"); 2781 return -ENOMEM; 2782 } 2783 2784 memset(&error, 0x66, sizeof(error)); 2785 ret = rte_flow_pull(port_id, queue_id, res, 2786 port->queue_sz, &error); 2787 if (ret < 0) { 2788 printf("Failed to pull a operation results\n"); 2789 free(res); 2790 return -EINVAL; 2791 } 2792 2793 for (i = 0; i < ret; i++) { 2794 if (res[i].status == RTE_FLOW_OP_SUCCESS) 2795 success++; 2796 } 2797 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 2798 queue_id, ret, ret - success, success); 2799 free(res); 2800 return ret; 2801 } 2802 2803 /** Create flow rule. */ 2804 int 2805 port_flow_create(portid_t port_id, 2806 const struct rte_flow_attr *attr, 2807 const struct rte_flow_item *pattern, 2808 const struct rte_flow_action *actions, 2809 const struct tunnel_ops *tunnel_ops) 2810 { 2811 struct rte_flow *flow; 2812 struct rte_port *port; 2813 struct port_flow *pf; 2814 uint32_t id = 0; 2815 struct rte_flow_error error; 2816 struct port_flow_tunnel *pft = NULL; 2817 struct rte_flow_action_age *age = age_action_get(actions); 2818 2819 port = &ports[port_id]; 2820 if (port->flow_list) { 2821 if (port->flow_list->id == UINT32_MAX) { 2822 fprintf(stderr, 2823 "Highest rule ID is already assigned, delete it first"); 2824 return -ENOMEM; 2825 } 2826 id = port->flow_list->id + 1; 2827 } 2828 if (tunnel_ops->enabled) { 2829 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2830 actions, tunnel_ops); 2831 if (!pft) 2832 return -ENOENT; 2833 if (pft->items) 2834 pattern = pft->items; 2835 if (pft->actions) 2836 actions = pft->actions; 2837 } 2838 pf = port_flow_new(attr, pattern, actions, &error); 2839 if (!pf) 2840 return port_flow_complain(&error); 2841 if (age) { 2842 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2843 age->context = &pf->age_type; 2844 } 2845 /* Poisoning to make sure PMDs update it in case of error. */ 2846 memset(&error, 0x22, sizeof(error)); 2847 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2848 if (!flow) { 2849 if (tunnel_ops->enabled) 2850 port_flow_tunnel_offload_cmd_release(port_id, 2851 tunnel_ops, pft); 2852 free(pf); 2853 return port_flow_complain(&error); 2854 } 2855 pf->next = port->flow_list; 2856 pf->id = id; 2857 pf->flow = flow; 2858 port->flow_list = pf; 2859 if (tunnel_ops->enabled) 2860 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2861 printf("Flow rule #%u created\n", pf->id); 2862 return 0; 2863 } 2864 2865 /** Destroy a number of flow rules. */ 2866 int 2867 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2868 { 2869 struct rte_port *port; 2870 struct port_flow **tmp; 2871 uint32_t c = 0; 2872 int ret = 0; 2873 2874 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2875 port_id == (portid_t)RTE_PORT_ALL) 2876 return -EINVAL; 2877 port = &ports[port_id]; 2878 tmp = &port->flow_list; 2879 while (*tmp) { 2880 uint32_t i; 2881 2882 for (i = 0; i != n; ++i) { 2883 struct rte_flow_error error; 2884 struct port_flow *pf = *tmp; 2885 2886 if (rule[i] != pf->id) 2887 continue; 2888 /* 2889 * Poisoning to make sure PMDs update it in case 2890 * of error. 2891 */ 2892 memset(&error, 0x33, sizeof(error)); 2893 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2894 ret = port_flow_complain(&error); 2895 continue; 2896 } 2897 printf("Flow rule #%u destroyed\n", pf->id); 2898 *tmp = pf->next; 2899 free(pf); 2900 break; 2901 } 2902 if (i == n) 2903 tmp = &(*tmp)->next; 2904 ++c; 2905 } 2906 return ret; 2907 } 2908 2909 /** Remove all flow rules. */ 2910 int 2911 port_flow_flush(portid_t port_id) 2912 { 2913 struct rte_flow_error error; 2914 struct rte_port *port; 2915 int ret = 0; 2916 2917 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2918 port_id == (portid_t)RTE_PORT_ALL) 2919 return -EINVAL; 2920 2921 port = &ports[port_id]; 2922 2923 if (port->flow_list == NULL) 2924 return ret; 2925 2926 /* Poisoning to make sure PMDs update it in case of error. */ 2927 memset(&error, 0x44, sizeof(error)); 2928 if (rte_flow_flush(port_id, &error)) { 2929 port_flow_complain(&error); 2930 } 2931 2932 while (port->flow_list) { 2933 struct port_flow *pf = port->flow_list->next; 2934 2935 free(port->flow_list); 2936 port->flow_list = pf; 2937 } 2938 return ret; 2939 } 2940 2941 /** Dump flow rules. */ 2942 int 2943 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 2944 const char *file_name) 2945 { 2946 int ret = 0; 2947 FILE *file = stdout; 2948 struct rte_flow_error error; 2949 struct rte_port *port; 2950 struct port_flow *pflow; 2951 struct rte_flow *tmpFlow = NULL; 2952 bool found = false; 2953 2954 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2955 port_id == (portid_t)RTE_PORT_ALL) 2956 return -EINVAL; 2957 2958 if (!dump_all) { 2959 port = &ports[port_id]; 2960 pflow = port->flow_list; 2961 while (pflow) { 2962 if (rule_id != pflow->id) { 2963 pflow = pflow->next; 2964 } else { 2965 tmpFlow = pflow->flow; 2966 if (tmpFlow) 2967 found = true; 2968 break; 2969 } 2970 } 2971 if (found == false) { 2972 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 2973 return -EINVAL; 2974 } 2975 } 2976 2977 if (file_name && strlen(file_name)) { 2978 file = fopen(file_name, "w"); 2979 if (!file) { 2980 fprintf(stderr, "Failed to create file %s: %s\n", 2981 file_name, strerror(errno)); 2982 return -errno; 2983 } 2984 } 2985 2986 if (!dump_all) 2987 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 2988 else 2989 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 2990 if (ret) { 2991 port_flow_complain(&error); 2992 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 2993 } else 2994 printf("Flow dump finished\n"); 2995 if (file_name && strlen(file_name)) 2996 fclose(file); 2997 return ret; 2998 } 2999 3000 /** Query a flow rule. */ 3001 int 3002 port_flow_query(portid_t port_id, uint32_t rule, 3003 const struct rte_flow_action *action) 3004 { 3005 struct rte_flow_error error; 3006 struct rte_port *port; 3007 struct port_flow *pf; 3008 const char *name; 3009 union { 3010 struct rte_flow_query_count count; 3011 struct rte_flow_action_rss rss_conf; 3012 struct rte_flow_query_age age; 3013 } query; 3014 int ret; 3015 3016 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3017 port_id == (portid_t)RTE_PORT_ALL) 3018 return -EINVAL; 3019 port = &ports[port_id]; 3020 for (pf = port->flow_list; pf; pf = pf->next) 3021 if (pf->id == rule) 3022 break; 3023 if (!pf) { 3024 fprintf(stderr, "Flow rule #%u not found\n", rule); 3025 return -ENOENT; 3026 } 3027 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3028 &name, sizeof(name), 3029 (void *)(uintptr_t)action->type, &error); 3030 if (ret < 0) 3031 return port_flow_complain(&error); 3032 switch (action->type) { 3033 case RTE_FLOW_ACTION_TYPE_COUNT: 3034 case RTE_FLOW_ACTION_TYPE_RSS: 3035 case RTE_FLOW_ACTION_TYPE_AGE: 3036 break; 3037 default: 3038 fprintf(stderr, "Cannot query action type %d (%s)\n", 3039 action->type, name); 3040 return -ENOTSUP; 3041 } 3042 /* Poisoning to make sure PMDs update it in case of error. */ 3043 memset(&error, 0x55, sizeof(error)); 3044 memset(&query, 0, sizeof(query)); 3045 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3046 return port_flow_complain(&error); 3047 switch (action->type) { 3048 case RTE_FLOW_ACTION_TYPE_COUNT: 3049 printf("%s:\n" 3050 " hits_set: %u\n" 3051 " bytes_set: %u\n" 3052 " hits: %" PRIu64 "\n" 3053 " bytes: %" PRIu64 "\n", 3054 name, 3055 query.count.hits_set, 3056 query.count.bytes_set, 3057 query.count.hits, 3058 query.count.bytes); 3059 break; 3060 case RTE_FLOW_ACTION_TYPE_RSS: 3061 rss_config_display(&query.rss_conf); 3062 break; 3063 case RTE_FLOW_ACTION_TYPE_AGE: 3064 printf("%s:\n" 3065 " aged: %u\n" 3066 " sec_since_last_hit_valid: %u\n" 3067 " sec_since_last_hit: %" PRIu32 "\n", 3068 name, 3069 query.age.aged, 3070 query.age.sec_since_last_hit_valid, 3071 query.age.sec_since_last_hit); 3072 break; 3073 default: 3074 fprintf(stderr, 3075 "Cannot display result for action type %d (%s)\n", 3076 action->type, name); 3077 break; 3078 } 3079 return 0; 3080 } 3081 3082 /** List simply and destroy all aged flows. */ 3083 void 3084 port_flow_aged(portid_t port_id, uint8_t destroy) 3085 { 3086 void **contexts; 3087 int nb_context, total = 0, idx; 3088 struct rte_flow_error error; 3089 enum age_action_context_type *type; 3090 union { 3091 struct port_flow *pf; 3092 struct port_indirect_action *pia; 3093 } ctx; 3094 3095 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3096 port_id == (portid_t)RTE_PORT_ALL) 3097 return; 3098 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3099 printf("Port %u total aged flows: %d\n", port_id, total); 3100 if (total < 0) { 3101 port_flow_complain(&error); 3102 return; 3103 } 3104 if (total == 0) 3105 return; 3106 contexts = malloc(sizeof(void *) * total); 3107 if (contexts == NULL) { 3108 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3109 return; 3110 } 3111 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3112 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3113 if (nb_context != total) { 3114 fprintf(stderr, 3115 "Port:%d get aged flows count(%d) != total(%d)\n", 3116 port_id, nb_context, total); 3117 free(contexts); 3118 return; 3119 } 3120 total = 0; 3121 for (idx = 0; idx < nb_context; idx++) { 3122 if (!contexts[idx]) { 3123 fprintf(stderr, "Error: get Null context in port %u\n", 3124 port_id); 3125 continue; 3126 } 3127 type = (enum age_action_context_type *)contexts[idx]; 3128 switch (*type) { 3129 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3130 ctx.pf = container_of(type, struct port_flow, age_type); 3131 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 3132 "\t%c%c%c\t\n", 3133 "Flow", 3134 ctx.pf->id, 3135 ctx.pf->rule.attr->group, 3136 ctx.pf->rule.attr->priority, 3137 ctx.pf->rule.attr->ingress ? 'i' : '-', 3138 ctx.pf->rule.attr->egress ? 'e' : '-', 3139 ctx.pf->rule.attr->transfer ? 't' : '-'); 3140 if (destroy && !port_flow_destroy(port_id, 1, 3141 &ctx.pf->id)) 3142 total++; 3143 break; 3144 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3145 ctx.pia = container_of(type, 3146 struct port_indirect_action, age_type); 3147 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3148 ctx.pia->id); 3149 break; 3150 default: 3151 fprintf(stderr, "Error: invalid context type %u\n", 3152 port_id); 3153 break; 3154 } 3155 } 3156 printf("\n%d flows destroyed\n", total); 3157 free(contexts); 3158 } 3159 3160 /** List flow rules. */ 3161 void 3162 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3163 { 3164 struct rte_port *port; 3165 struct port_flow *pf; 3166 struct port_flow *list = NULL; 3167 uint32_t i; 3168 3169 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3170 port_id == (portid_t)RTE_PORT_ALL) 3171 return; 3172 port = &ports[port_id]; 3173 if (!port->flow_list) 3174 return; 3175 /* Sort flows by group, priority and ID. */ 3176 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3177 struct port_flow **tmp; 3178 const struct rte_flow_attr *curr = pf->rule.attr; 3179 3180 if (n) { 3181 /* Filter out unwanted groups. */ 3182 for (i = 0; i != n; ++i) 3183 if (curr->group == group[i]) 3184 break; 3185 if (i == n) 3186 continue; 3187 } 3188 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3189 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3190 3191 if (curr->group > comp->group || 3192 (curr->group == comp->group && 3193 curr->priority > comp->priority) || 3194 (curr->group == comp->group && 3195 curr->priority == comp->priority && 3196 pf->id > (*tmp)->id)) 3197 continue; 3198 break; 3199 } 3200 pf->tmp = *tmp; 3201 *tmp = pf; 3202 } 3203 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3204 for (pf = list; pf != NULL; pf = pf->tmp) { 3205 const struct rte_flow_item *item = pf->rule.pattern; 3206 const struct rte_flow_action *action = pf->rule.actions; 3207 const char *name; 3208 3209 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3210 pf->id, 3211 pf->rule.attr->group, 3212 pf->rule.attr->priority, 3213 pf->rule.attr->ingress ? 'i' : '-', 3214 pf->rule.attr->egress ? 'e' : '-', 3215 pf->rule.attr->transfer ? 't' : '-'); 3216 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3217 if ((uint32_t)item->type > INT_MAX) 3218 name = "PMD_INTERNAL"; 3219 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3220 &name, sizeof(name), 3221 (void *)(uintptr_t)item->type, 3222 NULL) <= 0) 3223 name = "[UNKNOWN]"; 3224 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3225 printf("%s ", name); 3226 ++item; 3227 } 3228 printf("=>"); 3229 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3230 if ((uint32_t)action->type > INT_MAX) 3231 name = "PMD_INTERNAL"; 3232 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3233 &name, sizeof(name), 3234 (void *)(uintptr_t)action->type, 3235 NULL) <= 0) 3236 name = "[UNKNOWN]"; 3237 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3238 printf(" %s", name); 3239 ++action; 3240 } 3241 printf("\n"); 3242 } 3243 } 3244 3245 /** Restrict ingress traffic to the defined flow rules. */ 3246 int 3247 port_flow_isolate(portid_t port_id, int set) 3248 { 3249 struct rte_flow_error error; 3250 3251 /* Poisoning to make sure PMDs update it in case of error. */ 3252 memset(&error, 0x66, sizeof(error)); 3253 if (rte_flow_isolate(port_id, set, &error)) 3254 return port_flow_complain(&error); 3255 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3256 port_id, 3257 set ? "now restricted" : "not restricted anymore"); 3258 return 0; 3259 } 3260 3261 /* 3262 * RX/TX ring descriptors display functions. 3263 */ 3264 int 3265 rx_queue_id_is_invalid(queueid_t rxq_id) 3266 { 3267 if (rxq_id < nb_rxq) 3268 return 0; 3269 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3270 rxq_id, nb_rxq); 3271 return 1; 3272 } 3273 3274 int 3275 tx_queue_id_is_invalid(queueid_t txq_id) 3276 { 3277 if (txq_id < nb_txq) 3278 return 0; 3279 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 3280 txq_id, nb_txq); 3281 return 1; 3282 } 3283 3284 static int 3285 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 3286 { 3287 struct rte_port *port = &ports[port_id]; 3288 struct rte_eth_rxq_info rx_qinfo; 3289 int ret; 3290 3291 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 3292 if (ret == 0) { 3293 *ring_size = rx_qinfo.nb_desc; 3294 return ret; 3295 } 3296 3297 if (ret != -ENOTSUP) 3298 return ret; 3299 /* 3300 * If the rte_eth_rx_queue_info_get is not support for this PMD, 3301 * ring_size stored in testpmd will be used for validity verification. 3302 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 3303 * being 0, it will use a default value provided by PMDs to setup this 3304 * rxq. If the default value is 0, it will use the 3305 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 3306 */ 3307 if (port->nb_rx_desc[rxq_id]) 3308 *ring_size = port->nb_rx_desc[rxq_id]; 3309 else if (port->dev_info.default_rxportconf.ring_size) 3310 *ring_size = port->dev_info.default_rxportconf.ring_size; 3311 else 3312 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 3313 return 0; 3314 } 3315 3316 static int 3317 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 3318 { 3319 struct rte_port *port = &ports[port_id]; 3320 struct rte_eth_txq_info tx_qinfo; 3321 int ret; 3322 3323 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 3324 if (ret == 0) { 3325 *ring_size = tx_qinfo.nb_desc; 3326 return ret; 3327 } 3328 3329 if (ret != -ENOTSUP) 3330 return ret; 3331 /* 3332 * If the rte_eth_tx_queue_info_get is not support for this PMD, 3333 * ring_size stored in testpmd will be used for validity verification. 3334 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 3335 * being 0, it will use a default value provided by PMDs to setup this 3336 * txq. If the default value is 0, it will use the 3337 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 3338 */ 3339 if (port->nb_tx_desc[txq_id]) 3340 *ring_size = port->nb_tx_desc[txq_id]; 3341 else if (port->dev_info.default_txportconf.ring_size) 3342 *ring_size = port->dev_info.default_txportconf.ring_size; 3343 else 3344 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 3345 return 0; 3346 } 3347 3348 static int 3349 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 3350 { 3351 uint16_t ring_size; 3352 int ret; 3353 3354 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 3355 if (ret) 3356 return 1; 3357 3358 if (rxdesc_id < ring_size) 3359 return 0; 3360 3361 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 3362 rxdesc_id, ring_size); 3363 return 1; 3364 } 3365 3366 static int 3367 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 3368 { 3369 uint16_t ring_size; 3370 int ret; 3371 3372 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 3373 if (ret) 3374 return 1; 3375 3376 if (txdesc_id < ring_size) 3377 return 0; 3378 3379 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 3380 txdesc_id, ring_size); 3381 return 1; 3382 } 3383 3384 static const struct rte_memzone * 3385 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 3386 { 3387 char mz_name[RTE_MEMZONE_NAMESIZE]; 3388 const struct rte_memzone *mz; 3389 3390 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 3391 port_id, q_id, ring_name); 3392 mz = rte_memzone_lookup(mz_name); 3393 if (mz == NULL) 3394 fprintf(stderr, 3395 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 3396 ring_name, port_id, q_id, mz_name); 3397 return mz; 3398 } 3399 3400 union igb_ring_dword { 3401 uint64_t dword; 3402 struct { 3403 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3404 uint32_t lo; 3405 uint32_t hi; 3406 #else 3407 uint32_t hi; 3408 uint32_t lo; 3409 #endif 3410 } words; 3411 }; 3412 3413 struct igb_ring_desc_32_bytes { 3414 union igb_ring_dword lo_dword; 3415 union igb_ring_dword hi_dword; 3416 union igb_ring_dword resv1; 3417 union igb_ring_dword resv2; 3418 }; 3419 3420 struct igb_ring_desc_16_bytes { 3421 union igb_ring_dword lo_dword; 3422 union igb_ring_dword hi_dword; 3423 }; 3424 3425 static void 3426 ring_rxd_display_dword(union igb_ring_dword dword) 3427 { 3428 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 3429 (unsigned)dword.words.hi); 3430 } 3431 3432 static void 3433 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 3434 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3435 portid_t port_id, 3436 #else 3437 __rte_unused portid_t port_id, 3438 #endif 3439 uint16_t desc_id) 3440 { 3441 struct igb_ring_desc_16_bytes *ring = 3442 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3443 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3444 int ret; 3445 struct rte_eth_dev_info dev_info; 3446 3447 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3448 if (ret != 0) 3449 return; 3450 3451 if (strstr(dev_info.driver_name, "i40e") != NULL) { 3452 /* 32 bytes RX descriptor, i40e only */ 3453 struct igb_ring_desc_32_bytes *ring = 3454 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 3455 ring[desc_id].lo_dword.dword = 3456 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3457 ring_rxd_display_dword(ring[desc_id].lo_dword); 3458 ring[desc_id].hi_dword.dword = 3459 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3460 ring_rxd_display_dword(ring[desc_id].hi_dword); 3461 ring[desc_id].resv1.dword = 3462 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 3463 ring_rxd_display_dword(ring[desc_id].resv1); 3464 ring[desc_id].resv2.dword = 3465 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 3466 ring_rxd_display_dword(ring[desc_id].resv2); 3467 3468 return; 3469 } 3470 #endif 3471 /* 16 bytes RX descriptor */ 3472 ring[desc_id].lo_dword.dword = 3473 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3474 ring_rxd_display_dword(ring[desc_id].lo_dword); 3475 ring[desc_id].hi_dword.dword = 3476 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3477 ring_rxd_display_dword(ring[desc_id].hi_dword); 3478 } 3479 3480 static void 3481 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 3482 { 3483 struct igb_ring_desc_16_bytes *ring; 3484 struct igb_ring_desc_16_bytes txd; 3485 3486 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3487 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3488 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3489 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 3490 (unsigned)txd.lo_dword.words.lo, 3491 (unsigned)txd.lo_dword.words.hi, 3492 (unsigned)txd.hi_dword.words.lo, 3493 (unsigned)txd.hi_dword.words.hi); 3494 } 3495 3496 void 3497 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 3498 { 3499 const struct rte_memzone *rx_mz; 3500 3501 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 3502 return; 3503 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 3504 if (rx_mz == NULL) 3505 return; 3506 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 3507 } 3508 3509 void 3510 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 3511 { 3512 const struct rte_memzone *tx_mz; 3513 3514 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 3515 return; 3516 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 3517 if (tx_mz == NULL) 3518 return; 3519 ring_tx_descriptor_display(tx_mz, txd_id); 3520 } 3521 3522 void 3523 fwd_lcores_config_display(void) 3524 { 3525 lcoreid_t lc_id; 3526 3527 printf("List of forwarding lcores:"); 3528 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 3529 printf(" %2u", fwd_lcores_cpuids[lc_id]); 3530 printf("\n"); 3531 } 3532 void 3533 rxtx_config_display(void) 3534 { 3535 portid_t pid; 3536 queueid_t qid; 3537 3538 printf(" %s packet forwarding%s packets/burst=%d\n", 3539 cur_fwd_eng->fwd_mode_name, 3540 retry_enabled == 0 ? "" : " with retry", 3541 nb_pkt_per_burst); 3542 3543 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 3544 printf(" packet len=%u - nb packet segments=%d\n", 3545 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 3546 3547 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 3548 nb_fwd_lcores, nb_fwd_ports); 3549 3550 RTE_ETH_FOREACH_DEV(pid) { 3551 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 3552 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 3553 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 3554 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 3555 struct rte_eth_rxq_info rx_qinfo; 3556 struct rte_eth_txq_info tx_qinfo; 3557 uint16_t rx_free_thresh_tmp; 3558 uint16_t tx_free_thresh_tmp; 3559 uint16_t tx_rs_thresh_tmp; 3560 uint16_t nb_rx_desc_tmp; 3561 uint16_t nb_tx_desc_tmp; 3562 uint64_t offloads_tmp; 3563 uint8_t pthresh_tmp; 3564 uint8_t hthresh_tmp; 3565 uint8_t wthresh_tmp; 3566 int32_t rc; 3567 3568 /* per port config */ 3569 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 3570 (unsigned int)pid, nb_rxq, nb_txq); 3571 3572 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 3573 ports[pid].dev_conf.rxmode.offloads, 3574 ports[pid].dev_conf.txmode.offloads); 3575 3576 /* per rx queue config only for first queue to be less verbose */ 3577 for (qid = 0; qid < 1; qid++) { 3578 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 3579 if (rc) { 3580 nb_rx_desc_tmp = nb_rx_desc[qid]; 3581 rx_free_thresh_tmp = 3582 rx_conf[qid].rx_free_thresh; 3583 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 3584 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 3585 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 3586 offloads_tmp = rx_conf[qid].offloads; 3587 } else { 3588 nb_rx_desc_tmp = rx_qinfo.nb_desc; 3589 rx_free_thresh_tmp = 3590 rx_qinfo.conf.rx_free_thresh; 3591 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 3592 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 3593 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 3594 offloads_tmp = rx_qinfo.conf.offloads; 3595 } 3596 3597 printf(" RX queue: %d\n", qid); 3598 printf(" RX desc=%d - RX free threshold=%d\n", 3599 nb_rx_desc_tmp, rx_free_thresh_tmp); 3600 printf(" RX threshold registers: pthresh=%d hthresh=%d " 3601 " wthresh=%d\n", 3602 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3603 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 3604 if (rx_conf->share_group > 0) 3605 printf(" share_group=%u share_qid=%u", 3606 rx_conf->share_group, 3607 rx_conf->share_qid); 3608 printf("\n"); 3609 } 3610 3611 /* per tx queue config only for first queue to be less verbose */ 3612 for (qid = 0; qid < 1; qid++) { 3613 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 3614 if (rc) { 3615 nb_tx_desc_tmp = nb_tx_desc[qid]; 3616 tx_free_thresh_tmp = 3617 tx_conf[qid].tx_free_thresh; 3618 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 3619 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 3620 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 3621 offloads_tmp = tx_conf[qid].offloads; 3622 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 3623 } else { 3624 nb_tx_desc_tmp = tx_qinfo.nb_desc; 3625 tx_free_thresh_tmp = 3626 tx_qinfo.conf.tx_free_thresh; 3627 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 3628 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 3629 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 3630 offloads_tmp = tx_qinfo.conf.offloads; 3631 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 3632 } 3633 3634 printf(" TX queue: %d\n", qid); 3635 printf(" TX desc=%d - TX free threshold=%d\n", 3636 nb_tx_desc_tmp, tx_free_thresh_tmp); 3637 printf(" TX threshold registers: pthresh=%d hthresh=%d " 3638 " wthresh=%d\n", 3639 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3640 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 3641 offloads_tmp, tx_rs_thresh_tmp); 3642 } 3643 } 3644 } 3645 3646 void 3647 port_rss_reta_info(portid_t port_id, 3648 struct rte_eth_rss_reta_entry64 *reta_conf, 3649 uint16_t nb_entries) 3650 { 3651 uint16_t i, idx, shift; 3652 int ret; 3653 3654 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3655 return; 3656 3657 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 3658 if (ret != 0) { 3659 fprintf(stderr, 3660 "Failed to get RSS RETA info, return code = %d\n", 3661 ret); 3662 return; 3663 } 3664 3665 for (i = 0; i < nb_entries; i++) { 3666 idx = i / RTE_ETH_RETA_GROUP_SIZE; 3667 shift = i % RTE_ETH_RETA_GROUP_SIZE; 3668 if (!(reta_conf[idx].mask & (1ULL << shift))) 3669 continue; 3670 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 3671 i, reta_conf[idx].reta[shift]); 3672 } 3673 } 3674 3675 /* 3676 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 3677 * key of the port. 3678 */ 3679 void 3680 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 3681 { 3682 struct rte_eth_rss_conf rss_conf = {0}; 3683 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 3684 uint64_t rss_hf; 3685 uint8_t i; 3686 int diag; 3687 struct rte_eth_dev_info dev_info; 3688 uint8_t hash_key_size; 3689 int ret; 3690 3691 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3692 return; 3693 3694 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3695 if (ret != 0) 3696 return; 3697 3698 if (dev_info.hash_key_size > 0 && 3699 dev_info.hash_key_size <= sizeof(rss_key)) 3700 hash_key_size = dev_info.hash_key_size; 3701 else { 3702 fprintf(stderr, 3703 "dev_info did not provide a valid hash key size\n"); 3704 return; 3705 } 3706 3707 /* Get RSS hash key if asked to display it */ 3708 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 3709 rss_conf.rss_key_len = hash_key_size; 3710 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3711 if (diag != 0) { 3712 switch (diag) { 3713 case -ENODEV: 3714 fprintf(stderr, "port index %d invalid\n", port_id); 3715 break; 3716 case -ENOTSUP: 3717 fprintf(stderr, "operation not supported by device\n"); 3718 break; 3719 default: 3720 fprintf(stderr, "operation failed - diag=%d\n", diag); 3721 break; 3722 } 3723 return; 3724 } 3725 rss_hf = rss_conf.rss_hf; 3726 if (rss_hf == 0) { 3727 printf("RSS disabled\n"); 3728 return; 3729 } 3730 printf("RSS functions:\n"); 3731 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 3732 if (!show_rss_key) 3733 return; 3734 printf("RSS key:\n"); 3735 for (i = 0; i < hash_key_size; i++) 3736 printf("%02X", rss_key[i]); 3737 printf("\n"); 3738 } 3739 3740 void 3741 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3742 uint8_t hash_key_len) 3743 { 3744 struct rte_eth_rss_conf rss_conf; 3745 int diag; 3746 3747 rss_conf.rss_key = NULL; 3748 rss_conf.rss_key_len = 0; 3749 rss_conf.rss_hf = str_to_rsstypes(rss_type); 3750 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3751 if (diag == 0) { 3752 rss_conf.rss_key = hash_key; 3753 rss_conf.rss_key_len = hash_key_len; 3754 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3755 } 3756 if (diag == 0) 3757 return; 3758 3759 switch (diag) { 3760 case -ENODEV: 3761 fprintf(stderr, "port index %d invalid\n", port_id); 3762 break; 3763 case -ENOTSUP: 3764 fprintf(stderr, "operation not supported by device\n"); 3765 break; 3766 default: 3767 fprintf(stderr, "operation failed - diag=%d\n", diag); 3768 break; 3769 } 3770 } 3771 3772 /* 3773 * Check whether a shared rxq scheduled on other lcores. 3774 */ 3775 static bool 3776 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 3777 portid_t src_port, queueid_t src_rxq, 3778 uint32_t share_group, queueid_t share_rxq) 3779 { 3780 streamid_t sm_id; 3781 streamid_t nb_fs_per_lcore; 3782 lcoreid_t nb_fc; 3783 lcoreid_t lc_id; 3784 struct fwd_stream *fs; 3785 struct rte_port *port; 3786 struct rte_eth_dev_info *dev_info; 3787 struct rte_eth_rxconf *rxq_conf; 3788 3789 nb_fc = cur_fwd_config.nb_fwd_lcores; 3790 /* Check remaining cores. */ 3791 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 3792 sm_id = fwd_lcores[lc_id]->stream_idx; 3793 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3794 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3795 sm_id++) { 3796 fs = fwd_streams[sm_id]; 3797 port = &ports[fs->rx_port]; 3798 dev_info = &port->dev_info; 3799 rxq_conf = &port->rxq[fs->rx_queue].conf; 3800 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3801 == 0 || rxq_conf->share_group == 0) 3802 /* Not shared rxq. */ 3803 continue; 3804 if (domain_id != port->dev_info.switch_info.domain_id) 3805 continue; 3806 if (rxq_conf->share_group != share_group) 3807 continue; 3808 if (rxq_conf->share_qid != share_rxq) 3809 continue; 3810 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 3811 share_group, share_rxq); 3812 printf(" lcore %hhu Port %hu queue %hu\n", 3813 src_lc, src_port, src_rxq); 3814 printf(" lcore %hhu Port %hu queue %hu\n", 3815 lc_id, fs->rx_port, fs->rx_queue); 3816 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 3817 nb_rxq); 3818 return true; 3819 } 3820 } 3821 return false; 3822 } 3823 3824 /* 3825 * Check shared rxq configuration. 3826 * 3827 * Shared group must not being scheduled on different core. 3828 */ 3829 bool 3830 pkt_fwd_shared_rxq_check(void) 3831 { 3832 streamid_t sm_id; 3833 streamid_t nb_fs_per_lcore; 3834 lcoreid_t nb_fc; 3835 lcoreid_t lc_id; 3836 struct fwd_stream *fs; 3837 uint16_t domain_id; 3838 struct rte_port *port; 3839 struct rte_eth_dev_info *dev_info; 3840 struct rte_eth_rxconf *rxq_conf; 3841 3842 if (rxq_share == 0) 3843 return true; 3844 nb_fc = cur_fwd_config.nb_fwd_lcores; 3845 /* 3846 * Check streams on each core, make sure the same switch domain + 3847 * group + queue doesn't get scheduled on other cores. 3848 */ 3849 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 3850 sm_id = fwd_lcores[lc_id]->stream_idx; 3851 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3852 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3853 sm_id++) { 3854 fs = fwd_streams[sm_id]; 3855 /* Update lcore info stream being scheduled. */ 3856 fs->lcore = fwd_lcores[lc_id]; 3857 port = &ports[fs->rx_port]; 3858 dev_info = &port->dev_info; 3859 rxq_conf = &port->rxq[fs->rx_queue].conf; 3860 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3861 == 0 || rxq_conf->share_group == 0) 3862 /* Not shared rxq. */ 3863 continue; 3864 /* Check shared rxq not scheduled on remaining cores. */ 3865 domain_id = port->dev_info.switch_info.domain_id; 3866 if (fwd_stream_on_other_lcores(domain_id, lc_id, 3867 fs->rx_port, 3868 fs->rx_queue, 3869 rxq_conf->share_group, 3870 rxq_conf->share_qid)) 3871 return false; 3872 } 3873 } 3874 return true; 3875 } 3876 3877 /* 3878 * Setup forwarding configuration for each logical core. 3879 */ 3880 static void 3881 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 3882 { 3883 streamid_t nb_fs_per_lcore; 3884 streamid_t nb_fs; 3885 streamid_t sm_id; 3886 lcoreid_t nb_extra; 3887 lcoreid_t nb_fc; 3888 lcoreid_t nb_lc; 3889 lcoreid_t lc_id; 3890 3891 nb_fs = cfg->nb_fwd_streams; 3892 nb_fc = cfg->nb_fwd_lcores; 3893 if (nb_fs <= nb_fc) { 3894 nb_fs_per_lcore = 1; 3895 nb_extra = 0; 3896 } else { 3897 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3898 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3899 } 3900 3901 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3902 sm_id = 0; 3903 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3904 fwd_lcores[lc_id]->stream_idx = sm_id; 3905 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3906 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3907 } 3908 3909 /* 3910 * Assign extra remaining streams, if any. 3911 */ 3912 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3913 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3914 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3915 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3916 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3917 } 3918 } 3919 3920 static portid_t 3921 fwd_topology_tx_port_get(portid_t rxp) 3922 { 3923 static int warning_once = 1; 3924 3925 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 3926 3927 switch (port_topology) { 3928 default: 3929 case PORT_TOPOLOGY_PAIRED: 3930 if ((rxp & 0x1) == 0) { 3931 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 3932 return rxp + 1; 3933 if (warning_once) { 3934 fprintf(stderr, 3935 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 3936 warning_once = 0; 3937 } 3938 return rxp; 3939 } 3940 return rxp - 1; 3941 case PORT_TOPOLOGY_CHAINED: 3942 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 3943 case PORT_TOPOLOGY_LOOP: 3944 return rxp; 3945 } 3946 } 3947 3948 static void 3949 simple_fwd_config_setup(void) 3950 { 3951 portid_t i; 3952 3953 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 3954 cur_fwd_config.nb_fwd_streams = 3955 (streamid_t) cur_fwd_config.nb_fwd_ports; 3956 3957 /* reinitialize forwarding streams */ 3958 init_fwd_streams(); 3959 3960 /* 3961 * In the simple forwarding test, the number of forwarding cores 3962 * must be lower or equal to the number of forwarding ports. 3963 */ 3964 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3965 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 3966 cur_fwd_config.nb_fwd_lcores = 3967 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 3968 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3969 3970 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 3971 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 3972 fwd_streams[i]->rx_queue = 0; 3973 fwd_streams[i]->tx_port = 3974 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 3975 fwd_streams[i]->tx_queue = 0; 3976 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 3977 fwd_streams[i]->retry_enabled = retry_enabled; 3978 } 3979 } 3980 3981 /** 3982 * For the RSS forwarding test all streams distributed over lcores. Each stream 3983 * being composed of a RX queue to poll on a RX port for input messages, 3984 * associated with a TX queue of a TX port where to send forwarded packets. 3985 */ 3986 static void 3987 rss_fwd_config_setup(void) 3988 { 3989 portid_t rxp; 3990 portid_t txp; 3991 queueid_t rxq; 3992 queueid_t nb_q; 3993 streamid_t sm_id; 3994 int start; 3995 int end; 3996 3997 nb_q = nb_rxq; 3998 if (nb_q > nb_txq) 3999 nb_q = nb_txq; 4000 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4001 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4002 cur_fwd_config.nb_fwd_streams = 4003 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4004 4005 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4006 cur_fwd_config.nb_fwd_lcores = 4007 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4008 4009 /* reinitialize forwarding streams */ 4010 init_fwd_streams(); 4011 4012 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4013 4014 if (proc_id > 0 && nb_q % num_procs != 0) 4015 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4016 4017 /** 4018 * In multi-process, All queues are allocated to different 4019 * processes based on num_procs and proc_id. For example: 4020 * if supports 4 queues(nb_q), 2 processes(num_procs), 4021 * the 0~1 queue for primary process. 4022 * the 2~3 queue for secondary process. 4023 */ 4024 start = proc_id * nb_q / num_procs; 4025 end = start + nb_q / num_procs; 4026 rxp = 0; 4027 rxq = start; 4028 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4029 struct fwd_stream *fs; 4030 4031 fs = fwd_streams[sm_id]; 4032 txp = fwd_topology_tx_port_get(rxp); 4033 fs->rx_port = fwd_ports_ids[rxp]; 4034 fs->rx_queue = rxq; 4035 fs->tx_port = fwd_ports_ids[txp]; 4036 fs->tx_queue = rxq; 4037 fs->peer_addr = fs->tx_port; 4038 fs->retry_enabled = retry_enabled; 4039 rxp++; 4040 if (rxp < nb_fwd_ports) 4041 continue; 4042 rxp = 0; 4043 rxq++; 4044 if (rxq >= end) 4045 rxq = start; 4046 } 4047 } 4048 4049 static uint16_t 4050 get_fwd_port_total_tc_num(void) 4051 { 4052 struct rte_eth_dcb_info dcb_info; 4053 uint16_t total_tc_num = 0; 4054 unsigned int i; 4055 4056 for (i = 0; i < nb_fwd_ports; i++) { 4057 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4058 total_tc_num += dcb_info.nb_tcs; 4059 } 4060 4061 return total_tc_num; 4062 } 4063 4064 /** 4065 * For the DCB forwarding test, each core is assigned on each traffic class. 4066 * 4067 * Each core is assigned a multi-stream, each stream being composed of 4068 * a RX queue to poll on a RX port for input messages, associated with 4069 * a TX queue of a TX port where to send forwarded packets. All RX and 4070 * TX queues are mapping to the same traffic class. 4071 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4072 * the same core 4073 */ 4074 static void 4075 dcb_fwd_config_setup(void) 4076 { 4077 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4078 portid_t txp, rxp = 0; 4079 queueid_t txq, rxq = 0; 4080 lcoreid_t lc_id; 4081 uint16_t nb_rx_queue, nb_tx_queue; 4082 uint16_t i, j, k, sm_id = 0; 4083 uint16_t total_tc_num; 4084 struct rte_port *port; 4085 uint8_t tc = 0; 4086 portid_t pid; 4087 int ret; 4088 4089 /* 4090 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4091 * or RTE_PORT_STOPPED. 4092 * 4093 * Re-configure ports to get updated mapping between tc and queue in 4094 * case the queue number of the port is changed. Skip for started ports 4095 * since modifying queue number and calling dev_configure need to stop 4096 * ports first. 4097 */ 4098 for (pid = 0; pid < nb_fwd_ports; pid++) { 4099 if (port_is_started(pid) == 1) 4100 continue; 4101 4102 port = &ports[pid]; 4103 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4104 &port->dev_conf); 4105 if (ret < 0) { 4106 fprintf(stderr, 4107 "Failed to re-configure port %d, ret = %d.\n", 4108 pid, ret); 4109 return; 4110 } 4111 } 4112 4113 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4114 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4115 cur_fwd_config.nb_fwd_streams = 4116 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4117 total_tc_num = get_fwd_port_total_tc_num(); 4118 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4119 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4120 4121 /* reinitialize forwarding streams */ 4122 init_fwd_streams(); 4123 sm_id = 0; 4124 txp = 1; 4125 /* get the dcb info on the first RX and TX ports */ 4126 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4127 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4128 4129 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4130 fwd_lcores[lc_id]->stream_nb = 0; 4131 fwd_lcores[lc_id]->stream_idx = sm_id; 4132 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4133 /* if the nb_queue is zero, means this tc is 4134 * not enabled on the POOL 4135 */ 4136 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4137 break; 4138 k = fwd_lcores[lc_id]->stream_nb + 4139 fwd_lcores[lc_id]->stream_idx; 4140 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4141 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4142 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4143 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4144 for (j = 0; j < nb_rx_queue; j++) { 4145 struct fwd_stream *fs; 4146 4147 fs = fwd_streams[k + j]; 4148 fs->rx_port = fwd_ports_ids[rxp]; 4149 fs->rx_queue = rxq + j; 4150 fs->tx_port = fwd_ports_ids[txp]; 4151 fs->tx_queue = txq + j % nb_tx_queue; 4152 fs->peer_addr = fs->tx_port; 4153 fs->retry_enabled = retry_enabled; 4154 } 4155 fwd_lcores[lc_id]->stream_nb += 4156 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4157 } 4158 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4159 4160 tc++; 4161 if (tc < rxp_dcb_info.nb_tcs) 4162 continue; 4163 /* Restart from TC 0 on next RX port */ 4164 tc = 0; 4165 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4166 rxp = (portid_t) 4167 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4168 else 4169 rxp++; 4170 if (rxp >= nb_fwd_ports) 4171 return; 4172 /* get the dcb information on next RX and TX ports */ 4173 if ((rxp & 0x1) == 0) 4174 txp = (portid_t) (rxp + 1); 4175 else 4176 txp = (portid_t) (rxp - 1); 4177 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4178 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4179 } 4180 } 4181 4182 static void 4183 icmp_echo_config_setup(void) 4184 { 4185 portid_t rxp; 4186 queueid_t rxq; 4187 lcoreid_t lc_id; 4188 uint16_t sm_id; 4189 4190 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4191 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4192 (nb_txq * nb_fwd_ports); 4193 else 4194 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4195 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4196 cur_fwd_config.nb_fwd_streams = 4197 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4198 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4199 cur_fwd_config.nb_fwd_lcores = 4200 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4201 if (verbose_level > 0) { 4202 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4203 __FUNCTION__, 4204 cur_fwd_config.nb_fwd_lcores, 4205 cur_fwd_config.nb_fwd_ports, 4206 cur_fwd_config.nb_fwd_streams); 4207 } 4208 4209 /* reinitialize forwarding streams */ 4210 init_fwd_streams(); 4211 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4212 rxp = 0; rxq = 0; 4213 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4214 if (verbose_level > 0) 4215 printf(" core=%d: \n", lc_id); 4216 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4217 struct fwd_stream *fs; 4218 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4219 fs->rx_port = fwd_ports_ids[rxp]; 4220 fs->rx_queue = rxq; 4221 fs->tx_port = fs->rx_port; 4222 fs->tx_queue = rxq; 4223 fs->peer_addr = fs->tx_port; 4224 fs->retry_enabled = retry_enabled; 4225 if (verbose_level > 0) 4226 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4227 sm_id, fs->rx_port, fs->rx_queue, 4228 fs->tx_queue); 4229 rxq = (queueid_t) (rxq + 1); 4230 if (rxq == nb_rxq) { 4231 rxq = 0; 4232 rxp = (portid_t) (rxp + 1); 4233 } 4234 } 4235 } 4236 } 4237 4238 void 4239 fwd_config_setup(void) 4240 { 4241 struct rte_port *port; 4242 portid_t pt_id; 4243 unsigned int i; 4244 4245 cur_fwd_config.fwd_eng = cur_fwd_eng; 4246 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4247 icmp_echo_config_setup(); 4248 return; 4249 } 4250 4251 if ((nb_rxq > 1) && (nb_txq > 1)){ 4252 if (dcb_config) { 4253 for (i = 0; i < nb_fwd_ports; i++) { 4254 pt_id = fwd_ports_ids[i]; 4255 port = &ports[pt_id]; 4256 if (!port->dcb_flag) { 4257 fprintf(stderr, 4258 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4259 return; 4260 } 4261 } 4262 if (nb_fwd_lcores == 1) { 4263 fprintf(stderr, 4264 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4265 return; 4266 } 4267 4268 dcb_fwd_config_setup(); 4269 } else 4270 rss_fwd_config_setup(); 4271 } 4272 else 4273 simple_fwd_config_setup(); 4274 } 4275 4276 static const char * 4277 mp_alloc_to_str(uint8_t mode) 4278 { 4279 switch (mode) { 4280 case MP_ALLOC_NATIVE: 4281 return "native"; 4282 case MP_ALLOC_ANON: 4283 return "anon"; 4284 case MP_ALLOC_XMEM: 4285 return "xmem"; 4286 case MP_ALLOC_XMEM_HUGE: 4287 return "xmemhuge"; 4288 case MP_ALLOC_XBUF: 4289 return "xbuf"; 4290 default: 4291 return "invalid"; 4292 } 4293 } 4294 4295 void 4296 pkt_fwd_config_display(struct fwd_config *cfg) 4297 { 4298 struct fwd_stream *fs; 4299 lcoreid_t lc_id; 4300 streamid_t sm_id; 4301 4302 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 4303 "NUMA support %s, MP allocation mode: %s\n", 4304 cfg->fwd_eng->fwd_mode_name, 4305 retry_enabled == 0 ? "" : " with retry", 4306 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 4307 numa_support == 1 ? "enabled" : "disabled", 4308 mp_alloc_to_str(mp_alloc_type)); 4309 4310 if (retry_enabled) 4311 printf("TX retry num: %u, delay between TX retries: %uus\n", 4312 burst_tx_retry_num, burst_tx_delay_time); 4313 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 4314 printf("Logical Core %u (socket %u) forwards packets on " 4315 "%d streams:", 4316 fwd_lcores_cpuids[lc_id], 4317 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 4318 fwd_lcores[lc_id]->stream_nb); 4319 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4320 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4321 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 4322 "P=%d/Q=%d (socket %u) ", 4323 fs->rx_port, fs->rx_queue, 4324 ports[fs->rx_port].socket_id, 4325 fs->tx_port, fs->tx_queue, 4326 ports[fs->tx_port].socket_id); 4327 print_ethaddr("peer=", 4328 &peer_eth_addrs[fs->peer_addr]); 4329 } 4330 printf("\n"); 4331 } 4332 printf("\n"); 4333 } 4334 4335 void 4336 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 4337 { 4338 struct rte_ether_addr new_peer_addr; 4339 if (!rte_eth_dev_is_valid_port(port_id)) { 4340 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 4341 return; 4342 } 4343 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 4344 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 4345 peer_addr); 4346 return; 4347 } 4348 peer_eth_addrs[port_id] = new_peer_addr; 4349 } 4350 4351 int 4352 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 4353 { 4354 unsigned int i; 4355 unsigned int lcore_cpuid; 4356 int record_now; 4357 4358 record_now = 0; 4359 again: 4360 for (i = 0; i < nb_lc; i++) { 4361 lcore_cpuid = lcorelist[i]; 4362 if (! rte_lcore_is_enabled(lcore_cpuid)) { 4363 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 4364 return -1; 4365 } 4366 if (lcore_cpuid == rte_get_main_lcore()) { 4367 fprintf(stderr, 4368 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 4369 lcore_cpuid); 4370 return -1; 4371 } 4372 if (record_now) 4373 fwd_lcores_cpuids[i] = lcore_cpuid; 4374 } 4375 if (record_now == 0) { 4376 record_now = 1; 4377 goto again; 4378 } 4379 nb_cfg_lcores = (lcoreid_t) nb_lc; 4380 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 4381 printf("previous number of forwarding cores %u - changed to " 4382 "number of configured cores %u\n", 4383 (unsigned int) nb_fwd_lcores, nb_lc); 4384 nb_fwd_lcores = (lcoreid_t) nb_lc; 4385 } 4386 4387 return 0; 4388 } 4389 4390 int 4391 set_fwd_lcores_mask(uint64_t lcoremask) 4392 { 4393 unsigned int lcorelist[64]; 4394 unsigned int nb_lc; 4395 unsigned int i; 4396 4397 if (lcoremask == 0) { 4398 fprintf(stderr, "Invalid NULL mask of cores\n"); 4399 return -1; 4400 } 4401 nb_lc = 0; 4402 for (i = 0; i < 64; i++) { 4403 if (! ((uint64_t)(1ULL << i) & lcoremask)) 4404 continue; 4405 lcorelist[nb_lc++] = i; 4406 } 4407 return set_fwd_lcores_list(lcorelist, nb_lc); 4408 } 4409 4410 void 4411 set_fwd_lcores_number(uint16_t nb_lc) 4412 { 4413 if (test_done == 0) { 4414 fprintf(stderr, "Please stop forwarding first\n"); 4415 return; 4416 } 4417 if (nb_lc > nb_cfg_lcores) { 4418 fprintf(stderr, 4419 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 4420 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 4421 return; 4422 } 4423 nb_fwd_lcores = (lcoreid_t) nb_lc; 4424 printf("Number of forwarding cores set to %u\n", 4425 (unsigned int) nb_fwd_lcores); 4426 } 4427 4428 void 4429 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 4430 { 4431 unsigned int i; 4432 portid_t port_id; 4433 int record_now; 4434 4435 record_now = 0; 4436 again: 4437 for (i = 0; i < nb_pt; i++) { 4438 port_id = (portid_t) portlist[i]; 4439 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4440 return; 4441 if (record_now) 4442 fwd_ports_ids[i] = port_id; 4443 } 4444 if (record_now == 0) { 4445 record_now = 1; 4446 goto again; 4447 } 4448 nb_cfg_ports = (portid_t) nb_pt; 4449 if (nb_fwd_ports != (portid_t) nb_pt) { 4450 printf("previous number of forwarding ports %u - changed to " 4451 "number of configured ports %u\n", 4452 (unsigned int) nb_fwd_ports, nb_pt); 4453 nb_fwd_ports = (portid_t) nb_pt; 4454 } 4455 } 4456 4457 /** 4458 * Parse the user input and obtain the list of forwarding ports 4459 * 4460 * @param[in] list 4461 * String containing the user input. User can specify 4462 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 4463 * For example, if the user wants to use all the available 4464 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 4465 * If the user wants to use only the ports 1,2 then the input 4466 * is 1,2. 4467 * valid characters are '-' and ',' 4468 * @param[out] values 4469 * This array will be filled with a list of port IDs 4470 * based on the user input 4471 * Note that duplicate entries are discarded and only the first 4472 * count entries in this array are port IDs and all the rest 4473 * will contain default values 4474 * @param[in] maxsize 4475 * This parameter denotes 2 things 4476 * 1) Number of elements in the values array 4477 * 2) Maximum value of each element in the values array 4478 * @return 4479 * On success, returns total count of parsed port IDs 4480 * On failure, returns 0 4481 */ 4482 static unsigned int 4483 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 4484 { 4485 unsigned int count = 0; 4486 char *end = NULL; 4487 int min, max; 4488 int value, i; 4489 unsigned int marked[maxsize]; 4490 4491 if (list == NULL || values == NULL) 4492 return 0; 4493 4494 for (i = 0; i < (int)maxsize; i++) 4495 marked[i] = 0; 4496 4497 min = INT_MAX; 4498 4499 do { 4500 /*Remove the blank spaces if any*/ 4501 while (isblank(*list)) 4502 list++; 4503 if (*list == '\0') 4504 break; 4505 errno = 0; 4506 value = strtol(list, &end, 10); 4507 if (errno || end == NULL) 4508 return 0; 4509 if (value < 0 || value >= (int)maxsize) 4510 return 0; 4511 while (isblank(*end)) 4512 end++; 4513 if (*end == '-' && min == INT_MAX) { 4514 min = value; 4515 } else if ((*end == ',') || (*end == '\0')) { 4516 max = value; 4517 if (min == INT_MAX) 4518 min = value; 4519 for (i = min; i <= max; i++) { 4520 if (count < maxsize) { 4521 if (marked[i]) 4522 continue; 4523 values[count] = i; 4524 marked[i] = 1; 4525 count++; 4526 } 4527 } 4528 min = INT_MAX; 4529 } else 4530 return 0; 4531 list = end + 1; 4532 } while (*end != '\0'); 4533 4534 return count; 4535 } 4536 4537 void 4538 parse_fwd_portlist(const char *portlist) 4539 { 4540 unsigned int portcount; 4541 unsigned int portindex[RTE_MAX_ETHPORTS]; 4542 unsigned int i, valid_port_count = 0; 4543 4544 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 4545 if (!portcount) 4546 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 4547 4548 /* 4549 * Here we verify the validity of the ports 4550 * and thereby calculate the total number of 4551 * valid ports 4552 */ 4553 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 4554 if (rte_eth_dev_is_valid_port(portindex[i])) { 4555 portindex[valid_port_count] = portindex[i]; 4556 valid_port_count++; 4557 } 4558 } 4559 4560 set_fwd_ports_list(portindex, valid_port_count); 4561 } 4562 4563 void 4564 set_fwd_ports_mask(uint64_t portmask) 4565 { 4566 unsigned int portlist[64]; 4567 unsigned int nb_pt; 4568 unsigned int i; 4569 4570 if (portmask == 0) { 4571 fprintf(stderr, "Invalid NULL mask of ports\n"); 4572 return; 4573 } 4574 nb_pt = 0; 4575 RTE_ETH_FOREACH_DEV(i) { 4576 if (! ((uint64_t)(1ULL << i) & portmask)) 4577 continue; 4578 portlist[nb_pt++] = i; 4579 } 4580 set_fwd_ports_list(portlist, nb_pt); 4581 } 4582 4583 void 4584 set_fwd_ports_number(uint16_t nb_pt) 4585 { 4586 if (nb_pt > nb_cfg_ports) { 4587 fprintf(stderr, 4588 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 4589 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 4590 return; 4591 } 4592 nb_fwd_ports = (portid_t) nb_pt; 4593 printf("Number of forwarding ports set to %u\n", 4594 (unsigned int) nb_fwd_ports); 4595 } 4596 4597 int 4598 port_is_forwarding(portid_t port_id) 4599 { 4600 unsigned int i; 4601 4602 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4603 return -1; 4604 4605 for (i = 0; i < nb_fwd_ports; i++) { 4606 if (fwd_ports_ids[i] == port_id) 4607 return 1; 4608 } 4609 4610 return 0; 4611 } 4612 4613 void 4614 set_nb_pkt_per_burst(uint16_t nb) 4615 { 4616 if (nb > MAX_PKT_BURST) { 4617 fprintf(stderr, 4618 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 4619 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 4620 return; 4621 } 4622 nb_pkt_per_burst = nb; 4623 printf("Number of packets per burst set to %u\n", 4624 (unsigned int) nb_pkt_per_burst); 4625 } 4626 4627 static const char * 4628 tx_split_get_name(enum tx_pkt_split split) 4629 { 4630 uint32_t i; 4631 4632 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4633 if (tx_split_name[i].split == split) 4634 return tx_split_name[i].name; 4635 } 4636 return NULL; 4637 } 4638 4639 void 4640 set_tx_pkt_split(const char *name) 4641 { 4642 uint32_t i; 4643 4644 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4645 if (strcmp(tx_split_name[i].name, name) == 0) { 4646 tx_pkt_split = tx_split_name[i].split; 4647 return; 4648 } 4649 } 4650 fprintf(stderr, "unknown value: \"%s\"\n", name); 4651 } 4652 4653 int 4654 parse_fec_mode(const char *name, uint32_t *fec_capa) 4655 { 4656 uint8_t i; 4657 4658 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 4659 if (strcmp(fec_mode_name[i].name, name) == 0) { 4660 *fec_capa = 4661 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 4662 return 0; 4663 } 4664 } 4665 return -1; 4666 } 4667 4668 void 4669 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 4670 { 4671 unsigned int i, j; 4672 4673 printf("FEC capabilities:\n"); 4674 4675 for (i = 0; i < num; i++) { 4676 printf("%s : ", 4677 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 4678 4679 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 4680 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 4681 speed_fec_capa[i].capa) 4682 printf("%s ", fec_mode_name[j].name); 4683 } 4684 printf("\n"); 4685 } 4686 } 4687 4688 void 4689 show_rx_pkt_offsets(void) 4690 { 4691 uint32_t i, n; 4692 4693 n = rx_pkt_nb_offs; 4694 printf("Number of offsets: %u\n", n); 4695 if (n) { 4696 printf("Segment offsets: "); 4697 for (i = 0; i != n - 1; i++) 4698 printf("%hu,", rx_pkt_seg_offsets[i]); 4699 printf("%hu\n", rx_pkt_seg_lengths[i]); 4700 } 4701 } 4702 4703 void 4704 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 4705 { 4706 unsigned int i; 4707 4708 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 4709 printf("nb segments per RX packets=%u >= " 4710 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 4711 return; 4712 } 4713 4714 /* 4715 * No extra check here, the segment length will be checked by PMD 4716 * in the extended queue setup. 4717 */ 4718 for (i = 0; i < nb_offs; i++) { 4719 if (seg_offsets[i] >= UINT16_MAX) { 4720 printf("offset[%u]=%u > UINT16_MAX - give up\n", 4721 i, seg_offsets[i]); 4722 return; 4723 } 4724 } 4725 4726 for (i = 0; i < nb_offs; i++) 4727 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 4728 4729 rx_pkt_nb_offs = (uint8_t) nb_offs; 4730 } 4731 4732 void 4733 show_rx_pkt_segments(void) 4734 { 4735 uint32_t i, n; 4736 4737 n = rx_pkt_nb_segs; 4738 printf("Number of segments: %u\n", n); 4739 if (n) { 4740 printf("Segment sizes: "); 4741 for (i = 0; i != n - 1; i++) 4742 printf("%hu,", rx_pkt_seg_lengths[i]); 4743 printf("%hu\n", rx_pkt_seg_lengths[i]); 4744 } 4745 } 4746 4747 void 4748 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4749 { 4750 unsigned int i; 4751 4752 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 4753 printf("nb segments per RX packets=%u >= " 4754 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 4755 return; 4756 } 4757 4758 /* 4759 * No extra check here, the segment length will be checked by PMD 4760 * in the extended queue setup. 4761 */ 4762 for (i = 0; i < nb_segs; i++) { 4763 if (seg_lengths[i] >= UINT16_MAX) { 4764 printf("length[%u]=%u > UINT16_MAX - give up\n", 4765 i, seg_lengths[i]); 4766 return; 4767 } 4768 } 4769 4770 for (i = 0; i < nb_segs; i++) 4771 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4772 4773 rx_pkt_nb_segs = (uint8_t) nb_segs; 4774 } 4775 4776 void 4777 show_tx_pkt_segments(void) 4778 { 4779 uint32_t i, n; 4780 const char *split; 4781 4782 n = tx_pkt_nb_segs; 4783 split = tx_split_get_name(tx_pkt_split); 4784 4785 printf("Number of segments: %u\n", n); 4786 printf("Segment sizes: "); 4787 for (i = 0; i != n - 1; i++) 4788 printf("%hu,", tx_pkt_seg_lengths[i]); 4789 printf("%hu\n", tx_pkt_seg_lengths[i]); 4790 printf("Split packet: %s\n", split); 4791 } 4792 4793 static bool 4794 nb_segs_is_invalid(unsigned int nb_segs) 4795 { 4796 uint16_t ring_size; 4797 uint16_t queue_id; 4798 uint16_t port_id; 4799 int ret; 4800 4801 RTE_ETH_FOREACH_DEV(port_id) { 4802 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 4803 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 4804 if (ret) { 4805 /* Port may not be initialized yet, can't say 4806 * the port is invalid in this stage. 4807 */ 4808 continue; 4809 } 4810 if (ring_size < nb_segs) { 4811 printf("nb segments per TX packets=%u >= TX " 4812 "queue(%u) ring_size=%u - txpkts ignored\n", 4813 nb_segs, queue_id, ring_size); 4814 return true; 4815 } 4816 } 4817 } 4818 4819 return false; 4820 } 4821 4822 void 4823 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4824 { 4825 uint16_t tx_pkt_len; 4826 unsigned int i; 4827 4828 /* 4829 * For single segment settings failed check is ignored. 4830 * It is a very basic capability to send the single segment 4831 * packets, suppose it is always supported. 4832 */ 4833 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 4834 fprintf(stderr, 4835 "Tx segment size(%u) is not supported - txpkts ignored\n", 4836 nb_segs); 4837 return; 4838 } 4839 4840 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 4841 fprintf(stderr, 4842 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 4843 nb_segs, RTE_MAX_SEGS_PER_PKT); 4844 return; 4845 } 4846 4847 /* 4848 * Check that each segment length is greater or equal than 4849 * the mbuf data size. 4850 * Check also that the total packet length is greater or equal than the 4851 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 4852 * 20 + 8). 4853 */ 4854 tx_pkt_len = 0; 4855 for (i = 0; i < nb_segs; i++) { 4856 if (seg_lengths[i] > mbuf_data_size[0]) { 4857 fprintf(stderr, 4858 "length[%u]=%u > mbuf_data_size=%u - give up\n", 4859 i, seg_lengths[i], mbuf_data_size[0]); 4860 return; 4861 } 4862 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 4863 } 4864 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 4865 fprintf(stderr, "total packet length=%u < %d - give up\n", 4866 (unsigned) tx_pkt_len, 4867 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 4868 return; 4869 } 4870 4871 for (i = 0; i < nb_segs; i++) 4872 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4873 4874 tx_pkt_length = tx_pkt_len; 4875 tx_pkt_nb_segs = (uint8_t) nb_segs; 4876 } 4877 4878 void 4879 show_tx_pkt_times(void) 4880 { 4881 printf("Interburst gap: %u\n", tx_pkt_times_inter); 4882 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 4883 } 4884 4885 void 4886 set_tx_pkt_times(unsigned int *tx_times) 4887 { 4888 tx_pkt_times_inter = tx_times[0]; 4889 tx_pkt_times_intra = tx_times[1]; 4890 } 4891 4892 #ifdef RTE_LIB_GRO 4893 void 4894 setup_gro(const char *onoff, portid_t port_id) 4895 { 4896 if (!rte_eth_dev_is_valid_port(port_id)) { 4897 fprintf(stderr, "invalid port id %u\n", port_id); 4898 return; 4899 } 4900 if (test_done == 0) { 4901 fprintf(stderr, 4902 "Before enable/disable GRO, please stop forwarding first\n"); 4903 return; 4904 } 4905 if (strcmp(onoff, "on") == 0) { 4906 if (gro_ports[port_id].enable != 0) { 4907 fprintf(stderr, 4908 "Port %u has enabled GRO. Please disable GRO first\n", 4909 port_id); 4910 return; 4911 } 4912 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4913 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 4914 gro_ports[port_id].param.max_flow_num = 4915 GRO_DEFAULT_FLOW_NUM; 4916 gro_ports[port_id].param.max_item_per_flow = 4917 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 4918 } 4919 gro_ports[port_id].enable = 1; 4920 } else { 4921 if (gro_ports[port_id].enable == 0) { 4922 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 4923 return; 4924 } 4925 gro_ports[port_id].enable = 0; 4926 } 4927 } 4928 4929 void 4930 setup_gro_flush_cycles(uint8_t cycles) 4931 { 4932 if (test_done == 0) { 4933 fprintf(stderr, 4934 "Before change flush interval for GRO, please stop forwarding first.\n"); 4935 return; 4936 } 4937 4938 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 4939 GRO_DEFAULT_FLUSH_CYCLES) { 4940 fprintf(stderr, 4941 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 4942 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 4943 cycles = GRO_DEFAULT_FLUSH_CYCLES; 4944 } 4945 4946 gro_flush_cycles = cycles; 4947 } 4948 4949 void 4950 show_gro(portid_t port_id) 4951 { 4952 struct rte_gro_param *param; 4953 uint32_t max_pkts_num; 4954 4955 param = &gro_ports[port_id].param; 4956 4957 if (!rte_eth_dev_is_valid_port(port_id)) { 4958 fprintf(stderr, "Invalid port id %u.\n", port_id); 4959 return; 4960 } 4961 if (gro_ports[port_id].enable) { 4962 printf("GRO type: TCP/IPv4\n"); 4963 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4964 max_pkts_num = param->max_flow_num * 4965 param->max_item_per_flow; 4966 } else 4967 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 4968 printf("Max number of packets to perform GRO: %u\n", 4969 max_pkts_num); 4970 printf("Flushing cycles: %u\n", gro_flush_cycles); 4971 } else 4972 printf("Port %u doesn't enable GRO.\n", port_id); 4973 } 4974 #endif /* RTE_LIB_GRO */ 4975 4976 #ifdef RTE_LIB_GSO 4977 void 4978 setup_gso(const char *mode, portid_t port_id) 4979 { 4980 if (!rte_eth_dev_is_valid_port(port_id)) { 4981 fprintf(stderr, "invalid port id %u\n", port_id); 4982 return; 4983 } 4984 if (strcmp(mode, "on") == 0) { 4985 if (test_done == 0) { 4986 fprintf(stderr, 4987 "before enabling GSO, please stop forwarding first\n"); 4988 return; 4989 } 4990 gso_ports[port_id].enable = 1; 4991 } else if (strcmp(mode, "off") == 0) { 4992 if (test_done == 0) { 4993 fprintf(stderr, 4994 "before disabling GSO, please stop forwarding first\n"); 4995 return; 4996 } 4997 gso_ports[port_id].enable = 0; 4998 } 4999 } 5000 #endif /* RTE_LIB_GSO */ 5001 5002 char* 5003 list_pkt_forwarding_modes(void) 5004 { 5005 static char fwd_modes[128] = ""; 5006 const char *separator = "|"; 5007 struct fwd_engine *fwd_eng; 5008 unsigned i = 0; 5009 5010 if (strlen (fwd_modes) == 0) { 5011 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5012 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5013 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5014 strncat(fwd_modes, separator, 5015 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5016 } 5017 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5018 } 5019 5020 return fwd_modes; 5021 } 5022 5023 char* 5024 list_pkt_forwarding_retry_modes(void) 5025 { 5026 static char fwd_modes[128] = ""; 5027 const char *separator = "|"; 5028 struct fwd_engine *fwd_eng; 5029 unsigned i = 0; 5030 5031 if (strlen(fwd_modes) == 0) { 5032 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5033 if (fwd_eng == &rx_only_engine) 5034 continue; 5035 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5036 sizeof(fwd_modes) - 5037 strlen(fwd_modes) - 1); 5038 strncat(fwd_modes, separator, 5039 sizeof(fwd_modes) - 5040 strlen(fwd_modes) - 1); 5041 } 5042 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5043 } 5044 5045 return fwd_modes; 5046 } 5047 5048 void 5049 set_pkt_forwarding_mode(const char *fwd_mode_name) 5050 { 5051 struct fwd_engine *fwd_eng; 5052 unsigned i; 5053 5054 i = 0; 5055 while ((fwd_eng = fwd_engines[i]) != NULL) { 5056 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5057 printf("Set %s packet forwarding mode%s\n", 5058 fwd_mode_name, 5059 retry_enabled == 0 ? "" : " with retry"); 5060 cur_fwd_eng = fwd_eng; 5061 return; 5062 } 5063 i++; 5064 } 5065 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5066 } 5067 5068 void 5069 add_rx_dump_callbacks(portid_t portid) 5070 { 5071 struct rte_eth_dev_info dev_info; 5072 uint16_t queue; 5073 int ret; 5074 5075 if (port_id_is_invalid(portid, ENABLED_WARN)) 5076 return; 5077 5078 ret = eth_dev_info_get_print_err(portid, &dev_info); 5079 if (ret != 0) 5080 return; 5081 5082 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5083 if (!ports[portid].rx_dump_cb[queue]) 5084 ports[portid].rx_dump_cb[queue] = 5085 rte_eth_add_rx_callback(portid, queue, 5086 dump_rx_pkts, NULL); 5087 } 5088 5089 void 5090 add_tx_dump_callbacks(portid_t portid) 5091 { 5092 struct rte_eth_dev_info dev_info; 5093 uint16_t queue; 5094 int ret; 5095 5096 if (port_id_is_invalid(portid, ENABLED_WARN)) 5097 return; 5098 5099 ret = eth_dev_info_get_print_err(portid, &dev_info); 5100 if (ret != 0) 5101 return; 5102 5103 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5104 if (!ports[portid].tx_dump_cb[queue]) 5105 ports[portid].tx_dump_cb[queue] = 5106 rte_eth_add_tx_callback(portid, queue, 5107 dump_tx_pkts, NULL); 5108 } 5109 5110 void 5111 remove_rx_dump_callbacks(portid_t portid) 5112 { 5113 struct rte_eth_dev_info dev_info; 5114 uint16_t queue; 5115 int ret; 5116 5117 if (port_id_is_invalid(portid, ENABLED_WARN)) 5118 return; 5119 5120 ret = eth_dev_info_get_print_err(portid, &dev_info); 5121 if (ret != 0) 5122 return; 5123 5124 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5125 if (ports[portid].rx_dump_cb[queue]) { 5126 rte_eth_remove_rx_callback(portid, queue, 5127 ports[portid].rx_dump_cb[queue]); 5128 ports[portid].rx_dump_cb[queue] = NULL; 5129 } 5130 } 5131 5132 void 5133 remove_tx_dump_callbacks(portid_t portid) 5134 { 5135 struct rte_eth_dev_info dev_info; 5136 uint16_t queue; 5137 int ret; 5138 5139 if (port_id_is_invalid(portid, ENABLED_WARN)) 5140 return; 5141 5142 ret = eth_dev_info_get_print_err(portid, &dev_info); 5143 if (ret != 0) 5144 return; 5145 5146 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5147 if (ports[portid].tx_dump_cb[queue]) { 5148 rte_eth_remove_tx_callback(portid, queue, 5149 ports[portid].tx_dump_cb[queue]); 5150 ports[portid].tx_dump_cb[queue] = NULL; 5151 } 5152 } 5153 5154 void 5155 configure_rxtx_dump_callbacks(uint16_t verbose) 5156 { 5157 portid_t portid; 5158 5159 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5160 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 5161 return; 5162 #endif 5163 5164 RTE_ETH_FOREACH_DEV(portid) 5165 { 5166 if (verbose == 1 || verbose > 2) 5167 add_rx_dump_callbacks(portid); 5168 else 5169 remove_rx_dump_callbacks(portid); 5170 if (verbose >= 2) 5171 add_tx_dump_callbacks(portid); 5172 else 5173 remove_tx_dump_callbacks(portid); 5174 } 5175 } 5176 5177 void 5178 set_verbose_level(uint16_t vb_level) 5179 { 5180 printf("Change verbose level from %u to %u\n", 5181 (unsigned int) verbose_level, (unsigned int) vb_level); 5182 verbose_level = vb_level; 5183 configure_rxtx_dump_callbacks(verbose_level); 5184 } 5185 5186 void 5187 vlan_extend_set(portid_t port_id, int on) 5188 { 5189 int diag; 5190 int vlan_offload; 5191 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5192 5193 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5194 return; 5195 5196 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5197 5198 if (on) { 5199 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 5200 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5201 } else { 5202 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 5203 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5204 } 5205 5206 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5207 if (diag < 0) { 5208 fprintf(stderr, 5209 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 5210 port_id, on, diag); 5211 return; 5212 } 5213 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5214 } 5215 5216 void 5217 rx_vlan_strip_set(portid_t port_id, int on) 5218 { 5219 int diag; 5220 int vlan_offload; 5221 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5222 5223 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5224 return; 5225 5226 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5227 5228 if (on) { 5229 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 5230 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5231 } else { 5232 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 5233 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5234 } 5235 5236 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5237 if (diag < 0) { 5238 fprintf(stderr, 5239 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5240 __func__, port_id, on, diag); 5241 return; 5242 } 5243 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5244 } 5245 5246 void 5247 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 5248 { 5249 int diag; 5250 5251 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5252 return; 5253 5254 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 5255 if (diag < 0) 5256 fprintf(stderr, 5257 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 5258 __func__, port_id, queue_id, on, diag); 5259 } 5260 5261 void 5262 rx_vlan_filter_set(portid_t port_id, int on) 5263 { 5264 int diag; 5265 int vlan_offload; 5266 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5267 5268 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5269 return; 5270 5271 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5272 5273 if (on) { 5274 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 5275 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5276 } else { 5277 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 5278 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5279 } 5280 5281 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5282 if (diag < 0) { 5283 fprintf(stderr, 5284 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5285 __func__, port_id, on, diag); 5286 return; 5287 } 5288 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5289 } 5290 5291 void 5292 rx_vlan_qinq_strip_set(portid_t port_id, int on) 5293 { 5294 int diag; 5295 int vlan_offload; 5296 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5297 5298 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5299 return; 5300 5301 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5302 5303 if (on) { 5304 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 5305 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5306 } else { 5307 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 5308 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5309 } 5310 5311 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5312 if (diag < 0) { 5313 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 5314 __func__, port_id, on, diag); 5315 return; 5316 } 5317 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5318 } 5319 5320 int 5321 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 5322 { 5323 int diag; 5324 5325 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5326 return 1; 5327 if (vlan_id_is_invalid(vlan_id)) 5328 return 1; 5329 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 5330 if (diag == 0) 5331 return 0; 5332 fprintf(stderr, 5333 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 5334 port_id, vlan_id, on, diag); 5335 return -1; 5336 } 5337 5338 void 5339 rx_vlan_all_filter_set(portid_t port_id, int on) 5340 { 5341 uint16_t vlan_id; 5342 5343 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5344 return; 5345 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 5346 if (rx_vft_set(port_id, vlan_id, on)) 5347 break; 5348 } 5349 } 5350 5351 void 5352 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 5353 { 5354 int diag; 5355 5356 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5357 return; 5358 5359 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 5360 if (diag == 0) 5361 return; 5362 5363 fprintf(stderr, 5364 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 5365 port_id, vlan_type, tp_id, diag); 5366 } 5367 5368 void 5369 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 5370 { 5371 struct rte_eth_dev_info dev_info; 5372 int ret; 5373 5374 if (vlan_id_is_invalid(vlan_id)) 5375 return; 5376 5377 if (ports[port_id].dev_conf.txmode.offloads & 5378 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 5379 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 5380 return; 5381 } 5382 5383 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5384 if (ret != 0) 5385 return; 5386 5387 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 5388 fprintf(stderr, 5389 "Error: vlan insert is not supported by port %d\n", 5390 port_id); 5391 return; 5392 } 5393 5394 tx_vlan_reset(port_id); 5395 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 5396 ports[port_id].tx_vlan_id = vlan_id; 5397 } 5398 5399 void 5400 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 5401 { 5402 struct rte_eth_dev_info dev_info; 5403 int ret; 5404 5405 if (vlan_id_is_invalid(vlan_id)) 5406 return; 5407 if (vlan_id_is_invalid(vlan_id_outer)) 5408 return; 5409 5410 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5411 if (ret != 0) 5412 return; 5413 5414 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 5415 fprintf(stderr, 5416 "Error: qinq insert not supported by port %d\n", 5417 port_id); 5418 return; 5419 } 5420 5421 tx_vlan_reset(port_id); 5422 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5423 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5424 ports[port_id].tx_vlan_id = vlan_id; 5425 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 5426 } 5427 5428 void 5429 tx_vlan_reset(portid_t port_id) 5430 { 5431 ports[port_id].dev_conf.txmode.offloads &= 5432 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5433 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5434 ports[port_id].tx_vlan_id = 0; 5435 ports[port_id].tx_vlan_id_outer = 0; 5436 } 5437 5438 void 5439 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 5440 { 5441 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5442 return; 5443 5444 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 5445 } 5446 5447 void 5448 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 5449 { 5450 int ret; 5451 5452 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5453 return; 5454 5455 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 5456 return; 5457 5458 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 5459 fprintf(stderr, "map_value not in required range 0..%d\n", 5460 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 5461 return; 5462 } 5463 5464 if (!is_rx) { /* tx */ 5465 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 5466 map_value); 5467 if (ret) { 5468 fprintf(stderr, 5469 "failed to set tx queue stats mapping.\n"); 5470 return; 5471 } 5472 } else { /* rx */ 5473 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 5474 map_value); 5475 if (ret) { 5476 fprintf(stderr, 5477 "failed to set rx queue stats mapping.\n"); 5478 return; 5479 } 5480 } 5481 } 5482 5483 void 5484 set_xstats_hide_zero(uint8_t on_off) 5485 { 5486 xstats_hide_zero = on_off; 5487 } 5488 5489 void 5490 set_record_core_cycles(uint8_t on_off) 5491 { 5492 record_core_cycles = on_off; 5493 } 5494 5495 void 5496 set_record_burst_stats(uint8_t on_off) 5497 { 5498 record_burst_stats = on_off; 5499 } 5500 5501 uint16_t 5502 str_to_flowtype(const char *string) 5503 { 5504 uint8_t i; 5505 5506 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 5507 if (!strcmp(flowtype_str_table[i].str, string)) 5508 return flowtype_str_table[i].ftype; 5509 } 5510 5511 if (isdigit(string[0])) { 5512 int val = atoi(string); 5513 if (val > 0 && val < 64) 5514 return (uint16_t)val; 5515 } 5516 5517 return RTE_ETH_FLOW_UNKNOWN; 5518 } 5519 5520 const char* 5521 flowtype_to_str(uint16_t flow_type) 5522 { 5523 uint8_t i; 5524 5525 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 5526 if (flowtype_str_table[i].ftype == flow_type) 5527 return flowtype_str_table[i].str; 5528 } 5529 5530 return NULL; 5531 } 5532 5533 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 5534 5535 static inline void 5536 print_fdir_mask(struct rte_eth_fdir_masks *mask) 5537 { 5538 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 5539 5540 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 5541 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 5542 " tunnel_id: 0x%08x", 5543 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 5544 rte_be_to_cpu_32(mask->tunnel_id_mask)); 5545 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5546 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 5547 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 5548 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 5549 5550 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 5551 rte_be_to_cpu_16(mask->src_port_mask), 5552 rte_be_to_cpu_16(mask->dst_port_mask)); 5553 5554 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 5555 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 5556 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 5557 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 5558 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 5559 5560 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 5561 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 5562 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 5563 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 5564 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 5565 } 5566 5567 printf("\n"); 5568 } 5569 5570 static inline void 5571 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5572 { 5573 struct rte_eth_flex_payload_cfg *cfg; 5574 uint32_t i, j; 5575 5576 for (i = 0; i < flex_conf->nb_payloads; i++) { 5577 cfg = &flex_conf->flex_set[i]; 5578 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 5579 printf("\n RAW: "); 5580 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 5581 printf("\n L2_PAYLOAD: "); 5582 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 5583 printf("\n L3_PAYLOAD: "); 5584 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 5585 printf("\n L4_PAYLOAD: "); 5586 else 5587 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 5588 for (j = 0; j < num; j++) 5589 printf(" %-5u", cfg->src_offset[j]); 5590 } 5591 printf("\n"); 5592 } 5593 5594 static inline void 5595 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5596 { 5597 struct rte_eth_fdir_flex_mask *mask; 5598 uint32_t i, j; 5599 const char *p; 5600 5601 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 5602 mask = &flex_conf->flex_mask[i]; 5603 p = flowtype_to_str(mask->flow_type); 5604 printf("\n %s:\t", p ? p : "unknown"); 5605 for (j = 0; j < num; j++) 5606 printf(" %02x", mask->mask[j]); 5607 } 5608 printf("\n"); 5609 } 5610 5611 static inline void 5612 print_fdir_flow_type(uint32_t flow_types_mask) 5613 { 5614 int i; 5615 const char *p; 5616 5617 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 5618 if (!(flow_types_mask & (1 << i))) 5619 continue; 5620 p = flowtype_to_str(i); 5621 if (p) 5622 printf(" %s", p); 5623 else 5624 printf(" unknown"); 5625 } 5626 printf("\n"); 5627 } 5628 5629 static int 5630 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 5631 struct rte_eth_fdir_stats *fdir_stat) 5632 { 5633 int ret = -ENOTSUP; 5634 5635 #ifdef RTE_NET_I40E 5636 if (ret == -ENOTSUP) { 5637 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 5638 if (!ret) 5639 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 5640 } 5641 #endif 5642 #ifdef RTE_NET_IXGBE 5643 if (ret == -ENOTSUP) { 5644 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 5645 if (!ret) 5646 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 5647 } 5648 #endif 5649 switch (ret) { 5650 case 0: 5651 break; 5652 case -ENOTSUP: 5653 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 5654 port_id); 5655 break; 5656 default: 5657 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 5658 break; 5659 } 5660 return ret; 5661 } 5662 5663 void 5664 fdir_get_infos(portid_t port_id) 5665 { 5666 struct rte_eth_fdir_stats fdir_stat; 5667 struct rte_eth_fdir_info fdir_info; 5668 5669 static const char *fdir_stats_border = "########################"; 5670 5671 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5672 return; 5673 5674 memset(&fdir_info, 0, sizeof(fdir_info)); 5675 memset(&fdir_stat, 0, sizeof(fdir_stat)); 5676 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 5677 return; 5678 5679 printf("\n %s FDIR infos for port %-2d %s\n", 5680 fdir_stats_border, port_id, fdir_stats_border); 5681 printf(" MODE: "); 5682 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 5683 printf(" PERFECT\n"); 5684 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 5685 printf(" PERFECT-MAC-VLAN\n"); 5686 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 5687 printf(" PERFECT-TUNNEL\n"); 5688 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 5689 printf(" SIGNATURE\n"); 5690 else 5691 printf(" DISABLE\n"); 5692 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 5693 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 5694 printf(" SUPPORTED FLOW TYPE: "); 5695 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 5696 } 5697 printf(" FLEX PAYLOAD INFO:\n"); 5698 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 5699 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 5700 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 5701 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 5702 fdir_info.flex_payload_unit, 5703 fdir_info.max_flex_payload_segment_num, 5704 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 5705 printf(" MASK: "); 5706 print_fdir_mask(&fdir_info.mask); 5707 if (fdir_info.flex_conf.nb_payloads > 0) { 5708 printf(" FLEX PAYLOAD SRC OFFSET:"); 5709 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5710 } 5711 if (fdir_info.flex_conf.nb_flexmasks > 0) { 5712 printf(" FLEX MASK CFG:"); 5713 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5714 } 5715 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 5716 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 5717 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 5718 fdir_info.guarant_spc, fdir_info.best_spc); 5719 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 5720 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 5721 " add: %-10"PRIu64" remove: %"PRIu64"\n" 5722 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 5723 fdir_stat.collision, fdir_stat.free, 5724 fdir_stat.maxhash, fdir_stat.maxlen, 5725 fdir_stat.add, fdir_stat.remove, 5726 fdir_stat.f_add, fdir_stat.f_remove); 5727 printf(" %s############################%s\n", 5728 fdir_stats_border, fdir_stats_border); 5729 } 5730 5731 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 5732 5733 void 5734 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 5735 { 5736 struct rte_port *port; 5737 struct rte_eth_fdir_flex_conf *flex_conf; 5738 int i, idx = 0; 5739 5740 port = &ports[port_id]; 5741 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5742 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 5743 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 5744 idx = i; 5745 break; 5746 } 5747 } 5748 if (i >= RTE_ETH_FLOW_MAX) { 5749 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 5750 idx = flex_conf->nb_flexmasks; 5751 flex_conf->nb_flexmasks++; 5752 } else { 5753 fprintf(stderr, 5754 "The flex mask table is full. Can not set flex mask for flow_type(%u).", 5755 cfg->flow_type); 5756 return; 5757 } 5758 } 5759 rte_memcpy(&flex_conf->flex_mask[idx], 5760 cfg, 5761 sizeof(struct rte_eth_fdir_flex_mask)); 5762 } 5763 5764 void 5765 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 5766 { 5767 struct rte_port *port; 5768 struct rte_eth_fdir_flex_conf *flex_conf; 5769 int i, idx = 0; 5770 5771 port = &ports[port_id]; 5772 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5773 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 5774 if (cfg->type == flex_conf->flex_set[i].type) { 5775 idx = i; 5776 break; 5777 } 5778 } 5779 if (i >= RTE_ETH_PAYLOAD_MAX) { 5780 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 5781 idx = flex_conf->nb_payloads; 5782 flex_conf->nb_payloads++; 5783 } else { 5784 fprintf(stderr, 5785 "The flex payload table is full. Can not set flex payload for type(%u).", 5786 cfg->type); 5787 return; 5788 } 5789 } 5790 rte_memcpy(&flex_conf->flex_set[idx], 5791 cfg, 5792 sizeof(struct rte_eth_flex_payload_cfg)); 5793 5794 } 5795 5796 void 5797 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 5798 { 5799 #ifdef RTE_NET_IXGBE 5800 int diag; 5801 5802 if (is_rx) 5803 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 5804 else 5805 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 5806 5807 if (diag == 0) 5808 return; 5809 fprintf(stderr, 5810 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 5811 is_rx ? "rx" : "tx", port_id, diag); 5812 return; 5813 #endif 5814 fprintf(stderr, "VF %s setting not supported for port %d\n", 5815 is_rx ? "Rx" : "Tx", port_id); 5816 RTE_SET_USED(vf); 5817 RTE_SET_USED(on); 5818 } 5819 5820 int 5821 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 5822 { 5823 int diag; 5824 struct rte_eth_link link; 5825 int ret; 5826 5827 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5828 return 1; 5829 ret = eth_link_get_nowait_print_err(port_id, &link); 5830 if (ret < 0) 5831 return 1; 5832 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 5833 rate > link.link_speed) { 5834 fprintf(stderr, 5835 "Invalid rate value:%u bigger than link speed: %u\n", 5836 rate, link.link_speed); 5837 return 1; 5838 } 5839 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 5840 if (diag == 0) 5841 return diag; 5842 fprintf(stderr, 5843 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 5844 port_id, diag); 5845 return diag; 5846 } 5847 5848 int 5849 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 5850 { 5851 int diag = -ENOTSUP; 5852 5853 RTE_SET_USED(vf); 5854 RTE_SET_USED(rate); 5855 RTE_SET_USED(q_msk); 5856 5857 #ifdef RTE_NET_IXGBE 5858 if (diag == -ENOTSUP) 5859 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 5860 q_msk); 5861 #endif 5862 #ifdef RTE_NET_BNXT 5863 if (diag == -ENOTSUP) 5864 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 5865 #endif 5866 if (diag == 0) 5867 return diag; 5868 5869 fprintf(stderr, 5870 "%s for port_id=%d failed diag=%d\n", 5871 __func__, port_id, diag); 5872 return diag; 5873 } 5874 5875 int 5876 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh) 5877 { 5878 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5879 return -EINVAL; 5880 5881 return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh); 5882 } 5883 5884 /* 5885 * Functions to manage the set of filtered Multicast MAC addresses. 5886 * 5887 * A pool of filtered multicast MAC addresses is associated with each port. 5888 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 5889 * The address of the pool and the number of valid multicast MAC addresses 5890 * recorded in the pool are stored in the fields "mc_addr_pool" and 5891 * "mc_addr_nb" of the "rte_port" data structure. 5892 * 5893 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 5894 * to be supplied a contiguous array of multicast MAC addresses. 5895 * To comply with this constraint, the set of multicast addresses recorded 5896 * into the pool are systematically compacted at the beginning of the pool. 5897 * Hence, when a multicast address is removed from the pool, all following 5898 * addresses, if any, are copied back to keep the set contiguous. 5899 */ 5900 #define MCAST_POOL_INC 32 5901 5902 static int 5903 mcast_addr_pool_extend(struct rte_port *port) 5904 { 5905 struct rte_ether_addr *mc_pool; 5906 size_t mc_pool_size; 5907 5908 /* 5909 * If a free entry is available at the end of the pool, just 5910 * increment the number of recorded multicast addresses. 5911 */ 5912 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5913 port->mc_addr_nb++; 5914 return 0; 5915 } 5916 5917 /* 5918 * [re]allocate a pool with MCAST_POOL_INC more entries. 5919 * The previous test guarantees that port->mc_addr_nb is a multiple 5920 * of MCAST_POOL_INC. 5921 */ 5922 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 5923 MCAST_POOL_INC); 5924 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 5925 mc_pool_size); 5926 if (mc_pool == NULL) { 5927 fprintf(stderr, 5928 "allocation of pool of %u multicast addresses failed\n", 5929 port->mc_addr_nb + MCAST_POOL_INC); 5930 return -ENOMEM; 5931 } 5932 5933 port->mc_addr_pool = mc_pool; 5934 port->mc_addr_nb++; 5935 return 0; 5936 5937 } 5938 5939 static void 5940 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 5941 { 5942 if (mcast_addr_pool_extend(port) != 0) 5943 return; 5944 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 5945 } 5946 5947 static void 5948 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 5949 { 5950 port->mc_addr_nb--; 5951 if (addr_idx == port->mc_addr_nb) { 5952 /* No need to recompact the set of multicast addresses. */ 5953 if (port->mc_addr_nb == 0) { 5954 /* free the pool of multicast addresses. */ 5955 free(port->mc_addr_pool); 5956 port->mc_addr_pool = NULL; 5957 } 5958 return; 5959 } 5960 memmove(&port->mc_addr_pool[addr_idx], 5961 &port->mc_addr_pool[addr_idx + 1], 5962 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 5963 } 5964 5965 int 5966 mcast_addr_pool_destroy(portid_t port_id) 5967 { 5968 struct rte_port *port; 5969 5970 if (port_id_is_invalid(port_id, ENABLED_WARN) || 5971 port_id == (portid_t)RTE_PORT_ALL) 5972 return -EINVAL; 5973 port = &ports[port_id]; 5974 5975 if (port->mc_addr_nb != 0) { 5976 /* free the pool of multicast addresses. */ 5977 free(port->mc_addr_pool); 5978 port->mc_addr_pool = NULL; 5979 port->mc_addr_nb = 0; 5980 } 5981 return 0; 5982 } 5983 5984 static int 5985 eth_port_multicast_addr_list_set(portid_t port_id) 5986 { 5987 struct rte_port *port; 5988 int diag; 5989 5990 port = &ports[port_id]; 5991 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 5992 port->mc_addr_nb); 5993 if (diag < 0) 5994 fprintf(stderr, 5995 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 5996 port_id, port->mc_addr_nb, diag); 5997 5998 return diag; 5999 } 6000 6001 void 6002 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6003 { 6004 struct rte_port *port; 6005 uint32_t i; 6006 6007 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6008 return; 6009 6010 port = &ports[port_id]; 6011 6012 /* 6013 * Check that the added multicast MAC address is not already recorded 6014 * in the pool of multicast addresses. 6015 */ 6016 for (i = 0; i < port->mc_addr_nb; i++) { 6017 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6018 fprintf(stderr, 6019 "multicast address already filtered by port\n"); 6020 return; 6021 } 6022 } 6023 6024 mcast_addr_pool_append(port, mc_addr); 6025 if (eth_port_multicast_addr_list_set(port_id) < 0) 6026 /* Rollback on failure, remove the address from the pool */ 6027 mcast_addr_pool_remove(port, i); 6028 } 6029 6030 void 6031 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6032 { 6033 struct rte_port *port; 6034 uint32_t i; 6035 6036 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6037 return; 6038 6039 port = &ports[port_id]; 6040 6041 /* 6042 * Search the pool of multicast MAC addresses for the removed address. 6043 */ 6044 for (i = 0; i < port->mc_addr_nb; i++) { 6045 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6046 break; 6047 } 6048 if (i == port->mc_addr_nb) { 6049 fprintf(stderr, "multicast address not filtered by port %d\n", 6050 port_id); 6051 return; 6052 } 6053 6054 mcast_addr_pool_remove(port, i); 6055 if (eth_port_multicast_addr_list_set(port_id) < 0) 6056 /* Rollback on failure, add the address back into the pool */ 6057 mcast_addr_pool_append(port, mc_addr); 6058 } 6059 6060 void 6061 port_dcb_info_display(portid_t port_id) 6062 { 6063 struct rte_eth_dcb_info dcb_info; 6064 uint16_t i; 6065 int ret; 6066 static const char *border = "================"; 6067 6068 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6069 return; 6070 6071 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6072 if (ret) { 6073 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6074 port_id); 6075 return; 6076 } 6077 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6078 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6079 printf("\n TC : "); 6080 for (i = 0; i < dcb_info.nb_tcs; i++) 6081 printf("\t%4d", i); 6082 printf("\n Priority : "); 6083 for (i = 0; i < dcb_info.nb_tcs; i++) 6084 printf("\t%4d", dcb_info.prio_tc[i]); 6085 printf("\n BW percent :"); 6086 for (i = 0; i < dcb_info.nb_tcs; i++) 6087 printf("\t%4d%%", dcb_info.tc_bws[i]); 6088 printf("\n RXQ base : "); 6089 for (i = 0; i < dcb_info.nb_tcs; i++) 6090 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6091 printf("\n RXQ number :"); 6092 for (i = 0; i < dcb_info.nb_tcs; i++) 6093 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6094 printf("\n TXQ base : "); 6095 for (i = 0; i < dcb_info.nb_tcs; i++) 6096 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6097 printf("\n TXQ number :"); 6098 for (i = 0; i < dcb_info.nb_tcs; i++) 6099 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6100 printf("\n"); 6101 } 6102 6103 uint8_t * 6104 open_file(const char *file_path, uint32_t *size) 6105 { 6106 int fd = open(file_path, O_RDONLY); 6107 off_t pkg_size; 6108 uint8_t *buf = NULL; 6109 int ret = 0; 6110 struct stat st_buf; 6111 6112 if (size) 6113 *size = 0; 6114 6115 if (fd == -1) { 6116 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6117 return buf; 6118 } 6119 6120 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6121 close(fd); 6122 fprintf(stderr, "%s: File operations failed\n", __func__); 6123 return buf; 6124 } 6125 6126 pkg_size = st_buf.st_size; 6127 if (pkg_size < 0) { 6128 close(fd); 6129 fprintf(stderr, "%s: File operations failed\n", __func__); 6130 return buf; 6131 } 6132 6133 buf = (uint8_t *)malloc(pkg_size); 6134 if (!buf) { 6135 close(fd); 6136 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6137 return buf; 6138 } 6139 6140 ret = read(fd, buf, pkg_size); 6141 if (ret < 0) { 6142 close(fd); 6143 fprintf(stderr, "%s: File read operation failed\n", __func__); 6144 close_file(buf); 6145 return NULL; 6146 } 6147 6148 if (size) 6149 *size = pkg_size; 6150 6151 close(fd); 6152 6153 return buf; 6154 } 6155 6156 int 6157 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6158 { 6159 FILE *fh = fopen(file_path, "wb"); 6160 6161 if (fh == NULL) { 6162 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6163 return -1; 6164 } 6165 6166 if (fwrite(buf, 1, size, fh) != size) { 6167 fclose(fh); 6168 fprintf(stderr, "%s: File write operation failed\n", __func__); 6169 return -1; 6170 } 6171 6172 fclose(fh); 6173 6174 return 0; 6175 } 6176 6177 int 6178 close_file(uint8_t *buf) 6179 { 6180 if (buf) { 6181 free((void *)buf); 6182 return 0; 6183 } 6184 6185 return -1; 6186 } 6187 6188 void 6189 show_macs(portid_t port_id) 6190 { 6191 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6192 struct rte_eth_dev_info dev_info; 6193 int32_t i, rc, num_macs = 0; 6194 6195 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6196 return; 6197 6198 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6199 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6200 if (rc < 0) 6201 return; 6202 6203 for (i = 0; i < rc; i++) { 6204 6205 /* skip zero address */ 6206 if (rte_is_zero_ether_addr(&addr[i])) 6207 continue; 6208 6209 num_macs++; 6210 } 6211 6212 printf("Number of MAC address added: %d\n", num_macs); 6213 6214 for (i = 0; i < rc; i++) { 6215 6216 /* skip zero address */ 6217 if (rte_is_zero_ether_addr(&addr[i])) 6218 continue; 6219 6220 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6221 printf(" %s\n", buf); 6222 } 6223 } 6224 6225 void 6226 show_mcast_macs(portid_t port_id) 6227 { 6228 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6229 struct rte_ether_addr *addr; 6230 struct rte_port *port; 6231 uint32_t i; 6232 6233 port = &ports[port_id]; 6234 6235 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6236 6237 for (i = 0; i < port->mc_addr_nb; i++) { 6238 addr = &port->mc_addr_pool[i]; 6239 6240 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6241 printf(" %s\n", buf); 6242 } 6243 } 6244