1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <ctype.h> 7 #include <stdarg.h> 8 #include <errno.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <stdint.h> 13 #include <inttypes.h> 14 15 #include <sys/queue.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <fcntl.h> 19 #include <unistd.h> 20 21 #include <rte_common.h> 22 #include <rte_byteorder.h> 23 #include <rte_debug.h> 24 #include <rte_log.h> 25 #include <rte_memory.h> 26 #include <rte_memcpy.h> 27 #include <rte_memzone.h> 28 #include <rte_launch.h> 29 #include <rte_bus.h> 30 #include <rte_eal.h> 31 #include <rte_per_lcore.h> 32 #include <rte_lcore.h> 33 #include <rte_branch_prediction.h> 34 #include <rte_mempool.h> 35 #include <rte_mbuf.h> 36 #include <rte_interrupts.h> 37 #include <rte_ether.h> 38 #include <rte_ethdev.h> 39 #include <rte_string_fns.h> 40 #include <rte_cycles.h> 41 #include <rte_flow.h> 42 #include <rte_mtr.h> 43 #include <rte_errno.h> 44 #ifdef RTE_NET_IXGBE 45 #include <rte_pmd_ixgbe.h> 46 #endif 47 #ifdef RTE_NET_I40E 48 #include <rte_pmd_i40e.h> 49 #endif 50 #ifdef RTE_NET_BNXT 51 #include <rte_pmd_bnxt.h> 52 #endif 53 #ifdef RTE_LIB_GRO 54 #include <rte_gro.h> 55 #endif 56 #include <rte_hexdump.h> 57 58 #include "testpmd.h" 59 #include "cmdline_mtr.h" 60 61 #define ETHDEV_FWVERS_LEN 32 62 63 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 65 #else 66 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 67 #endif 68 69 #define NS_PER_SEC 1E9 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 /* Group types */ 91 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 92 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 93 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 94 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 95 { "none", 0 }, 96 { "ip", RTE_ETH_RSS_IP }, 97 { "udp", RTE_ETH_RSS_UDP }, 98 { "tcp", RTE_ETH_RSS_TCP }, 99 { "sctp", RTE_ETH_RSS_SCTP }, 100 { "tunnel", RTE_ETH_RSS_TUNNEL }, 101 { "vlan", RTE_ETH_RSS_VLAN }, 102 103 /* Individual type */ 104 { "ipv4", RTE_ETH_RSS_IPV4 }, 105 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 106 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 107 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 108 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 109 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 110 { "ipv6", RTE_ETH_RSS_IPV6 }, 111 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 112 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 113 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 114 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 115 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 116 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 117 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 118 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 119 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 120 { "port", RTE_ETH_RSS_PORT }, 121 { "vxlan", RTE_ETH_RSS_VXLAN }, 122 { "geneve", RTE_ETH_RSS_GENEVE }, 123 { "nvgre", RTE_ETH_RSS_NVGRE }, 124 { "gtpu", RTE_ETH_RSS_GTPU }, 125 { "eth", RTE_ETH_RSS_ETH }, 126 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 127 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 128 { "esp", RTE_ETH_RSS_ESP }, 129 { "ah", RTE_ETH_RSS_AH }, 130 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 131 { "pfcp", RTE_ETH_RSS_PFCP }, 132 { "pppoe", RTE_ETH_RSS_PPPOE }, 133 { "ecpri", RTE_ETH_RSS_ECPRI }, 134 { "mpls", RTE_ETH_RSS_MPLS }, 135 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 136 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 137 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 138 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 139 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 140 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 141 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 142 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 143 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 144 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 145 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 146 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 147 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 148 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 149 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 150 { NULL, 0}, 151 }; 152 153 static const struct { 154 enum rte_eth_fec_mode mode; 155 const char *name; 156 } fec_mode_name[] = { 157 { 158 .mode = RTE_ETH_FEC_NOFEC, 159 .name = "off", 160 }, 161 { 162 .mode = RTE_ETH_FEC_AUTO, 163 .name = "auto", 164 }, 165 { 166 .mode = RTE_ETH_FEC_BASER, 167 .name = "baser", 168 }, 169 { 170 .mode = RTE_ETH_FEC_RS, 171 .name = "rs", 172 }, 173 }; 174 175 static const struct { 176 char str[32]; 177 uint16_t ftype; 178 } flowtype_str_table[] = { 179 {"raw", RTE_ETH_FLOW_RAW}, 180 {"ipv4", RTE_ETH_FLOW_IPV4}, 181 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 182 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 183 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 184 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 185 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 186 {"ipv6", RTE_ETH_FLOW_IPV6}, 187 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 188 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 189 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 190 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 191 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 192 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 193 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 194 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 195 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 196 {"port", RTE_ETH_FLOW_PORT}, 197 {"vxlan", RTE_ETH_FLOW_VXLAN}, 198 {"geneve", RTE_ETH_FLOW_GENEVE}, 199 {"nvgre", RTE_ETH_FLOW_NVGRE}, 200 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 201 {"gtpu", RTE_ETH_FLOW_GTPU}, 202 }; 203 204 static void 205 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 206 { 207 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 208 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 209 printf("%s%s", name, buf); 210 } 211 212 static void 213 nic_xstats_display_periodic(portid_t port_id) 214 { 215 struct xstat_display_info *xstats_info; 216 uint64_t *prev_values, *curr_values; 217 uint64_t diff_value, value_rate; 218 struct timespec cur_time; 219 uint64_t *ids_supp; 220 size_t ids_supp_sz; 221 uint64_t diff_ns; 222 unsigned int i; 223 int rc; 224 225 xstats_info = &ports[port_id].xstats_info; 226 227 ids_supp_sz = xstats_info->ids_supp_sz; 228 if (ids_supp_sz == 0) 229 return; 230 231 printf("\n"); 232 233 ids_supp = xstats_info->ids_supp; 234 prev_values = xstats_info->prev_values; 235 curr_values = xstats_info->curr_values; 236 237 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 238 ids_supp_sz); 239 if (rc != (int)ids_supp_sz) { 240 fprintf(stderr, 241 "Failed to get values of %zu xstats for port %u - return code %d\n", 242 ids_supp_sz, port_id, rc); 243 return; 244 } 245 246 diff_ns = 0; 247 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 248 uint64_t ns; 249 250 ns = cur_time.tv_sec * NS_PER_SEC; 251 ns += cur_time.tv_nsec; 252 253 if (xstats_info->prev_ns != 0) 254 diff_ns = ns - xstats_info->prev_ns; 255 xstats_info->prev_ns = ns; 256 } 257 258 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 259 for (i = 0; i < ids_supp_sz; i++) { 260 diff_value = (curr_values[i] > prev_values[i]) ? 261 (curr_values[i] - prev_values[i]) : 0; 262 prev_values[i] = curr_values[i]; 263 value_rate = diff_ns > 0 ? 264 (double)diff_value / diff_ns * NS_PER_SEC : 0; 265 266 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 267 xstats_display[i].name, curr_values[i], value_rate); 268 } 269 } 270 271 void 272 nic_stats_display(portid_t port_id) 273 { 274 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 275 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 276 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 277 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 278 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 279 struct timespec cur_time; 280 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 281 diff_ns; 282 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 283 struct rte_eth_stats stats; 284 static const char *nic_stats_border = "########################"; 285 int ret; 286 287 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 288 print_valid_ports(); 289 return; 290 } 291 ret = rte_eth_stats_get(port_id, &stats); 292 if (ret != 0) { 293 fprintf(stderr, 294 "%s: Error: failed to get stats (port %u): %d", 295 __func__, port_id, ret); 296 return; 297 } 298 printf("\n %s NIC statistics for port %-2d %s\n", 299 nic_stats_border, port_id, nic_stats_border); 300 301 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 302 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 303 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 304 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 305 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 306 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 307 308 diff_ns = 0; 309 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 310 uint64_t ns; 311 312 ns = cur_time.tv_sec * NS_PER_SEC; 313 ns += cur_time.tv_nsec; 314 315 if (prev_ns[port_id] != 0) 316 diff_ns = ns - prev_ns[port_id]; 317 prev_ns[port_id] = ns; 318 } 319 320 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 321 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 322 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 323 (stats.opackets - prev_pkts_tx[port_id]) : 0; 324 prev_pkts_rx[port_id] = stats.ipackets; 325 prev_pkts_tx[port_id] = stats.opackets; 326 mpps_rx = diff_ns > 0 ? 327 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 328 mpps_tx = diff_ns > 0 ? 329 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 330 331 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 332 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 333 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 334 (stats.obytes - prev_bytes_tx[port_id]) : 0; 335 prev_bytes_rx[port_id] = stats.ibytes; 336 prev_bytes_tx[port_id] = stats.obytes; 337 mbps_rx = diff_ns > 0 ? 338 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 339 mbps_tx = diff_ns > 0 ? 340 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 341 342 printf("\n Throughput (since last show)\n"); 343 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 344 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 345 mpps_tx, mbps_tx * 8); 346 347 if (xstats_display_num > 0) 348 nic_xstats_display_periodic(port_id); 349 350 printf(" %s############################%s\n", 351 nic_stats_border, nic_stats_border); 352 } 353 354 void 355 nic_stats_clear(portid_t port_id) 356 { 357 int ret; 358 359 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 360 print_valid_ports(); 361 return; 362 } 363 364 ret = rte_eth_stats_reset(port_id); 365 if (ret != 0) { 366 fprintf(stderr, 367 "%s: Error: failed to reset stats (port %u): %s", 368 __func__, port_id, strerror(-ret)); 369 return; 370 } 371 372 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 373 if (ret != 0) { 374 if (ret < 0) 375 ret = -ret; 376 fprintf(stderr, 377 "%s: Error: failed to get stats (port %u): %s", 378 __func__, port_id, strerror(ret)); 379 return; 380 } 381 printf("\n NIC statistics for port %d cleared\n", port_id); 382 } 383 384 void 385 nic_xstats_display(portid_t port_id) 386 { 387 struct rte_eth_xstat *xstats; 388 int cnt_xstats, idx_xstat; 389 struct rte_eth_xstat_name *xstats_names; 390 391 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 392 print_valid_ports(); 393 return; 394 } 395 printf("###### NIC extended statistics for port %-2d\n", port_id); 396 if (!rte_eth_dev_is_valid_port(port_id)) { 397 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 398 return; 399 } 400 401 /* Get count */ 402 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 403 if (cnt_xstats < 0) { 404 fprintf(stderr, "Error: Cannot get count of xstats\n"); 405 return; 406 } 407 408 /* Get id-name lookup table */ 409 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 410 if (xstats_names == NULL) { 411 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 412 return; 413 } 414 if (cnt_xstats != rte_eth_xstats_get_names( 415 port_id, xstats_names, cnt_xstats)) { 416 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 417 free(xstats_names); 418 return; 419 } 420 421 /* Get stats themselves */ 422 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 423 if (xstats == NULL) { 424 fprintf(stderr, "Cannot allocate memory for xstats\n"); 425 free(xstats_names); 426 return; 427 } 428 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 429 fprintf(stderr, "Error: Unable to get xstats\n"); 430 free(xstats_names); 431 free(xstats); 432 return; 433 } 434 435 /* Display xstats */ 436 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 437 if (xstats_hide_zero && !xstats[idx_xstat].value) 438 continue; 439 printf("%s: %"PRIu64"\n", 440 xstats_names[idx_xstat].name, 441 xstats[idx_xstat].value); 442 } 443 free(xstats_names); 444 free(xstats); 445 } 446 447 void 448 nic_xstats_clear(portid_t port_id) 449 { 450 int ret; 451 452 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 453 print_valid_ports(); 454 return; 455 } 456 457 ret = rte_eth_xstats_reset(port_id); 458 if (ret != 0) { 459 fprintf(stderr, 460 "%s: Error: failed to reset xstats (port %u): %s\n", 461 __func__, port_id, strerror(-ret)); 462 return; 463 } 464 465 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 466 if (ret != 0) { 467 if (ret < 0) 468 ret = -ret; 469 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 470 __func__, port_id, strerror(ret)); 471 return; 472 } 473 } 474 475 static const char * 476 get_queue_state_name(uint8_t queue_state) 477 { 478 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 479 return "stopped"; 480 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 481 return "started"; 482 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 483 return "hairpin"; 484 else 485 return "unknown"; 486 } 487 488 void 489 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 490 { 491 struct rte_eth_burst_mode mode; 492 struct rte_eth_rxq_info qinfo; 493 int32_t rc; 494 static const char *info_border = "*********************"; 495 496 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 497 if (rc != 0) { 498 fprintf(stderr, 499 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 500 port_id, queue_id, strerror(-rc), rc); 501 return; 502 } 503 504 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 505 info_border, port_id, queue_id, info_border); 506 507 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 508 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 509 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 510 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 511 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 512 printf("\nRX drop packets: %s", 513 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 514 printf("\nRX deferred start: %s", 515 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 516 printf("\nRX scattered packets: %s", 517 (qinfo.scattered_rx != 0) ? "on" : "off"); 518 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 519 if (qinfo.rx_buf_size != 0) 520 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 521 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 522 523 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 524 printf("\nBurst mode: %s%s", 525 mode.info, 526 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 527 " (per queue)" : ""); 528 529 printf("\n"); 530 } 531 532 void 533 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 534 { 535 struct rte_eth_burst_mode mode; 536 struct rte_eth_txq_info qinfo; 537 int32_t rc; 538 static const char *info_border = "*********************"; 539 540 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 541 if (rc != 0) { 542 fprintf(stderr, 543 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 544 port_id, queue_id, strerror(-rc), rc); 545 return; 546 } 547 548 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 549 info_border, port_id, queue_id, info_border); 550 551 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 552 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 553 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 554 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 555 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 556 printf("\nTX deferred start: %s", 557 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 558 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 559 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 560 561 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 562 printf("\nBurst mode: %s%s", 563 mode.info, 564 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 565 " (per queue)" : ""); 566 567 printf("\n"); 568 } 569 570 static int bus_match_all(const struct rte_bus *bus, const void *data) 571 { 572 RTE_SET_USED(bus); 573 RTE_SET_USED(data); 574 return 0; 575 } 576 577 static void 578 device_infos_display_speeds(uint32_t speed_capa) 579 { 580 printf("\n\tDevice speed capability:"); 581 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 582 printf(" Autonegotiate (all speeds)"); 583 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 584 printf(" Disable autonegotiate (fixed speed) "); 585 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 586 printf(" 10 Mbps half-duplex "); 587 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 588 printf(" 10 Mbps full-duplex "); 589 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 590 printf(" 100 Mbps half-duplex "); 591 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 592 printf(" 100 Mbps full-duplex "); 593 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 594 printf(" 1 Gbps "); 595 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 596 printf(" 2.5 Gbps "); 597 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 598 printf(" 5 Gbps "); 599 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 600 printf(" 10 Gbps "); 601 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 602 printf(" 20 Gbps "); 603 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 604 printf(" 25 Gbps "); 605 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 606 printf(" 40 Gbps "); 607 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 608 printf(" 50 Gbps "); 609 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 610 printf(" 56 Gbps "); 611 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 612 printf(" 100 Gbps "); 613 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 614 printf(" 200 Gbps "); 615 } 616 617 void 618 device_infos_display(const char *identifier) 619 { 620 static const char *info_border = "*********************"; 621 struct rte_bus *start = NULL, *next; 622 struct rte_dev_iterator dev_iter; 623 char name[RTE_ETH_NAME_MAX_LEN]; 624 struct rte_ether_addr mac_addr; 625 struct rte_device *dev; 626 struct rte_devargs da; 627 portid_t port_id; 628 struct rte_eth_dev_info dev_info; 629 char devstr[128]; 630 631 memset(&da, 0, sizeof(da)); 632 if (!identifier) 633 goto skip_parse; 634 635 if (rte_devargs_parsef(&da, "%s", identifier)) { 636 fprintf(stderr, "cannot parse identifier\n"); 637 return; 638 } 639 640 skip_parse: 641 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 642 643 start = next; 644 if (identifier && da.bus != next) 645 continue; 646 647 snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next)); 648 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 649 650 if (rte_dev_driver(dev) == NULL) 651 continue; 652 /* Check for matching device if identifier is present */ 653 if (identifier && 654 strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev)))) 655 continue; 656 printf("\n%s Infos for device %s %s\n", 657 info_border, rte_dev_name(dev), info_border); 658 printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev))); 659 printf("\nBus information: %s", 660 rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : ""); 661 printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev))); 662 printf("\nDevargs: %s", 663 rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : ""); 664 printf("\nConnect to socket: %d", rte_dev_numa_node(dev)); 665 printf("\n"); 666 667 /* List ports with matching device name */ 668 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 669 printf("\n\tPort id: %-2d", port_id); 670 if (eth_macaddr_get_print_err(port_id, 671 &mac_addr) == 0) 672 print_ethaddr("\n\tMAC address: ", 673 &mac_addr); 674 rte_eth_dev_get_name_by_port(port_id, name); 675 printf("\n\tDevice name: %s", name); 676 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 677 device_infos_display_speeds(dev_info.speed_capa); 678 printf("\n"); 679 } 680 } 681 }; 682 rte_devargs_reset(&da); 683 } 684 685 static void 686 print_dev_capabilities(uint64_t capabilities) 687 { 688 uint64_t single_capa; 689 int begin; 690 int end; 691 int bit; 692 693 if (capabilities == 0) 694 return; 695 696 begin = __builtin_ctzll(capabilities); 697 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 698 699 single_capa = 1ULL << begin; 700 for (bit = begin; bit < end; bit++) { 701 if (capabilities & single_capa) 702 printf(" %s", 703 rte_eth_dev_capability_name(single_capa)); 704 single_capa <<= 1; 705 } 706 } 707 708 uint64_t 709 str_to_rsstypes(const char *str) 710 { 711 uint16_t i; 712 713 for (i = 0; rss_type_table[i].str != NULL; i++) { 714 if (strcmp(rss_type_table[i].str, str) == 0) 715 return rss_type_table[i].rss_type; 716 } 717 718 return 0; 719 } 720 721 const char * 722 rsstypes_to_str(uint64_t rss_type) 723 { 724 uint16_t i; 725 726 for (i = 0; rss_type_table[i].str != NULL; i++) { 727 if (rss_type_table[i].rss_type == rss_type) 728 return rss_type_table[i].str; 729 } 730 731 return NULL; 732 } 733 734 static void 735 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line) 736 { 737 uint16_t user_defined_str_len; 738 uint16_t total_len = 0; 739 uint16_t str_len = 0; 740 uint64_t rss_offload; 741 uint16_t i; 742 743 for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) { 744 rss_offload = RTE_BIT64(i); 745 if ((offload_types & rss_offload) != 0) { 746 const char *p = rsstypes_to_str(rss_offload); 747 748 user_defined_str_len = 749 strlen("user-defined-") + (i / 10 + 1); 750 str_len = p ? strlen(p) : user_defined_str_len; 751 str_len += 2; /* add two spaces */ 752 if (total_len + str_len >= char_num_per_line) { 753 total_len = 0; 754 printf("\n"); 755 } 756 757 if (p) 758 printf(" %s", p); 759 else 760 printf(" user-defined-%u", i); 761 total_len += str_len; 762 } 763 } 764 printf("\n"); 765 } 766 767 void 768 port_infos_display(portid_t port_id) 769 { 770 struct rte_port *port; 771 struct rte_ether_addr mac_addr; 772 struct rte_eth_link link; 773 struct rte_eth_dev_info dev_info; 774 int vlan_offload; 775 struct rte_mempool * mp; 776 static const char *info_border = "*********************"; 777 uint16_t mtu; 778 char name[RTE_ETH_NAME_MAX_LEN]; 779 int ret; 780 char fw_version[ETHDEV_FWVERS_LEN]; 781 782 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 783 print_valid_ports(); 784 return; 785 } 786 port = &ports[port_id]; 787 ret = eth_link_get_nowait_print_err(port_id, &link); 788 if (ret < 0) 789 return; 790 791 ret = eth_dev_info_get_print_err(port_id, &dev_info); 792 if (ret != 0) 793 return; 794 795 printf("\n%s Infos for port %-2d %s\n", 796 info_border, port_id, info_border); 797 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 798 print_ethaddr("MAC address: ", &mac_addr); 799 rte_eth_dev_get_name_by_port(port_id, name); 800 printf("\nDevice name: %s", name); 801 printf("\nDriver name: %s", dev_info.driver_name); 802 803 if (rte_eth_dev_fw_version_get(port_id, fw_version, 804 ETHDEV_FWVERS_LEN) == 0) 805 printf("\nFirmware-version: %s", fw_version); 806 else 807 printf("\nFirmware-version: %s", "not available"); 808 809 if (rte_dev_devargs(dev_info.device) && rte_dev_devargs(dev_info.device)->args) 810 printf("\nDevargs: %s", rte_dev_devargs(dev_info.device)->args); 811 printf("\nConnect to socket: %u", port->socket_id); 812 813 if (port_numa[port_id] != NUMA_NO_CONFIG) { 814 mp = mbuf_pool_find(port_numa[port_id], 0); 815 if (mp) 816 printf("\nmemory allocation on the socket: %d", 817 port_numa[port_id]); 818 } else 819 printf("\nmemory allocation on the socket: %u",port->socket_id); 820 821 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 822 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 823 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 824 ("full-duplex") : ("half-duplex")); 825 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 826 ("On") : ("Off")); 827 828 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 829 printf("MTU: %u\n", mtu); 830 831 printf("Promiscuous mode: %s\n", 832 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 833 printf("Allmulticast mode: %s\n", 834 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 835 printf("Maximum number of MAC addresses: %u\n", 836 (unsigned int)(port->dev_info.max_mac_addrs)); 837 printf("Maximum number of MAC addresses of hash filtering: %u\n", 838 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 839 840 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 841 if (vlan_offload >= 0){ 842 printf("VLAN offload: \n"); 843 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 844 printf(" strip on, "); 845 else 846 printf(" strip off, "); 847 848 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 849 printf("filter on, "); 850 else 851 printf("filter off, "); 852 853 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 854 printf("extend on, "); 855 else 856 printf("extend off, "); 857 858 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 859 printf("qinq strip on\n"); 860 else 861 printf("qinq strip off\n"); 862 } 863 864 if (dev_info.hash_key_size > 0) 865 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 866 if (dev_info.reta_size > 0) 867 printf("Redirection table size: %u\n", dev_info.reta_size); 868 if (!dev_info.flow_type_rss_offloads) 869 printf("No RSS offload flow type is supported.\n"); 870 else { 871 printf("Supported RSS offload flow types:\n"); 872 rss_offload_types_display(dev_info.flow_type_rss_offloads, 873 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 874 } 875 876 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 877 printf("Maximum configurable length of RX packet: %u\n", 878 dev_info.max_rx_pktlen); 879 printf("Maximum configurable size of LRO aggregated packet: %u\n", 880 dev_info.max_lro_pkt_size); 881 if (dev_info.max_vfs) 882 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 883 if (dev_info.max_vmdq_pools) 884 printf("Maximum number of VMDq pools: %u\n", 885 dev_info.max_vmdq_pools); 886 887 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 888 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 889 printf("Max possible number of RXDs per queue: %hu\n", 890 dev_info.rx_desc_lim.nb_max); 891 printf("Min possible number of RXDs per queue: %hu\n", 892 dev_info.rx_desc_lim.nb_min); 893 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 894 895 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 896 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 897 printf("Max possible number of TXDs per queue: %hu\n", 898 dev_info.tx_desc_lim.nb_max); 899 printf("Min possible number of TXDs per queue: %hu\n", 900 dev_info.tx_desc_lim.nb_min); 901 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 902 printf("Max segment number per packet: %hu\n", 903 dev_info.tx_desc_lim.nb_seg_max); 904 printf("Max segment number per MTU/TSO: %hu\n", 905 dev_info.tx_desc_lim.nb_mtu_seg_max); 906 907 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 908 print_dev_capabilities(dev_info.dev_capa); 909 printf(" )\n"); 910 /* Show switch info only if valid switch domain and port id is set */ 911 if (dev_info.switch_info.domain_id != 912 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 913 if (dev_info.switch_info.name) 914 printf("Switch name: %s\n", dev_info.switch_info.name); 915 916 printf("Switch domain Id: %u\n", 917 dev_info.switch_info.domain_id); 918 printf("Switch Port Id: %u\n", 919 dev_info.switch_info.port_id); 920 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 921 printf("Switch Rx domain: %u\n", 922 dev_info.switch_info.rx_domain); 923 } 924 } 925 926 void 927 port_summary_header_display(void) 928 { 929 uint16_t port_number; 930 931 port_number = rte_eth_dev_count_avail(); 932 printf("Number of available ports: %i\n", port_number); 933 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 934 "Driver", "Status", "Link"); 935 } 936 937 void 938 port_summary_display(portid_t port_id) 939 { 940 struct rte_ether_addr mac_addr; 941 struct rte_eth_link link; 942 struct rte_eth_dev_info dev_info; 943 char name[RTE_ETH_NAME_MAX_LEN]; 944 int ret; 945 946 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 947 print_valid_ports(); 948 return; 949 } 950 951 ret = eth_link_get_nowait_print_err(port_id, &link); 952 if (ret < 0) 953 return; 954 955 ret = eth_dev_info_get_print_err(port_id, &dev_info); 956 if (ret != 0) 957 return; 958 959 rte_eth_dev_get_name_by_port(port_id, name); 960 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 961 if (ret != 0) 962 return; 963 964 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 965 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 966 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 967 rte_eth_link_speed_to_str(link.link_speed)); 968 } 969 970 void 971 port_eeprom_display(portid_t port_id) 972 { 973 struct rte_dev_eeprom_info einfo; 974 int ret; 975 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 976 print_valid_ports(); 977 return; 978 } 979 980 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 981 if (len_eeprom < 0) { 982 switch (len_eeprom) { 983 case -ENODEV: 984 fprintf(stderr, "port index %d invalid\n", port_id); 985 break; 986 case -ENOTSUP: 987 fprintf(stderr, "operation not supported by device\n"); 988 break; 989 case -EIO: 990 fprintf(stderr, "device is removed\n"); 991 break; 992 default: 993 fprintf(stderr, "Unable to get EEPROM: %d\n", 994 len_eeprom); 995 break; 996 } 997 return; 998 } 999 1000 einfo.offset = 0; 1001 einfo.length = len_eeprom; 1002 einfo.data = calloc(1, len_eeprom); 1003 if (!einfo.data) { 1004 fprintf(stderr, 1005 "Allocation of port %u eeprom data failed\n", 1006 port_id); 1007 return; 1008 } 1009 1010 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1011 if (ret != 0) { 1012 switch (ret) { 1013 case -ENODEV: 1014 fprintf(stderr, "port index %d invalid\n", port_id); 1015 break; 1016 case -ENOTSUP: 1017 fprintf(stderr, "operation not supported by device\n"); 1018 break; 1019 case -EIO: 1020 fprintf(stderr, "device is removed\n"); 1021 break; 1022 default: 1023 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1024 break; 1025 } 1026 free(einfo.data); 1027 return; 1028 } 1029 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1030 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1031 free(einfo.data); 1032 } 1033 1034 void 1035 port_module_eeprom_display(portid_t port_id) 1036 { 1037 struct rte_eth_dev_module_info minfo; 1038 struct rte_dev_eeprom_info einfo; 1039 int ret; 1040 1041 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1042 print_valid_ports(); 1043 return; 1044 } 1045 1046 1047 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1048 if (ret != 0) { 1049 switch (ret) { 1050 case -ENODEV: 1051 fprintf(stderr, "port index %d invalid\n", port_id); 1052 break; 1053 case -ENOTSUP: 1054 fprintf(stderr, "operation not supported by device\n"); 1055 break; 1056 case -EIO: 1057 fprintf(stderr, "device is removed\n"); 1058 break; 1059 default: 1060 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1061 ret); 1062 break; 1063 } 1064 return; 1065 } 1066 1067 einfo.offset = 0; 1068 einfo.length = minfo.eeprom_len; 1069 einfo.data = calloc(1, minfo.eeprom_len); 1070 if (!einfo.data) { 1071 fprintf(stderr, 1072 "Allocation of port %u eeprom data failed\n", 1073 port_id); 1074 return; 1075 } 1076 1077 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1078 if (ret != 0) { 1079 switch (ret) { 1080 case -ENODEV: 1081 fprintf(stderr, "port index %d invalid\n", port_id); 1082 break; 1083 case -ENOTSUP: 1084 fprintf(stderr, "operation not supported by device\n"); 1085 break; 1086 case -EIO: 1087 fprintf(stderr, "device is removed\n"); 1088 break; 1089 default: 1090 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1091 ret); 1092 break; 1093 } 1094 free(einfo.data); 1095 return; 1096 } 1097 1098 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1099 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1100 free(einfo.data); 1101 } 1102 1103 int 1104 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1105 { 1106 uint16_t pid; 1107 1108 if (port_id == (portid_t)RTE_PORT_ALL) 1109 return 0; 1110 1111 RTE_ETH_FOREACH_DEV(pid) 1112 if (port_id == pid) 1113 return 0; 1114 1115 if (warning == ENABLED_WARN) 1116 fprintf(stderr, "Invalid port %d\n", port_id); 1117 1118 return 1; 1119 } 1120 1121 void print_valid_ports(void) 1122 { 1123 portid_t pid; 1124 1125 printf("The valid ports array is ["); 1126 RTE_ETH_FOREACH_DEV(pid) { 1127 printf(" %d", pid); 1128 } 1129 printf(" ]\n"); 1130 } 1131 1132 static int 1133 vlan_id_is_invalid(uint16_t vlan_id) 1134 { 1135 if (vlan_id < 4096) 1136 return 0; 1137 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1138 return 1; 1139 } 1140 1141 static uint32_t 1142 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1143 { 1144 uint32_t overhead_len; 1145 1146 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1147 overhead_len = max_rx_pktlen - max_mtu; 1148 else 1149 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1150 1151 return overhead_len; 1152 } 1153 1154 static int 1155 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1156 { 1157 struct rte_eth_dev_info dev_info; 1158 uint32_t overhead_len; 1159 uint32_t frame_size; 1160 int ret; 1161 1162 ret = rte_eth_dev_info_get(port_id, &dev_info); 1163 if (ret != 0) 1164 return ret; 1165 1166 if (mtu < dev_info.min_mtu) { 1167 fprintf(stderr, 1168 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1169 mtu, dev_info.min_mtu, port_id); 1170 return -EINVAL; 1171 } 1172 if (mtu > dev_info.max_mtu) { 1173 fprintf(stderr, 1174 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1175 mtu, dev_info.max_mtu, port_id); 1176 return -EINVAL; 1177 } 1178 1179 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1180 dev_info.max_mtu); 1181 frame_size = mtu + overhead_len; 1182 if (frame_size > dev_info.max_rx_pktlen) { 1183 fprintf(stderr, 1184 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1185 frame_size, dev_info.max_rx_pktlen, port_id); 1186 return -EINVAL; 1187 } 1188 1189 return 0; 1190 } 1191 1192 void 1193 port_mtu_set(portid_t port_id, uint16_t mtu) 1194 { 1195 struct rte_port *port = &ports[port_id]; 1196 int diag; 1197 1198 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1199 return; 1200 1201 diag = eth_dev_validate_mtu(port_id, mtu); 1202 if (diag != 0) 1203 return; 1204 1205 if (port->need_reconfig == 0) { 1206 diag = rte_eth_dev_set_mtu(port_id, mtu); 1207 if (diag != 0) { 1208 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1209 return; 1210 } 1211 } 1212 1213 port->dev_conf.rxmode.mtu = mtu; 1214 } 1215 1216 /* Generic flow management functions. */ 1217 1218 static struct port_flow_tunnel * 1219 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1220 { 1221 struct port_flow_tunnel *flow_tunnel; 1222 1223 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1224 if (flow_tunnel->id == port_tunnel_id) 1225 goto out; 1226 } 1227 flow_tunnel = NULL; 1228 1229 out: 1230 return flow_tunnel; 1231 } 1232 1233 const char * 1234 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1235 { 1236 const char *type; 1237 switch (tunnel->type) { 1238 default: 1239 type = "unknown"; 1240 break; 1241 case RTE_FLOW_ITEM_TYPE_VXLAN: 1242 type = "vxlan"; 1243 break; 1244 case RTE_FLOW_ITEM_TYPE_GRE: 1245 type = "gre"; 1246 break; 1247 case RTE_FLOW_ITEM_TYPE_NVGRE: 1248 type = "nvgre"; 1249 break; 1250 case RTE_FLOW_ITEM_TYPE_GENEVE: 1251 type = "geneve"; 1252 break; 1253 } 1254 1255 return type; 1256 } 1257 1258 struct port_flow_tunnel * 1259 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1260 { 1261 struct rte_port *port = &ports[port_id]; 1262 struct port_flow_tunnel *flow_tunnel; 1263 1264 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1265 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1266 goto out; 1267 } 1268 flow_tunnel = NULL; 1269 1270 out: 1271 return flow_tunnel; 1272 } 1273 1274 void port_flow_tunnel_list(portid_t port_id) 1275 { 1276 struct rte_port *port = &ports[port_id]; 1277 struct port_flow_tunnel *flt; 1278 1279 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1280 printf("port %u tunnel #%u type=%s", 1281 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1282 if (flt->tunnel.tun_id) 1283 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1284 printf("\n"); 1285 } 1286 } 1287 1288 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1289 { 1290 struct rte_port *port = &ports[port_id]; 1291 struct port_flow_tunnel *flt; 1292 1293 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1294 if (flt->id == tunnel_id) 1295 break; 1296 } 1297 if (flt) { 1298 LIST_REMOVE(flt, chain); 1299 free(flt); 1300 printf("port %u: flow tunnel #%u destroyed\n", 1301 port_id, tunnel_id); 1302 } 1303 } 1304 1305 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1306 { 1307 struct rte_port *port = &ports[port_id]; 1308 enum rte_flow_item_type type; 1309 struct port_flow_tunnel *flt; 1310 1311 if (!strcmp(ops->type, "vxlan")) 1312 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1313 else if (!strcmp(ops->type, "gre")) 1314 type = RTE_FLOW_ITEM_TYPE_GRE; 1315 else if (!strcmp(ops->type, "nvgre")) 1316 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1317 else if (!strcmp(ops->type, "geneve")) 1318 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1319 else { 1320 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1321 ops->type); 1322 return; 1323 } 1324 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1325 if (flt->tunnel.type == type) 1326 break; 1327 } 1328 if (!flt) { 1329 flt = calloc(1, sizeof(*flt)); 1330 if (!flt) { 1331 fprintf(stderr, "failed to allocate port flt object\n"); 1332 return; 1333 } 1334 flt->tunnel.type = type; 1335 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1336 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1337 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1338 } 1339 printf("port %d: flow tunnel #%u type %s\n", 1340 port_id, flt->id, ops->type); 1341 } 1342 1343 /** Generate a port_flow entry from attributes/pattern/actions. */ 1344 static struct port_flow * 1345 port_flow_new(const struct rte_flow_attr *attr, 1346 const struct rte_flow_item *pattern, 1347 const struct rte_flow_action *actions, 1348 struct rte_flow_error *error) 1349 { 1350 const struct rte_flow_conv_rule rule = { 1351 .attr_ro = attr, 1352 .pattern_ro = pattern, 1353 .actions_ro = actions, 1354 }; 1355 struct port_flow *pf; 1356 int ret; 1357 1358 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1359 if (ret < 0) 1360 return NULL; 1361 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1362 if (!pf) { 1363 rte_flow_error_set 1364 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1365 "calloc() failed"); 1366 return NULL; 1367 } 1368 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1369 error) >= 0) 1370 return pf; 1371 free(pf); 1372 return NULL; 1373 } 1374 1375 /** Print a message out of a flow error. */ 1376 static int 1377 port_flow_complain(struct rte_flow_error *error) 1378 { 1379 static const char *const errstrlist[] = { 1380 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1381 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1382 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1383 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1384 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1385 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1386 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1387 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1388 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1389 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1390 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1391 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1392 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1393 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1394 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1395 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1396 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1397 }; 1398 const char *errstr; 1399 char buf[32]; 1400 int err = rte_errno; 1401 1402 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1403 !errstrlist[error->type]) 1404 errstr = "unknown type"; 1405 else 1406 errstr = errstrlist[error->type]; 1407 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1408 __func__, error->type, errstr, 1409 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1410 error->cause), buf) : "", 1411 error->message ? error->message : "(no stated reason)", 1412 rte_strerror(err)); 1413 1414 switch (error->type) { 1415 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1416 fprintf(stderr, "The status suggests the use of \"transfer\" " 1417 "as the possible cause of the failure. Make " 1418 "sure that the flow in question and its " 1419 "indirect components (if any) are managed " 1420 "via \"transfer\" proxy port. Use command " 1421 "\"show port (port_id) flow transfer proxy\" " 1422 "to figure out the proxy port ID\n"); 1423 break; 1424 default: 1425 break; 1426 } 1427 1428 return -err; 1429 } 1430 1431 static void 1432 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line) 1433 { 1434 uint16_t total_len = 0; 1435 uint16_t str_len; 1436 uint16_t i; 1437 1438 if (rss_types == 0) 1439 return; 1440 1441 for (i = 0; rss_type_table[i].str; i++) { 1442 if (rss_type_table[i].rss_type == 0) 1443 continue; 1444 1445 if ((rss_types & rss_type_table[i].rss_type) == 1446 rss_type_table[i].rss_type) { 1447 /* Contain two spaces */ 1448 str_len = strlen(rss_type_table[i].str) + 2; 1449 if (total_len + str_len > char_num_per_line) { 1450 printf("\n"); 1451 total_len = 0; 1452 } 1453 printf(" %s", rss_type_table[i].str); 1454 total_len += str_len; 1455 } 1456 } 1457 printf("\n"); 1458 } 1459 1460 static void 1461 rss_config_display(struct rte_flow_action_rss *rss_conf) 1462 { 1463 uint8_t i; 1464 1465 if (rss_conf == NULL) { 1466 fprintf(stderr, "Invalid rule\n"); 1467 return; 1468 } 1469 1470 printf("RSS:\n" 1471 " queues:"); 1472 if (rss_conf->queue_num == 0) 1473 printf(" none"); 1474 for (i = 0; i < rss_conf->queue_num; i++) 1475 printf(" %d", rss_conf->queue[i]); 1476 printf("\n"); 1477 1478 printf(" function: "); 1479 switch (rss_conf->func) { 1480 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1481 printf("default\n"); 1482 break; 1483 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1484 printf("toeplitz\n"); 1485 break; 1486 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1487 printf("simple_xor\n"); 1488 break; 1489 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1490 printf("symmetric_toeplitz\n"); 1491 break; 1492 default: 1493 printf("Unknown function\n"); 1494 return; 1495 } 1496 1497 printf(" types:\n"); 1498 if (rss_conf->types == 0) { 1499 printf(" none\n"); 1500 return; 1501 } 1502 rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 1503 } 1504 1505 static struct port_indirect_action * 1506 action_get_by_id(portid_t port_id, uint32_t id) 1507 { 1508 struct rte_port *port; 1509 struct port_indirect_action **ppia; 1510 struct port_indirect_action *pia = NULL; 1511 1512 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1513 port_id == (portid_t)RTE_PORT_ALL) 1514 return NULL; 1515 port = &ports[port_id]; 1516 ppia = &port->actions_list; 1517 while (*ppia) { 1518 if ((*ppia)->id == id) { 1519 pia = *ppia; 1520 break; 1521 } 1522 ppia = &(*ppia)->next; 1523 } 1524 if (!pia) 1525 fprintf(stderr, 1526 "Failed to find indirect action #%u on port %u\n", 1527 id, port_id); 1528 return pia; 1529 } 1530 1531 static int 1532 action_alloc(portid_t port_id, uint32_t id, 1533 struct port_indirect_action **action) 1534 { 1535 struct rte_port *port; 1536 struct port_indirect_action **ppia; 1537 struct port_indirect_action *pia = NULL; 1538 1539 *action = NULL; 1540 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1541 port_id == (portid_t)RTE_PORT_ALL) 1542 return -EINVAL; 1543 port = &ports[port_id]; 1544 if (id == UINT32_MAX) { 1545 /* taking first available ID */ 1546 if (port->actions_list) { 1547 if (port->actions_list->id == UINT32_MAX - 1) { 1548 fprintf(stderr, 1549 "Highest indirect action ID is already assigned, delete it first\n"); 1550 return -ENOMEM; 1551 } 1552 id = port->actions_list->id + 1; 1553 } else { 1554 id = 0; 1555 } 1556 } 1557 pia = calloc(1, sizeof(*pia)); 1558 if (!pia) { 1559 fprintf(stderr, 1560 "Allocation of port %u indirect action failed\n", 1561 port_id); 1562 return -ENOMEM; 1563 } 1564 ppia = &port->actions_list; 1565 while (*ppia && (*ppia)->id > id) 1566 ppia = &(*ppia)->next; 1567 if (*ppia && (*ppia)->id == id) { 1568 fprintf(stderr, 1569 "Indirect action #%u is already assigned, delete it first\n", 1570 id); 1571 free(pia); 1572 return -EINVAL; 1573 } 1574 pia->next = *ppia; 1575 pia->id = id; 1576 *ppia = pia; 1577 *action = pia; 1578 return 0; 1579 } 1580 1581 static int 1582 template_alloc(uint32_t id, struct port_template **template, 1583 struct port_template **list) 1584 { 1585 struct port_template *lst = *list; 1586 struct port_template **ppt; 1587 struct port_template *pt = NULL; 1588 1589 *template = NULL; 1590 if (id == UINT32_MAX) { 1591 /* taking first available ID */ 1592 if (lst) { 1593 if (lst->id == UINT32_MAX - 1) { 1594 printf("Highest template ID is already" 1595 " assigned, delete it first\n"); 1596 return -ENOMEM; 1597 } 1598 id = lst->id + 1; 1599 } else { 1600 id = 0; 1601 } 1602 } 1603 pt = calloc(1, sizeof(*pt)); 1604 if (!pt) { 1605 printf("Allocation of port template failed\n"); 1606 return -ENOMEM; 1607 } 1608 ppt = list; 1609 while (*ppt && (*ppt)->id > id) 1610 ppt = &(*ppt)->next; 1611 if (*ppt && (*ppt)->id == id) { 1612 printf("Template #%u is already assigned," 1613 " delete it first\n", id); 1614 free(pt); 1615 return -EINVAL; 1616 } 1617 pt->next = *ppt; 1618 pt->id = id; 1619 *ppt = pt; 1620 *template = pt; 1621 return 0; 1622 } 1623 1624 static int 1625 table_alloc(uint32_t id, struct port_table **table, 1626 struct port_table **list) 1627 { 1628 struct port_table *lst = *list; 1629 struct port_table **ppt; 1630 struct port_table *pt = NULL; 1631 1632 *table = NULL; 1633 if (id == UINT32_MAX) { 1634 /* taking first available ID */ 1635 if (lst) { 1636 if (lst->id == UINT32_MAX - 1) { 1637 printf("Highest table ID is already" 1638 " assigned, delete it first\n"); 1639 return -ENOMEM; 1640 } 1641 id = lst->id + 1; 1642 } else { 1643 id = 0; 1644 } 1645 } 1646 pt = calloc(1, sizeof(*pt)); 1647 if (!pt) { 1648 printf("Allocation of table failed\n"); 1649 return -ENOMEM; 1650 } 1651 ppt = list; 1652 while (*ppt && (*ppt)->id > id) 1653 ppt = &(*ppt)->next; 1654 if (*ppt && (*ppt)->id == id) { 1655 printf("Table #%u is already assigned," 1656 " delete it first\n", id); 1657 free(pt); 1658 return -EINVAL; 1659 } 1660 pt->next = *ppt; 1661 pt->id = id; 1662 *ppt = pt; 1663 *table = pt; 1664 return 0; 1665 } 1666 1667 /** Get info about flow management resources. */ 1668 int 1669 port_flow_get_info(portid_t port_id) 1670 { 1671 struct rte_flow_port_info port_info; 1672 struct rte_flow_queue_info queue_info; 1673 struct rte_flow_error error; 1674 1675 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1676 port_id == (portid_t)RTE_PORT_ALL) 1677 return -EINVAL; 1678 /* Poisoning to make sure PMDs update it in case of error. */ 1679 memset(&error, 0x99, sizeof(error)); 1680 memset(&port_info, 0, sizeof(port_info)); 1681 memset(&queue_info, 0, sizeof(queue_info)); 1682 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1683 return port_flow_complain(&error); 1684 printf("Flow engine resources on port %u:\n" 1685 "Number of queues: %d\n" 1686 "Size of queues: %d\n" 1687 "Number of counters: %d\n" 1688 "Number of aging objects: %d\n" 1689 "Number of meter actions: %d\n", 1690 port_id, port_info.max_nb_queues, 1691 queue_info.max_size, 1692 port_info.max_nb_counters, 1693 port_info.max_nb_aging_objects, 1694 port_info.max_nb_meters); 1695 return 0; 1696 } 1697 1698 /** Configure flow management resources. */ 1699 int 1700 port_flow_configure(portid_t port_id, 1701 const struct rte_flow_port_attr *port_attr, 1702 uint16_t nb_queue, 1703 const struct rte_flow_queue_attr *queue_attr) 1704 { 1705 struct rte_port *port; 1706 struct rte_flow_error error; 1707 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1708 int std_queue; 1709 1710 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1711 port_id == (portid_t)RTE_PORT_ALL) 1712 return -EINVAL; 1713 port = &ports[port_id]; 1714 port->queue_nb = nb_queue; 1715 port->queue_sz = queue_attr->size; 1716 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1717 attr_list[std_queue] = queue_attr; 1718 /* Poisoning to make sure PMDs update it in case of error. */ 1719 memset(&error, 0x66, sizeof(error)); 1720 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1721 return port_flow_complain(&error); 1722 printf("Configure flows on port %u: " 1723 "number of queues %d with %d elements\n", 1724 port_id, nb_queue, queue_attr->size); 1725 return 0; 1726 } 1727 1728 /** Create indirect action */ 1729 int 1730 port_action_handle_create(portid_t port_id, uint32_t id, 1731 const struct rte_flow_indir_action_conf *conf, 1732 const struct rte_flow_action *action) 1733 { 1734 struct port_indirect_action *pia; 1735 int ret; 1736 struct rte_flow_error error; 1737 1738 ret = action_alloc(port_id, id, &pia); 1739 if (ret) 1740 return ret; 1741 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1742 struct rte_flow_action_age *age = 1743 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1744 1745 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1746 age->context = &pia->age_type; 1747 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1748 struct rte_flow_action_conntrack *ct = 1749 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1750 1751 memcpy(ct, &conntrack_context, sizeof(*ct)); 1752 } 1753 /* Poisoning to make sure PMDs update it in case of error. */ 1754 memset(&error, 0x22, sizeof(error)); 1755 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1756 &error); 1757 if (!pia->handle) { 1758 uint32_t destroy_id = pia->id; 1759 port_action_handle_destroy(port_id, 1, &destroy_id); 1760 return port_flow_complain(&error); 1761 } 1762 pia->type = action->type; 1763 printf("Indirect action #%u created\n", pia->id); 1764 return 0; 1765 } 1766 1767 /** Destroy indirect action */ 1768 int 1769 port_action_handle_destroy(portid_t port_id, 1770 uint32_t n, 1771 const uint32_t *actions) 1772 { 1773 struct rte_port *port; 1774 struct port_indirect_action **tmp; 1775 uint32_t c = 0; 1776 int ret = 0; 1777 1778 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1779 port_id == (portid_t)RTE_PORT_ALL) 1780 return -EINVAL; 1781 port = &ports[port_id]; 1782 tmp = &port->actions_list; 1783 while (*tmp) { 1784 uint32_t i; 1785 1786 for (i = 0; i != n; ++i) { 1787 struct rte_flow_error error; 1788 struct port_indirect_action *pia = *tmp; 1789 1790 if (actions[i] != pia->id) 1791 continue; 1792 /* 1793 * Poisoning to make sure PMDs update it in case 1794 * of error. 1795 */ 1796 memset(&error, 0x33, sizeof(error)); 1797 1798 if (pia->handle && rte_flow_action_handle_destroy( 1799 port_id, pia->handle, &error)) { 1800 ret = port_flow_complain(&error); 1801 continue; 1802 } 1803 *tmp = pia->next; 1804 printf("Indirect action #%u destroyed\n", pia->id); 1805 free(pia); 1806 break; 1807 } 1808 if (i == n) 1809 tmp = &(*tmp)->next; 1810 ++c; 1811 } 1812 return ret; 1813 } 1814 1815 int 1816 port_action_handle_flush(portid_t port_id) 1817 { 1818 struct rte_port *port; 1819 struct port_indirect_action **tmp; 1820 int ret = 0; 1821 1822 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1823 port_id == (portid_t)RTE_PORT_ALL) 1824 return -EINVAL; 1825 port = &ports[port_id]; 1826 tmp = &port->actions_list; 1827 while (*tmp != NULL) { 1828 struct rte_flow_error error; 1829 struct port_indirect_action *pia = *tmp; 1830 1831 /* Poisoning to make sure PMDs update it in case of error. */ 1832 memset(&error, 0x44, sizeof(error)); 1833 if (pia->handle != NULL && 1834 rte_flow_action_handle_destroy 1835 (port_id, pia->handle, &error) != 0) { 1836 printf("Indirect action #%u not destroyed\n", pia->id); 1837 ret = port_flow_complain(&error); 1838 tmp = &pia->next; 1839 } else { 1840 *tmp = pia->next; 1841 free(pia); 1842 } 1843 } 1844 return ret; 1845 } 1846 1847 /** Get indirect action by port + id */ 1848 struct rte_flow_action_handle * 1849 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1850 { 1851 1852 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1853 1854 return (pia) ? pia->handle : NULL; 1855 } 1856 1857 /** Update indirect action */ 1858 int 1859 port_action_handle_update(portid_t port_id, uint32_t id, 1860 const struct rte_flow_action *action) 1861 { 1862 struct rte_flow_error error; 1863 struct rte_flow_action_handle *action_handle; 1864 struct port_indirect_action *pia; 1865 const void *update; 1866 1867 action_handle = port_action_handle_get_by_id(port_id, id); 1868 if (!action_handle) 1869 return -EINVAL; 1870 pia = action_get_by_id(port_id, id); 1871 if (!pia) 1872 return -EINVAL; 1873 switch (pia->type) { 1874 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1875 update = action->conf; 1876 break; 1877 default: 1878 update = action; 1879 break; 1880 } 1881 if (rte_flow_action_handle_update(port_id, action_handle, update, 1882 &error)) { 1883 return port_flow_complain(&error); 1884 } 1885 printf("Indirect action #%u updated\n", id); 1886 return 0; 1887 } 1888 1889 static void 1890 port_action_handle_query_dump(uint32_t type, union port_action_query *query) 1891 { 1892 switch (type) { 1893 case RTE_FLOW_ACTION_TYPE_AGE: 1894 printf("Indirect AGE action:\n" 1895 " aged: %u\n" 1896 " sec_since_last_hit_valid: %u\n" 1897 " sec_since_last_hit: %" PRIu32 "\n", 1898 query->age.aged, 1899 query->age.sec_since_last_hit_valid, 1900 query->age.sec_since_last_hit); 1901 break; 1902 case RTE_FLOW_ACTION_TYPE_COUNT: 1903 printf("Indirect COUNT action:\n" 1904 " hits_set: %u\n" 1905 " bytes_set: %u\n" 1906 " hits: %" PRIu64 "\n" 1907 " bytes: %" PRIu64 "\n", 1908 query->count.hits_set, 1909 query->count.bytes_set, 1910 query->count.hits, 1911 query->count.bytes); 1912 break; 1913 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1914 printf("Conntrack Context:\n" 1915 " Peer: %u, Flow dir: %s, Enable: %u\n" 1916 " Live: %u, SACK: %u, CACK: %u\n" 1917 " Packet dir: %s, Liberal: %u, State: %u\n" 1918 " Factor: %u, Retrans: %u, TCP flags: %u\n" 1919 " Last Seq: %u, Last ACK: %u\n" 1920 " Last Win: %u, Last End: %u\n", 1921 query->ct.peer_port, 1922 query->ct.is_original_dir ? "Original" : "Reply", 1923 query->ct.enable, query->ct.live_connection, 1924 query->ct.selective_ack, query->ct.challenge_ack_passed, 1925 query->ct.last_direction ? "Original" : "Reply", 1926 query->ct.liberal_mode, query->ct.state, 1927 query->ct.max_ack_window, query->ct.retransmission_limit, 1928 query->ct.last_index, query->ct.last_seq, 1929 query->ct.last_ack, query->ct.last_window, 1930 query->ct.last_end); 1931 printf(" Original Dir:\n" 1932 " scale: %u, fin: %u, ack seen: %u\n" 1933 " unacked data: %u\n Sent end: %u," 1934 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1935 query->ct.original_dir.scale, 1936 query->ct.original_dir.close_initiated, 1937 query->ct.original_dir.last_ack_seen, 1938 query->ct.original_dir.data_unacked, 1939 query->ct.original_dir.sent_end, 1940 query->ct.original_dir.reply_end, 1941 query->ct.original_dir.max_win, 1942 query->ct.original_dir.max_ack); 1943 printf(" Reply Dir:\n" 1944 " scale: %u, fin: %u, ack seen: %u\n" 1945 " unacked data: %u\n Sent end: %u," 1946 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1947 query->ct.reply_dir.scale, 1948 query->ct.reply_dir.close_initiated, 1949 query->ct.reply_dir.last_ack_seen, 1950 query->ct.reply_dir.data_unacked, 1951 query->ct.reply_dir.sent_end, 1952 query->ct.reply_dir.reply_end, 1953 query->ct.reply_dir.max_win, 1954 query->ct.reply_dir.max_ack); 1955 break; 1956 default: 1957 fprintf(stderr, 1958 "Indirect action (type: %d) doesn't support query\n", 1959 type); 1960 break; 1961 } 1962 1963 } 1964 1965 int 1966 port_action_handle_query(portid_t port_id, uint32_t id) 1967 { 1968 struct rte_flow_error error; 1969 struct port_indirect_action *pia; 1970 union port_action_query query; 1971 1972 pia = action_get_by_id(port_id, id); 1973 if (!pia) 1974 return -EINVAL; 1975 switch (pia->type) { 1976 case RTE_FLOW_ACTION_TYPE_AGE: 1977 case RTE_FLOW_ACTION_TYPE_COUNT: 1978 break; 1979 default: 1980 fprintf(stderr, 1981 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1982 id, pia->type, port_id); 1983 return -ENOTSUP; 1984 } 1985 /* Poisoning to make sure PMDs update it in case of error. */ 1986 memset(&error, 0x55, sizeof(error)); 1987 memset(&query, 0, sizeof(query)); 1988 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 1989 return port_flow_complain(&error); 1990 port_action_handle_query_dump(pia->type, &query); 1991 return 0; 1992 } 1993 1994 static struct port_flow_tunnel * 1995 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1996 const struct rte_flow_item *pattern, 1997 const struct rte_flow_action *actions, 1998 const struct tunnel_ops *tunnel_ops) 1999 { 2000 int ret; 2001 struct rte_port *port; 2002 struct port_flow_tunnel *pft; 2003 struct rte_flow_error error; 2004 2005 port = &ports[port_id]; 2006 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2007 if (!pft) { 2008 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2009 tunnel_ops->id); 2010 return NULL; 2011 } 2012 if (tunnel_ops->actions) { 2013 uint32_t num_actions; 2014 const struct rte_flow_action *aptr; 2015 2016 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2017 &pft->pmd_actions, 2018 &pft->num_pmd_actions, 2019 &error); 2020 if (ret) { 2021 port_flow_complain(&error); 2022 return NULL; 2023 } 2024 for (aptr = actions, num_actions = 1; 2025 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2026 aptr++, num_actions++); 2027 pft->actions = malloc( 2028 (num_actions + pft->num_pmd_actions) * 2029 sizeof(actions[0])); 2030 if (!pft->actions) { 2031 rte_flow_tunnel_action_decap_release( 2032 port_id, pft->actions, 2033 pft->num_pmd_actions, &error); 2034 return NULL; 2035 } 2036 rte_memcpy(pft->actions, pft->pmd_actions, 2037 pft->num_pmd_actions * sizeof(actions[0])); 2038 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2039 num_actions * sizeof(actions[0])); 2040 } 2041 if (tunnel_ops->items) { 2042 uint32_t num_items; 2043 const struct rte_flow_item *iptr; 2044 2045 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2046 &pft->pmd_items, 2047 &pft->num_pmd_items, 2048 &error); 2049 if (ret) { 2050 port_flow_complain(&error); 2051 return NULL; 2052 } 2053 for (iptr = pattern, num_items = 1; 2054 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2055 iptr++, num_items++); 2056 pft->items = malloc((num_items + pft->num_pmd_items) * 2057 sizeof(pattern[0])); 2058 if (!pft->items) { 2059 rte_flow_tunnel_item_release( 2060 port_id, pft->pmd_items, 2061 pft->num_pmd_items, &error); 2062 return NULL; 2063 } 2064 rte_memcpy(pft->items, pft->pmd_items, 2065 pft->num_pmd_items * sizeof(pattern[0])); 2066 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2067 num_items * sizeof(pattern[0])); 2068 } 2069 2070 return pft; 2071 } 2072 2073 static void 2074 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2075 const struct tunnel_ops *tunnel_ops, 2076 struct port_flow_tunnel *pft) 2077 { 2078 struct rte_flow_error error; 2079 2080 if (tunnel_ops->actions) { 2081 free(pft->actions); 2082 rte_flow_tunnel_action_decap_release( 2083 port_id, pft->pmd_actions, 2084 pft->num_pmd_actions, &error); 2085 pft->actions = NULL; 2086 pft->pmd_actions = NULL; 2087 } 2088 if (tunnel_ops->items) { 2089 free(pft->items); 2090 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2091 pft->num_pmd_items, 2092 &error); 2093 pft->items = NULL; 2094 pft->pmd_items = NULL; 2095 } 2096 } 2097 2098 /** Add port meter policy */ 2099 int 2100 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2101 const struct rte_flow_action *actions) 2102 { 2103 struct rte_mtr_error error; 2104 const struct rte_flow_action *act = actions; 2105 const struct rte_flow_action *start; 2106 struct rte_mtr_meter_policy_params policy; 2107 uint32_t i = 0, act_n; 2108 int ret; 2109 2110 for (i = 0; i < RTE_COLORS; i++) { 2111 for (act_n = 0, start = act; 2112 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2113 act_n++; 2114 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2115 policy.actions[i] = start; 2116 else 2117 policy.actions[i] = NULL; 2118 act++; 2119 } 2120 ret = rte_mtr_meter_policy_add(port_id, 2121 policy_id, 2122 &policy, &error); 2123 if (ret) 2124 print_mtr_err_msg(&error); 2125 return ret; 2126 } 2127 2128 struct rte_flow_meter_profile * 2129 port_meter_profile_get_by_id(portid_t port_id, uint32_t id) 2130 { 2131 struct rte_mtr_error error; 2132 struct rte_flow_meter_profile *profile; 2133 2134 profile = rte_mtr_meter_profile_get(port_id, id, &error); 2135 if (!profile) 2136 print_mtr_err_msg(&error); 2137 return profile; 2138 } 2139 struct rte_flow_meter_policy * 2140 port_meter_policy_get_by_id(portid_t port_id, uint32_t id) 2141 { 2142 struct rte_mtr_error error; 2143 struct rte_flow_meter_policy *policy; 2144 2145 policy = rte_mtr_meter_policy_get(port_id, id, &error); 2146 if (!policy) 2147 print_mtr_err_msg(&error); 2148 return policy; 2149 } 2150 2151 /** Validate flow rule. */ 2152 int 2153 port_flow_validate(portid_t port_id, 2154 const struct rte_flow_attr *attr, 2155 const struct rte_flow_item *pattern, 2156 const struct rte_flow_action *actions, 2157 const struct tunnel_ops *tunnel_ops) 2158 { 2159 struct rte_flow_error error; 2160 struct port_flow_tunnel *pft = NULL; 2161 int ret; 2162 2163 /* Poisoning to make sure PMDs update it in case of error. */ 2164 memset(&error, 0x11, sizeof(error)); 2165 if (tunnel_ops->enabled) { 2166 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2167 actions, tunnel_ops); 2168 if (!pft) 2169 return -ENOENT; 2170 if (pft->items) 2171 pattern = pft->items; 2172 if (pft->actions) 2173 actions = pft->actions; 2174 } 2175 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2176 if (tunnel_ops->enabled) 2177 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2178 if (ret) 2179 return port_flow_complain(&error); 2180 printf("Flow rule validated\n"); 2181 return 0; 2182 } 2183 2184 /** Return age action structure if exists, otherwise NULL. */ 2185 static struct rte_flow_action_age * 2186 age_action_get(const struct rte_flow_action *actions) 2187 { 2188 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2189 switch (actions->type) { 2190 case RTE_FLOW_ACTION_TYPE_AGE: 2191 return (struct rte_flow_action_age *) 2192 (uintptr_t)actions->conf; 2193 default: 2194 break; 2195 } 2196 } 2197 return NULL; 2198 } 2199 2200 /** Create pattern template */ 2201 int 2202 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2203 const struct rte_flow_pattern_template_attr *attr, 2204 const struct rte_flow_item *pattern) 2205 { 2206 struct rte_port *port; 2207 struct port_template *pit; 2208 int ret; 2209 struct rte_flow_error error; 2210 2211 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2212 port_id == (portid_t)RTE_PORT_ALL) 2213 return -EINVAL; 2214 port = &ports[port_id]; 2215 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2216 if (ret) 2217 return ret; 2218 /* Poisoning to make sure PMDs update it in case of error. */ 2219 memset(&error, 0x22, sizeof(error)); 2220 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2221 attr, pattern, &error); 2222 if (!pit->template.pattern_template) { 2223 uint32_t destroy_id = pit->id; 2224 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2225 return port_flow_complain(&error); 2226 } 2227 printf("Pattern template #%u created\n", pit->id); 2228 return 0; 2229 } 2230 2231 /** Destroy pattern template */ 2232 int 2233 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2234 const uint32_t *template) 2235 { 2236 struct rte_port *port; 2237 struct port_template **tmp; 2238 uint32_t c = 0; 2239 int ret = 0; 2240 2241 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2242 port_id == (portid_t)RTE_PORT_ALL) 2243 return -EINVAL; 2244 port = &ports[port_id]; 2245 tmp = &port->pattern_templ_list; 2246 while (*tmp) { 2247 uint32_t i; 2248 2249 for (i = 0; i != n; ++i) { 2250 struct rte_flow_error error; 2251 struct port_template *pit = *tmp; 2252 2253 if (template[i] != pit->id) 2254 continue; 2255 /* 2256 * Poisoning to make sure PMDs update it in case 2257 * of error. 2258 */ 2259 memset(&error, 0x33, sizeof(error)); 2260 2261 if (pit->template.pattern_template && 2262 rte_flow_pattern_template_destroy(port_id, 2263 pit->template.pattern_template, 2264 &error)) { 2265 ret = port_flow_complain(&error); 2266 continue; 2267 } 2268 *tmp = pit->next; 2269 printf("Pattern template #%u destroyed\n", pit->id); 2270 free(pit); 2271 break; 2272 } 2273 if (i == n) 2274 tmp = &(*tmp)->next; 2275 ++c; 2276 } 2277 return ret; 2278 } 2279 2280 /** Create actions template */ 2281 int 2282 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2283 const struct rte_flow_actions_template_attr *attr, 2284 const struct rte_flow_action *actions, 2285 const struct rte_flow_action *masks) 2286 { 2287 struct rte_port *port; 2288 struct port_template *pat; 2289 int ret; 2290 struct rte_flow_error error; 2291 2292 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2293 port_id == (portid_t)RTE_PORT_ALL) 2294 return -EINVAL; 2295 port = &ports[port_id]; 2296 ret = template_alloc(id, &pat, &port->actions_templ_list); 2297 if (ret) 2298 return ret; 2299 /* Poisoning to make sure PMDs update it in case of error. */ 2300 memset(&error, 0x22, sizeof(error)); 2301 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2302 attr, actions, masks, &error); 2303 if (!pat->template.actions_template) { 2304 uint32_t destroy_id = pat->id; 2305 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2306 return port_flow_complain(&error); 2307 } 2308 printf("Actions template #%u created\n", pat->id); 2309 return 0; 2310 } 2311 2312 /** Destroy actions template */ 2313 int 2314 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2315 const uint32_t *template) 2316 { 2317 struct rte_port *port; 2318 struct port_template **tmp; 2319 uint32_t c = 0; 2320 int ret = 0; 2321 2322 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2323 port_id == (portid_t)RTE_PORT_ALL) 2324 return -EINVAL; 2325 port = &ports[port_id]; 2326 tmp = &port->actions_templ_list; 2327 while (*tmp) { 2328 uint32_t i; 2329 2330 for (i = 0; i != n; ++i) { 2331 struct rte_flow_error error; 2332 struct port_template *pat = *tmp; 2333 2334 if (template[i] != pat->id) 2335 continue; 2336 /* 2337 * Poisoning to make sure PMDs update it in case 2338 * of error. 2339 */ 2340 memset(&error, 0x33, sizeof(error)); 2341 2342 if (pat->template.actions_template && 2343 rte_flow_actions_template_destroy(port_id, 2344 pat->template.actions_template, &error)) { 2345 ret = port_flow_complain(&error); 2346 continue; 2347 } 2348 *tmp = pat->next; 2349 printf("Actions template #%u destroyed\n", pat->id); 2350 free(pat); 2351 break; 2352 } 2353 if (i == n) 2354 tmp = &(*tmp)->next; 2355 ++c; 2356 } 2357 return ret; 2358 } 2359 2360 /** Create table */ 2361 int 2362 port_flow_template_table_create(portid_t port_id, uint32_t id, 2363 const struct rte_flow_template_table_attr *table_attr, 2364 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2365 uint32_t nb_actions_templates, uint32_t *actions_templates) 2366 { 2367 struct rte_port *port; 2368 struct port_table *pt; 2369 struct port_template *temp = NULL; 2370 int ret; 2371 uint32_t i; 2372 struct rte_flow_error error; 2373 struct rte_flow_pattern_template 2374 *flow_pattern_templates[nb_pattern_templates]; 2375 struct rte_flow_actions_template 2376 *flow_actions_templates[nb_actions_templates]; 2377 2378 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2379 port_id == (portid_t)RTE_PORT_ALL) 2380 return -EINVAL; 2381 port = &ports[port_id]; 2382 for (i = 0; i < nb_pattern_templates; ++i) { 2383 bool found = false; 2384 temp = port->pattern_templ_list; 2385 while (temp) { 2386 if (pattern_templates[i] == temp->id) { 2387 flow_pattern_templates[i] = 2388 temp->template.pattern_template; 2389 found = true; 2390 break; 2391 } 2392 temp = temp->next; 2393 } 2394 if (!found) { 2395 printf("Pattern template #%u is invalid\n", 2396 pattern_templates[i]); 2397 return -EINVAL; 2398 } 2399 } 2400 for (i = 0; i < nb_actions_templates; ++i) { 2401 bool found = false; 2402 temp = port->actions_templ_list; 2403 while (temp) { 2404 if (actions_templates[i] == temp->id) { 2405 flow_actions_templates[i] = 2406 temp->template.actions_template; 2407 found = true; 2408 break; 2409 } 2410 temp = temp->next; 2411 } 2412 if (!found) { 2413 printf("Actions template #%u is invalid\n", 2414 actions_templates[i]); 2415 return -EINVAL; 2416 } 2417 } 2418 ret = table_alloc(id, &pt, &port->table_list); 2419 if (ret) 2420 return ret; 2421 /* Poisoning to make sure PMDs update it in case of error. */ 2422 memset(&error, 0x22, sizeof(error)); 2423 pt->table = rte_flow_template_table_create(port_id, table_attr, 2424 flow_pattern_templates, nb_pattern_templates, 2425 flow_actions_templates, nb_actions_templates, 2426 &error); 2427 2428 if (!pt->table) { 2429 uint32_t destroy_id = pt->id; 2430 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2431 return port_flow_complain(&error); 2432 } 2433 pt->nb_pattern_templates = nb_pattern_templates; 2434 pt->nb_actions_templates = nb_actions_templates; 2435 printf("Template table #%u created\n", pt->id); 2436 return 0; 2437 } 2438 2439 /** Destroy table */ 2440 int 2441 port_flow_template_table_destroy(portid_t port_id, 2442 uint32_t n, const uint32_t *table) 2443 { 2444 struct rte_port *port; 2445 struct port_table **tmp; 2446 uint32_t c = 0; 2447 int ret = 0; 2448 2449 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2450 port_id == (portid_t)RTE_PORT_ALL) 2451 return -EINVAL; 2452 port = &ports[port_id]; 2453 tmp = &port->table_list; 2454 while (*tmp) { 2455 uint32_t i; 2456 2457 for (i = 0; i != n; ++i) { 2458 struct rte_flow_error error; 2459 struct port_table *pt = *tmp; 2460 2461 if (table[i] != pt->id) 2462 continue; 2463 /* 2464 * Poisoning to make sure PMDs update it in case 2465 * of error. 2466 */ 2467 memset(&error, 0x33, sizeof(error)); 2468 2469 if (pt->table && 2470 rte_flow_template_table_destroy(port_id, 2471 pt->table, 2472 &error)) { 2473 ret = port_flow_complain(&error); 2474 continue; 2475 } 2476 *tmp = pt->next; 2477 printf("Template table #%u destroyed\n", pt->id); 2478 free(pt); 2479 break; 2480 } 2481 if (i == n) 2482 tmp = &(*tmp)->next; 2483 ++c; 2484 } 2485 return ret; 2486 } 2487 2488 /** Enqueue create flow rule operation. */ 2489 int 2490 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2491 bool postpone, uint32_t table_id, 2492 uint32_t pattern_idx, uint32_t actions_idx, 2493 const struct rte_flow_item *pattern, 2494 const struct rte_flow_action *actions) 2495 { 2496 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2497 struct rte_flow *flow; 2498 struct rte_port *port; 2499 struct port_flow *pf; 2500 struct port_table *pt; 2501 uint32_t id = 0; 2502 bool found; 2503 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2504 struct rte_flow_action_age *age = age_action_get(actions); 2505 struct queue_job *job; 2506 2507 port = &ports[port_id]; 2508 if (port->flow_list) { 2509 if (port->flow_list->id == UINT32_MAX) { 2510 printf("Highest rule ID is already assigned," 2511 " delete it first"); 2512 return -ENOMEM; 2513 } 2514 id = port->flow_list->id + 1; 2515 } 2516 2517 if (queue_id >= port->queue_nb) { 2518 printf("Queue #%u is invalid\n", queue_id); 2519 return -EINVAL; 2520 } 2521 2522 found = false; 2523 pt = port->table_list; 2524 while (pt) { 2525 if (table_id == pt->id) { 2526 found = true; 2527 break; 2528 } 2529 pt = pt->next; 2530 } 2531 if (!found) { 2532 printf("Table #%u is invalid\n", table_id); 2533 return -EINVAL; 2534 } 2535 2536 if (pattern_idx >= pt->nb_pattern_templates) { 2537 printf("Pattern template index #%u is invalid," 2538 " %u templates present in the table\n", 2539 pattern_idx, pt->nb_pattern_templates); 2540 return -EINVAL; 2541 } 2542 if (actions_idx >= pt->nb_actions_templates) { 2543 printf("Actions template index #%u is invalid," 2544 " %u templates present in the table\n", 2545 actions_idx, pt->nb_actions_templates); 2546 return -EINVAL; 2547 } 2548 2549 job = calloc(1, sizeof(*job)); 2550 if (!job) { 2551 printf("Queue flow create job allocate failed\n"); 2552 return -ENOMEM; 2553 } 2554 job->type = QUEUE_JOB_TYPE_FLOW_CREATE; 2555 2556 pf = port_flow_new(NULL, pattern, actions, &error); 2557 if (!pf) { 2558 free(job); 2559 return port_flow_complain(&error); 2560 } 2561 if (age) { 2562 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2563 age->context = &pf->age_type; 2564 } 2565 /* Poisoning to make sure PMDs update it in case of error. */ 2566 memset(&error, 0x11, sizeof(error)); 2567 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2568 pattern, pattern_idx, actions, actions_idx, job, &error); 2569 if (!flow) { 2570 uint32_t flow_id = pf->id; 2571 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2572 free(job); 2573 return port_flow_complain(&error); 2574 } 2575 2576 pf->next = port->flow_list; 2577 pf->id = id; 2578 pf->flow = flow; 2579 job->pf = pf; 2580 port->flow_list = pf; 2581 printf("Flow rule #%u creation enqueued\n", pf->id); 2582 return 0; 2583 } 2584 2585 /** Enqueue number of destroy flow rules operations. */ 2586 int 2587 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2588 bool postpone, uint32_t n, const uint32_t *rule) 2589 { 2590 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2591 struct rte_port *port; 2592 struct port_flow **tmp; 2593 uint32_t c = 0; 2594 int ret = 0; 2595 struct queue_job *job; 2596 2597 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2598 port_id == (portid_t)RTE_PORT_ALL) 2599 return -EINVAL; 2600 port = &ports[port_id]; 2601 2602 if (queue_id >= port->queue_nb) { 2603 printf("Queue #%u is invalid\n", queue_id); 2604 return -EINVAL; 2605 } 2606 2607 tmp = &port->flow_list; 2608 while (*tmp) { 2609 uint32_t i; 2610 2611 for (i = 0; i != n; ++i) { 2612 struct rte_flow_error error; 2613 struct port_flow *pf = *tmp; 2614 2615 if (rule[i] != pf->id) 2616 continue; 2617 /* 2618 * Poisoning to make sure PMD 2619 * update it in case of error. 2620 */ 2621 memset(&error, 0x33, sizeof(error)); 2622 job = calloc(1, sizeof(*job)); 2623 if (!job) { 2624 printf("Queue flow destroy job allocate failed\n"); 2625 return -ENOMEM; 2626 } 2627 job->type = QUEUE_JOB_TYPE_FLOW_DESTROY; 2628 job->pf = pf; 2629 2630 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2631 pf->flow, job, &error)) { 2632 free(job); 2633 ret = port_flow_complain(&error); 2634 continue; 2635 } 2636 printf("Flow rule #%u destruction enqueued\n", pf->id); 2637 *tmp = pf->next; 2638 break; 2639 } 2640 if (i == n) 2641 tmp = &(*tmp)->next; 2642 ++c; 2643 } 2644 return ret; 2645 } 2646 2647 /** Enqueue indirect action create operation. */ 2648 int 2649 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2650 bool postpone, uint32_t id, 2651 const struct rte_flow_indir_action_conf *conf, 2652 const struct rte_flow_action *action) 2653 { 2654 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2655 struct rte_port *port; 2656 struct port_indirect_action *pia; 2657 int ret; 2658 struct rte_flow_error error; 2659 struct queue_job *job; 2660 2661 ret = action_alloc(port_id, id, &pia); 2662 if (ret) 2663 return ret; 2664 2665 port = &ports[port_id]; 2666 if (queue_id >= port->queue_nb) { 2667 printf("Queue #%u is invalid\n", queue_id); 2668 return -EINVAL; 2669 } 2670 job = calloc(1, sizeof(*job)); 2671 if (!job) { 2672 printf("Queue action create job allocate failed\n"); 2673 return -ENOMEM; 2674 } 2675 job->type = QUEUE_JOB_TYPE_ACTION_CREATE; 2676 job->pia = pia; 2677 2678 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2679 struct rte_flow_action_age *age = 2680 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2681 2682 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2683 age->context = &pia->age_type; 2684 } 2685 /* Poisoning to make sure PMDs update it in case of error. */ 2686 memset(&error, 0x88, sizeof(error)); 2687 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2688 &attr, conf, action, job, &error); 2689 if (!pia->handle) { 2690 uint32_t destroy_id = pia->id; 2691 port_queue_action_handle_destroy(port_id, queue_id, 2692 postpone, 1, &destroy_id); 2693 free(job); 2694 return port_flow_complain(&error); 2695 } 2696 pia->type = action->type; 2697 printf("Indirect action #%u creation queued\n", pia->id); 2698 return 0; 2699 } 2700 2701 /** Enqueue indirect action destroy operation. */ 2702 int 2703 port_queue_action_handle_destroy(portid_t port_id, 2704 uint32_t queue_id, bool postpone, 2705 uint32_t n, const uint32_t *actions) 2706 { 2707 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2708 struct rte_port *port; 2709 struct port_indirect_action **tmp; 2710 uint32_t c = 0; 2711 int ret = 0; 2712 struct queue_job *job; 2713 2714 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2715 port_id == (portid_t)RTE_PORT_ALL) 2716 return -EINVAL; 2717 port = &ports[port_id]; 2718 2719 if (queue_id >= port->queue_nb) { 2720 printf("Queue #%u is invalid\n", queue_id); 2721 return -EINVAL; 2722 } 2723 2724 tmp = &port->actions_list; 2725 while (*tmp) { 2726 uint32_t i; 2727 2728 for (i = 0; i != n; ++i) { 2729 struct rte_flow_error error; 2730 struct port_indirect_action *pia = *tmp; 2731 2732 if (actions[i] != pia->id) 2733 continue; 2734 /* 2735 * Poisoning to make sure PMDs update it in case 2736 * of error. 2737 */ 2738 memset(&error, 0x99, sizeof(error)); 2739 job = calloc(1, sizeof(*job)); 2740 if (!job) { 2741 printf("Queue action destroy job allocate failed\n"); 2742 return -ENOMEM; 2743 } 2744 job->type = QUEUE_JOB_TYPE_ACTION_DESTROY; 2745 job->pia = pia; 2746 2747 if (pia->handle && 2748 rte_flow_async_action_handle_destroy(port_id, 2749 queue_id, &attr, pia->handle, job, &error)) { 2750 ret = port_flow_complain(&error); 2751 continue; 2752 } 2753 *tmp = pia->next; 2754 printf("Indirect action #%u destruction queued\n", 2755 pia->id); 2756 break; 2757 } 2758 if (i == n) 2759 tmp = &(*tmp)->next; 2760 ++c; 2761 } 2762 return ret; 2763 } 2764 2765 /** Enqueue indirect action update operation. */ 2766 int 2767 port_queue_action_handle_update(portid_t port_id, 2768 uint32_t queue_id, bool postpone, uint32_t id, 2769 const struct rte_flow_action *action) 2770 { 2771 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2772 struct rte_port *port; 2773 struct rte_flow_error error; 2774 struct rte_flow_action_handle *action_handle; 2775 struct queue_job *job; 2776 struct port_indirect_action *pia; 2777 struct rte_flow_update_meter_mark mtr_update; 2778 const void *update; 2779 2780 action_handle = port_action_handle_get_by_id(port_id, id); 2781 if (!action_handle) 2782 return -EINVAL; 2783 2784 port = &ports[port_id]; 2785 if (queue_id >= port->queue_nb) { 2786 printf("Queue #%u is invalid\n", queue_id); 2787 return -EINVAL; 2788 } 2789 2790 job = calloc(1, sizeof(*job)); 2791 if (!job) { 2792 printf("Queue action update job allocate failed\n"); 2793 return -ENOMEM; 2794 } 2795 job->type = QUEUE_JOB_TYPE_ACTION_UPDATE; 2796 2797 pia = action_get_by_id(port_id, id); 2798 if (!pia) { 2799 free(job); 2800 return -EINVAL; 2801 } 2802 2803 if (pia->type == RTE_FLOW_ACTION_TYPE_METER_MARK) { 2804 rte_memcpy(&mtr_update.meter_mark, action->conf, 2805 sizeof(struct rte_flow_action_meter_mark)); 2806 mtr_update.profile_valid = 1; 2807 mtr_update.policy_valid = 1; 2808 mtr_update.color_mode_valid = 1; 2809 mtr_update.init_color_valid = 1; 2810 mtr_update.state_valid = 1; 2811 update = &mtr_update; 2812 } else { 2813 update = action; 2814 } 2815 2816 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 2817 action_handle, update, job, &error)) { 2818 free(job); 2819 return port_flow_complain(&error); 2820 } 2821 printf("Indirect action #%u update queued\n", id); 2822 return 0; 2823 } 2824 2825 /** Enqueue indirect action query operation. */ 2826 int 2827 port_queue_action_handle_query(portid_t port_id, 2828 uint32_t queue_id, bool postpone, uint32_t id) 2829 { 2830 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2831 struct rte_port *port; 2832 struct rte_flow_error error; 2833 struct rte_flow_action_handle *action_handle; 2834 struct port_indirect_action *pia; 2835 struct queue_job *job; 2836 2837 pia = action_get_by_id(port_id, id); 2838 action_handle = pia ? pia->handle : NULL; 2839 if (!action_handle) 2840 return -EINVAL; 2841 2842 port = &ports[port_id]; 2843 if (queue_id >= port->queue_nb) { 2844 printf("Queue #%u is invalid\n", queue_id); 2845 return -EINVAL; 2846 } 2847 2848 job = calloc(1, sizeof(*job)); 2849 if (!job) { 2850 printf("Queue action update job allocate failed\n"); 2851 return -ENOMEM; 2852 } 2853 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 2854 job->pia = pia; 2855 2856 if (rte_flow_async_action_handle_query(port_id, queue_id, &attr, 2857 action_handle, &job->query, job, &error)) { 2858 free(job); 2859 return port_flow_complain(&error); 2860 } 2861 printf("Indirect action #%u update queued\n", id); 2862 return 0; 2863 } 2864 2865 /** Push all the queue operations in the queue to the NIC. */ 2866 int 2867 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 2868 { 2869 struct rte_port *port; 2870 struct rte_flow_error error; 2871 int ret = 0; 2872 2873 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2874 port_id == (portid_t)RTE_PORT_ALL) 2875 return -EINVAL; 2876 port = &ports[port_id]; 2877 2878 if (queue_id >= port->queue_nb) { 2879 printf("Queue #%u is invalid\n", queue_id); 2880 return -EINVAL; 2881 } 2882 2883 memset(&error, 0x55, sizeof(error)); 2884 ret = rte_flow_push(port_id, queue_id, &error); 2885 if (ret < 0) { 2886 printf("Failed to push operations in the queue\n"); 2887 return -EINVAL; 2888 } 2889 printf("Queue #%u operations pushed\n", queue_id); 2890 return ret; 2891 } 2892 2893 /** Pull queue operation results from the queue. */ 2894 int 2895 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 2896 { 2897 struct rte_port *port; 2898 struct rte_flow_op_result *res; 2899 struct rte_flow_error error; 2900 int ret = 0; 2901 int success = 0; 2902 int i; 2903 struct queue_job *job; 2904 2905 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2906 port_id == (portid_t)RTE_PORT_ALL) 2907 return -EINVAL; 2908 port = &ports[port_id]; 2909 2910 if (queue_id >= port->queue_nb) { 2911 printf("Queue #%u is invalid\n", queue_id); 2912 return -EINVAL; 2913 } 2914 2915 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 2916 if (!res) { 2917 printf("Failed to allocate memory for pulled results\n"); 2918 return -ENOMEM; 2919 } 2920 2921 memset(&error, 0x66, sizeof(error)); 2922 ret = rte_flow_pull(port_id, queue_id, res, 2923 port->queue_sz, &error); 2924 if (ret < 0) { 2925 printf("Failed to pull a operation results\n"); 2926 free(res); 2927 return -EINVAL; 2928 } 2929 2930 for (i = 0; i < ret; i++) { 2931 if (res[i].status == RTE_FLOW_OP_SUCCESS) 2932 success++; 2933 job = (struct queue_job *)res[i].user_data; 2934 if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY) 2935 free(job->pf); 2936 else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY) 2937 free(job->pia); 2938 else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY) 2939 port_action_handle_query_dump(job->pia->type, &job->query); 2940 free(job); 2941 } 2942 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 2943 queue_id, ret, ret - success, success); 2944 free(res); 2945 return ret; 2946 } 2947 2948 /** Create flow rule. */ 2949 int 2950 port_flow_create(portid_t port_id, 2951 const struct rte_flow_attr *attr, 2952 const struct rte_flow_item *pattern, 2953 const struct rte_flow_action *actions, 2954 const struct tunnel_ops *tunnel_ops) 2955 { 2956 struct rte_flow *flow; 2957 struct rte_port *port; 2958 struct port_flow *pf; 2959 uint32_t id = 0; 2960 struct rte_flow_error error; 2961 struct port_flow_tunnel *pft = NULL; 2962 struct rte_flow_action_age *age = age_action_get(actions); 2963 2964 port = &ports[port_id]; 2965 if (port->flow_list) { 2966 if (port->flow_list->id == UINT32_MAX) { 2967 fprintf(stderr, 2968 "Highest rule ID is already assigned, delete it first"); 2969 return -ENOMEM; 2970 } 2971 id = port->flow_list->id + 1; 2972 } 2973 if (tunnel_ops->enabled) { 2974 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2975 actions, tunnel_ops); 2976 if (!pft) 2977 return -ENOENT; 2978 if (pft->items) 2979 pattern = pft->items; 2980 if (pft->actions) 2981 actions = pft->actions; 2982 } 2983 pf = port_flow_new(attr, pattern, actions, &error); 2984 if (!pf) 2985 return port_flow_complain(&error); 2986 if (age) { 2987 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2988 age->context = &pf->age_type; 2989 } 2990 /* Poisoning to make sure PMDs update it in case of error. */ 2991 memset(&error, 0x22, sizeof(error)); 2992 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2993 if (!flow) { 2994 if (tunnel_ops->enabled) 2995 port_flow_tunnel_offload_cmd_release(port_id, 2996 tunnel_ops, pft); 2997 free(pf); 2998 return port_flow_complain(&error); 2999 } 3000 pf->next = port->flow_list; 3001 pf->id = id; 3002 pf->flow = flow; 3003 port->flow_list = pf; 3004 if (tunnel_ops->enabled) 3005 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 3006 printf("Flow rule #%u created\n", pf->id); 3007 return 0; 3008 } 3009 3010 /** Destroy a number of flow rules. */ 3011 int 3012 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 3013 { 3014 struct rte_port *port; 3015 struct port_flow **tmp; 3016 uint32_t c = 0; 3017 int ret = 0; 3018 3019 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3020 port_id == (portid_t)RTE_PORT_ALL) 3021 return -EINVAL; 3022 port = &ports[port_id]; 3023 tmp = &port->flow_list; 3024 while (*tmp) { 3025 uint32_t i; 3026 3027 for (i = 0; i != n; ++i) { 3028 struct rte_flow_error error; 3029 struct port_flow *pf = *tmp; 3030 3031 if (rule[i] != pf->id) 3032 continue; 3033 /* 3034 * Poisoning to make sure PMDs update it in case 3035 * of error. 3036 */ 3037 memset(&error, 0x33, sizeof(error)); 3038 if (rte_flow_destroy(port_id, pf->flow, &error)) { 3039 ret = port_flow_complain(&error); 3040 continue; 3041 } 3042 printf("Flow rule #%u destroyed\n", pf->id); 3043 *tmp = pf->next; 3044 free(pf); 3045 break; 3046 } 3047 if (i == n) 3048 tmp = &(*tmp)->next; 3049 ++c; 3050 } 3051 return ret; 3052 } 3053 3054 /** Remove all flow rules. */ 3055 int 3056 port_flow_flush(portid_t port_id) 3057 { 3058 struct rte_flow_error error; 3059 struct rte_port *port; 3060 int ret = 0; 3061 3062 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3063 port_id == (portid_t)RTE_PORT_ALL) 3064 return -EINVAL; 3065 3066 port = &ports[port_id]; 3067 3068 if (port->flow_list == NULL) 3069 return ret; 3070 3071 /* Poisoning to make sure PMDs update it in case of error. */ 3072 memset(&error, 0x44, sizeof(error)); 3073 if (rte_flow_flush(port_id, &error)) { 3074 port_flow_complain(&error); 3075 } 3076 3077 while (port->flow_list) { 3078 struct port_flow *pf = port->flow_list->next; 3079 3080 free(port->flow_list); 3081 port->flow_list = pf; 3082 } 3083 return ret; 3084 } 3085 3086 /** Dump flow rules. */ 3087 int 3088 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 3089 const char *file_name) 3090 { 3091 int ret = 0; 3092 FILE *file = stdout; 3093 struct rte_flow_error error; 3094 struct rte_port *port; 3095 struct port_flow *pflow; 3096 struct rte_flow *tmpFlow = NULL; 3097 bool found = false; 3098 3099 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3100 port_id == (portid_t)RTE_PORT_ALL) 3101 return -EINVAL; 3102 3103 if (!dump_all) { 3104 port = &ports[port_id]; 3105 pflow = port->flow_list; 3106 while (pflow) { 3107 if (rule_id != pflow->id) { 3108 pflow = pflow->next; 3109 } else { 3110 tmpFlow = pflow->flow; 3111 if (tmpFlow) 3112 found = true; 3113 break; 3114 } 3115 } 3116 if (found == false) { 3117 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 3118 return -EINVAL; 3119 } 3120 } 3121 3122 if (file_name && strlen(file_name)) { 3123 file = fopen(file_name, "w"); 3124 if (!file) { 3125 fprintf(stderr, "Failed to create file %s: %s\n", 3126 file_name, strerror(errno)); 3127 return -errno; 3128 } 3129 } 3130 3131 if (!dump_all) 3132 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3133 else 3134 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3135 if (ret) { 3136 port_flow_complain(&error); 3137 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3138 } else 3139 printf("Flow dump finished\n"); 3140 if (file_name && strlen(file_name)) 3141 fclose(file); 3142 return ret; 3143 } 3144 3145 /** Query a flow rule. */ 3146 int 3147 port_flow_query(portid_t port_id, uint32_t rule, 3148 const struct rte_flow_action *action) 3149 { 3150 struct rte_flow_error error; 3151 struct rte_port *port; 3152 struct port_flow *pf; 3153 const char *name; 3154 union { 3155 struct rte_flow_query_count count; 3156 struct rte_flow_action_rss rss_conf; 3157 struct rte_flow_query_age age; 3158 } query; 3159 int ret; 3160 3161 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3162 port_id == (portid_t)RTE_PORT_ALL) 3163 return -EINVAL; 3164 port = &ports[port_id]; 3165 for (pf = port->flow_list; pf; pf = pf->next) 3166 if (pf->id == rule) 3167 break; 3168 if (!pf) { 3169 fprintf(stderr, "Flow rule #%u not found\n", rule); 3170 return -ENOENT; 3171 } 3172 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3173 &name, sizeof(name), 3174 (void *)(uintptr_t)action->type, &error); 3175 if (ret < 0) 3176 return port_flow_complain(&error); 3177 switch (action->type) { 3178 case RTE_FLOW_ACTION_TYPE_COUNT: 3179 case RTE_FLOW_ACTION_TYPE_RSS: 3180 case RTE_FLOW_ACTION_TYPE_AGE: 3181 break; 3182 default: 3183 fprintf(stderr, "Cannot query action type %d (%s)\n", 3184 action->type, name); 3185 return -ENOTSUP; 3186 } 3187 /* Poisoning to make sure PMDs update it in case of error. */ 3188 memset(&error, 0x55, sizeof(error)); 3189 memset(&query, 0, sizeof(query)); 3190 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3191 return port_flow_complain(&error); 3192 switch (action->type) { 3193 case RTE_FLOW_ACTION_TYPE_COUNT: 3194 printf("%s:\n" 3195 " hits_set: %u\n" 3196 " bytes_set: %u\n" 3197 " hits: %" PRIu64 "\n" 3198 " bytes: %" PRIu64 "\n", 3199 name, 3200 query.count.hits_set, 3201 query.count.bytes_set, 3202 query.count.hits, 3203 query.count.bytes); 3204 break; 3205 case RTE_FLOW_ACTION_TYPE_RSS: 3206 rss_config_display(&query.rss_conf); 3207 break; 3208 case RTE_FLOW_ACTION_TYPE_AGE: 3209 printf("%s:\n" 3210 " aged: %u\n" 3211 " sec_since_last_hit_valid: %u\n" 3212 " sec_since_last_hit: %" PRIu32 "\n", 3213 name, 3214 query.age.aged, 3215 query.age.sec_since_last_hit_valid, 3216 query.age.sec_since_last_hit); 3217 break; 3218 default: 3219 fprintf(stderr, 3220 "Cannot display result for action type %d (%s)\n", 3221 action->type, name); 3222 break; 3223 } 3224 return 0; 3225 } 3226 3227 /** List simply and destroy all aged flows. */ 3228 void 3229 port_flow_aged(portid_t port_id, uint8_t destroy) 3230 { 3231 void **contexts; 3232 int nb_context, total = 0, idx; 3233 struct rte_flow_error error; 3234 enum age_action_context_type *type; 3235 union { 3236 struct port_flow *pf; 3237 struct port_indirect_action *pia; 3238 } ctx; 3239 3240 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3241 port_id == (portid_t)RTE_PORT_ALL) 3242 return; 3243 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3244 printf("Port %u total aged flows: %d\n", port_id, total); 3245 if (total < 0) { 3246 port_flow_complain(&error); 3247 return; 3248 } 3249 if (total == 0) 3250 return; 3251 contexts = malloc(sizeof(void *) * total); 3252 if (contexts == NULL) { 3253 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3254 return; 3255 } 3256 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3257 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3258 if (nb_context != total) { 3259 fprintf(stderr, 3260 "Port:%d get aged flows count(%d) != total(%d)\n", 3261 port_id, nb_context, total); 3262 free(contexts); 3263 return; 3264 } 3265 total = 0; 3266 for (idx = 0; idx < nb_context; idx++) { 3267 if (!contexts[idx]) { 3268 fprintf(stderr, "Error: get Null context in port %u\n", 3269 port_id); 3270 continue; 3271 } 3272 type = (enum age_action_context_type *)contexts[idx]; 3273 switch (*type) { 3274 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3275 ctx.pf = container_of(type, struct port_flow, age_type); 3276 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 3277 "\t%c%c%c\t\n", 3278 "Flow", 3279 ctx.pf->id, 3280 ctx.pf->rule.attr->group, 3281 ctx.pf->rule.attr->priority, 3282 ctx.pf->rule.attr->ingress ? 'i' : '-', 3283 ctx.pf->rule.attr->egress ? 'e' : '-', 3284 ctx.pf->rule.attr->transfer ? 't' : '-'); 3285 if (destroy && !port_flow_destroy(port_id, 1, 3286 &ctx.pf->id)) 3287 total++; 3288 break; 3289 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3290 ctx.pia = container_of(type, 3291 struct port_indirect_action, age_type); 3292 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3293 ctx.pia->id); 3294 break; 3295 default: 3296 fprintf(stderr, "Error: invalid context type %u\n", 3297 port_id); 3298 break; 3299 } 3300 } 3301 printf("\n%d flows destroyed\n", total); 3302 free(contexts); 3303 } 3304 3305 /** List flow rules. */ 3306 void 3307 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3308 { 3309 struct rte_port *port; 3310 struct port_flow *pf; 3311 struct port_flow *list = NULL; 3312 uint32_t i; 3313 3314 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3315 port_id == (portid_t)RTE_PORT_ALL) 3316 return; 3317 port = &ports[port_id]; 3318 if (!port->flow_list) 3319 return; 3320 /* Sort flows by group, priority and ID. */ 3321 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3322 struct port_flow **tmp; 3323 const struct rte_flow_attr *curr = pf->rule.attr; 3324 3325 if (n) { 3326 /* Filter out unwanted groups. */ 3327 for (i = 0; i != n; ++i) 3328 if (curr->group == group[i]) 3329 break; 3330 if (i == n) 3331 continue; 3332 } 3333 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3334 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3335 3336 if (curr->group > comp->group || 3337 (curr->group == comp->group && 3338 curr->priority > comp->priority) || 3339 (curr->group == comp->group && 3340 curr->priority == comp->priority && 3341 pf->id > (*tmp)->id)) 3342 continue; 3343 break; 3344 } 3345 pf->tmp = *tmp; 3346 *tmp = pf; 3347 } 3348 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3349 for (pf = list; pf != NULL; pf = pf->tmp) { 3350 const struct rte_flow_item *item = pf->rule.pattern; 3351 const struct rte_flow_action *action = pf->rule.actions; 3352 const char *name; 3353 3354 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3355 pf->id, 3356 pf->rule.attr->group, 3357 pf->rule.attr->priority, 3358 pf->rule.attr->ingress ? 'i' : '-', 3359 pf->rule.attr->egress ? 'e' : '-', 3360 pf->rule.attr->transfer ? 't' : '-'); 3361 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3362 if ((uint32_t)item->type > INT_MAX) 3363 name = "PMD_INTERNAL"; 3364 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3365 &name, sizeof(name), 3366 (void *)(uintptr_t)item->type, 3367 NULL) <= 0) 3368 name = "[UNKNOWN]"; 3369 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3370 printf("%s ", name); 3371 ++item; 3372 } 3373 printf("=>"); 3374 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3375 if ((uint32_t)action->type > INT_MAX) 3376 name = "PMD_INTERNAL"; 3377 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3378 &name, sizeof(name), 3379 (void *)(uintptr_t)action->type, 3380 NULL) <= 0) 3381 name = "[UNKNOWN]"; 3382 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3383 printf(" %s", name); 3384 ++action; 3385 } 3386 printf("\n"); 3387 } 3388 } 3389 3390 /** Restrict ingress traffic to the defined flow rules. */ 3391 int 3392 port_flow_isolate(portid_t port_id, int set) 3393 { 3394 struct rte_flow_error error; 3395 3396 /* Poisoning to make sure PMDs update it in case of error. */ 3397 memset(&error, 0x66, sizeof(error)); 3398 if (rte_flow_isolate(port_id, set, &error)) 3399 return port_flow_complain(&error); 3400 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3401 port_id, 3402 set ? "now restricted" : "not restricted anymore"); 3403 return 0; 3404 } 3405 3406 /* 3407 * RX/TX ring descriptors display functions. 3408 */ 3409 int 3410 rx_queue_id_is_invalid(queueid_t rxq_id) 3411 { 3412 if (rxq_id < nb_rxq) 3413 return 0; 3414 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3415 rxq_id, nb_rxq); 3416 return 1; 3417 } 3418 3419 int 3420 tx_queue_id_is_invalid(queueid_t txq_id) 3421 { 3422 if (txq_id < nb_txq) 3423 return 0; 3424 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 3425 txq_id, nb_txq); 3426 return 1; 3427 } 3428 3429 static int 3430 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 3431 { 3432 struct rte_port *port = &ports[port_id]; 3433 struct rte_eth_rxq_info rx_qinfo; 3434 int ret; 3435 3436 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 3437 if (ret == 0) { 3438 *ring_size = rx_qinfo.nb_desc; 3439 return ret; 3440 } 3441 3442 if (ret != -ENOTSUP) 3443 return ret; 3444 /* 3445 * If the rte_eth_rx_queue_info_get is not support for this PMD, 3446 * ring_size stored in testpmd will be used for validity verification. 3447 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 3448 * being 0, it will use a default value provided by PMDs to setup this 3449 * rxq. If the default value is 0, it will use the 3450 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 3451 */ 3452 if (port->nb_rx_desc[rxq_id]) 3453 *ring_size = port->nb_rx_desc[rxq_id]; 3454 else if (port->dev_info.default_rxportconf.ring_size) 3455 *ring_size = port->dev_info.default_rxportconf.ring_size; 3456 else 3457 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 3458 return 0; 3459 } 3460 3461 static int 3462 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 3463 { 3464 struct rte_port *port = &ports[port_id]; 3465 struct rte_eth_txq_info tx_qinfo; 3466 int ret; 3467 3468 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 3469 if (ret == 0) { 3470 *ring_size = tx_qinfo.nb_desc; 3471 return ret; 3472 } 3473 3474 if (ret != -ENOTSUP) 3475 return ret; 3476 /* 3477 * If the rte_eth_tx_queue_info_get is not support for this PMD, 3478 * ring_size stored in testpmd will be used for validity verification. 3479 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 3480 * being 0, it will use a default value provided by PMDs to setup this 3481 * txq. If the default value is 0, it will use the 3482 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 3483 */ 3484 if (port->nb_tx_desc[txq_id]) 3485 *ring_size = port->nb_tx_desc[txq_id]; 3486 else if (port->dev_info.default_txportconf.ring_size) 3487 *ring_size = port->dev_info.default_txportconf.ring_size; 3488 else 3489 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 3490 return 0; 3491 } 3492 3493 static int 3494 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 3495 { 3496 uint16_t ring_size; 3497 int ret; 3498 3499 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 3500 if (ret) 3501 return 1; 3502 3503 if (rxdesc_id < ring_size) 3504 return 0; 3505 3506 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 3507 rxdesc_id, ring_size); 3508 return 1; 3509 } 3510 3511 static int 3512 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 3513 { 3514 uint16_t ring_size; 3515 int ret; 3516 3517 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 3518 if (ret) 3519 return 1; 3520 3521 if (txdesc_id < ring_size) 3522 return 0; 3523 3524 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 3525 txdesc_id, ring_size); 3526 return 1; 3527 } 3528 3529 static const struct rte_memzone * 3530 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 3531 { 3532 char mz_name[RTE_MEMZONE_NAMESIZE]; 3533 const struct rte_memzone *mz; 3534 3535 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 3536 port_id, q_id, ring_name); 3537 mz = rte_memzone_lookup(mz_name); 3538 if (mz == NULL) 3539 fprintf(stderr, 3540 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 3541 ring_name, port_id, q_id, mz_name); 3542 return mz; 3543 } 3544 3545 union igb_ring_dword { 3546 uint64_t dword; 3547 struct { 3548 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3549 uint32_t lo; 3550 uint32_t hi; 3551 #else 3552 uint32_t hi; 3553 uint32_t lo; 3554 #endif 3555 } words; 3556 }; 3557 3558 struct igb_ring_desc_32_bytes { 3559 union igb_ring_dword lo_dword; 3560 union igb_ring_dword hi_dword; 3561 union igb_ring_dword resv1; 3562 union igb_ring_dword resv2; 3563 }; 3564 3565 struct igb_ring_desc_16_bytes { 3566 union igb_ring_dword lo_dword; 3567 union igb_ring_dword hi_dword; 3568 }; 3569 3570 static void 3571 ring_rxd_display_dword(union igb_ring_dword dword) 3572 { 3573 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 3574 (unsigned)dword.words.hi); 3575 } 3576 3577 static void 3578 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 3579 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3580 portid_t port_id, 3581 #else 3582 __rte_unused portid_t port_id, 3583 #endif 3584 uint16_t desc_id) 3585 { 3586 struct igb_ring_desc_16_bytes *ring = 3587 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3588 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3589 int ret; 3590 struct rte_eth_dev_info dev_info; 3591 3592 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3593 if (ret != 0) 3594 return; 3595 3596 if (strstr(dev_info.driver_name, "i40e") != NULL) { 3597 /* 32 bytes RX descriptor, i40e only */ 3598 struct igb_ring_desc_32_bytes *ring = 3599 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 3600 ring[desc_id].lo_dword.dword = 3601 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3602 ring_rxd_display_dword(ring[desc_id].lo_dword); 3603 ring[desc_id].hi_dword.dword = 3604 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3605 ring_rxd_display_dword(ring[desc_id].hi_dword); 3606 ring[desc_id].resv1.dword = 3607 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 3608 ring_rxd_display_dword(ring[desc_id].resv1); 3609 ring[desc_id].resv2.dword = 3610 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 3611 ring_rxd_display_dword(ring[desc_id].resv2); 3612 3613 return; 3614 } 3615 #endif 3616 /* 16 bytes RX descriptor */ 3617 ring[desc_id].lo_dword.dword = 3618 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3619 ring_rxd_display_dword(ring[desc_id].lo_dword); 3620 ring[desc_id].hi_dword.dword = 3621 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3622 ring_rxd_display_dword(ring[desc_id].hi_dword); 3623 } 3624 3625 static void 3626 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 3627 { 3628 struct igb_ring_desc_16_bytes *ring; 3629 struct igb_ring_desc_16_bytes txd; 3630 3631 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3632 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3633 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3634 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 3635 (unsigned)txd.lo_dword.words.lo, 3636 (unsigned)txd.lo_dword.words.hi, 3637 (unsigned)txd.hi_dword.words.lo, 3638 (unsigned)txd.hi_dword.words.hi); 3639 } 3640 3641 void 3642 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 3643 { 3644 const struct rte_memzone *rx_mz; 3645 3646 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 3647 return; 3648 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 3649 if (rx_mz == NULL) 3650 return; 3651 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 3652 } 3653 3654 void 3655 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 3656 { 3657 const struct rte_memzone *tx_mz; 3658 3659 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 3660 return; 3661 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 3662 if (tx_mz == NULL) 3663 return; 3664 ring_tx_descriptor_display(tx_mz, txd_id); 3665 } 3666 3667 void 3668 fwd_lcores_config_display(void) 3669 { 3670 lcoreid_t lc_id; 3671 3672 printf("List of forwarding lcores:"); 3673 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 3674 printf(" %2u", fwd_lcores_cpuids[lc_id]); 3675 printf("\n"); 3676 } 3677 void 3678 rxtx_config_display(void) 3679 { 3680 portid_t pid; 3681 queueid_t qid; 3682 3683 printf(" %s packet forwarding%s packets/burst=%d\n", 3684 cur_fwd_eng->fwd_mode_name, 3685 retry_enabled == 0 ? "" : " with retry", 3686 nb_pkt_per_burst); 3687 3688 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 3689 printf(" packet len=%u - nb packet segments=%d\n", 3690 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 3691 3692 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 3693 nb_fwd_lcores, nb_fwd_ports); 3694 3695 RTE_ETH_FOREACH_DEV(pid) { 3696 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 3697 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 3698 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 3699 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 3700 struct rte_eth_rxq_info rx_qinfo; 3701 struct rte_eth_txq_info tx_qinfo; 3702 uint16_t rx_free_thresh_tmp; 3703 uint16_t tx_free_thresh_tmp; 3704 uint16_t tx_rs_thresh_tmp; 3705 uint16_t nb_rx_desc_tmp; 3706 uint16_t nb_tx_desc_tmp; 3707 uint64_t offloads_tmp; 3708 uint8_t pthresh_tmp; 3709 uint8_t hthresh_tmp; 3710 uint8_t wthresh_tmp; 3711 int32_t rc; 3712 3713 /* per port config */ 3714 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 3715 (unsigned int)pid, nb_rxq, nb_txq); 3716 3717 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 3718 ports[pid].dev_conf.rxmode.offloads, 3719 ports[pid].dev_conf.txmode.offloads); 3720 3721 /* per rx queue config only for first queue to be less verbose */ 3722 for (qid = 0; qid < 1; qid++) { 3723 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 3724 if (rc) { 3725 nb_rx_desc_tmp = nb_rx_desc[qid]; 3726 rx_free_thresh_tmp = 3727 rx_conf[qid].rx_free_thresh; 3728 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 3729 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 3730 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 3731 offloads_tmp = rx_conf[qid].offloads; 3732 } else { 3733 nb_rx_desc_tmp = rx_qinfo.nb_desc; 3734 rx_free_thresh_tmp = 3735 rx_qinfo.conf.rx_free_thresh; 3736 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 3737 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 3738 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 3739 offloads_tmp = rx_qinfo.conf.offloads; 3740 } 3741 3742 printf(" RX queue: %d\n", qid); 3743 printf(" RX desc=%d - RX free threshold=%d\n", 3744 nb_rx_desc_tmp, rx_free_thresh_tmp); 3745 printf(" RX threshold registers: pthresh=%d hthresh=%d " 3746 " wthresh=%d\n", 3747 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3748 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 3749 if (rx_conf->share_group > 0) 3750 printf(" share_group=%u share_qid=%u", 3751 rx_conf->share_group, 3752 rx_conf->share_qid); 3753 printf("\n"); 3754 } 3755 3756 /* per tx queue config only for first queue to be less verbose */ 3757 for (qid = 0; qid < 1; qid++) { 3758 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 3759 if (rc) { 3760 nb_tx_desc_tmp = nb_tx_desc[qid]; 3761 tx_free_thresh_tmp = 3762 tx_conf[qid].tx_free_thresh; 3763 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 3764 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 3765 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 3766 offloads_tmp = tx_conf[qid].offloads; 3767 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 3768 } else { 3769 nb_tx_desc_tmp = tx_qinfo.nb_desc; 3770 tx_free_thresh_tmp = 3771 tx_qinfo.conf.tx_free_thresh; 3772 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 3773 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 3774 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 3775 offloads_tmp = tx_qinfo.conf.offloads; 3776 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 3777 } 3778 3779 printf(" TX queue: %d\n", qid); 3780 printf(" TX desc=%d - TX free threshold=%d\n", 3781 nb_tx_desc_tmp, tx_free_thresh_tmp); 3782 printf(" TX threshold registers: pthresh=%d hthresh=%d " 3783 " wthresh=%d\n", 3784 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3785 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 3786 offloads_tmp, tx_rs_thresh_tmp); 3787 } 3788 } 3789 } 3790 3791 void 3792 port_rss_reta_info(portid_t port_id, 3793 struct rte_eth_rss_reta_entry64 *reta_conf, 3794 uint16_t nb_entries) 3795 { 3796 uint16_t i, idx, shift; 3797 int ret; 3798 3799 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3800 return; 3801 3802 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 3803 if (ret != 0) { 3804 fprintf(stderr, 3805 "Failed to get RSS RETA info, return code = %d\n", 3806 ret); 3807 return; 3808 } 3809 3810 for (i = 0; i < nb_entries; i++) { 3811 idx = i / RTE_ETH_RETA_GROUP_SIZE; 3812 shift = i % RTE_ETH_RETA_GROUP_SIZE; 3813 if (!(reta_conf[idx].mask & (1ULL << shift))) 3814 continue; 3815 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 3816 i, reta_conf[idx].reta[shift]); 3817 } 3818 } 3819 3820 /* 3821 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 3822 * key of the port. 3823 */ 3824 void 3825 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 3826 { 3827 struct rte_eth_rss_conf rss_conf = {0}; 3828 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 3829 uint64_t rss_hf; 3830 uint8_t i; 3831 int diag; 3832 struct rte_eth_dev_info dev_info; 3833 uint8_t hash_key_size; 3834 int ret; 3835 3836 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3837 return; 3838 3839 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3840 if (ret != 0) 3841 return; 3842 3843 if (dev_info.hash_key_size > 0 && 3844 dev_info.hash_key_size <= sizeof(rss_key)) 3845 hash_key_size = dev_info.hash_key_size; 3846 else { 3847 fprintf(stderr, 3848 "dev_info did not provide a valid hash key size\n"); 3849 return; 3850 } 3851 3852 /* Get RSS hash key if asked to display it */ 3853 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 3854 rss_conf.rss_key_len = hash_key_size; 3855 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3856 if (diag != 0) { 3857 switch (diag) { 3858 case -ENODEV: 3859 fprintf(stderr, "port index %d invalid\n", port_id); 3860 break; 3861 case -ENOTSUP: 3862 fprintf(stderr, "operation not supported by device\n"); 3863 break; 3864 default: 3865 fprintf(stderr, "operation failed - diag=%d\n", diag); 3866 break; 3867 } 3868 return; 3869 } 3870 rss_hf = rss_conf.rss_hf; 3871 if (rss_hf == 0) { 3872 printf("RSS disabled\n"); 3873 return; 3874 } 3875 printf("RSS functions:\n"); 3876 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 3877 if (!show_rss_key) 3878 return; 3879 printf("RSS key:\n"); 3880 for (i = 0; i < hash_key_size; i++) 3881 printf("%02X", rss_key[i]); 3882 printf("\n"); 3883 } 3884 3885 void 3886 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3887 uint8_t hash_key_len) 3888 { 3889 struct rte_eth_rss_conf rss_conf; 3890 int diag; 3891 3892 rss_conf.rss_key = NULL; 3893 rss_conf.rss_key_len = 0; 3894 rss_conf.rss_hf = str_to_rsstypes(rss_type); 3895 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3896 if (diag == 0) { 3897 rss_conf.rss_key = hash_key; 3898 rss_conf.rss_key_len = hash_key_len; 3899 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3900 } 3901 if (diag == 0) 3902 return; 3903 3904 switch (diag) { 3905 case -ENODEV: 3906 fprintf(stderr, "port index %d invalid\n", port_id); 3907 break; 3908 case -ENOTSUP: 3909 fprintf(stderr, "operation not supported by device\n"); 3910 break; 3911 default: 3912 fprintf(stderr, "operation failed - diag=%d\n", diag); 3913 break; 3914 } 3915 } 3916 3917 /* 3918 * Check whether a shared rxq scheduled on other lcores. 3919 */ 3920 static bool 3921 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 3922 portid_t src_port, queueid_t src_rxq, 3923 uint32_t share_group, queueid_t share_rxq) 3924 { 3925 streamid_t sm_id; 3926 streamid_t nb_fs_per_lcore; 3927 lcoreid_t nb_fc; 3928 lcoreid_t lc_id; 3929 struct fwd_stream *fs; 3930 struct rte_port *port; 3931 struct rte_eth_dev_info *dev_info; 3932 struct rte_eth_rxconf *rxq_conf; 3933 3934 nb_fc = cur_fwd_config.nb_fwd_lcores; 3935 /* Check remaining cores. */ 3936 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 3937 sm_id = fwd_lcores[lc_id]->stream_idx; 3938 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3939 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3940 sm_id++) { 3941 fs = fwd_streams[sm_id]; 3942 port = &ports[fs->rx_port]; 3943 dev_info = &port->dev_info; 3944 rxq_conf = &port->rxq[fs->rx_queue].conf; 3945 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3946 == 0 || rxq_conf->share_group == 0) 3947 /* Not shared rxq. */ 3948 continue; 3949 if (domain_id != port->dev_info.switch_info.domain_id) 3950 continue; 3951 if (rxq_conf->share_group != share_group) 3952 continue; 3953 if (rxq_conf->share_qid != share_rxq) 3954 continue; 3955 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 3956 share_group, share_rxq); 3957 printf(" lcore %hhu Port %hu queue %hu\n", 3958 src_lc, src_port, src_rxq); 3959 printf(" lcore %hhu Port %hu queue %hu\n", 3960 lc_id, fs->rx_port, fs->rx_queue); 3961 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 3962 nb_rxq); 3963 return true; 3964 } 3965 } 3966 return false; 3967 } 3968 3969 /* 3970 * Check shared rxq configuration. 3971 * 3972 * Shared group must not being scheduled on different core. 3973 */ 3974 bool 3975 pkt_fwd_shared_rxq_check(void) 3976 { 3977 streamid_t sm_id; 3978 streamid_t nb_fs_per_lcore; 3979 lcoreid_t nb_fc; 3980 lcoreid_t lc_id; 3981 struct fwd_stream *fs; 3982 uint16_t domain_id; 3983 struct rte_port *port; 3984 struct rte_eth_dev_info *dev_info; 3985 struct rte_eth_rxconf *rxq_conf; 3986 3987 if (rxq_share == 0) 3988 return true; 3989 nb_fc = cur_fwd_config.nb_fwd_lcores; 3990 /* 3991 * Check streams on each core, make sure the same switch domain + 3992 * group + queue doesn't get scheduled on other cores. 3993 */ 3994 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 3995 sm_id = fwd_lcores[lc_id]->stream_idx; 3996 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3997 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3998 sm_id++) { 3999 fs = fwd_streams[sm_id]; 4000 /* Update lcore info stream being scheduled. */ 4001 fs->lcore = fwd_lcores[lc_id]; 4002 port = &ports[fs->rx_port]; 4003 dev_info = &port->dev_info; 4004 rxq_conf = &port->rxq[fs->rx_queue].conf; 4005 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4006 == 0 || rxq_conf->share_group == 0) 4007 /* Not shared rxq. */ 4008 continue; 4009 /* Check shared rxq not scheduled on remaining cores. */ 4010 domain_id = port->dev_info.switch_info.domain_id; 4011 if (fwd_stream_on_other_lcores(domain_id, lc_id, 4012 fs->rx_port, 4013 fs->rx_queue, 4014 rxq_conf->share_group, 4015 rxq_conf->share_qid)) 4016 return false; 4017 } 4018 } 4019 return true; 4020 } 4021 4022 /* 4023 * Setup forwarding configuration for each logical core. 4024 */ 4025 static void 4026 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 4027 { 4028 streamid_t nb_fs_per_lcore; 4029 streamid_t nb_fs; 4030 streamid_t sm_id; 4031 lcoreid_t nb_extra; 4032 lcoreid_t nb_fc; 4033 lcoreid_t nb_lc; 4034 lcoreid_t lc_id; 4035 4036 nb_fs = cfg->nb_fwd_streams; 4037 nb_fc = cfg->nb_fwd_lcores; 4038 if (nb_fs <= nb_fc) { 4039 nb_fs_per_lcore = 1; 4040 nb_extra = 0; 4041 } else { 4042 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 4043 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 4044 } 4045 4046 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 4047 sm_id = 0; 4048 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 4049 fwd_lcores[lc_id]->stream_idx = sm_id; 4050 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 4051 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4052 } 4053 4054 /* 4055 * Assign extra remaining streams, if any. 4056 */ 4057 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 4058 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 4059 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 4060 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 4061 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4062 } 4063 } 4064 4065 static portid_t 4066 fwd_topology_tx_port_get(portid_t rxp) 4067 { 4068 static int warning_once = 1; 4069 4070 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 4071 4072 switch (port_topology) { 4073 default: 4074 case PORT_TOPOLOGY_PAIRED: 4075 if ((rxp & 0x1) == 0) { 4076 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 4077 return rxp + 1; 4078 if (warning_once) { 4079 fprintf(stderr, 4080 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 4081 warning_once = 0; 4082 } 4083 return rxp; 4084 } 4085 return rxp - 1; 4086 case PORT_TOPOLOGY_CHAINED: 4087 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 4088 case PORT_TOPOLOGY_LOOP: 4089 return rxp; 4090 } 4091 } 4092 4093 static void 4094 simple_fwd_config_setup(void) 4095 { 4096 portid_t i; 4097 4098 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 4099 cur_fwd_config.nb_fwd_streams = 4100 (streamid_t) cur_fwd_config.nb_fwd_ports; 4101 4102 /* reinitialize forwarding streams */ 4103 init_fwd_streams(); 4104 4105 /* 4106 * In the simple forwarding test, the number of forwarding cores 4107 * must be lower or equal to the number of forwarding ports. 4108 */ 4109 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4110 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 4111 cur_fwd_config.nb_fwd_lcores = 4112 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 4113 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4114 4115 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 4116 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 4117 fwd_streams[i]->rx_queue = 0; 4118 fwd_streams[i]->tx_port = 4119 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 4120 fwd_streams[i]->tx_queue = 0; 4121 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 4122 fwd_streams[i]->retry_enabled = retry_enabled; 4123 } 4124 } 4125 4126 /** 4127 * For the RSS forwarding test all streams distributed over lcores. Each stream 4128 * being composed of a RX queue to poll on a RX port for input messages, 4129 * associated with a TX queue of a TX port where to send forwarded packets. 4130 */ 4131 static void 4132 rss_fwd_config_setup(void) 4133 { 4134 portid_t rxp; 4135 portid_t txp; 4136 queueid_t rxq; 4137 queueid_t nb_q; 4138 streamid_t sm_id; 4139 int start; 4140 int end; 4141 4142 nb_q = nb_rxq; 4143 if (nb_q > nb_txq) 4144 nb_q = nb_txq; 4145 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4146 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4147 cur_fwd_config.nb_fwd_streams = 4148 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4149 4150 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4151 cur_fwd_config.nb_fwd_lcores = 4152 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4153 4154 /* reinitialize forwarding streams */ 4155 init_fwd_streams(); 4156 4157 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4158 4159 if (proc_id > 0 && nb_q % num_procs != 0) 4160 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4161 4162 /** 4163 * In multi-process, All queues are allocated to different 4164 * processes based on num_procs and proc_id. For example: 4165 * if supports 4 queues(nb_q), 2 processes(num_procs), 4166 * the 0~1 queue for primary process. 4167 * the 2~3 queue for secondary process. 4168 */ 4169 start = proc_id * nb_q / num_procs; 4170 end = start + nb_q / num_procs; 4171 rxp = 0; 4172 rxq = start; 4173 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4174 struct fwd_stream *fs; 4175 4176 fs = fwd_streams[sm_id]; 4177 txp = fwd_topology_tx_port_get(rxp); 4178 fs->rx_port = fwd_ports_ids[rxp]; 4179 fs->rx_queue = rxq; 4180 fs->tx_port = fwd_ports_ids[txp]; 4181 fs->tx_queue = rxq; 4182 fs->peer_addr = fs->tx_port; 4183 fs->retry_enabled = retry_enabled; 4184 rxp++; 4185 if (rxp < nb_fwd_ports) 4186 continue; 4187 rxp = 0; 4188 rxq++; 4189 if (rxq >= end) 4190 rxq = start; 4191 } 4192 } 4193 4194 static uint16_t 4195 get_fwd_port_total_tc_num(void) 4196 { 4197 struct rte_eth_dcb_info dcb_info; 4198 uint16_t total_tc_num = 0; 4199 unsigned int i; 4200 4201 for (i = 0; i < nb_fwd_ports; i++) { 4202 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4203 total_tc_num += dcb_info.nb_tcs; 4204 } 4205 4206 return total_tc_num; 4207 } 4208 4209 /** 4210 * For the DCB forwarding test, each core is assigned on each traffic class. 4211 * 4212 * Each core is assigned a multi-stream, each stream being composed of 4213 * a RX queue to poll on a RX port for input messages, associated with 4214 * a TX queue of a TX port where to send forwarded packets. All RX and 4215 * TX queues are mapping to the same traffic class. 4216 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4217 * the same core 4218 */ 4219 static void 4220 dcb_fwd_config_setup(void) 4221 { 4222 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4223 portid_t txp, rxp = 0; 4224 queueid_t txq, rxq = 0; 4225 lcoreid_t lc_id; 4226 uint16_t nb_rx_queue, nb_tx_queue; 4227 uint16_t i, j, k, sm_id = 0; 4228 uint16_t total_tc_num; 4229 struct rte_port *port; 4230 uint8_t tc = 0; 4231 portid_t pid; 4232 int ret; 4233 4234 /* 4235 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4236 * or RTE_PORT_STOPPED. 4237 * 4238 * Re-configure ports to get updated mapping between tc and queue in 4239 * case the queue number of the port is changed. Skip for started ports 4240 * since modifying queue number and calling dev_configure need to stop 4241 * ports first. 4242 */ 4243 for (pid = 0; pid < nb_fwd_ports; pid++) { 4244 if (port_is_started(pid) == 1) 4245 continue; 4246 4247 port = &ports[pid]; 4248 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4249 &port->dev_conf); 4250 if (ret < 0) { 4251 fprintf(stderr, 4252 "Failed to re-configure port %d, ret = %d.\n", 4253 pid, ret); 4254 return; 4255 } 4256 } 4257 4258 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4259 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4260 cur_fwd_config.nb_fwd_streams = 4261 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4262 total_tc_num = get_fwd_port_total_tc_num(); 4263 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4264 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4265 4266 /* reinitialize forwarding streams */ 4267 init_fwd_streams(); 4268 sm_id = 0; 4269 txp = 1; 4270 /* get the dcb info on the first RX and TX ports */ 4271 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4272 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4273 4274 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4275 fwd_lcores[lc_id]->stream_nb = 0; 4276 fwd_lcores[lc_id]->stream_idx = sm_id; 4277 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4278 /* if the nb_queue is zero, means this tc is 4279 * not enabled on the POOL 4280 */ 4281 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4282 break; 4283 k = fwd_lcores[lc_id]->stream_nb + 4284 fwd_lcores[lc_id]->stream_idx; 4285 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4286 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4287 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4288 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4289 for (j = 0; j < nb_rx_queue; j++) { 4290 struct fwd_stream *fs; 4291 4292 fs = fwd_streams[k + j]; 4293 fs->rx_port = fwd_ports_ids[rxp]; 4294 fs->rx_queue = rxq + j; 4295 fs->tx_port = fwd_ports_ids[txp]; 4296 fs->tx_queue = txq + j % nb_tx_queue; 4297 fs->peer_addr = fs->tx_port; 4298 fs->retry_enabled = retry_enabled; 4299 } 4300 fwd_lcores[lc_id]->stream_nb += 4301 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4302 } 4303 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4304 4305 tc++; 4306 if (tc < rxp_dcb_info.nb_tcs) 4307 continue; 4308 /* Restart from TC 0 on next RX port */ 4309 tc = 0; 4310 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4311 rxp = (portid_t) 4312 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4313 else 4314 rxp++; 4315 if (rxp >= nb_fwd_ports) 4316 return; 4317 /* get the dcb information on next RX and TX ports */ 4318 if ((rxp & 0x1) == 0) 4319 txp = (portid_t) (rxp + 1); 4320 else 4321 txp = (portid_t) (rxp - 1); 4322 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4323 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4324 } 4325 } 4326 4327 static void 4328 icmp_echo_config_setup(void) 4329 { 4330 portid_t rxp; 4331 queueid_t rxq; 4332 lcoreid_t lc_id; 4333 uint16_t sm_id; 4334 4335 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4336 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4337 (nb_txq * nb_fwd_ports); 4338 else 4339 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4340 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4341 cur_fwd_config.nb_fwd_streams = 4342 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4343 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4344 cur_fwd_config.nb_fwd_lcores = 4345 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4346 if (verbose_level > 0) { 4347 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4348 __FUNCTION__, 4349 cur_fwd_config.nb_fwd_lcores, 4350 cur_fwd_config.nb_fwd_ports, 4351 cur_fwd_config.nb_fwd_streams); 4352 } 4353 4354 /* reinitialize forwarding streams */ 4355 init_fwd_streams(); 4356 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4357 rxp = 0; rxq = 0; 4358 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4359 if (verbose_level > 0) 4360 printf(" core=%d: \n", lc_id); 4361 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4362 struct fwd_stream *fs; 4363 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4364 fs->rx_port = fwd_ports_ids[rxp]; 4365 fs->rx_queue = rxq; 4366 fs->tx_port = fs->rx_port; 4367 fs->tx_queue = rxq; 4368 fs->peer_addr = fs->tx_port; 4369 fs->retry_enabled = retry_enabled; 4370 if (verbose_level > 0) 4371 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4372 sm_id, fs->rx_port, fs->rx_queue, 4373 fs->tx_queue); 4374 rxq = (queueid_t) (rxq + 1); 4375 if (rxq == nb_rxq) { 4376 rxq = 0; 4377 rxp = (portid_t) (rxp + 1); 4378 } 4379 } 4380 } 4381 } 4382 4383 void 4384 fwd_config_setup(void) 4385 { 4386 struct rte_port *port; 4387 portid_t pt_id; 4388 unsigned int i; 4389 4390 cur_fwd_config.fwd_eng = cur_fwd_eng; 4391 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4392 icmp_echo_config_setup(); 4393 return; 4394 } 4395 4396 if ((nb_rxq > 1) && (nb_txq > 1)){ 4397 if (dcb_config) { 4398 for (i = 0; i < nb_fwd_ports; i++) { 4399 pt_id = fwd_ports_ids[i]; 4400 port = &ports[pt_id]; 4401 if (!port->dcb_flag) { 4402 fprintf(stderr, 4403 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4404 return; 4405 } 4406 } 4407 if (nb_fwd_lcores == 1) { 4408 fprintf(stderr, 4409 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4410 return; 4411 } 4412 4413 dcb_fwd_config_setup(); 4414 } else 4415 rss_fwd_config_setup(); 4416 } 4417 else 4418 simple_fwd_config_setup(); 4419 } 4420 4421 static const char * 4422 mp_alloc_to_str(uint8_t mode) 4423 { 4424 switch (mode) { 4425 case MP_ALLOC_NATIVE: 4426 return "native"; 4427 case MP_ALLOC_ANON: 4428 return "anon"; 4429 case MP_ALLOC_XMEM: 4430 return "xmem"; 4431 case MP_ALLOC_XMEM_HUGE: 4432 return "xmemhuge"; 4433 case MP_ALLOC_XBUF: 4434 return "xbuf"; 4435 default: 4436 return "invalid"; 4437 } 4438 } 4439 4440 void 4441 pkt_fwd_config_display(struct fwd_config *cfg) 4442 { 4443 struct fwd_stream *fs; 4444 lcoreid_t lc_id; 4445 streamid_t sm_id; 4446 4447 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 4448 "NUMA support %s, MP allocation mode: %s\n", 4449 cfg->fwd_eng->fwd_mode_name, 4450 retry_enabled == 0 ? "" : " with retry", 4451 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 4452 numa_support == 1 ? "enabled" : "disabled", 4453 mp_alloc_to_str(mp_alloc_type)); 4454 4455 if (retry_enabled) 4456 printf("TX retry num: %u, delay between TX retries: %uus\n", 4457 burst_tx_retry_num, burst_tx_delay_time); 4458 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 4459 printf("Logical Core %u (socket %u) forwards packets on " 4460 "%d streams:", 4461 fwd_lcores_cpuids[lc_id], 4462 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 4463 fwd_lcores[lc_id]->stream_nb); 4464 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4465 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4466 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 4467 "P=%d/Q=%d (socket %u) ", 4468 fs->rx_port, fs->rx_queue, 4469 ports[fs->rx_port].socket_id, 4470 fs->tx_port, fs->tx_queue, 4471 ports[fs->tx_port].socket_id); 4472 print_ethaddr("peer=", 4473 &peer_eth_addrs[fs->peer_addr]); 4474 } 4475 printf("\n"); 4476 } 4477 printf("\n"); 4478 } 4479 4480 void 4481 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 4482 { 4483 struct rte_ether_addr new_peer_addr; 4484 if (!rte_eth_dev_is_valid_port(port_id)) { 4485 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 4486 return; 4487 } 4488 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 4489 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 4490 peer_addr); 4491 return; 4492 } 4493 peer_eth_addrs[port_id] = new_peer_addr; 4494 } 4495 4496 int 4497 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 4498 { 4499 unsigned int i; 4500 unsigned int lcore_cpuid; 4501 int record_now; 4502 4503 record_now = 0; 4504 again: 4505 for (i = 0; i < nb_lc; i++) { 4506 lcore_cpuid = lcorelist[i]; 4507 if (! rte_lcore_is_enabled(lcore_cpuid)) { 4508 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 4509 return -1; 4510 } 4511 if (lcore_cpuid == rte_get_main_lcore()) { 4512 fprintf(stderr, 4513 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 4514 lcore_cpuid); 4515 return -1; 4516 } 4517 if (record_now) 4518 fwd_lcores_cpuids[i] = lcore_cpuid; 4519 } 4520 if (record_now == 0) { 4521 record_now = 1; 4522 goto again; 4523 } 4524 nb_cfg_lcores = (lcoreid_t) nb_lc; 4525 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 4526 printf("previous number of forwarding cores %u - changed to " 4527 "number of configured cores %u\n", 4528 (unsigned int) nb_fwd_lcores, nb_lc); 4529 nb_fwd_lcores = (lcoreid_t) nb_lc; 4530 } 4531 4532 return 0; 4533 } 4534 4535 int 4536 set_fwd_lcores_mask(uint64_t lcoremask) 4537 { 4538 unsigned int lcorelist[64]; 4539 unsigned int nb_lc; 4540 unsigned int i; 4541 4542 if (lcoremask == 0) { 4543 fprintf(stderr, "Invalid NULL mask of cores\n"); 4544 return -1; 4545 } 4546 nb_lc = 0; 4547 for (i = 0; i < 64; i++) { 4548 if (! ((uint64_t)(1ULL << i) & lcoremask)) 4549 continue; 4550 lcorelist[nb_lc++] = i; 4551 } 4552 return set_fwd_lcores_list(lcorelist, nb_lc); 4553 } 4554 4555 void 4556 set_fwd_lcores_number(uint16_t nb_lc) 4557 { 4558 if (test_done == 0) { 4559 fprintf(stderr, "Please stop forwarding first\n"); 4560 return; 4561 } 4562 if (nb_lc > nb_cfg_lcores) { 4563 fprintf(stderr, 4564 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 4565 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 4566 return; 4567 } 4568 nb_fwd_lcores = (lcoreid_t) nb_lc; 4569 printf("Number of forwarding cores set to %u\n", 4570 (unsigned int) nb_fwd_lcores); 4571 } 4572 4573 void 4574 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 4575 { 4576 unsigned int i; 4577 portid_t port_id; 4578 int record_now; 4579 4580 record_now = 0; 4581 again: 4582 for (i = 0; i < nb_pt; i++) { 4583 port_id = (portid_t) portlist[i]; 4584 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4585 return; 4586 if (record_now) 4587 fwd_ports_ids[i] = port_id; 4588 } 4589 if (record_now == 0) { 4590 record_now = 1; 4591 goto again; 4592 } 4593 nb_cfg_ports = (portid_t) nb_pt; 4594 if (nb_fwd_ports != (portid_t) nb_pt) { 4595 printf("previous number of forwarding ports %u - changed to " 4596 "number of configured ports %u\n", 4597 (unsigned int) nb_fwd_ports, nb_pt); 4598 nb_fwd_ports = (portid_t) nb_pt; 4599 } 4600 } 4601 4602 /** 4603 * Parse the user input and obtain the list of forwarding ports 4604 * 4605 * @param[in] list 4606 * String containing the user input. User can specify 4607 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 4608 * For example, if the user wants to use all the available 4609 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 4610 * If the user wants to use only the ports 1,2 then the input 4611 * is 1,2. 4612 * valid characters are '-' and ',' 4613 * @param[out] values 4614 * This array will be filled with a list of port IDs 4615 * based on the user input 4616 * Note that duplicate entries are discarded and only the first 4617 * count entries in this array are port IDs and all the rest 4618 * will contain default values 4619 * @param[in] maxsize 4620 * This parameter denotes 2 things 4621 * 1) Number of elements in the values array 4622 * 2) Maximum value of each element in the values array 4623 * @return 4624 * On success, returns total count of parsed port IDs 4625 * On failure, returns 0 4626 */ 4627 static unsigned int 4628 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 4629 { 4630 unsigned int count = 0; 4631 char *end = NULL; 4632 int min, max; 4633 int value, i; 4634 unsigned int marked[maxsize]; 4635 4636 if (list == NULL || values == NULL) 4637 return 0; 4638 4639 for (i = 0; i < (int)maxsize; i++) 4640 marked[i] = 0; 4641 4642 min = INT_MAX; 4643 4644 do { 4645 /*Remove the blank spaces if any*/ 4646 while (isblank(*list)) 4647 list++; 4648 if (*list == '\0') 4649 break; 4650 errno = 0; 4651 value = strtol(list, &end, 10); 4652 if (errno || end == NULL) 4653 return 0; 4654 if (value < 0 || value >= (int)maxsize) 4655 return 0; 4656 while (isblank(*end)) 4657 end++; 4658 if (*end == '-' && min == INT_MAX) { 4659 min = value; 4660 } else if ((*end == ',') || (*end == '\0')) { 4661 max = value; 4662 if (min == INT_MAX) 4663 min = value; 4664 for (i = min; i <= max; i++) { 4665 if (count < maxsize) { 4666 if (marked[i]) 4667 continue; 4668 values[count] = i; 4669 marked[i] = 1; 4670 count++; 4671 } 4672 } 4673 min = INT_MAX; 4674 } else 4675 return 0; 4676 list = end + 1; 4677 } while (*end != '\0'); 4678 4679 return count; 4680 } 4681 4682 void 4683 parse_fwd_portlist(const char *portlist) 4684 { 4685 unsigned int portcount; 4686 unsigned int portindex[RTE_MAX_ETHPORTS]; 4687 unsigned int i, valid_port_count = 0; 4688 4689 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 4690 if (!portcount) 4691 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 4692 4693 /* 4694 * Here we verify the validity of the ports 4695 * and thereby calculate the total number of 4696 * valid ports 4697 */ 4698 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 4699 if (rte_eth_dev_is_valid_port(portindex[i])) { 4700 portindex[valid_port_count] = portindex[i]; 4701 valid_port_count++; 4702 } 4703 } 4704 4705 set_fwd_ports_list(portindex, valid_port_count); 4706 } 4707 4708 void 4709 set_fwd_ports_mask(uint64_t portmask) 4710 { 4711 unsigned int portlist[64]; 4712 unsigned int nb_pt; 4713 unsigned int i; 4714 4715 if (portmask == 0) { 4716 fprintf(stderr, "Invalid NULL mask of ports\n"); 4717 return; 4718 } 4719 nb_pt = 0; 4720 RTE_ETH_FOREACH_DEV(i) { 4721 if (! ((uint64_t)(1ULL << i) & portmask)) 4722 continue; 4723 portlist[nb_pt++] = i; 4724 } 4725 set_fwd_ports_list(portlist, nb_pt); 4726 } 4727 4728 void 4729 set_fwd_ports_number(uint16_t nb_pt) 4730 { 4731 if (nb_pt > nb_cfg_ports) { 4732 fprintf(stderr, 4733 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 4734 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 4735 return; 4736 } 4737 nb_fwd_ports = (portid_t) nb_pt; 4738 printf("Number of forwarding ports set to %u\n", 4739 (unsigned int) nb_fwd_ports); 4740 } 4741 4742 int 4743 port_is_forwarding(portid_t port_id) 4744 { 4745 unsigned int i; 4746 4747 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4748 return -1; 4749 4750 for (i = 0; i < nb_fwd_ports; i++) { 4751 if (fwd_ports_ids[i] == port_id) 4752 return 1; 4753 } 4754 4755 return 0; 4756 } 4757 4758 void 4759 set_nb_pkt_per_burst(uint16_t nb) 4760 { 4761 if (nb > MAX_PKT_BURST) { 4762 fprintf(stderr, 4763 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 4764 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 4765 return; 4766 } 4767 nb_pkt_per_burst = nb; 4768 printf("Number of packets per burst set to %u\n", 4769 (unsigned int) nb_pkt_per_burst); 4770 } 4771 4772 static const char * 4773 tx_split_get_name(enum tx_pkt_split split) 4774 { 4775 uint32_t i; 4776 4777 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4778 if (tx_split_name[i].split == split) 4779 return tx_split_name[i].name; 4780 } 4781 return NULL; 4782 } 4783 4784 void 4785 set_tx_pkt_split(const char *name) 4786 { 4787 uint32_t i; 4788 4789 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4790 if (strcmp(tx_split_name[i].name, name) == 0) { 4791 tx_pkt_split = tx_split_name[i].split; 4792 return; 4793 } 4794 } 4795 fprintf(stderr, "unknown value: \"%s\"\n", name); 4796 } 4797 4798 int 4799 parse_fec_mode(const char *name, uint32_t *fec_capa) 4800 { 4801 uint8_t i; 4802 4803 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 4804 if (strcmp(fec_mode_name[i].name, name) == 0) { 4805 *fec_capa = 4806 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 4807 return 0; 4808 } 4809 } 4810 return -1; 4811 } 4812 4813 void 4814 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 4815 { 4816 unsigned int i, j; 4817 4818 printf("FEC capabilities:\n"); 4819 4820 for (i = 0; i < num; i++) { 4821 printf("%s : ", 4822 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 4823 4824 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 4825 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 4826 speed_fec_capa[i].capa) 4827 printf("%s ", fec_mode_name[j].name); 4828 } 4829 printf("\n"); 4830 } 4831 } 4832 4833 void 4834 show_rx_pkt_offsets(void) 4835 { 4836 uint32_t i, n; 4837 4838 n = rx_pkt_nb_offs; 4839 printf("Number of offsets: %u\n", n); 4840 if (n) { 4841 printf("Segment offsets: "); 4842 for (i = 0; i != n - 1; i++) 4843 printf("%hu,", rx_pkt_seg_offsets[i]); 4844 printf("%hu\n", rx_pkt_seg_lengths[i]); 4845 } 4846 } 4847 4848 void 4849 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 4850 { 4851 unsigned int i; 4852 4853 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 4854 printf("nb segments per RX packets=%u >= " 4855 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 4856 return; 4857 } 4858 4859 /* 4860 * No extra check here, the segment length will be checked by PMD 4861 * in the extended queue setup. 4862 */ 4863 for (i = 0; i < nb_offs; i++) { 4864 if (seg_offsets[i] >= UINT16_MAX) { 4865 printf("offset[%u]=%u > UINT16_MAX - give up\n", 4866 i, seg_offsets[i]); 4867 return; 4868 } 4869 } 4870 4871 for (i = 0; i < nb_offs; i++) 4872 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 4873 4874 rx_pkt_nb_offs = (uint8_t) nb_offs; 4875 } 4876 4877 void 4878 show_rx_pkt_segments(void) 4879 { 4880 uint32_t i, n; 4881 4882 n = rx_pkt_nb_segs; 4883 printf("Number of segments: %u\n", n); 4884 if (n) { 4885 printf("Segment sizes: "); 4886 for (i = 0; i != n - 1; i++) 4887 printf("%hu,", rx_pkt_seg_lengths[i]); 4888 printf("%hu\n", rx_pkt_seg_lengths[i]); 4889 } 4890 } 4891 4892 void 4893 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4894 { 4895 unsigned int i; 4896 4897 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 4898 printf("nb segments per RX packets=%u >= " 4899 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 4900 return; 4901 } 4902 4903 /* 4904 * No extra check here, the segment length will be checked by PMD 4905 * in the extended queue setup. 4906 */ 4907 for (i = 0; i < nb_segs; i++) { 4908 if (seg_lengths[i] >= UINT16_MAX) { 4909 printf("length[%u]=%u > UINT16_MAX - give up\n", 4910 i, seg_lengths[i]); 4911 return; 4912 } 4913 } 4914 4915 for (i = 0; i < nb_segs; i++) 4916 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4917 4918 rx_pkt_nb_segs = (uint8_t) nb_segs; 4919 } 4920 4921 void 4922 show_tx_pkt_segments(void) 4923 { 4924 uint32_t i, n; 4925 const char *split; 4926 4927 n = tx_pkt_nb_segs; 4928 split = tx_split_get_name(tx_pkt_split); 4929 4930 printf("Number of segments: %u\n", n); 4931 printf("Segment sizes: "); 4932 for (i = 0; i != n - 1; i++) 4933 printf("%hu,", tx_pkt_seg_lengths[i]); 4934 printf("%hu\n", tx_pkt_seg_lengths[i]); 4935 printf("Split packet: %s\n", split); 4936 } 4937 4938 static bool 4939 nb_segs_is_invalid(unsigned int nb_segs) 4940 { 4941 uint16_t ring_size; 4942 uint16_t queue_id; 4943 uint16_t port_id; 4944 int ret; 4945 4946 RTE_ETH_FOREACH_DEV(port_id) { 4947 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 4948 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 4949 if (ret) { 4950 /* Port may not be initialized yet, can't say 4951 * the port is invalid in this stage. 4952 */ 4953 continue; 4954 } 4955 if (ring_size < nb_segs) { 4956 printf("nb segments per TX packets=%u >= TX " 4957 "queue(%u) ring_size=%u - txpkts ignored\n", 4958 nb_segs, queue_id, ring_size); 4959 return true; 4960 } 4961 } 4962 } 4963 4964 return false; 4965 } 4966 4967 void 4968 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4969 { 4970 uint16_t tx_pkt_len; 4971 unsigned int i; 4972 4973 /* 4974 * For single segment settings failed check is ignored. 4975 * It is a very basic capability to send the single segment 4976 * packets, suppose it is always supported. 4977 */ 4978 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 4979 fprintf(stderr, 4980 "Tx segment size(%u) is not supported - txpkts ignored\n", 4981 nb_segs); 4982 return; 4983 } 4984 4985 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 4986 fprintf(stderr, 4987 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 4988 nb_segs, RTE_MAX_SEGS_PER_PKT); 4989 return; 4990 } 4991 4992 /* 4993 * Check that each segment length is greater or equal than 4994 * the mbuf data size. 4995 * Check also that the total packet length is greater or equal than the 4996 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 4997 * 20 + 8). 4998 */ 4999 tx_pkt_len = 0; 5000 for (i = 0; i < nb_segs; i++) { 5001 if (seg_lengths[i] > mbuf_data_size[0]) { 5002 fprintf(stderr, 5003 "length[%u]=%u > mbuf_data_size=%u - give up\n", 5004 i, seg_lengths[i], mbuf_data_size[0]); 5005 return; 5006 } 5007 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 5008 } 5009 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 5010 fprintf(stderr, "total packet length=%u < %d - give up\n", 5011 (unsigned) tx_pkt_len, 5012 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 5013 return; 5014 } 5015 5016 for (i = 0; i < nb_segs; i++) 5017 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5018 5019 tx_pkt_length = tx_pkt_len; 5020 tx_pkt_nb_segs = (uint8_t) nb_segs; 5021 } 5022 5023 void 5024 show_tx_pkt_times(void) 5025 { 5026 printf("Interburst gap: %u\n", tx_pkt_times_inter); 5027 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 5028 } 5029 5030 void 5031 set_tx_pkt_times(unsigned int *tx_times) 5032 { 5033 tx_pkt_times_inter = tx_times[0]; 5034 tx_pkt_times_intra = tx_times[1]; 5035 } 5036 5037 #ifdef RTE_LIB_GRO 5038 void 5039 setup_gro(const char *onoff, portid_t port_id) 5040 { 5041 if (!rte_eth_dev_is_valid_port(port_id)) { 5042 fprintf(stderr, "invalid port id %u\n", port_id); 5043 return; 5044 } 5045 if (test_done == 0) { 5046 fprintf(stderr, 5047 "Before enable/disable GRO, please stop forwarding first\n"); 5048 return; 5049 } 5050 if (strcmp(onoff, "on") == 0) { 5051 if (gro_ports[port_id].enable != 0) { 5052 fprintf(stderr, 5053 "Port %u has enabled GRO. Please disable GRO first\n", 5054 port_id); 5055 return; 5056 } 5057 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5058 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 5059 gro_ports[port_id].param.max_flow_num = 5060 GRO_DEFAULT_FLOW_NUM; 5061 gro_ports[port_id].param.max_item_per_flow = 5062 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 5063 } 5064 gro_ports[port_id].enable = 1; 5065 } else { 5066 if (gro_ports[port_id].enable == 0) { 5067 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 5068 return; 5069 } 5070 gro_ports[port_id].enable = 0; 5071 } 5072 } 5073 5074 void 5075 setup_gro_flush_cycles(uint8_t cycles) 5076 { 5077 if (test_done == 0) { 5078 fprintf(stderr, 5079 "Before change flush interval for GRO, please stop forwarding first.\n"); 5080 return; 5081 } 5082 5083 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 5084 GRO_DEFAULT_FLUSH_CYCLES) { 5085 fprintf(stderr, 5086 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 5087 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 5088 cycles = GRO_DEFAULT_FLUSH_CYCLES; 5089 } 5090 5091 gro_flush_cycles = cycles; 5092 } 5093 5094 void 5095 show_gro(portid_t port_id) 5096 { 5097 struct rte_gro_param *param; 5098 uint32_t max_pkts_num; 5099 5100 param = &gro_ports[port_id].param; 5101 5102 if (!rte_eth_dev_is_valid_port(port_id)) { 5103 fprintf(stderr, "Invalid port id %u.\n", port_id); 5104 return; 5105 } 5106 if (gro_ports[port_id].enable) { 5107 printf("GRO type: TCP/IPv4\n"); 5108 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5109 max_pkts_num = param->max_flow_num * 5110 param->max_item_per_flow; 5111 } else 5112 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 5113 printf("Max number of packets to perform GRO: %u\n", 5114 max_pkts_num); 5115 printf("Flushing cycles: %u\n", gro_flush_cycles); 5116 } else 5117 printf("Port %u doesn't enable GRO.\n", port_id); 5118 } 5119 #endif /* RTE_LIB_GRO */ 5120 5121 #ifdef RTE_LIB_GSO 5122 void 5123 setup_gso(const char *mode, portid_t port_id) 5124 { 5125 if (!rte_eth_dev_is_valid_port(port_id)) { 5126 fprintf(stderr, "invalid port id %u\n", port_id); 5127 return; 5128 } 5129 if (strcmp(mode, "on") == 0) { 5130 if (test_done == 0) { 5131 fprintf(stderr, 5132 "before enabling GSO, please stop forwarding first\n"); 5133 return; 5134 } 5135 gso_ports[port_id].enable = 1; 5136 } else if (strcmp(mode, "off") == 0) { 5137 if (test_done == 0) { 5138 fprintf(stderr, 5139 "before disabling GSO, please stop forwarding first\n"); 5140 return; 5141 } 5142 gso_ports[port_id].enable = 0; 5143 } 5144 } 5145 #endif /* RTE_LIB_GSO */ 5146 5147 char* 5148 list_pkt_forwarding_modes(void) 5149 { 5150 static char fwd_modes[128] = ""; 5151 const char *separator = "|"; 5152 struct fwd_engine *fwd_eng; 5153 unsigned i = 0; 5154 5155 if (strlen (fwd_modes) == 0) { 5156 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5157 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5158 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5159 strncat(fwd_modes, separator, 5160 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5161 } 5162 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5163 } 5164 5165 return fwd_modes; 5166 } 5167 5168 char* 5169 list_pkt_forwarding_retry_modes(void) 5170 { 5171 static char fwd_modes[128] = ""; 5172 const char *separator = "|"; 5173 struct fwd_engine *fwd_eng; 5174 unsigned i = 0; 5175 5176 if (strlen(fwd_modes) == 0) { 5177 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5178 if (fwd_eng == &rx_only_engine) 5179 continue; 5180 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5181 sizeof(fwd_modes) - 5182 strlen(fwd_modes) - 1); 5183 strncat(fwd_modes, separator, 5184 sizeof(fwd_modes) - 5185 strlen(fwd_modes) - 1); 5186 } 5187 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5188 } 5189 5190 return fwd_modes; 5191 } 5192 5193 void 5194 set_pkt_forwarding_mode(const char *fwd_mode_name) 5195 { 5196 struct fwd_engine *fwd_eng; 5197 unsigned i; 5198 5199 i = 0; 5200 while ((fwd_eng = fwd_engines[i]) != NULL) { 5201 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5202 printf("Set %s packet forwarding mode%s\n", 5203 fwd_mode_name, 5204 retry_enabled == 0 ? "" : " with retry"); 5205 cur_fwd_eng = fwd_eng; 5206 return; 5207 } 5208 i++; 5209 } 5210 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5211 } 5212 5213 void 5214 add_rx_dump_callbacks(portid_t portid) 5215 { 5216 struct rte_eth_dev_info dev_info; 5217 uint16_t queue; 5218 int ret; 5219 5220 if (port_id_is_invalid(portid, ENABLED_WARN)) 5221 return; 5222 5223 ret = eth_dev_info_get_print_err(portid, &dev_info); 5224 if (ret != 0) 5225 return; 5226 5227 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5228 if (!ports[portid].rx_dump_cb[queue]) 5229 ports[portid].rx_dump_cb[queue] = 5230 rte_eth_add_rx_callback(portid, queue, 5231 dump_rx_pkts, NULL); 5232 } 5233 5234 void 5235 add_tx_dump_callbacks(portid_t portid) 5236 { 5237 struct rte_eth_dev_info dev_info; 5238 uint16_t queue; 5239 int ret; 5240 5241 if (port_id_is_invalid(portid, ENABLED_WARN)) 5242 return; 5243 5244 ret = eth_dev_info_get_print_err(portid, &dev_info); 5245 if (ret != 0) 5246 return; 5247 5248 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5249 if (!ports[portid].tx_dump_cb[queue]) 5250 ports[portid].tx_dump_cb[queue] = 5251 rte_eth_add_tx_callback(portid, queue, 5252 dump_tx_pkts, NULL); 5253 } 5254 5255 void 5256 remove_rx_dump_callbacks(portid_t portid) 5257 { 5258 struct rte_eth_dev_info dev_info; 5259 uint16_t queue; 5260 int ret; 5261 5262 if (port_id_is_invalid(portid, ENABLED_WARN)) 5263 return; 5264 5265 ret = eth_dev_info_get_print_err(portid, &dev_info); 5266 if (ret != 0) 5267 return; 5268 5269 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5270 if (ports[portid].rx_dump_cb[queue]) { 5271 rte_eth_remove_rx_callback(portid, queue, 5272 ports[portid].rx_dump_cb[queue]); 5273 ports[portid].rx_dump_cb[queue] = NULL; 5274 } 5275 } 5276 5277 void 5278 remove_tx_dump_callbacks(portid_t portid) 5279 { 5280 struct rte_eth_dev_info dev_info; 5281 uint16_t queue; 5282 int ret; 5283 5284 if (port_id_is_invalid(portid, ENABLED_WARN)) 5285 return; 5286 5287 ret = eth_dev_info_get_print_err(portid, &dev_info); 5288 if (ret != 0) 5289 return; 5290 5291 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5292 if (ports[portid].tx_dump_cb[queue]) { 5293 rte_eth_remove_tx_callback(portid, queue, 5294 ports[portid].tx_dump_cb[queue]); 5295 ports[portid].tx_dump_cb[queue] = NULL; 5296 } 5297 } 5298 5299 void 5300 configure_rxtx_dump_callbacks(uint16_t verbose) 5301 { 5302 portid_t portid; 5303 5304 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5305 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 5306 return; 5307 #endif 5308 5309 RTE_ETH_FOREACH_DEV(portid) 5310 { 5311 if (verbose == 1 || verbose > 2) 5312 add_rx_dump_callbacks(portid); 5313 else 5314 remove_rx_dump_callbacks(portid); 5315 if (verbose >= 2) 5316 add_tx_dump_callbacks(portid); 5317 else 5318 remove_tx_dump_callbacks(portid); 5319 } 5320 } 5321 5322 void 5323 set_verbose_level(uint16_t vb_level) 5324 { 5325 printf("Change verbose level from %u to %u\n", 5326 (unsigned int) verbose_level, (unsigned int) vb_level); 5327 verbose_level = vb_level; 5328 configure_rxtx_dump_callbacks(verbose_level); 5329 } 5330 5331 void 5332 vlan_extend_set(portid_t port_id, int on) 5333 { 5334 int diag; 5335 int vlan_offload; 5336 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5337 5338 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5339 return; 5340 5341 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5342 5343 if (on) { 5344 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 5345 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5346 } else { 5347 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 5348 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5349 } 5350 5351 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5352 if (diag < 0) { 5353 fprintf(stderr, 5354 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 5355 port_id, on, diag); 5356 return; 5357 } 5358 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5359 } 5360 5361 void 5362 rx_vlan_strip_set(portid_t port_id, int on) 5363 { 5364 int diag; 5365 int vlan_offload; 5366 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5367 5368 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5369 return; 5370 5371 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5372 5373 if (on) { 5374 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 5375 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5376 } else { 5377 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 5378 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5379 } 5380 5381 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5382 if (diag < 0) { 5383 fprintf(stderr, 5384 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5385 __func__, port_id, on, diag); 5386 return; 5387 } 5388 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5389 } 5390 5391 void 5392 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 5393 { 5394 int diag; 5395 5396 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5397 return; 5398 5399 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 5400 if (diag < 0) 5401 fprintf(stderr, 5402 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 5403 __func__, port_id, queue_id, on, diag); 5404 } 5405 5406 void 5407 rx_vlan_filter_set(portid_t port_id, int on) 5408 { 5409 int diag; 5410 int vlan_offload; 5411 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5412 5413 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5414 return; 5415 5416 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5417 5418 if (on) { 5419 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 5420 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5421 } else { 5422 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 5423 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5424 } 5425 5426 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5427 if (diag < 0) { 5428 fprintf(stderr, 5429 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5430 __func__, port_id, on, diag); 5431 return; 5432 } 5433 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5434 } 5435 5436 void 5437 rx_vlan_qinq_strip_set(portid_t port_id, int on) 5438 { 5439 int diag; 5440 int vlan_offload; 5441 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5442 5443 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5444 return; 5445 5446 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5447 5448 if (on) { 5449 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 5450 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5451 } else { 5452 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 5453 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5454 } 5455 5456 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5457 if (diag < 0) { 5458 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 5459 __func__, port_id, on, diag); 5460 return; 5461 } 5462 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5463 } 5464 5465 int 5466 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 5467 { 5468 int diag; 5469 5470 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5471 return 1; 5472 if (vlan_id_is_invalid(vlan_id)) 5473 return 1; 5474 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 5475 if (diag == 0) 5476 return 0; 5477 fprintf(stderr, 5478 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 5479 port_id, vlan_id, on, diag); 5480 return -1; 5481 } 5482 5483 void 5484 rx_vlan_all_filter_set(portid_t port_id, int on) 5485 { 5486 uint16_t vlan_id; 5487 5488 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5489 return; 5490 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 5491 if (rx_vft_set(port_id, vlan_id, on)) 5492 break; 5493 } 5494 } 5495 5496 void 5497 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 5498 { 5499 int diag; 5500 5501 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5502 return; 5503 5504 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 5505 if (diag == 0) 5506 return; 5507 5508 fprintf(stderr, 5509 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 5510 port_id, vlan_type, tp_id, diag); 5511 } 5512 5513 void 5514 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 5515 { 5516 struct rte_eth_dev_info dev_info; 5517 int ret; 5518 5519 if (vlan_id_is_invalid(vlan_id)) 5520 return; 5521 5522 if (ports[port_id].dev_conf.txmode.offloads & 5523 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 5524 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 5525 return; 5526 } 5527 5528 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5529 if (ret != 0) 5530 return; 5531 5532 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 5533 fprintf(stderr, 5534 "Error: vlan insert is not supported by port %d\n", 5535 port_id); 5536 return; 5537 } 5538 5539 tx_vlan_reset(port_id); 5540 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 5541 ports[port_id].tx_vlan_id = vlan_id; 5542 } 5543 5544 void 5545 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 5546 { 5547 struct rte_eth_dev_info dev_info; 5548 int ret; 5549 5550 if (vlan_id_is_invalid(vlan_id)) 5551 return; 5552 if (vlan_id_is_invalid(vlan_id_outer)) 5553 return; 5554 5555 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5556 if (ret != 0) 5557 return; 5558 5559 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 5560 fprintf(stderr, 5561 "Error: qinq insert not supported by port %d\n", 5562 port_id); 5563 return; 5564 } 5565 5566 tx_vlan_reset(port_id); 5567 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5568 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5569 ports[port_id].tx_vlan_id = vlan_id; 5570 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 5571 } 5572 5573 void 5574 tx_vlan_reset(portid_t port_id) 5575 { 5576 ports[port_id].dev_conf.txmode.offloads &= 5577 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5578 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5579 ports[port_id].tx_vlan_id = 0; 5580 ports[port_id].tx_vlan_id_outer = 0; 5581 } 5582 5583 void 5584 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 5585 { 5586 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5587 return; 5588 5589 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 5590 } 5591 5592 void 5593 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 5594 { 5595 int ret; 5596 5597 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5598 return; 5599 5600 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 5601 return; 5602 5603 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 5604 fprintf(stderr, "map_value not in required range 0..%d\n", 5605 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 5606 return; 5607 } 5608 5609 if (!is_rx) { /* tx */ 5610 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 5611 map_value); 5612 if (ret) { 5613 fprintf(stderr, 5614 "failed to set tx queue stats mapping.\n"); 5615 return; 5616 } 5617 } else { /* rx */ 5618 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 5619 map_value); 5620 if (ret) { 5621 fprintf(stderr, 5622 "failed to set rx queue stats mapping.\n"); 5623 return; 5624 } 5625 } 5626 } 5627 5628 void 5629 set_xstats_hide_zero(uint8_t on_off) 5630 { 5631 xstats_hide_zero = on_off; 5632 } 5633 5634 void 5635 set_record_core_cycles(uint8_t on_off) 5636 { 5637 record_core_cycles = on_off; 5638 } 5639 5640 void 5641 set_record_burst_stats(uint8_t on_off) 5642 { 5643 record_burst_stats = on_off; 5644 } 5645 5646 uint16_t 5647 str_to_flowtype(const char *string) 5648 { 5649 uint8_t i; 5650 5651 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 5652 if (!strcmp(flowtype_str_table[i].str, string)) 5653 return flowtype_str_table[i].ftype; 5654 } 5655 5656 if (isdigit(string[0])) { 5657 int val = atoi(string); 5658 if (val > 0 && val < 64) 5659 return (uint16_t)val; 5660 } 5661 5662 return RTE_ETH_FLOW_UNKNOWN; 5663 } 5664 5665 const char* 5666 flowtype_to_str(uint16_t flow_type) 5667 { 5668 uint8_t i; 5669 5670 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 5671 if (flowtype_str_table[i].ftype == flow_type) 5672 return flowtype_str_table[i].str; 5673 } 5674 5675 return NULL; 5676 } 5677 5678 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 5679 5680 static inline void 5681 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5682 { 5683 struct rte_eth_flex_payload_cfg *cfg; 5684 uint32_t i, j; 5685 5686 for (i = 0; i < flex_conf->nb_payloads; i++) { 5687 cfg = &flex_conf->flex_set[i]; 5688 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 5689 printf("\n RAW: "); 5690 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 5691 printf("\n L2_PAYLOAD: "); 5692 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 5693 printf("\n L3_PAYLOAD: "); 5694 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 5695 printf("\n L4_PAYLOAD: "); 5696 else 5697 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 5698 for (j = 0; j < num; j++) 5699 printf(" %-5u", cfg->src_offset[j]); 5700 } 5701 printf("\n"); 5702 } 5703 5704 static inline void 5705 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5706 { 5707 struct rte_eth_fdir_flex_mask *mask; 5708 uint32_t i, j; 5709 const char *p; 5710 5711 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 5712 mask = &flex_conf->flex_mask[i]; 5713 p = flowtype_to_str(mask->flow_type); 5714 printf("\n %s:\t", p ? p : "unknown"); 5715 for (j = 0; j < num; j++) 5716 printf(" %02x", mask->mask[j]); 5717 } 5718 printf("\n"); 5719 } 5720 5721 static inline void 5722 print_fdir_flow_type(uint32_t flow_types_mask) 5723 { 5724 int i; 5725 const char *p; 5726 5727 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 5728 if (!(flow_types_mask & (1 << i))) 5729 continue; 5730 p = flowtype_to_str(i); 5731 if (p) 5732 printf(" %s", p); 5733 else 5734 printf(" unknown"); 5735 } 5736 printf("\n"); 5737 } 5738 5739 static int 5740 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 5741 struct rte_eth_fdir_stats *fdir_stat) 5742 { 5743 int ret = -ENOTSUP; 5744 5745 #ifdef RTE_NET_I40E 5746 if (ret == -ENOTSUP) { 5747 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 5748 if (!ret) 5749 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 5750 } 5751 #endif 5752 #ifdef RTE_NET_IXGBE 5753 if (ret == -ENOTSUP) { 5754 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 5755 if (!ret) 5756 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 5757 } 5758 #endif 5759 switch (ret) { 5760 case 0: 5761 break; 5762 case -ENOTSUP: 5763 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 5764 port_id); 5765 break; 5766 default: 5767 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 5768 break; 5769 } 5770 return ret; 5771 } 5772 5773 void 5774 fdir_get_infos(portid_t port_id) 5775 { 5776 struct rte_eth_fdir_stats fdir_stat; 5777 struct rte_eth_fdir_info fdir_info; 5778 5779 static const char *fdir_stats_border = "########################"; 5780 5781 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5782 return; 5783 5784 memset(&fdir_info, 0, sizeof(fdir_info)); 5785 memset(&fdir_stat, 0, sizeof(fdir_stat)); 5786 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 5787 return; 5788 5789 printf("\n %s FDIR infos for port %-2d %s\n", 5790 fdir_stats_border, port_id, fdir_stats_border); 5791 printf(" MODE: "); 5792 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 5793 printf(" PERFECT\n"); 5794 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 5795 printf(" PERFECT-MAC-VLAN\n"); 5796 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 5797 printf(" PERFECT-TUNNEL\n"); 5798 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 5799 printf(" SIGNATURE\n"); 5800 else 5801 printf(" DISABLE\n"); 5802 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 5803 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 5804 printf(" SUPPORTED FLOW TYPE: "); 5805 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 5806 } 5807 printf(" FLEX PAYLOAD INFO:\n"); 5808 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 5809 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 5810 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 5811 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 5812 fdir_info.flex_payload_unit, 5813 fdir_info.max_flex_payload_segment_num, 5814 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 5815 if (fdir_info.flex_conf.nb_payloads > 0) { 5816 printf(" FLEX PAYLOAD SRC OFFSET:"); 5817 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5818 } 5819 if (fdir_info.flex_conf.nb_flexmasks > 0) { 5820 printf(" FLEX MASK CFG:"); 5821 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5822 } 5823 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 5824 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 5825 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 5826 fdir_info.guarant_spc, fdir_info.best_spc); 5827 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 5828 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 5829 " add: %-10"PRIu64" remove: %"PRIu64"\n" 5830 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 5831 fdir_stat.collision, fdir_stat.free, 5832 fdir_stat.maxhash, fdir_stat.maxlen, 5833 fdir_stat.add, fdir_stat.remove, 5834 fdir_stat.f_add, fdir_stat.f_remove); 5835 printf(" %s############################%s\n", 5836 fdir_stats_border, fdir_stats_border); 5837 } 5838 5839 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 5840 5841 void 5842 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 5843 { 5844 #ifdef RTE_NET_IXGBE 5845 int diag; 5846 5847 if (is_rx) 5848 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 5849 else 5850 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 5851 5852 if (diag == 0) 5853 return; 5854 fprintf(stderr, 5855 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 5856 is_rx ? "rx" : "tx", port_id, diag); 5857 return; 5858 #endif 5859 fprintf(stderr, "VF %s setting not supported for port %d\n", 5860 is_rx ? "Rx" : "Tx", port_id); 5861 RTE_SET_USED(vf); 5862 RTE_SET_USED(on); 5863 } 5864 5865 int 5866 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate) 5867 { 5868 int diag; 5869 struct rte_eth_link link; 5870 int ret; 5871 5872 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5873 return 1; 5874 ret = eth_link_get_nowait_print_err(port_id, &link); 5875 if (ret < 0) 5876 return 1; 5877 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 5878 rate > link.link_speed) { 5879 fprintf(stderr, 5880 "Invalid rate value:%u bigger than link speed: %u\n", 5881 rate, link.link_speed); 5882 return 1; 5883 } 5884 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 5885 if (diag == 0) 5886 return diag; 5887 fprintf(stderr, 5888 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 5889 port_id, diag); 5890 return diag; 5891 } 5892 5893 int 5894 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk) 5895 { 5896 int diag = -ENOTSUP; 5897 5898 RTE_SET_USED(vf); 5899 RTE_SET_USED(rate); 5900 RTE_SET_USED(q_msk); 5901 5902 #ifdef RTE_NET_IXGBE 5903 if (diag == -ENOTSUP) 5904 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 5905 q_msk); 5906 #endif 5907 #ifdef RTE_NET_BNXT 5908 if (diag == -ENOTSUP) 5909 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 5910 #endif 5911 if (diag == 0) 5912 return diag; 5913 5914 fprintf(stderr, 5915 "%s for port_id=%d failed diag=%d\n", 5916 __func__, port_id, diag); 5917 return diag; 5918 } 5919 5920 int 5921 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh) 5922 { 5923 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5924 return -EINVAL; 5925 5926 return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh); 5927 } 5928 5929 /* 5930 * Functions to manage the set of filtered Multicast MAC addresses. 5931 * 5932 * A pool of filtered multicast MAC addresses is associated with each port. 5933 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 5934 * The address of the pool and the number of valid multicast MAC addresses 5935 * recorded in the pool are stored in the fields "mc_addr_pool" and 5936 * "mc_addr_nb" of the "rte_port" data structure. 5937 * 5938 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 5939 * to be supplied a contiguous array of multicast MAC addresses. 5940 * To comply with this constraint, the set of multicast addresses recorded 5941 * into the pool are systematically compacted at the beginning of the pool. 5942 * Hence, when a multicast address is removed from the pool, all following 5943 * addresses, if any, are copied back to keep the set contiguous. 5944 */ 5945 #define MCAST_POOL_INC 32 5946 5947 static int 5948 mcast_addr_pool_extend(struct rte_port *port) 5949 { 5950 struct rte_ether_addr *mc_pool; 5951 size_t mc_pool_size; 5952 5953 /* 5954 * If a free entry is available at the end of the pool, just 5955 * increment the number of recorded multicast addresses. 5956 */ 5957 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5958 port->mc_addr_nb++; 5959 return 0; 5960 } 5961 5962 /* 5963 * [re]allocate a pool with MCAST_POOL_INC more entries. 5964 * The previous test guarantees that port->mc_addr_nb is a multiple 5965 * of MCAST_POOL_INC. 5966 */ 5967 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 5968 MCAST_POOL_INC); 5969 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 5970 mc_pool_size); 5971 if (mc_pool == NULL) { 5972 fprintf(stderr, 5973 "allocation of pool of %u multicast addresses failed\n", 5974 port->mc_addr_nb + MCAST_POOL_INC); 5975 return -ENOMEM; 5976 } 5977 5978 port->mc_addr_pool = mc_pool; 5979 port->mc_addr_nb++; 5980 return 0; 5981 5982 } 5983 5984 static void 5985 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 5986 { 5987 if (mcast_addr_pool_extend(port) != 0) 5988 return; 5989 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 5990 } 5991 5992 static void 5993 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 5994 { 5995 port->mc_addr_nb--; 5996 if (addr_idx == port->mc_addr_nb) { 5997 /* No need to recompact the set of multicast addresses. */ 5998 if (port->mc_addr_nb == 0) { 5999 /* free the pool of multicast addresses. */ 6000 free(port->mc_addr_pool); 6001 port->mc_addr_pool = NULL; 6002 } 6003 return; 6004 } 6005 memmove(&port->mc_addr_pool[addr_idx], 6006 &port->mc_addr_pool[addr_idx + 1], 6007 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 6008 } 6009 6010 int 6011 mcast_addr_pool_destroy(portid_t port_id) 6012 { 6013 struct rte_port *port; 6014 6015 if (port_id_is_invalid(port_id, ENABLED_WARN) || 6016 port_id == (portid_t)RTE_PORT_ALL) 6017 return -EINVAL; 6018 port = &ports[port_id]; 6019 6020 if (port->mc_addr_nb != 0) { 6021 /* free the pool of multicast addresses. */ 6022 free(port->mc_addr_pool); 6023 port->mc_addr_pool = NULL; 6024 port->mc_addr_nb = 0; 6025 } 6026 return 0; 6027 } 6028 6029 static int 6030 eth_port_multicast_addr_list_set(portid_t port_id) 6031 { 6032 struct rte_port *port; 6033 int diag; 6034 6035 port = &ports[port_id]; 6036 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 6037 port->mc_addr_nb); 6038 if (diag < 0) 6039 fprintf(stderr, 6040 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 6041 port_id, port->mc_addr_nb, diag); 6042 6043 return diag; 6044 } 6045 6046 void 6047 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6048 { 6049 struct rte_port *port; 6050 uint32_t i; 6051 6052 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6053 return; 6054 6055 port = &ports[port_id]; 6056 6057 /* 6058 * Check that the added multicast MAC address is not already recorded 6059 * in the pool of multicast addresses. 6060 */ 6061 for (i = 0; i < port->mc_addr_nb; i++) { 6062 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6063 fprintf(stderr, 6064 "multicast address already filtered by port\n"); 6065 return; 6066 } 6067 } 6068 6069 mcast_addr_pool_append(port, mc_addr); 6070 if (eth_port_multicast_addr_list_set(port_id) < 0) 6071 /* Rollback on failure, remove the address from the pool */ 6072 mcast_addr_pool_remove(port, i); 6073 } 6074 6075 void 6076 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6077 { 6078 struct rte_port *port; 6079 uint32_t i; 6080 6081 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6082 return; 6083 6084 port = &ports[port_id]; 6085 6086 /* 6087 * Search the pool of multicast MAC addresses for the removed address. 6088 */ 6089 for (i = 0; i < port->mc_addr_nb; i++) { 6090 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6091 break; 6092 } 6093 if (i == port->mc_addr_nb) { 6094 fprintf(stderr, "multicast address not filtered by port %d\n", 6095 port_id); 6096 return; 6097 } 6098 6099 mcast_addr_pool_remove(port, i); 6100 if (eth_port_multicast_addr_list_set(port_id) < 0) 6101 /* Rollback on failure, add the address back into the pool */ 6102 mcast_addr_pool_append(port, mc_addr); 6103 } 6104 6105 void 6106 port_dcb_info_display(portid_t port_id) 6107 { 6108 struct rte_eth_dcb_info dcb_info; 6109 uint16_t i; 6110 int ret; 6111 static const char *border = "================"; 6112 6113 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6114 return; 6115 6116 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6117 if (ret) { 6118 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6119 port_id); 6120 return; 6121 } 6122 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6123 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6124 printf("\n TC : "); 6125 for (i = 0; i < dcb_info.nb_tcs; i++) 6126 printf("\t%4d", i); 6127 printf("\n Priority : "); 6128 for (i = 0; i < dcb_info.nb_tcs; i++) 6129 printf("\t%4d", dcb_info.prio_tc[i]); 6130 printf("\n BW percent :"); 6131 for (i = 0; i < dcb_info.nb_tcs; i++) 6132 printf("\t%4d%%", dcb_info.tc_bws[i]); 6133 printf("\n RXQ base : "); 6134 for (i = 0; i < dcb_info.nb_tcs; i++) 6135 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6136 printf("\n RXQ number :"); 6137 for (i = 0; i < dcb_info.nb_tcs; i++) 6138 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6139 printf("\n TXQ base : "); 6140 for (i = 0; i < dcb_info.nb_tcs; i++) 6141 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6142 printf("\n TXQ number :"); 6143 for (i = 0; i < dcb_info.nb_tcs; i++) 6144 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6145 printf("\n"); 6146 } 6147 6148 uint8_t * 6149 open_file(const char *file_path, uint32_t *size) 6150 { 6151 int fd = open(file_path, O_RDONLY); 6152 off_t pkg_size; 6153 uint8_t *buf = NULL; 6154 int ret = 0; 6155 struct stat st_buf; 6156 6157 if (size) 6158 *size = 0; 6159 6160 if (fd == -1) { 6161 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6162 return buf; 6163 } 6164 6165 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6166 close(fd); 6167 fprintf(stderr, "%s: File operations failed\n", __func__); 6168 return buf; 6169 } 6170 6171 pkg_size = st_buf.st_size; 6172 if (pkg_size < 0) { 6173 close(fd); 6174 fprintf(stderr, "%s: File operations failed\n", __func__); 6175 return buf; 6176 } 6177 6178 buf = (uint8_t *)malloc(pkg_size); 6179 if (!buf) { 6180 close(fd); 6181 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6182 return buf; 6183 } 6184 6185 ret = read(fd, buf, pkg_size); 6186 if (ret < 0) { 6187 close(fd); 6188 fprintf(stderr, "%s: File read operation failed\n", __func__); 6189 close_file(buf); 6190 return NULL; 6191 } 6192 6193 if (size) 6194 *size = pkg_size; 6195 6196 close(fd); 6197 6198 return buf; 6199 } 6200 6201 int 6202 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6203 { 6204 FILE *fh = fopen(file_path, "wb"); 6205 6206 if (fh == NULL) { 6207 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6208 return -1; 6209 } 6210 6211 if (fwrite(buf, 1, size, fh) != size) { 6212 fclose(fh); 6213 fprintf(stderr, "%s: File write operation failed\n", __func__); 6214 return -1; 6215 } 6216 6217 fclose(fh); 6218 6219 return 0; 6220 } 6221 6222 int 6223 close_file(uint8_t *buf) 6224 { 6225 if (buf) { 6226 free((void *)buf); 6227 return 0; 6228 } 6229 6230 return -1; 6231 } 6232 6233 void 6234 show_macs(portid_t port_id) 6235 { 6236 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6237 struct rte_eth_dev_info dev_info; 6238 int32_t i, rc, num_macs = 0; 6239 6240 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6241 return; 6242 6243 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6244 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6245 if (rc < 0) 6246 return; 6247 6248 for (i = 0; i < rc; i++) { 6249 6250 /* skip zero address */ 6251 if (rte_is_zero_ether_addr(&addr[i])) 6252 continue; 6253 6254 num_macs++; 6255 } 6256 6257 printf("Number of MAC address added: %d\n", num_macs); 6258 6259 for (i = 0; i < rc; i++) { 6260 6261 /* skip zero address */ 6262 if (rte_is_zero_ether_addr(&addr[i])) 6263 continue; 6264 6265 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6266 printf(" %s\n", buf); 6267 } 6268 } 6269 6270 void 6271 show_mcast_macs(portid_t port_id) 6272 { 6273 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6274 struct rte_ether_addr *addr; 6275 struct rte_port *port; 6276 uint32_t i; 6277 6278 port = &ports[port_id]; 6279 6280 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6281 6282 for (i = 0; i < port->mc_addr_nb; i++) { 6283 addr = &port->mc_addr_pool[i]; 6284 6285 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6286 printf(" %s\n", buf); 6287 } 6288 } 6289