1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <ctype.h> 7 #include <stdarg.h> 8 #include <errno.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <stdint.h> 13 #include <inttypes.h> 14 15 #include <sys/queue.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <fcntl.h> 19 #include <unistd.h> 20 21 #include <rte_common.h> 22 #include <rte_byteorder.h> 23 #include <rte_debug.h> 24 #include <rte_log.h> 25 #include <rte_memory.h> 26 #include <rte_memcpy.h> 27 #include <rte_memzone.h> 28 #include <rte_launch.h> 29 #include <rte_bus.h> 30 #include <rte_eal.h> 31 #include <rte_per_lcore.h> 32 #include <rte_lcore.h> 33 #include <rte_branch_prediction.h> 34 #include <rte_mempool.h> 35 #include <rte_mbuf.h> 36 #include <rte_interrupts.h> 37 #include <rte_ether.h> 38 #include <rte_ethdev.h> 39 #include <rte_string_fns.h> 40 #include <rte_cycles.h> 41 #include <rte_flow.h> 42 #include <rte_mtr.h> 43 #include <rte_errno.h> 44 #ifdef RTE_NET_IXGBE 45 #include <rte_pmd_ixgbe.h> 46 #endif 47 #ifdef RTE_NET_I40E 48 #include <rte_pmd_i40e.h> 49 #endif 50 #ifdef RTE_NET_BNXT 51 #include <rte_pmd_bnxt.h> 52 #endif 53 #ifdef RTE_LIB_GRO 54 #include <rte_gro.h> 55 #endif 56 #include <rte_hexdump.h> 57 58 #include "testpmd.h" 59 #include "cmdline_mtr.h" 60 61 #define ETHDEV_FWVERS_LEN 32 62 63 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 65 #else 66 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 67 #endif 68 69 #define NS_PER_SEC 1E9 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 /* Group types */ 91 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 92 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 93 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 94 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 95 { "none", 0 }, 96 { "ip", RTE_ETH_RSS_IP }, 97 { "udp", RTE_ETH_RSS_UDP }, 98 { "tcp", RTE_ETH_RSS_TCP }, 99 { "sctp", RTE_ETH_RSS_SCTP }, 100 { "tunnel", RTE_ETH_RSS_TUNNEL }, 101 { "vlan", RTE_ETH_RSS_VLAN }, 102 103 /* Individual type */ 104 { "ipv4", RTE_ETH_RSS_IPV4 }, 105 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 106 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 107 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 108 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 109 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 110 { "ipv6", RTE_ETH_RSS_IPV6 }, 111 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 112 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 113 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 114 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 115 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 116 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 117 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 118 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 119 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 120 { "port", RTE_ETH_RSS_PORT }, 121 { "vxlan", RTE_ETH_RSS_VXLAN }, 122 { "geneve", RTE_ETH_RSS_GENEVE }, 123 { "nvgre", RTE_ETH_RSS_NVGRE }, 124 { "gtpu", RTE_ETH_RSS_GTPU }, 125 { "eth", RTE_ETH_RSS_ETH }, 126 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 127 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 128 { "esp", RTE_ETH_RSS_ESP }, 129 { "ah", RTE_ETH_RSS_AH }, 130 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 131 { "pfcp", RTE_ETH_RSS_PFCP }, 132 { "pppoe", RTE_ETH_RSS_PPPOE }, 133 { "ecpri", RTE_ETH_RSS_ECPRI }, 134 { "mpls", RTE_ETH_RSS_MPLS }, 135 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 136 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 137 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 138 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 139 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 140 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 141 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 142 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 143 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 144 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 145 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 146 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 147 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 148 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 149 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 150 { NULL, 0}, 151 }; 152 153 static const struct { 154 enum rte_eth_fec_mode mode; 155 const char *name; 156 } fec_mode_name[] = { 157 { 158 .mode = RTE_ETH_FEC_NOFEC, 159 .name = "off", 160 }, 161 { 162 .mode = RTE_ETH_FEC_AUTO, 163 .name = "auto", 164 }, 165 { 166 .mode = RTE_ETH_FEC_BASER, 167 .name = "baser", 168 }, 169 { 170 .mode = RTE_ETH_FEC_RS, 171 .name = "rs", 172 }, 173 }; 174 175 static const struct { 176 char str[32]; 177 uint16_t ftype; 178 } flowtype_str_table[] = { 179 {"raw", RTE_ETH_FLOW_RAW}, 180 {"ipv4", RTE_ETH_FLOW_IPV4}, 181 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 182 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 183 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 184 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 185 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 186 {"ipv6", RTE_ETH_FLOW_IPV6}, 187 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 188 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 189 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 190 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 191 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 192 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 193 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 194 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 195 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 196 {"port", RTE_ETH_FLOW_PORT}, 197 {"vxlan", RTE_ETH_FLOW_VXLAN}, 198 {"geneve", RTE_ETH_FLOW_GENEVE}, 199 {"nvgre", RTE_ETH_FLOW_NVGRE}, 200 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 201 {"gtpu", RTE_ETH_FLOW_GTPU}, 202 }; 203 204 static void 205 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 206 { 207 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 208 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 209 printf("%s%s", name, buf); 210 } 211 212 static void 213 nic_xstats_display_periodic(portid_t port_id) 214 { 215 struct xstat_display_info *xstats_info; 216 uint64_t *prev_values, *curr_values; 217 uint64_t diff_value, value_rate; 218 struct timespec cur_time; 219 uint64_t *ids_supp; 220 size_t ids_supp_sz; 221 uint64_t diff_ns; 222 unsigned int i; 223 int rc; 224 225 xstats_info = &ports[port_id].xstats_info; 226 227 ids_supp_sz = xstats_info->ids_supp_sz; 228 if (ids_supp_sz == 0) 229 return; 230 231 printf("\n"); 232 233 ids_supp = xstats_info->ids_supp; 234 prev_values = xstats_info->prev_values; 235 curr_values = xstats_info->curr_values; 236 237 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 238 ids_supp_sz); 239 if (rc != (int)ids_supp_sz) { 240 fprintf(stderr, 241 "Failed to get values of %zu xstats for port %u - return code %d\n", 242 ids_supp_sz, port_id, rc); 243 return; 244 } 245 246 diff_ns = 0; 247 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 248 uint64_t ns; 249 250 ns = cur_time.tv_sec * NS_PER_SEC; 251 ns += cur_time.tv_nsec; 252 253 if (xstats_info->prev_ns != 0) 254 diff_ns = ns - xstats_info->prev_ns; 255 xstats_info->prev_ns = ns; 256 } 257 258 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 259 for (i = 0; i < ids_supp_sz; i++) { 260 diff_value = (curr_values[i] > prev_values[i]) ? 261 (curr_values[i] - prev_values[i]) : 0; 262 prev_values[i] = curr_values[i]; 263 value_rate = diff_ns > 0 ? 264 (double)diff_value / diff_ns * NS_PER_SEC : 0; 265 266 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 267 xstats_display[i].name, curr_values[i], value_rate); 268 } 269 } 270 271 void 272 nic_stats_display(portid_t port_id) 273 { 274 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 275 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 276 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 277 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 278 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 279 struct timespec cur_time; 280 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 281 diff_ns; 282 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 283 struct rte_eth_stats stats; 284 static const char *nic_stats_border = "########################"; 285 int ret; 286 287 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 288 print_valid_ports(); 289 return; 290 } 291 ret = rte_eth_stats_get(port_id, &stats); 292 if (ret != 0) { 293 fprintf(stderr, 294 "%s: Error: failed to get stats (port %u): %d", 295 __func__, port_id, ret); 296 return; 297 } 298 printf("\n %s NIC statistics for port %-2d %s\n", 299 nic_stats_border, port_id, nic_stats_border); 300 301 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 302 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 303 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 304 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 305 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 306 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 307 308 diff_ns = 0; 309 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 310 uint64_t ns; 311 312 ns = cur_time.tv_sec * NS_PER_SEC; 313 ns += cur_time.tv_nsec; 314 315 if (prev_ns[port_id] != 0) 316 diff_ns = ns - prev_ns[port_id]; 317 prev_ns[port_id] = ns; 318 } 319 320 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 321 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 322 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 323 (stats.opackets - prev_pkts_tx[port_id]) : 0; 324 prev_pkts_rx[port_id] = stats.ipackets; 325 prev_pkts_tx[port_id] = stats.opackets; 326 mpps_rx = diff_ns > 0 ? 327 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 328 mpps_tx = diff_ns > 0 ? 329 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 330 331 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 332 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 333 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 334 (stats.obytes - prev_bytes_tx[port_id]) : 0; 335 prev_bytes_rx[port_id] = stats.ibytes; 336 prev_bytes_tx[port_id] = stats.obytes; 337 mbps_rx = diff_ns > 0 ? 338 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 339 mbps_tx = diff_ns > 0 ? 340 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 341 342 printf("\n Throughput (since last show)\n"); 343 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 344 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 345 mpps_tx, mbps_tx * 8); 346 347 if (xstats_display_num > 0) 348 nic_xstats_display_periodic(port_id); 349 350 printf(" %s############################%s\n", 351 nic_stats_border, nic_stats_border); 352 } 353 354 void 355 nic_stats_clear(portid_t port_id) 356 { 357 int ret; 358 359 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 360 print_valid_ports(); 361 return; 362 } 363 364 ret = rte_eth_stats_reset(port_id); 365 if (ret != 0) { 366 fprintf(stderr, 367 "%s: Error: failed to reset stats (port %u): %s", 368 __func__, port_id, strerror(-ret)); 369 return; 370 } 371 372 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 373 if (ret != 0) { 374 if (ret < 0) 375 ret = -ret; 376 fprintf(stderr, 377 "%s: Error: failed to get stats (port %u): %s", 378 __func__, port_id, strerror(ret)); 379 return; 380 } 381 printf("\n NIC statistics for port %d cleared\n", port_id); 382 } 383 384 void 385 nic_xstats_display(portid_t port_id) 386 { 387 struct rte_eth_xstat *xstats; 388 int cnt_xstats, idx_xstat; 389 struct rte_eth_xstat_name *xstats_names; 390 391 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 392 print_valid_ports(); 393 return; 394 } 395 printf("###### NIC extended statistics for port %-2d\n", port_id); 396 if (!rte_eth_dev_is_valid_port(port_id)) { 397 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 398 return; 399 } 400 401 /* Get count */ 402 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 403 if (cnt_xstats < 0) { 404 fprintf(stderr, "Error: Cannot get count of xstats\n"); 405 return; 406 } 407 408 /* Get id-name lookup table */ 409 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 410 if (xstats_names == NULL) { 411 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 412 return; 413 } 414 if (cnt_xstats != rte_eth_xstats_get_names( 415 port_id, xstats_names, cnt_xstats)) { 416 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 417 free(xstats_names); 418 return; 419 } 420 421 /* Get stats themselves */ 422 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 423 if (xstats == NULL) { 424 fprintf(stderr, "Cannot allocate memory for xstats\n"); 425 free(xstats_names); 426 return; 427 } 428 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 429 fprintf(stderr, "Error: Unable to get xstats\n"); 430 free(xstats_names); 431 free(xstats); 432 return; 433 } 434 435 /* Display xstats */ 436 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 437 if (xstats_hide_zero && !xstats[idx_xstat].value) 438 continue; 439 printf("%s: %"PRIu64"\n", 440 xstats_names[idx_xstat].name, 441 xstats[idx_xstat].value); 442 } 443 free(xstats_names); 444 free(xstats); 445 } 446 447 void 448 nic_xstats_clear(portid_t port_id) 449 { 450 int ret; 451 452 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 453 print_valid_ports(); 454 return; 455 } 456 457 ret = rte_eth_xstats_reset(port_id); 458 if (ret != 0) { 459 fprintf(stderr, 460 "%s: Error: failed to reset xstats (port %u): %s\n", 461 __func__, port_id, strerror(-ret)); 462 return; 463 } 464 465 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 466 if (ret != 0) { 467 if (ret < 0) 468 ret = -ret; 469 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 470 __func__, port_id, strerror(ret)); 471 return; 472 } 473 } 474 475 static const char * 476 get_queue_state_name(uint8_t queue_state) 477 { 478 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 479 return "stopped"; 480 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 481 return "started"; 482 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 483 return "hairpin"; 484 else 485 return "unknown"; 486 } 487 488 void 489 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 490 { 491 struct rte_eth_burst_mode mode; 492 struct rte_eth_rxq_info qinfo; 493 int32_t rc; 494 static const char *info_border = "*********************"; 495 496 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 497 if (rc != 0) { 498 fprintf(stderr, 499 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 500 port_id, queue_id, strerror(-rc), rc); 501 return; 502 } 503 504 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 505 info_border, port_id, queue_id, info_border); 506 507 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 508 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 509 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 510 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 511 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 512 printf("\nRX drop packets: %s", 513 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 514 printf("\nRX deferred start: %s", 515 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 516 printf("\nRX scattered packets: %s", 517 (qinfo.scattered_rx != 0) ? "on" : "off"); 518 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 519 if (qinfo.rx_buf_size != 0) 520 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 521 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 522 523 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 524 printf("\nBurst mode: %s%s", 525 mode.info, 526 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 527 " (per queue)" : ""); 528 529 printf("\n"); 530 } 531 532 void 533 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 534 { 535 struct rte_eth_burst_mode mode; 536 struct rte_eth_txq_info qinfo; 537 int32_t rc; 538 static const char *info_border = "*********************"; 539 540 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 541 if (rc != 0) { 542 fprintf(stderr, 543 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 544 port_id, queue_id, strerror(-rc), rc); 545 return; 546 } 547 548 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 549 info_border, port_id, queue_id, info_border); 550 551 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 552 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 553 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 554 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 555 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 556 printf("\nTX deferred start: %s", 557 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 558 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 559 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 560 561 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 562 printf("\nBurst mode: %s%s", 563 mode.info, 564 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 565 " (per queue)" : ""); 566 567 printf("\n"); 568 } 569 570 static int bus_match_all(const struct rte_bus *bus, const void *data) 571 { 572 RTE_SET_USED(bus); 573 RTE_SET_USED(data); 574 return 0; 575 } 576 577 static void 578 device_infos_display_speeds(uint32_t speed_capa) 579 { 580 printf("\n\tDevice speed capability:"); 581 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 582 printf(" Autonegotiate (all speeds)"); 583 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 584 printf(" Disable autonegotiate (fixed speed) "); 585 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 586 printf(" 10 Mbps half-duplex "); 587 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 588 printf(" 10 Mbps full-duplex "); 589 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 590 printf(" 100 Mbps half-duplex "); 591 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 592 printf(" 100 Mbps full-duplex "); 593 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 594 printf(" 1 Gbps "); 595 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 596 printf(" 2.5 Gbps "); 597 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 598 printf(" 5 Gbps "); 599 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 600 printf(" 10 Gbps "); 601 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 602 printf(" 20 Gbps "); 603 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 604 printf(" 25 Gbps "); 605 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 606 printf(" 40 Gbps "); 607 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 608 printf(" 50 Gbps "); 609 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 610 printf(" 56 Gbps "); 611 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 612 printf(" 100 Gbps "); 613 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 614 printf(" 200 Gbps "); 615 } 616 617 void 618 device_infos_display(const char *identifier) 619 { 620 static const char *info_border = "*********************"; 621 struct rte_bus *start = NULL, *next; 622 struct rte_dev_iterator dev_iter; 623 char name[RTE_ETH_NAME_MAX_LEN]; 624 struct rte_ether_addr mac_addr; 625 struct rte_device *dev; 626 struct rte_devargs da; 627 portid_t port_id; 628 struct rte_eth_dev_info dev_info; 629 char devstr[128]; 630 631 memset(&da, 0, sizeof(da)); 632 if (!identifier) 633 goto skip_parse; 634 635 if (rte_devargs_parsef(&da, "%s", identifier)) { 636 fprintf(stderr, "cannot parse identifier\n"); 637 return; 638 } 639 640 skip_parse: 641 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 642 643 start = next; 644 if (identifier && da.bus != next) 645 continue; 646 647 snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next)); 648 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 649 650 if (rte_dev_driver(dev) == NULL) 651 continue; 652 /* Check for matching device if identifier is present */ 653 if (identifier && 654 strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev)))) 655 continue; 656 printf("\n%s Infos for device %s %s\n", 657 info_border, rte_dev_name(dev), info_border); 658 printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev))); 659 printf("\nBus information: %s", 660 rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : ""); 661 printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev))); 662 printf("\nDevargs: %s", 663 rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : ""); 664 printf("\nConnect to socket: %d", rte_dev_numa_node(dev)); 665 printf("\n"); 666 667 /* List ports with matching device name */ 668 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 669 printf("\n\tPort id: %-2d", port_id); 670 if (eth_macaddr_get_print_err(port_id, 671 &mac_addr) == 0) 672 print_ethaddr("\n\tMAC address: ", 673 &mac_addr); 674 rte_eth_dev_get_name_by_port(port_id, name); 675 printf("\n\tDevice name: %s", name); 676 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 677 device_infos_display_speeds(dev_info.speed_capa); 678 printf("\n"); 679 } 680 } 681 }; 682 rte_devargs_reset(&da); 683 } 684 685 static void 686 print_dev_capabilities(uint64_t capabilities) 687 { 688 uint64_t single_capa; 689 int begin; 690 int end; 691 int bit; 692 693 if (capabilities == 0) 694 return; 695 696 begin = __builtin_ctzll(capabilities); 697 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 698 699 single_capa = 1ULL << begin; 700 for (bit = begin; bit < end; bit++) { 701 if (capabilities & single_capa) 702 printf(" %s", 703 rte_eth_dev_capability_name(single_capa)); 704 single_capa <<= 1; 705 } 706 } 707 708 uint64_t 709 str_to_rsstypes(const char *str) 710 { 711 uint16_t i; 712 713 for (i = 0; rss_type_table[i].str != NULL; i++) { 714 if (strcmp(rss_type_table[i].str, str) == 0) 715 return rss_type_table[i].rss_type; 716 } 717 718 return 0; 719 } 720 721 const char * 722 rsstypes_to_str(uint64_t rss_type) 723 { 724 uint16_t i; 725 726 for (i = 0; rss_type_table[i].str != NULL; i++) { 727 if (rss_type_table[i].rss_type == rss_type) 728 return rss_type_table[i].str; 729 } 730 731 return NULL; 732 } 733 734 static void 735 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line) 736 { 737 uint16_t user_defined_str_len; 738 uint16_t total_len = 0; 739 uint16_t str_len = 0; 740 uint64_t rss_offload; 741 uint16_t i; 742 743 for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) { 744 rss_offload = RTE_BIT64(i); 745 if ((offload_types & rss_offload) != 0) { 746 const char *p = rsstypes_to_str(rss_offload); 747 748 user_defined_str_len = 749 strlen("user-defined-") + (i / 10 + 1); 750 str_len = p ? strlen(p) : user_defined_str_len; 751 str_len += 2; /* add two spaces */ 752 if (total_len + str_len >= char_num_per_line) { 753 total_len = 0; 754 printf("\n"); 755 } 756 757 if (p) 758 printf(" %s", p); 759 else 760 printf(" user-defined-%u", i); 761 total_len += str_len; 762 } 763 } 764 printf("\n"); 765 } 766 767 void 768 port_infos_display(portid_t port_id) 769 { 770 struct rte_port *port; 771 struct rte_ether_addr mac_addr; 772 struct rte_eth_link link; 773 struct rte_eth_dev_info dev_info; 774 int vlan_offload; 775 struct rte_mempool * mp; 776 static const char *info_border = "*********************"; 777 uint16_t mtu; 778 char name[RTE_ETH_NAME_MAX_LEN]; 779 int ret; 780 char fw_version[ETHDEV_FWVERS_LEN]; 781 782 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 783 print_valid_ports(); 784 return; 785 } 786 port = &ports[port_id]; 787 ret = eth_link_get_nowait_print_err(port_id, &link); 788 if (ret < 0) 789 return; 790 791 ret = eth_dev_info_get_print_err(port_id, &dev_info); 792 if (ret != 0) 793 return; 794 795 printf("\n%s Infos for port %-2d %s\n", 796 info_border, port_id, info_border); 797 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 798 print_ethaddr("MAC address: ", &mac_addr); 799 rte_eth_dev_get_name_by_port(port_id, name); 800 printf("\nDevice name: %s", name); 801 printf("\nDriver name: %s", dev_info.driver_name); 802 803 if (rte_eth_dev_fw_version_get(port_id, fw_version, 804 ETHDEV_FWVERS_LEN) == 0) 805 printf("\nFirmware-version: %s", fw_version); 806 else 807 printf("\nFirmware-version: %s", "not available"); 808 809 if (rte_dev_devargs(dev_info.device) && rte_dev_devargs(dev_info.device)->args) 810 printf("\nDevargs: %s", rte_dev_devargs(dev_info.device)->args); 811 printf("\nConnect to socket: %u", port->socket_id); 812 813 if (port_numa[port_id] != NUMA_NO_CONFIG) { 814 mp = mbuf_pool_find(port_numa[port_id], 0); 815 if (mp) 816 printf("\nmemory allocation on the socket: %d", 817 port_numa[port_id]); 818 } else 819 printf("\nmemory allocation on the socket: %u",port->socket_id); 820 821 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 822 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 823 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 824 ("full-duplex") : ("half-duplex")); 825 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 826 ("On") : ("Off")); 827 828 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 829 printf("MTU: %u\n", mtu); 830 831 printf("Promiscuous mode: %s\n", 832 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 833 printf("Allmulticast mode: %s\n", 834 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 835 printf("Maximum number of MAC addresses: %u\n", 836 (unsigned int)(port->dev_info.max_mac_addrs)); 837 printf("Maximum number of MAC addresses of hash filtering: %u\n", 838 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 839 840 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 841 if (vlan_offload >= 0){ 842 printf("VLAN offload: \n"); 843 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 844 printf(" strip on, "); 845 else 846 printf(" strip off, "); 847 848 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 849 printf("filter on, "); 850 else 851 printf("filter off, "); 852 853 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 854 printf("extend on, "); 855 else 856 printf("extend off, "); 857 858 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 859 printf("qinq strip on\n"); 860 else 861 printf("qinq strip off\n"); 862 } 863 864 if (dev_info.hash_key_size > 0) 865 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 866 if (dev_info.reta_size > 0) 867 printf("Redirection table size: %u\n", dev_info.reta_size); 868 if (!dev_info.flow_type_rss_offloads) 869 printf("No RSS offload flow type is supported.\n"); 870 else { 871 printf("Supported RSS offload flow types:\n"); 872 rss_offload_types_display(dev_info.flow_type_rss_offloads, 873 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 874 } 875 876 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 877 printf("Maximum configurable length of RX packet: %u\n", 878 dev_info.max_rx_pktlen); 879 printf("Maximum configurable size of LRO aggregated packet: %u\n", 880 dev_info.max_lro_pkt_size); 881 if (dev_info.max_vfs) 882 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 883 if (dev_info.max_vmdq_pools) 884 printf("Maximum number of VMDq pools: %u\n", 885 dev_info.max_vmdq_pools); 886 887 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 888 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 889 printf("Max possible number of RXDs per queue: %hu\n", 890 dev_info.rx_desc_lim.nb_max); 891 printf("Min possible number of RXDs per queue: %hu\n", 892 dev_info.rx_desc_lim.nb_min); 893 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 894 895 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 896 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 897 printf("Max possible number of TXDs per queue: %hu\n", 898 dev_info.tx_desc_lim.nb_max); 899 printf("Min possible number of TXDs per queue: %hu\n", 900 dev_info.tx_desc_lim.nb_min); 901 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 902 printf("Max segment number per packet: %hu\n", 903 dev_info.tx_desc_lim.nb_seg_max); 904 printf("Max segment number per MTU/TSO: %hu\n", 905 dev_info.tx_desc_lim.nb_mtu_seg_max); 906 907 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 908 print_dev_capabilities(dev_info.dev_capa); 909 printf(" )\n"); 910 /* Show switch info only if valid switch domain and port id is set */ 911 if (dev_info.switch_info.domain_id != 912 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 913 if (dev_info.switch_info.name) 914 printf("Switch name: %s\n", dev_info.switch_info.name); 915 916 printf("Switch domain Id: %u\n", 917 dev_info.switch_info.domain_id); 918 printf("Switch Port Id: %u\n", 919 dev_info.switch_info.port_id); 920 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 921 printf("Switch Rx domain: %u\n", 922 dev_info.switch_info.rx_domain); 923 } 924 printf("Device error handling mode: "); 925 switch (dev_info.err_handle_mode) { 926 case RTE_ETH_ERROR_HANDLE_MODE_NONE: 927 printf("none\n"); 928 break; 929 case RTE_ETH_ERROR_HANDLE_MODE_PASSIVE: 930 printf("passive\n"); 931 break; 932 case RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE: 933 printf("proactive\n"); 934 break; 935 default: 936 printf("unknown\n"); 937 break; 938 } 939 } 940 941 void 942 port_summary_header_display(void) 943 { 944 uint16_t port_number; 945 946 port_number = rte_eth_dev_count_avail(); 947 printf("Number of available ports: %i\n", port_number); 948 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 949 "Driver", "Status", "Link"); 950 } 951 952 void 953 port_summary_display(portid_t port_id) 954 { 955 struct rte_ether_addr mac_addr; 956 struct rte_eth_link link; 957 struct rte_eth_dev_info dev_info; 958 char name[RTE_ETH_NAME_MAX_LEN]; 959 int ret; 960 961 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 962 print_valid_ports(); 963 return; 964 } 965 966 ret = eth_link_get_nowait_print_err(port_id, &link); 967 if (ret < 0) 968 return; 969 970 ret = eth_dev_info_get_print_err(port_id, &dev_info); 971 if (ret != 0) 972 return; 973 974 rte_eth_dev_get_name_by_port(port_id, name); 975 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 976 if (ret != 0) 977 return; 978 979 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 980 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 981 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 982 rte_eth_link_speed_to_str(link.link_speed)); 983 } 984 985 void 986 port_eeprom_display(portid_t port_id) 987 { 988 struct rte_dev_eeprom_info einfo; 989 int ret; 990 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 991 print_valid_ports(); 992 return; 993 } 994 995 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 996 if (len_eeprom < 0) { 997 switch (len_eeprom) { 998 case -ENODEV: 999 fprintf(stderr, "port index %d invalid\n", port_id); 1000 break; 1001 case -ENOTSUP: 1002 fprintf(stderr, "operation not supported by device\n"); 1003 break; 1004 case -EIO: 1005 fprintf(stderr, "device is removed\n"); 1006 break; 1007 default: 1008 fprintf(stderr, "Unable to get EEPROM: %d\n", 1009 len_eeprom); 1010 break; 1011 } 1012 return; 1013 } 1014 1015 einfo.offset = 0; 1016 einfo.length = len_eeprom; 1017 einfo.data = calloc(1, len_eeprom); 1018 if (!einfo.data) { 1019 fprintf(stderr, 1020 "Allocation of port %u eeprom data failed\n", 1021 port_id); 1022 return; 1023 } 1024 1025 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1026 if (ret != 0) { 1027 switch (ret) { 1028 case -ENODEV: 1029 fprintf(stderr, "port index %d invalid\n", port_id); 1030 break; 1031 case -ENOTSUP: 1032 fprintf(stderr, "operation not supported by device\n"); 1033 break; 1034 case -EIO: 1035 fprintf(stderr, "device is removed\n"); 1036 break; 1037 default: 1038 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1039 break; 1040 } 1041 free(einfo.data); 1042 return; 1043 } 1044 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1045 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1046 free(einfo.data); 1047 } 1048 1049 void 1050 port_module_eeprom_display(portid_t port_id) 1051 { 1052 struct rte_eth_dev_module_info minfo; 1053 struct rte_dev_eeprom_info einfo; 1054 int ret; 1055 1056 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1057 print_valid_ports(); 1058 return; 1059 } 1060 1061 1062 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1063 if (ret != 0) { 1064 switch (ret) { 1065 case -ENODEV: 1066 fprintf(stderr, "port index %d invalid\n", port_id); 1067 break; 1068 case -ENOTSUP: 1069 fprintf(stderr, "operation not supported by device\n"); 1070 break; 1071 case -EIO: 1072 fprintf(stderr, "device is removed\n"); 1073 break; 1074 default: 1075 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1076 ret); 1077 break; 1078 } 1079 return; 1080 } 1081 1082 einfo.offset = 0; 1083 einfo.length = minfo.eeprom_len; 1084 einfo.data = calloc(1, minfo.eeprom_len); 1085 if (!einfo.data) { 1086 fprintf(stderr, 1087 "Allocation of port %u eeprom data failed\n", 1088 port_id); 1089 return; 1090 } 1091 1092 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1093 if (ret != 0) { 1094 switch (ret) { 1095 case -ENODEV: 1096 fprintf(stderr, "port index %d invalid\n", port_id); 1097 break; 1098 case -ENOTSUP: 1099 fprintf(stderr, "operation not supported by device\n"); 1100 break; 1101 case -EIO: 1102 fprintf(stderr, "device is removed\n"); 1103 break; 1104 default: 1105 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1106 ret); 1107 break; 1108 } 1109 free(einfo.data); 1110 return; 1111 } 1112 1113 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1114 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1115 free(einfo.data); 1116 } 1117 1118 int 1119 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1120 { 1121 uint16_t pid; 1122 1123 if (port_id == (portid_t)RTE_PORT_ALL) 1124 return 0; 1125 1126 RTE_ETH_FOREACH_DEV(pid) 1127 if (port_id == pid) 1128 return 0; 1129 1130 if (warning == ENABLED_WARN) 1131 fprintf(stderr, "Invalid port %d\n", port_id); 1132 1133 return 1; 1134 } 1135 1136 void print_valid_ports(void) 1137 { 1138 portid_t pid; 1139 1140 printf("The valid ports array is ["); 1141 RTE_ETH_FOREACH_DEV(pid) { 1142 printf(" %d", pid); 1143 } 1144 printf(" ]\n"); 1145 } 1146 1147 static int 1148 vlan_id_is_invalid(uint16_t vlan_id) 1149 { 1150 if (vlan_id < 4096) 1151 return 0; 1152 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1153 return 1; 1154 } 1155 1156 static uint32_t 1157 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1158 { 1159 uint32_t overhead_len; 1160 1161 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1162 overhead_len = max_rx_pktlen - max_mtu; 1163 else 1164 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1165 1166 return overhead_len; 1167 } 1168 1169 static int 1170 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1171 { 1172 struct rte_eth_dev_info dev_info; 1173 uint32_t overhead_len; 1174 uint32_t frame_size; 1175 int ret; 1176 1177 ret = rte_eth_dev_info_get(port_id, &dev_info); 1178 if (ret != 0) 1179 return ret; 1180 1181 if (mtu < dev_info.min_mtu) { 1182 fprintf(stderr, 1183 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1184 mtu, dev_info.min_mtu, port_id); 1185 return -EINVAL; 1186 } 1187 if (mtu > dev_info.max_mtu) { 1188 fprintf(stderr, 1189 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1190 mtu, dev_info.max_mtu, port_id); 1191 return -EINVAL; 1192 } 1193 1194 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1195 dev_info.max_mtu); 1196 frame_size = mtu + overhead_len; 1197 if (frame_size > dev_info.max_rx_pktlen) { 1198 fprintf(stderr, 1199 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1200 frame_size, dev_info.max_rx_pktlen, port_id); 1201 return -EINVAL; 1202 } 1203 1204 return 0; 1205 } 1206 1207 void 1208 port_mtu_set(portid_t port_id, uint16_t mtu) 1209 { 1210 struct rte_port *port = &ports[port_id]; 1211 int diag; 1212 1213 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1214 return; 1215 1216 diag = eth_dev_validate_mtu(port_id, mtu); 1217 if (diag != 0) 1218 return; 1219 1220 if (port->need_reconfig == 0) { 1221 diag = rte_eth_dev_set_mtu(port_id, mtu); 1222 if (diag != 0) { 1223 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1224 return; 1225 } 1226 } 1227 1228 port->dev_conf.rxmode.mtu = mtu; 1229 } 1230 1231 /* Generic flow management functions. */ 1232 1233 static struct port_flow_tunnel * 1234 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1235 { 1236 struct port_flow_tunnel *flow_tunnel; 1237 1238 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1239 if (flow_tunnel->id == port_tunnel_id) 1240 goto out; 1241 } 1242 flow_tunnel = NULL; 1243 1244 out: 1245 return flow_tunnel; 1246 } 1247 1248 const char * 1249 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1250 { 1251 const char *type; 1252 switch (tunnel->type) { 1253 default: 1254 type = "unknown"; 1255 break; 1256 case RTE_FLOW_ITEM_TYPE_VXLAN: 1257 type = "vxlan"; 1258 break; 1259 case RTE_FLOW_ITEM_TYPE_GRE: 1260 type = "gre"; 1261 break; 1262 case RTE_FLOW_ITEM_TYPE_NVGRE: 1263 type = "nvgre"; 1264 break; 1265 case RTE_FLOW_ITEM_TYPE_GENEVE: 1266 type = "geneve"; 1267 break; 1268 } 1269 1270 return type; 1271 } 1272 1273 struct port_flow_tunnel * 1274 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1275 { 1276 struct rte_port *port = &ports[port_id]; 1277 struct port_flow_tunnel *flow_tunnel; 1278 1279 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1280 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1281 goto out; 1282 } 1283 flow_tunnel = NULL; 1284 1285 out: 1286 return flow_tunnel; 1287 } 1288 1289 void port_flow_tunnel_list(portid_t port_id) 1290 { 1291 struct rte_port *port = &ports[port_id]; 1292 struct port_flow_tunnel *flt; 1293 1294 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1295 printf("port %u tunnel #%u type=%s", 1296 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1297 if (flt->tunnel.tun_id) 1298 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1299 printf("\n"); 1300 } 1301 } 1302 1303 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1304 { 1305 struct rte_port *port = &ports[port_id]; 1306 struct port_flow_tunnel *flt; 1307 1308 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1309 if (flt->id == tunnel_id) 1310 break; 1311 } 1312 if (flt) { 1313 LIST_REMOVE(flt, chain); 1314 free(flt); 1315 printf("port %u: flow tunnel #%u destroyed\n", 1316 port_id, tunnel_id); 1317 } 1318 } 1319 1320 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1321 { 1322 struct rte_port *port = &ports[port_id]; 1323 enum rte_flow_item_type type; 1324 struct port_flow_tunnel *flt; 1325 1326 if (!strcmp(ops->type, "vxlan")) 1327 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1328 else if (!strcmp(ops->type, "gre")) 1329 type = RTE_FLOW_ITEM_TYPE_GRE; 1330 else if (!strcmp(ops->type, "nvgre")) 1331 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1332 else if (!strcmp(ops->type, "geneve")) 1333 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1334 else { 1335 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1336 ops->type); 1337 return; 1338 } 1339 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1340 if (flt->tunnel.type == type) 1341 break; 1342 } 1343 if (!flt) { 1344 flt = calloc(1, sizeof(*flt)); 1345 if (!flt) { 1346 fprintf(stderr, "failed to allocate port flt object\n"); 1347 return; 1348 } 1349 flt->tunnel.type = type; 1350 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1351 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1352 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1353 } 1354 printf("port %d: flow tunnel #%u type %s\n", 1355 port_id, flt->id, ops->type); 1356 } 1357 1358 /** Generate a port_flow entry from attributes/pattern/actions. */ 1359 static struct port_flow * 1360 port_flow_new(const struct rte_flow_attr *attr, 1361 const struct rte_flow_item *pattern, 1362 const struct rte_flow_action *actions, 1363 struct rte_flow_error *error) 1364 { 1365 const struct rte_flow_conv_rule rule = { 1366 .attr_ro = attr, 1367 .pattern_ro = pattern, 1368 .actions_ro = actions, 1369 }; 1370 struct port_flow *pf; 1371 int ret; 1372 1373 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1374 if (ret < 0) 1375 return NULL; 1376 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1377 if (!pf) { 1378 rte_flow_error_set 1379 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1380 "calloc() failed"); 1381 return NULL; 1382 } 1383 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1384 error) >= 0) 1385 return pf; 1386 free(pf); 1387 return NULL; 1388 } 1389 1390 /** Print a message out of a flow error. */ 1391 static int 1392 port_flow_complain(struct rte_flow_error *error) 1393 { 1394 static const char *const errstrlist[] = { 1395 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1396 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1397 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1398 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1399 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1400 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1401 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1402 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1403 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1404 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1405 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1406 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1407 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1408 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1409 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1410 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1411 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1412 }; 1413 const char *errstr; 1414 char buf[32]; 1415 int err = rte_errno; 1416 1417 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1418 !errstrlist[error->type]) 1419 errstr = "unknown type"; 1420 else 1421 errstr = errstrlist[error->type]; 1422 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1423 __func__, error->type, errstr, 1424 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1425 error->cause), buf) : "", 1426 error->message ? error->message : "(no stated reason)", 1427 rte_strerror(err)); 1428 1429 switch (error->type) { 1430 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1431 fprintf(stderr, "The status suggests the use of \"transfer\" " 1432 "as the possible cause of the failure. Make " 1433 "sure that the flow in question and its " 1434 "indirect components (if any) are managed " 1435 "via \"transfer\" proxy port. Use command " 1436 "\"show port (port_id) flow transfer proxy\" " 1437 "to figure out the proxy port ID\n"); 1438 break; 1439 default: 1440 break; 1441 } 1442 1443 return -err; 1444 } 1445 1446 static void 1447 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line) 1448 { 1449 uint16_t total_len = 0; 1450 uint16_t str_len; 1451 uint16_t i; 1452 1453 if (rss_types == 0) 1454 return; 1455 1456 for (i = 0; rss_type_table[i].str; i++) { 1457 if (rss_type_table[i].rss_type == 0) 1458 continue; 1459 1460 if ((rss_types & rss_type_table[i].rss_type) == 1461 rss_type_table[i].rss_type) { 1462 /* Contain two spaces */ 1463 str_len = strlen(rss_type_table[i].str) + 2; 1464 if (total_len + str_len > char_num_per_line) { 1465 printf("\n"); 1466 total_len = 0; 1467 } 1468 printf(" %s", rss_type_table[i].str); 1469 total_len += str_len; 1470 } 1471 } 1472 printf("\n"); 1473 } 1474 1475 static void 1476 rss_config_display(struct rte_flow_action_rss *rss_conf) 1477 { 1478 uint8_t i; 1479 1480 if (rss_conf == NULL) { 1481 fprintf(stderr, "Invalid rule\n"); 1482 return; 1483 } 1484 1485 printf("RSS:\n" 1486 " queues:"); 1487 if (rss_conf->queue_num == 0) 1488 printf(" none"); 1489 for (i = 0; i < rss_conf->queue_num; i++) 1490 printf(" %d", rss_conf->queue[i]); 1491 printf("\n"); 1492 1493 printf(" function: "); 1494 switch (rss_conf->func) { 1495 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1496 printf("default\n"); 1497 break; 1498 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1499 printf("toeplitz\n"); 1500 break; 1501 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1502 printf("simple_xor\n"); 1503 break; 1504 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1505 printf("symmetric_toeplitz\n"); 1506 break; 1507 default: 1508 printf("Unknown function\n"); 1509 return; 1510 } 1511 1512 printf(" types:\n"); 1513 if (rss_conf->types == 0) { 1514 printf(" none\n"); 1515 return; 1516 } 1517 rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 1518 } 1519 1520 static struct port_indirect_action * 1521 action_get_by_id(portid_t port_id, uint32_t id) 1522 { 1523 struct rte_port *port; 1524 struct port_indirect_action **ppia; 1525 struct port_indirect_action *pia = NULL; 1526 1527 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1528 port_id == (portid_t)RTE_PORT_ALL) 1529 return NULL; 1530 port = &ports[port_id]; 1531 ppia = &port->actions_list; 1532 while (*ppia) { 1533 if ((*ppia)->id == id) { 1534 pia = *ppia; 1535 break; 1536 } 1537 ppia = &(*ppia)->next; 1538 } 1539 if (!pia) 1540 fprintf(stderr, 1541 "Failed to find indirect action #%u on port %u\n", 1542 id, port_id); 1543 return pia; 1544 } 1545 1546 static int 1547 action_alloc(portid_t port_id, uint32_t id, 1548 struct port_indirect_action **action) 1549 { 1550 struct rte_port *port; 1551 struct port_indirect_action **ppia; 1552 struct port_indirect_action *pia = NULL; 1553 1554 *action = NULL; 1555 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1556 port_id == (portid_t)RTE_PORT_ALL) 1557 return -EINVAL; 1558 port = &ports[port_id]; 1559 if (id == UINT32_MAX) { 1560 /* taking first available ID */ 1561 if (port->actions_list) { 1562 if (port->actions_list->id == UINT32_MAX - 1) { 1563 fprintf(stderr, 1564 "Highest indirect action ID is already assigned, delete it first\n"); 1565 return -ENOMEM; 1566 } 1567 id = port->actions_list->id + 1; 1568 } else { 1569 id = 0; 1570 } 1571 } 1572 pia = calloc(1, sizeof(*pia)); 1573 if (!pia) { 1574 fprintf(stderr, 1575 "Allocation of port %u indirect action failed\n", 1576 port_id); 1577 return -ENOMEM; 1578 } 1579 ppia = &port->actions_list; 1580 while (*ppia && (*ppia)->id > id) 1581 ppia = &(*ppia)->next; 1582 if (*ppia && (*ppia)->id == id) { 1583 fprintf(stderr, 1584 "Indirect action #%u is already assigned, delete it first\n", 1585 id); 1586 free(pia); 1587 return -EINVAL; 1588 } 1589 pia->next = *ppia; 1590 pia->id = id; 1591 *ppia = pia; 1592 *action = pia; 1593 return 0; 1594 } 1595 1596 static int 1597 template_alloc(uint32_t id, struct port_template **template, 1598 struct port_template **list) 1599 { 1600 struct port_template *lst = *list; 1601 struct port_template **ppt; 1602 struct port_template *pt = NULL; 1603 1604 *template = NULL; 1605 if (id == UINT32_MAX) { 1606 /* taking first available ID */ 1607 if (lst) { 1608 if (lst->id == UINT32_MAX - 1) { 1609 printf("Highest template ID is already" 1610 " assigned, delete it first\n"); 1611 return -ENOMEM; 1612 } 1613 id = lst->id + 1; 1614 } else { 1615 id = 0; 1616 } 1617 } 1618 pt = calloc(1, sizeof(*pt)); 1619 if (!pt) { 1620 printf("Allocation of port template failed\n"); 1621 return -ENOMEM; 1622 } 1623 ppt = list; 1624 while (*ppt && (*ppt)->id > id) 1625 ppt = &(*ppt)->next; 1626 if (*ppt && (*ppt)->id == id) { 1627 printf("Template #%u is already assigned," 1628 " delete it first\n", id); 1629 free(pt); 1630 return -EINVAL; 1631 } 1632 pt->next = *ppt; 1633 pt->id = id; 1634 *ppt = pt; 1635 *template = pt; 1636 return 0; 1637 } 1638 1639 static int 1640 table_alloc(uint32_t id, struct port_table **table, 1641 struct port_table **list) 1642 { 1643 struct port_table *lst = *list; 1644 struct port_table **ppt; 1645 struct port_table *pt = NULL; 1646 1647 *table = NULL; 1648 if (id == UINT32_MAX) { 1649 /* taking first available ID */ 1650 if (lst) { 1651 if (lst->id == UINT32_MAX - 1) { 1652 printf("Highest table ID is already" 1653 " assigned, delete it first\n"); 1654 return -ENOMEM; 1655 } 1656 id = lst->id + 1; 1657 } else { 1658 id = 0; 1659 } 1660 } 1661 pt = calloc(1, sizeof(*pt)); 1662 if (!pt) { 1663 printf("Allocation of table failed\n"); 1664 return -ENOMEM; 1665 } 1666 ppt = list; 1667 while (*ppt && (*ppt)->id > id) 1668 ppt = &(*ppt)->next; 1669 if (*ppt && (*ppt)->id == id) { 1670 printf("Table #%u is already assigned," 1671 " delete it first\n", id); 1672 free(pt); 1673 return -EINVAL; 1674 } 1675 pt->next = *ppt; 1676 pt->id = id; 1677 *ppt = pt; 1678 *table = pt; 1679 return 0; 1680 } 1681 1682 /** Get info about flow management resources. */ 1683 int 1684 port_flow_get_info(portid_t port_id) 1685 { 1686 struct rte_flow_port_info port_info; 1687 struct rte_flow_queue_info queue_info; 1688 struct rte_flow_error error; 1689 1690 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1691 port_id == (portid_t)RTE_PORT_ALL) 1692 return -EINVAL; 1693 /* Poisoning to make sure PMDs update it in case of error. */ 1694 memset(&error, 0x99, sizeof(error)); 1695 memset(&port_info, 0, sizeof(port_info)); 1696 memset(&queue_info, 0, sizeof(queue_info)); 1697 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1698 return port_flow_complain(&error); 1699 printf("Flow engine resources on port %u:\n" 1700 "Number of queues: %d\n" 1701 "Size of queues: %d\n" 1702 "Number of counters: %d\n" 1703 "Number of aging objects: %d\n" 1704 "Number of meter actions: %d\n", 1705 port_id, port_info.max_nb_queues, 1706 queue_info.max_size, 1707 port_info.max_nb_counters, 1708 port_info.max_nb_aging_objects, 1709 port_info.max_nb_meters); 1710 return 0; 1711 } 1712 1713 /** Configure flow management resources. */ 1714 int 1715 port_flow_configure(portid_t port_id, 1716 const struct rte_flow_port_attr *port_attr, 1717 uint16_t nb_queue, 1718 const struct rte_flow_queue_attr *queue_attr) 1719 { 1720 struct rte_port *port; 1721 struct rte_flow_error error; 1722 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1723 int std_queue; 1724 1725 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1726 port_id == (portid_t)RTE_PORT_ALL) 1727 return -EINVAL; 1728 port = &ports[port_id]; 1729 port->queue_nb = nb_queue; 1730 port->queue_sz = queue_attr->size; 1731 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1732 attr_list[std_queue] = queue_attr; 1733 /* Poisoning to make sure PMDs update it in case of error. */ 1734 memset(&error, 0x66, sizeof(error)); 1735 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1736 return port_flow_complain(&error); 1737 printf("Configure flows on port %u: " 1738 "number of queues %d with %d elements\n", 1739 port_id, nb_queue, queue_attr->size); 1740 return 0; 1741 } 1742 1743 /** Create indirect action */ 1744 int 1745 port_action_handle_create(portid_t port_id, uint32_t id, 1746 const struct rte_flow_indir_action_conf *conf, 1747 const struct rte_flow_action *action) 1748 { 1749 struct port_indirect_action *pia; 1750 int ret; 1751 struct rte_flow_error error; 1752 1753 ret = action_alloc(port_id, id, &pia); 1754 if (ret) 1755 return ret; 1756 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1757 struct rte_flow_action_age *age = 1758 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1759 1760 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1761 age->context = &pia->age_type; 1762 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1763 struct rte_flow_action_conntrack *ct = 1764 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1765 1766 memcpy(ct, &conntrack_context, sizeof(*ct)); 1767 } 1768 /* Poisoning to make sure PMDs update it in case of error. */ 1769 memset(&error, 0x22, sizeof(error)); 1770 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1771 &error); 1772 if (!pia->handle) { 1773 uint32_t destroy_id = pia->id; 1774 port_action_handle_destroy(port_id, 1, &destroy_id); 1775 return port_flow_complain(&error); 1776 } 1777 pia->type = action->type; 1778 printf("Indirect action #%u created\n", pia->id); 1779 return 0; 1780 } 1781 1782 /** Destroy indirect action */ 1783 int 1784 port_action_handle_destroy(portid_t port_id, 1785 uint32_t n, 1786 const uint32_t *actions) 1787 { 1788 struct rte_port *port; 1789 struct port_indirect_action **tmp; 1790 int ret = 0; 1791 1792 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1793 port_id == (portid_t)RTE_PORT_ALL) 1794 return -EINVAL; 1795 port = &ports[port_id]; 1796 tmp = &port->actions_list; 1797 while (*tmp) { 1798 uint32_t i; 1799 1800 for (i = 0; i != n; ++i) { 1801 struct rte_flow_error error; 1802 struct port_indirect_action *pia = *tmp; 1803 1804 if (actions[i] != pia->id) 1805 continue; 1806 /* 1807 * Poisoning to make sure PMDs update it in case 1808 * of error. 1809 */ 1810 memset(&error, 0x33, sizeof(error)); 1811 1812 if (pia->handle && rte_flow_action_handle_destroy( 1813 port_id, pia->handle, &error)) { 1814 ret = port_flow_complain(&error); 1815 continue; 1816 } 1817 *tmp = pia->next; 1818 printf("Indirect action #%u destroyed\n", pia->id); 1819 free(pia); 1820 break; 1821 } 1822 if (i == n) 1823 tmp = &(*tmp)->next; 1824 } 1825 return ret; 1826 } 1827 1828 int 1829 port_action_handle_flush(portid_t port_id) 1830 { 1831 struct rte_port *port; 1832 struct port_indirect_action **tmp; 1833 int ret = 0; 1834 1835 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1836 port_id == (portid_t)RTE_PORT_ALL) 1837 return -EINVAL; 1838 port = &ports[port_id]; 1839 tmp = &port->actions_list; 1840 while (*tmp != NULL) { 1841 struct rte_flow_error error; 1842 struct port_indirect_action *pia = *tmp; 1843 1844 /* Poisoning to make sure PMDs update it in case of error. */ 1845 memset(&error, 0x44, sizeof(error)); 1846 if (pia->handle != NULL && 1847 rte_flow_action_handle_destroy 1848 (port_id, pia->handle, &error) != 0) { 1849 printf("Indirect action #%u not destroyed\n", pia->id); 1850 ret = port_flow_complain(&error); 1851 tmp = &pia->next; 1852 } else { 1853 *tmp = pia->next; 1854 free(pia); 1855 } 1856 } 1857 return ret; 1858 } 1859 1860 /** Get indirect action by port + id */ 1861 struct rte_flow_action_handle * 1862 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1863 { 1864 1865 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1866 1867 return (pia) ? pia->handle : NULL; 1868 } 1869 1870 /** Update indirect action */ 1871 int 1872 port_action_handle_update(portid_t port_id, uint32_t id, 1873 const struct rte_flow_action *action) 1874 { 1875 struct rte_flow_error error; 1876 struct rte_flow_action_handle *action_handle; 1877 struct port_indirect_action *pia; 1878 const void *update; 1879 1880 action_handle = port_action_handle_get_by_id(port_id, id); 1881 if (!action_handle) 1882 return -EINVAL; 1883 pia = action_get_by_id(port_id, id); 1884 if (!pia) 1885 return -EINVAL; 1886 switch (pia->type) { 1887 case RTE_FLOW_ACTION_TYPE_AGE: 1888 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1889 update = action->conf; 1890 break; 1891 default: 1892 update = action; 1893 break; 1894 } 1895 if (rte_flow_action_handle_update(port_id, action_handle, update, 1896 &error)) { 1897 return port_flow_complain(&error); 1898 } 1899 printf("Indirect action #%u updated\n", id); 1900 return 0; 1901 } 1902 1903 static void 1904 port_action_handle_query_dump(portid_t port_id, 1905 const struct port_indirect_action *pia, 1906 union port_action_query *query) 1907 { 1908 if (!pia || !query) 1909 return; 1910 switch (pia->type) { 1911 case RTE_FLOW_ACTION_TYPE_AGE: 1912 printf("Indirect AGE action:\n" 1913 " aged: %u\n" 1914 " sec_since_last_hit_valid: %u\n" 1915 " sec_since_last_hit: %" PRIu32 "\n", 1916 query->age.aged, 1917 query->age.sec_since_last_hit_valid, 1918 query->age.sec_since_last_hit); 1919 break; 1920 case RTE_FLOW_ACTION_TYPE_COUNT: 1921 printf("Indirect COUNT action:\n" 1922 " hits_set: %u\n" 1923 " bytes_set: %u\n" 1924 " hits: %" PRIu64 "\n" 1925 " bytes: %" PRIu64 "\n", 1926 query->count.hits_set, 1927 query->count.bytes_set, 1928 query->count.hits, 1929 query->count.bytes); 1930 break; 1931 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1932 printf("Conntrack Context:\n" 1933 " Peer: %u, Flow dir: %s, Enable: %u\n" 1934 " Live: %u, SACK: %u, CACK: %u\n" 1935 " Packet dir: %s, Liberal: %u, State: %u\n" 1936 " Factor: %u, Retrans: %u, TCP flags: %u\n" 1937 " Last Seq: %u, Last ACK: %u\n" 1938 " Last Win: %u, Last End: %u\n", 1939 query->ct.peer_port, 1940 query->ct.is_original_dir ? "Original" : "Reply", 1941 query->ct.enable, query->ct.live_connection, 1942 query->ct.selective_ack, query->ct.challenge_ack_passed, 1943 query->ct.last_direction ? "Original" : "Reply", 1944 query->ct.liberal_mode, query->ct.state, 1945 query->ct.max_ack_window, query->ct.retransmission_limit, 1946 query->ct.last_index, query->ct.last_seq, 1947 query->ct.last_ack, query->ct.last_window, 1948 query->ct.last_end); 1949 printf(" Original Dir:\n" 1950 " scale: %u, fin: %u, ack seen: %u\n" 1951 " unacked data: %u\n Sent end: %u," 1952 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1953 query->ct.original_dir.scale, 1954 query->ct.original_dir.close_initiated, 1955 query->ct.original_dir.last_ack_seen, 1956 query->ct.original_dir.data_unacked, 1957 query->ct.original_dir.sent_end, 1958 query->ct.original_dir.reply_end, 1959 query->ct.original_dir.max_win, 1960 query->ct.original_dir.max_ack); 1961 printf(" Reply Dir:\n" 1962 " scale: %u, fin: %u, ack seen: %u\n" 1963 " unacked data: %u\n Sent end: %u," 1964 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1965 query->ct.reply_dir.scale, 1966 query->ct.reply_dir.close_initiated, 1967 query->ct.reply_dir.last_ack_seen, 1968 query->ct.reply_dir.data_unacked, 1969 query->ct.reply_dir.sent_end, 1970 query->ct.reply_dir.reply_end, 1971 query->ct.reply_dir.max_win, 1972 query->ct.reply_dir.max_ack); 1973 break; 1974 case RTE_FLOW_ACTION_TYPE_QUOTA: 1975 printf("Indirect QUOTA action %u\n" 1976 " unused quota: %" PRId64 "\n", 1977 pia->id, query->quota.quota); 1978 break; 1979 default: 1980 printf("port-%u: indirect action %u (type: %d) doesn't support query\n", 1981 pia->type, pia->id, port_id); 1982 break; 1983 } 1984 1985 } 1986 1987 void 1988 port_action_handle_query_update(portid_t port_id, uint32_t id, 1989 enum rte_flow_query_update_mode qu_mode, 1990 const struct rte_flow_action *action) 1991 { 1992 int ret; 1993 struct rte_flow_error error; 1994 struct port_indirect_action *pia; 1995 union port_action_query query; 1996 1997 pia = action_get_by_id(port_id, id); 1998 if (!pia || !pia->handle) 1999 return; 2000 ret = rte_flow_action_handle_query_update(port_id, pia->handle, action, 2001 &query, qu_mode, &error); 2002 if (ret) 2003 port_flow_complain(&error); 2004 else 2005 port_action_handle_query_dump(port_id, pia, &query); 2006 2007 } 2008 2009 int 2010 port_action_handle_query(portid_t port_id, uint32_t id) 2011 { 2012 struct rte_flow_error error; 2013 struct port_indirect_action *pia; 2014 union port_action_query query; 2015 2016 pia = action_get_by_id(port_id, id); 2017 if (!pia) 2018 return -EINVAL; 2019 switch (pia->type) { 2020 case RTE_FLOW_ACTION_TYPE_AGE: 2021 case RTE_FLOW_ACTION_TYPE_COUNT: 2022 case RTE_FLOW_ACTION_TYPE_QUOTA: 2023 break; 2024 default: 2025 fprintf(stderr, 2026 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2027 id, pia->type, port_id); 2028 return -ENOTSUP; 2029 } 2030 /* Poisoning to make sure PMDs update it in case of error. */ 2031 memset(&error, 0x55, sizeof(error)); 2032 memset(&query, 0, sizeof(query)); 2033 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 2034 return port_flow_complain(&error); 2035 port_action_handle_query_dump(port_id, pia, &query); 2036 return 0; 2037 } 2038 2039 static struct port_flow_tunnel * 2040 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 2041 const struct rte_flow_item *pattern, 2042 const struct rte_flow_action *actions, 2043 const struct tunnel_ops *tunnel_ops) 2044 { 2045 int ret; 2046 struct rte_port *port; 2047 struct port_flow_tunnel *pft; 2048 struct rte_flow_error error; 2049 2050 port = &ports[port_id]; 2051 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2052 if (!pft) { 2053 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2054 tunnel_ops->id); 2055 return NULL; 2056 } 2057 if (tunnel_ops->actions) { 2058 uint32_t num_actions; 2059 const struct rte_flow_action *aptr; 2060 2061 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2062 &pft->pmd_actions, 2063 &pft->num_pmd_actions, 2064 &error); 2065 if (ret) { 2066 port_flow_complain(&error); 2067 return NULL; 2068 } 2069 for (aptr = actions, num_actions = 1; 2070 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2071 aptr++, num_actions++); 2072 pft->actions = malloc( 2073 (num_actions + pft->num_pmd_actions) * 2074 sizeof(actions[0])); 2075 if (!pft->actions) { 2076 rte_flow_tunnel_action_decap_release( 2077 port_id, pft->actions, 2078 pft->num_pmd_actions, &error); 2079 return NULL; 2080 } 2081 rte_memcpy(pft->actions, pft->pmd_actions, 2082 pft->num_pmd_actions * sizeof(actions[0])); 2083 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2084 num_actions * sizeof(actions[0])); 2085 } 2086 if (tunnel_ops->items) { 2087 uint32_t num_items; 2088 const struct rte_flow_item *iptr; 2089 2090 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2091 &pft->pmd_items, 2092 &pft->num_pmd_items, 2093 &error); 2094 if (ret) { 2095 port_flow_complain(&error); 2096 return NULL; 2097 } 2098 for (iptr = pattern, num_items = 1; 2099 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2100 iptr++, num_items++); 2101 pft->items = malloc((num_items + pft->num_pmd_items) * 2102 sizeof(pattern[0])); 2103 if (!pft->items) { 2104 rte_flow_tunnel_item_release( 2105 port_id, pft->pmd_items, 2106 pft->num_pmd_items, &error); 2107 return NULL; 2108 } 2109 rte_memcpy(pft->items, pft->pmd_items, 2110 pft->num_pmd_items * sizeof(pattern[0])); 2111 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2112 num_items * sizeof(pattern[0])); 2113 } 2114 2115 return pft; 2116 } 2117 2118 static void 2119 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2120 const struct tunnel_ops *tunnel_ops, 2121 struct port_flow_tunnel *pft) 2122 { 2123 struct rte_flow_error error; 2124 2125 if (tunnel_ops->actions) { 2126 free(pft->actions); 2127 rte_flow_tunnel_action_decap_release( 2128 port_id, pft->pmd_actions, 2129 pft->num_pmd_actions, &error); 2130 pft->actions = NULL; 2131 pft->pmd_actions = NULL; 2132 } 2133 if (tunnel_ops->items) { 2134 free(pft->items); 2135 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2136 pft->num_pmd_items, 2137 &error); 2138 pft->items = NULL; 2139 pft->pmd_items = NULL; 2140 } 2141 } 2142 2143 /** Add port meter policy */ 2144 int 2145 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2146 const struct rte_flow_action *actions) 2147 { 2148 struct rte_mtr_error error; 2149 const struct rte_flow_action *act = actions; 2150 const struct rte_flow_action *start; 2151 struct rte_mtr_meter_policy_params policy; 2152 uint32_t i = 0, act_n; 2153 int ret; 2154 2155 for (i = 0; i < RTE_COLORS; i++) { 2156 for (act_n = 0, start = act; 2157 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2158 act_n++; 2159 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2160 policy.actions[i] = start; 2161 else 2162 policy.actions[i] = NULL; 2163 act++; 2164 } 2165 ret = rte_mtr_meter_policy_add(port_id, 2166 policy_id, 2167 &policy, &error); 2168 if (ret) 2169 print_mtr_err_msg(&error); 2170 return ret; 2171 } 2172 2173 struct rte_flow_meter_profile * 2174 port_meter_profile_get_by_id(portid_t port_id, uint32_t id) 2175 { 2176 struct rte_mtr_error error; 2177 struct rte_flow_meter_profile *profile; 2178 2179 profile = rte_mtr_meter_profile_get(port_id, id, &error); 2180 if (!profile) 2181 print_mtr_err_msg(&error); 2182 return profile; 2183 } 2184 struct rte_flow_meter_policy * 2185 port_meter_policy_get_by_id(portid_t port_id, uint32_t id) 2186 { 2187 struct rte_mtr_error error; 2188 struct rte_flow_meter_policy *policy; 2189 2190 policy = rte_mtr_meter_policy_get(port_id, id, &error); 2191 if (!policy) 2192 print_mtr_err_msg(&error); 2193 return policy; 2194 } 2195 2196 /** Validate flow rule. */ 2197 int 2198 port_flow_validate(portid_t port_id, 2199 const struct rte_flow_attr *attr, 2200 const struct rte_flow_item *pattern, 2201 const struct rte_flow_action *actions, 2202 const struct tunnel_ops *tunnel_ops) 2203 { 2204 struct rte_flow_error error; 2205 struct port_flow_tunnel *pft = NULL; 2206 int ret; 2207 2208 /* Poisoning to make sure PMDs update it in case of error. */ 2209 memset(&error, 0x11, sizeof(error)); 2210 if (tunnel_ops->enabled) { 2211 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2212 actions, tunnel_ops); 2213 if (!pft) 2214 return -ENOENT; 2215 if (pft->items) 2216 pattern = pft->items; 2217 if (pft->actions) 2218 actions = pft->actions; 2219 } 2220 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2221 if (tunnel_ops->enabled) 2222 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2223 if (ret) 2224 return port_flow_complain(&error); 2225 printf("Flow rule validated\n"); 2226 return 0; 2227 } 2228 2229 /** Return age action structure if exists, otherwise NULL. */ 2230 static struct rte_flow_action_age * 2231 age_action_get(const struct rte_flow_action *actions) 2232 { 2233 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2234 switch (actions->type) { 2235 case RTE_FLOW_ACTION_TYPE_AGE: 2236 return (struct rte_flow_action_age *) 2237 (uintptr_t)actions->conf; 2238 default: 2239 break; 2240 } 2241 } 2242 return NULL; 2243 } 2244 2245 /** Create pattern template */ 2246 int 2247 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2248 const struct rte_flow_pattern_template_attr *attr, 2249 const struct rte_flow_item *pattern) 2250 { 2251 struct rte_port *port; 2252 struct port_template *pit; 2253 int ret; 2254 struct rte_flow_error error; 2255 2256 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2257 port_id == (portid_t)RTE_PORT_ALL) 2258 return -EINVAL; 2259 port = &ports[port_id]; 2260 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2261 if (ret) 2262 return ret; 2263 /* Poisoning to make sure PMDs update it in case of error. */ 2264 memset(&error, 0x22, sizeof(error)); 2265 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2266 attr, pattern, &error); 2267 if (!pit->template.pattern_template) { 2268 uint32_t destroy_id = pit->id; 2269 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2270 return port_flow_complain(&error); 2271 } 2272 printf("Pattern template #%u created\n", pit->id); 2273 return 0; 2274 } 2275 2276 /** Destroy pattern template */ 2277 int 2278 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2279 const uint32_t *template) 2280 { 2281 struct rte_port *port; 2282 struct port_template **tmp; 2283 int ret = 0; 2284 2285 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2286 port_id == (portid_t)RTE_PORT_ALL) 2287 return -EINVAL; 2288 port = &ports[port_id]; 2289 tmp = &port->pattern_templ_list; 2290 while (*tmp) { 2291 uint32_t i; 2292 2293 for (i = 0; i != n; ++i) { 2294 struct rte_flow_error error; 2295 struct port_template *pit = *tmp; 2296 2297 if (template[i] != pit->id) 2298 continue; 2299 /* 2300 * Poisoning to make sure PMDs update it in case 2301 * of error. 2302 */ 2303 memset(&error, 0x33, sizeof(error)); 2304 2305 if (pit->template.pattern_template && 2306 rte_flow_pattern_template_destroy(port_id, 2307 pit->template.pattern_template, 2308 &error)) { 2309 ret = port_flow_complain(&error); 2310 continue; 2311 } 2312 *tmp = pit->next; 2313 printf("Pattern template #%u destroyed\n", pit->id); 2314 free(pit); 2315 break; 2316 } 2317 if (i == n) 2318 tmp = &(*tmp)->next; 2319 } 2320 return ret; 2321 } 2322 2323 /** Flush pattern template */ 2324 int 2325 port_flow_pattern_template_flush(portid_t port_id) 2326 { 2327 struct rte_port *port; 2328 struct port_template **tmp; 2329 int ret = 0; 2330 2331 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2332 port_id == (portid_t)RTE_PORT_ALL) 2333 return -EINVAL; 2334 port = &ports[port_id]; 2335 tmp = &port->pattern_templ_list; 2336 while (*tmp) { 2337 struct rte_flow_error error; 2338 struct port_template *pit = *tmp; 2339 2340 /* 2341 * Poisoning to make sure PMDs update it in case 2342 * of error. 2343 */ 2344 memset(&error, 0x33, sizeof(error)); 2345 if (pit->template.pattern_template && 2346 rte_flow_pattern_template_destroy(port_id, 2347 pit->template.pattern_template, &error)) { 2348 printf("Pattern template #%u not destroyed\n", pit->id); 2349 ret = port_flow_complain(&error); 2350 tmp = &pit->next; 2351 } else { 2352 *tmp = pit->next; 2353 free(pit); 2354 } 2355 } 2356 return ret; 2357 } 2358 2359 /** Create actions template */ 2360 int 2361 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2362 const struct rte_flow_actions_template_attr *attr, 2363 const struct rte_flow_action *actions, 2364 const struct rte_flow_action *masks) 2365 { 2366 struct rte_port *port; 2367 struct port_template *pat; 2368 int ret; 2369 struct rte_flow_error error; 2370 2371 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2372 port_id == (portid_t)RTE_PORT_ALL) 2373 return -EINVAL; 2374 port = &ports[port_id]; 2375 ret = template_alloc(id, &pat, &port->actions_templ_list); 2376 if (ret) 2377 return ret; 2378 /* Poisoning to make sure PMDs update it in case of error. */ 2379 memset(&error, 0x22, sizeof(error)); 2380 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2381 attr, actions, masks, &error); 2382 if (!pat->template.actions_template) { 2383 uint32_t destroy_id = pat->id; 2384 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2385 return port_flow_complain(&error); 2386 } 2387 printf("Actions template #%u created\n", pat->id); 2388 return 0; 2389 } 2390 2391 /** Destroy actions template */ 2392 int 2393 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2394 const uint32_t *template) 2395 { 2396 struct rte_port *port; 2397 struct port_template **tmp; 2398 int ret = 0; 2399 2400 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2401 port_id == (portid_t)RTE_PORT_ALL) 2402 return -EINVAL; 2403 port = &ports[port_id]; 2404 tmp = &port->actions_templ_list; 2405 while (*tmp) { 2406 uint32_t i; 2407 2408 for (i = 0; i != n; ++i) { 2409 struct rte_flow_error error; 2410 struct port_template *pat = *tmp; 2411 2412 if (template[i] != pat->id) 2413 continue; 2414 /* 2415 * Poisoning to make sure PMDs update it in case 2416 * of error. 2417 */ 2418 memset(&error, 0x33, sizeof(error)); 2419 2420 if (pat->template.actions_template && 2421 rte_flow_actions_template_destroy(port_id, 2422 pat->template.actions_template, &error)) { 2423 ret = port_flow_complain(&error); 2424 continue; 2425 } 2426 *tmp = pat->next; 2427 printf("Actions template #%u destroyed\n", pat->id); 2428 free(pat); 2429 break; 2430 } 2431 if (i == n) 2432 tmp = &(*tmp)->next; 2433 } 2434 return ret; 2435 } 2436 2437 /** Flush actions template */ 2438 int 2439 port_flow_actions_template_flush(portid_t port_id) 2440 { 2441 struct rte_port *port; 2442 struct port_template **tmp; 2443 int ret = 0; 2444 2445 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2446 port_id == (portid_t)RTE_PORT_ALL) 2447 return -EINVAL; 2448 port = &ports[port_id]; 2449 tmp = &port->actions_templ_list; 2450 while (*tmp) { 2451 struct rte_flow_error error; 2452 struct port_template *pat = *tmp; 2453 2454 /* 2455 * Poisoning to make sure PMDs update it in case 2456 * of error. 2457 */ 2458 memset(&error, 0x33, sizeof(error)); 2459 2460 if (pat->template.actions_template && 2461 rte_flow_actions_template_destroy(port_id, 2462 pat->template.actions_template, &error)) { 2463 ret = port_flow_complain(&error); 2464 printf("Actions template #%u not destroyed\n", pat->id); 2465 tmp = &pat->next; 2466 } else { 2467 *tmp = pat->next; 2468 free(pat); 2469 } 2470 } 2471 return ret; 2472 } 2473 2474 /** Create table */ 2475 int 2476 port_flow_template_table_create(portid_t port_id, uint32_t id, 2477 const struct rte_flow_template_table_attr *table_attr, 2478 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2479 uint32_t nb_actions_templates, uint32_t *actions_templates) 2480 { 2481 struct rte_port *port; 2482 struct port_table *pt; 2483 struct port_template *temp = NULL; 2484 int ret; 2485 uint32_t i; 2486 struct rte_flow_error error; 2487 struct rte_flow_pattern_template 2488 *flow_pattern_templates[nb_pattern_templates]; 2489 struct rte_flow_actions_template 2490 *flow_actions_templates[nb_actions_templates]; 2491 2492 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2493 port_id == (portid_t)RTE_PORT_ALL) 2494 return -EINVAL; 2495 port = &ports[port_id]; 2496 for (i = 0; i < nb_pattern_templates; ++i) { 2497 bool found = false; 2498 temp = port->pattern_templ_list; 2499 while (temp) { 2500 if (pattern_templates[i] == temp->id) { 2501 flow_pattern_templates[i] = 2502 temp->template.pattern_template; 2503 found = true; 2504 break; 2505 } 2506 temp = temp->next; 2507 } 2508 if (!found) { 2509 printf("Pattern template #%u is invalid\n", 2510 pattern_templates[i]); 2511 return -EINVAL; 2512 } 2513 } 2514 for (i = 0; i < nb_actions_templates; ++i) { 2515 bool found = false; 2516 temp = port->actions_templ_list; 2517 while (temp) { 2518 if (actions_templates[i] == temp->id) { 2519 flow_actions_templates[i] = 2520 temp->template.actions_template; 2521 found = true; 2522 break; 2523 } 2524 temp = temp->next; 2525 } 2526 if (!found) { 2527 printf("Actions template #%u is invalid\n", 2528 actions_templates[i]); 2529 return -EINVAL; 2530 } 2531 } 2532 ret = table_alloc(id, &pt, &port->table_list); 2533 if (ret) 2534 return ret; 2535 /* Poisoning to make sure PMDs update it in case of error. */ 2536 memset(&error, 0x22, sizeof(error)); 2537 pt->table = rte_flow_template_table_create(port_id, table_attr, 2538 flow_pattern_templates, nb_pattern_templates, 2539 flow_actions_templates, nb_actions_templates, 2540 &error); 2541 2542 if (!pt->table) { 2543 uint32_t destroy_id = pt->id; 2544 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2545 return port_flow_complain(&error); 2546 } 2547 pt->nb_pattern_templates = nb_pattern_templates; 2548 pt->nb_actions_templates = nb_actions_templates; 2549 rte_memcpy(&pt->flow_attr, &table_attr->flow_attr, 2550 sizeof(struct rte_flow_attr)); 2551 printf("Template table #%u created\n", pt->id); 2552 return 0; 2553 } 2554 2555 /** Destroy table */ 2556 int 2557 port_flow_template_table_destroy(portid_t port_id, 2558 uint32_t n, const uint32_t *table) 2559 { 2560 struct rte_port *port; 2561 struct port_table **tmp; 2562 int ret = 0; 2563 2564 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2565 port_id == (portid_t)RTE_PORT_ALL) 2566 return -EINVAL; 2567 port = &ports[port_id]; 2568 tmp = &port->table_list; 2569 while (*tmp) { 2570 uint32_t i; 2571 2572 for (i = 0; i != n; ++i) { 2573 struct rte_flow_error error; 2574 struct port_table *pt = *tmp; 2575 2576 if (table[i] != pt->id) 2577 continue; 2578 /* 2579 * Poisoning to make sure PMDs update it in case 2580 * of error. 2581 */ 2582 memset(&error, 0x33, sizeof(error)); 2583 2584 if (pt->table && 2585 rte_flow_template_table_destroy(port_id, 2586 pt->table, 2587 &error)) { 2588 ret = port_flow_complain(&error); 2589 continue; 2590 } 2591 *tmp = pt->next; 2592 printf("Template table #%u destroyed\n", pt->id); 2593 free(pt); 2594 break; 2595 } 2596 if (i == n) 2597 tmp = &(*tmp)->next; 2598 } 2599 return ret; 2600 } 2601 2602 /** Flush table */ 2603 int 2604 port_flow_template_table_flush(portid_t port_id) 2605 { 2606 struct rte_port *port; 2607 struct port_table **tmp; 2608 int ret = 0; 2609 2610 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2611 port_id == (portid_t)RTE_PORT_ALL) 2612 return -EINVAL; 2613 port = &ports[port_id]; 2614 tmp = &port->table_list; 2615 while (*tmp) { 2616 struct rte_flow_error error; 2617 struct port_table *pt = *tmp; 2618 2619 /* 2620 * Poisoning to make sure PMDs update it in case 2621 * of error. 2622 */ 2623 memset(&error, 0x33, sizeof(error)); 2624 2625 if (pt->table && 2626 rte_flow_template_table_destroy(port_id, 2627 pt->table, 2628 &error)) { 2629 ret = port_flow_complain(&error); 2630 printf("Template table #%u not destroyed\n", pt->id); 2631 tmp = &pt->next; 2632 } else { 2633 *tmp = pt->next; 2634 free(pt); 2635 } 2636 } 2637 return ret; 2638 } 2639 2640 /** Enqueue create flow rule operation. */ 2641 int 2642 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2643 bool postpone, uint32_t table_id, uint32_t rule_idx, 2644 uint32_t pattern_idx, uint32_t actions_idx, 2645 const struct rte_flow_item *pattern, 2646 const struct rte_flow_action *actions) 2647 { 2648 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2649 struct rte_flow *flow; 2650 struct rte_port *port; 2651 struct port_flow *pf; 2652 struct port_table *pt; 2653 uint32_t id = 0; 2654 bool found; 2655 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2656 struct rte_flow_action_age *age = age_action_get(actions); 2657 struct queue_job *job; 2658 2659 port = &ports[port_id]; 2660 if (port->flow_list) { 2661 if (port->flow_list->id == UINT32_MAX) { 2662 printf("Highest rule ID is already assigned," 2663 " delete it first"); 2664 return -ENOMEM; 2665 } 2666 id = port->flow_list->id + 1; 2667 } 2668 2669 if (queue_id >= port->queue_nb) { 2670 printf("Queue #%u is invalid\n", queue_id); 2671 return -EINVAL; 2672 } 2673 2674 found = false; 2675 pt = port->table_list; 2676 while (pt) { 2677 if (table_id == pt->id) { 2678 found = true; 2679 break; 2680 } 2681 pt = pt->next; 2682 } 2683 if (!found) { 2684 printf("Table #%u is invalid\n", table_id); 2685 return -EINVAL; 2686 } 2687 2688 if (pattern_idx >= pt->nb_pattern_templates) { 2689 printf("Pattern template index #%u is invalid," 2690 " %u templates present in the table\n", 2691 pattern_idx, pt->nb_pattern_templates); 2692 return -EINVAL; 2693 } 2694 if (actions_idx >= pt->nb_actions_templates) { 2695 printf("Actions template index #%u is invalid," 2696 " %u templates present in the table\n", 2697 actions_idx, pt->nb_actions_templates); 2698 return -EINVAL; 2699 } 2700 2701 job = calloc(1, sizeof(*job)); 2702 if (!job) { 2703 printf("Queue flow create job allocate failed\n"); 2704 return -ENOMEM; 2705 } 2706 job->type = QUEUE_JOB_TYPE_FLOW_CREATE; 2707 2708 pf = port_flow_new(&pt->flow_attr, pattern, actions, &error); 2709 if (!pf) { 2710 free(job); 2711 return port_flow_complain(&error); 2712 } 2713 if (age) { 2714 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2715 age->context = &pf->age_type; 2716 } 2717 /* Poisoning to make sure PMDs update it in case of error. */ 2718 memset(&error, 0x11, sizeof(error)); 2719 if (rule_idx == UINT32_MAX) 2720 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2721 pattern, pattern_idx, actions, actions_idx, job, &error); 2722 else 2723 flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table, 2724 rule_idx, actions, actions_idx, job, &error); 2725 if (!flow) { 2726 uint32_t flow_id = pf->id; 2727 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2728 free(job); 2729 return port_flow_complain(&error); 2730 } 2731 2732 pf->next = port->flow_list; 2733 pf->id = id; 2734 pf->flow = flow; 2735 job->pf = pf; 2736 port->flow_list = pf; 2737 printf("Flow rule #%u creation enqueued\n", pf->id); 2738 return 0; 2739 } 2740 2741 /** Enqueue number of destroy flow rules operations. */ 2742 int 2743 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2744 bool postpone, uint32_t n, const uint32_t *rule) 2745 { 2746 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2747 struct rte_port *port; 2748 struct port_flow **tmp; 2749 int ret = 0; 2750 struct queue_job *job; 2751 2752 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2753 port_id == (portid_t)RTE_PORT_ALL) 2754 return -EINVAL; 2755 port = &ports[port_id]; 2756 2757 if (queue_id >= port->queue_nb) { 2758 printf("Queue #%u is invalid\n", queue_id); 2759 return -EINVAL; 2760 } 2761 2762 tmp = &port->flow_list; 2763 while (*tmp) { 2764 uint32_t i; 2765 2766 for (i = 0; i != n; ++i) { 2767 struct rte_flow_error error; 2768 struct port_flow *pf = *tmp; 2769 2770 if (rule[i] != pf->id) 2771 continue; 2772 /* 2773 * Poisoning to make sure PMD 2774 * update it in case of error. 2775 */ 2776 memset(&error, 0x33, sizeof(error)); 2777 job = calloc(1, sizeof(*job)); 2778 if (!job) { 2779 printf("Queue flow destroy job allocate failed\n"); 2780 return -ENOMEM; 2781 } 2782 job->type = QUEUE_JOB_TYPE_FLOW_DESTROY; 2783 job->pf = pf; 2784 2785 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2786 pf->flow, job, &error)) { 2787 free(job); 2788 ret = port_flow_complain(&error); 2789 continue; 2790 } 2791 printf("Flow rule #%u destruction enqueued\n", pf->id); 2792 *tmp = pf->next; 2793 break; 2794 } 2795 if (i == n) 2796 tmp = &(*tmp)->next; 2797 } 2798 return ret; 2799 } 2800 2801 /** Enqueue indirect action create operation. */ 2802 int 2803 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2804 bool postpone, uint32_t id, 2805 const struct rte_flow_indir_action_conf *conf, 2806 const struct rte_flow_action *action) 2807 { 2808 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2809 struct rte_port *port; 2810 struct port_indirect_action *pia; 2811 int ret; 2812 struct rte_flow_error error; 2813 struct queue_job *job; 2814 2815 ret = action_alloc(port_id, id, &pia); 2816 if (ret) 2817 return ret; 2818 2819 port = &ports[port_id]; 2820 if (queue_id >= port->queue_nb) { 2821 printf("Queue #%u is invalid\n", queue_id); 2822 return -EINVAL; 2823 } 2824 job = calloc(1, sizeof(*job)); 2825 if (!job) { 2826 printf("Queue action create job allocate failed\n"); 2827 return -ENOMEM; 2828 } 2829 job->type = QUEUE_JOB_TYPE_ACTION_CREATE; 2830 job->pia = pia; 2831 2832 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2833 struct rte_flow_action_age *age = 2834 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2835 2836 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2837 age->context = &pia->age_type; 2838 } 2839 /* Poisoning to make sure PMDs update it in case of error. */ 2840 memset(&error, 0x88, sizeof(error)); 2841 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2842 &attr, conf, action, job, &error); 2843 if (!pia->handle) { 2844 uint32_t destroy_id = pia->id; 2845 port_queue_action_handle_destroy(port_id, queue_id, 2846 postpone, 1, &destroy_id); 2847 free(job); 2848 return port_flow_complain(&error); 2849 } 2850 pia->type = action->type; 2851 printf("Indirect action #%u creation queued\n", pia->id); 2852 return 0; 2853 } 2854 2855 /** Enqueue indirect action destroy operation. */ 2856 int 2857 port_queue_action_handle_destroy(portid_t port_id, 2858 uint32_t queue_id, bool postpone, 2859 uint32_t n, const uint32_t *actions) 2860 { 2861 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2862 struct rte_port *port; 2863 struct port_indirect_action **tmp; 2864 int ret = 0; 2865 struct queue_job *job; 2866 2867 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2868 port_id == (portid_t)RTE_PORT_ALL) 2869 return -EINVAL; 2870 port = &ports[port_id]; 2871 2872 if (queue_id >= port->queue_nb) { 2873 printf("Queue #%u is invalid\n", queue_id); 2874 return -EINVAL; 2875 } 2876 2877 tmp = &port->actions_list; 2878 while (*tmp) { 2879 uint32_t i; 2880 2881 for (i = 0; i != n; ++i) { 2882 struct rte_flow_error error; 2883 struct port_indirect_action *pia = *tmp; 2884 2885 if (actions[i] != pia->id) 2886 continue; 2887 /* 2888 * Poisoning to make sure PMDs update it in case 2889 * of error. 2890 */ 2891 memset(&error, 0x99, sizeof(error)); 2892 job = calloc(1, sizeof(*job)); 2893 if (!job) { 2894 printf("Queue action destroy job allocate failed\n"); 2895 return -ENOMEM; 2896 } 2897 job->type = QUEUE_JOB_TYPE_ACTION_DESTROY; 2898 job->pia = pia; 2899 2900 if (rte_flow_async_action_handle_destroy(port_id, 2901 queue_id, &attr, pia->handle, job, &error)) { 2902 free(job); 2903 ret = port_flow_complain(&error); 2904 continue; 2905 } 2906 *tmp = pia->next; 2907 printf("Indirect action #%u destruction queued\n", 2908 pia->id); 2909 break; 2910 } 2911 if (i == n) 2912 tmp = &(*tmp)->next; 2913 } 2914 return ret; 2915 } 2916 2917 /** Enqueue indirect action update operation. */ 2918 int 2919 port_queue_action_handle_update(portid_t port_id, 2920 uint32_t queue_id, bool postpone, uint32_t id, 2921 const struct rte_flow_action *action) 2922 { 2923 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2924 struct rte_port *port; 2925 struct rte_flow_error error; 2926 struct rte_flow_action_handle *action_handle; 2927 struct queue_job *job; 2928 struct port_indirect_action *pia; 2929 struct rte_flow_update_meter_mark mtr_update; 2930 const void *update; 2931 2932 action_handle = port_action_handle_get_by_id(port_id, id); 2933 if (!action_handle) 2934 return -EINVAL; 2935 2936 port = &ports[port_id]; 2937 if (queue_id >= port->queue_nb) { 2938 printf("Queue #%u is invalid\n", queue_id); 2939 return -EINVAL; 2940 } 2941 2942 job = calloc(1, sizeof(*job)); 2943 if (!job) { 2944 printf("Queue action update job allocate failed\n"); 2945 return -ENOMEM; 2946 } 2947 job->type = QUEUE_JOB_TYPE_ACTION_UPDATE; 2948 2949 pia = action_get_by_id(port_id, id); 2950 if (!pia) { 2951 free(job); 2952 return -EINVAL; 2953 } 2954 2955 switch (pia->type) { 2956 case RTE_FLOW_ACTION_TYPE_AGE: 2957 update = action->conf; 2958 break; 2959 case RTE_FLOW_ACTION_TYPE_METER_MARK: 2960 rte_memcpy(&mtr_update.meter_mark, action->conf, 2961 sizeof(struct rte_flow_action_meter_mark)); 2962 mtr_update.profile_valid = 1; 2963 mtr_update.policy_valid = 1; 2964 mtr_update.color_mode_valid = 1; 2965 mtr_update.init_color_valid = 1; 2966 mtr_update.state_valid = 1; 2967 update = &mtr_update; 2968 break; 2969 default: 2970 update = action; 2971 break; 2972 } 2973 2974 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 2975 action_handle, update, job, &error)) { 2976 free(job); 2977 return port_flow_complain(&error); 2978 } 2979 printf("Indirect action #%u update queued\n", id); 2980 return 0; 2981 } 2982 2983 void 2984 port_queue_action_handle_query_update(portid_t port_id, 2985 uint32_t queue_id, bool postpone, 2986 uint32_t id, 2987 enum rte_flow_query_update_mode qu_mode, 2988 const struct rte_flow_action *action) 2989 { 2990 int ret; 2991 struct rte_flow_error error; 2992 struct port_indirect_action *pia = action_get_by_id(port_id, id); 2993 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2994 struct queue_job *job; 2995 2996 if (!pia || !pia->handle) 2997 return; 2998 job = calloc(1, sizeof(*job)); 2999 if (!job) 3000 return; 3001 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3002 job->pia = pia; 3003 3004 ret = rte_flow_async_action_handle_query_update(port_id, queue_id, 3005 &attr, pia->handle, 3006 action, 3007 &job->query, 3008 qu_mode, job, 3009 &error); 3010 if (ret) { 3011 port_flow_complain(&error); 3012 free(job); 3013 } else { 3014 printf("port-%u: indirect action #%u update-and-query queued\n", 3015 port_id, id); 3016 } 3017 } 3018 3019 /** Enqueue indirect action query operation. */ 3020 int 3021 port_queue_action_handle_query(portid_t port_id, 3022 uint32_t queue_id, bool postpone, uint32_t id) 3023 { 3024 const struct rte_flow_op_attr attr = { .postpone = postpone}; 3025 struct rte_port *port; 3026 struct rte_flow_error error; 3027 struct rte_flow_action_handle *action_handle; 3028 struct port_indirect_action *pia; 3029 struct queue_job *job; 3030 3031 pia = action_get_by_id(port_id, id); 3032 action_handle = pia ? pia->handle : NULL; 3033 if (!action_handle) 3034 return -EINVAL; 3035 3036 port = &ports[port_id]; 3037 if (queue_id >= port->queue_nb) { 3038 printf("Queue #%u is invalid\n", queue_id); 3039 return -EINVAL; 3040 } 3041 3042 job = calloc(1, sizeof(*job)); 3043 if (!job) { 3044 printf("Queue action update job allocate failed\n"); 3045 return -ENOMEM; 3046 } 3047 job->type = QUEUE_JOB_TYPE_ACTION_QUERY; 3048 job->pia = pia; 3049 3050 if (rte_flow_async_action_handle_query(port_id, queue_id, &attr, 3051 action_handle, &job->query, job, &error)) { 3052 free(job); 3053 return port_flow_complain(&error); 3054 } 3055 printf("Indirect action #%u update queued\n", id); 3056 return 0; 3057 } 3058 3059 /** Push all the queue operations in the queue to the NIC. */ 3060 int 3061 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 3062 { 3063 struct rte_port *port; 3064 struct rte_flow_error error; 3065 int ret = 0; 3066 3067 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3068 port_id == (portid_t)RTE_PORT_ALL) 3069 return -EINVAL; 3070 port = &ports[port_id]; 3071 3072 if (queue_id >= port->queue_nb) { 3073 printf("Queue #%u is invalid\n", queue_id); 3074 return -EINVAL; 3075 } 3076 3077 memset(&error, 0x55, sizeof(error)); 3078 ret = rte_flow_push(port_id, queue_id, &error); 3079 if (ret < 0) { 3080 printf("Failed to push operations in the queue\n"); 3081 return -EINVAL; 3082 } 3083 printf("Queue #%u operations pushed\n", queue_id); 3084 return ret; 3085 } 3086 3087 /** Pull queue operation results from the queue. */ 3088 static int 3089 port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id, 3090 const uint32_t *rule, int nb_flows) 3091 { 3092 struct rte_port *port = &ports[port_id]; 3093 struct rte_flow_op_result *res; 3094 struct rte_flow_error error; 3095 uint32_t n = nb_flows; 3096 int ret = 0; 3097 int i; 3098 3099 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3100 if (!res) { 3101 printf("Failed to allocate memory for pulled results\n"); 3102 return -ENOMEM; 3103 } 3104 3105 memset(&error, 0x66, sizeof(error)); 3106 while (nb_flows > 0) { 3107 int success = 0; 3108 3109 if (n > port->queue_sz) 3110 n = port->queue_sz; 3111 ret = port_queue_flow_destroy(port_id, queue_id, true, n, rule); 3112 if (ret < 0) { 3113 free(res); 3114 return ret; 3115 } 3116 ret = rte_flow_push(port_id, queue_id, &error); 3117 if (ret < 0) { 3118 printf("Failed to push operations in the queue: %s\n", 3119 strerror(-ret)); 3120 free(res); 3121 return ret; 3122 } 3123 while (success < nb_flows) { 3124 ret = rte_flow_pull(port_id, queue_id, res, 3125 port->queue_sz, &error); 3126 if (ret < 0) { 3127 printf("Failed to pull a operation results: %s\n", 3128 strerror(-ret)); 3129 free(res); 3130 return ret; 3131 } 3132 3133 for (i = 0; i < ret; i++) { 3134 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3135 success++; 3136 } 3137 } 3138 rule += n; 3139 nb_flows -= n; 3140 n = nb_flows; 3141 } 3142 3143 free(res); 3144 return ret; 3145 } 3146 3147 /** List simply and destroy all aged flows per queue. */ 3148 void 3149 port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy) 3150 { 3151 void **contexts; 3152 int nb_context, total = 0, idx; 3153 uint32_t *rules = NULL; 3154 struct rte_port *port; 3155 struct rte_flow_error error; 3156 enum age_action_context_type *type; 3157 union { 3158 struct port_flow *pf; 3159 struct port_indirect_action *pia; 3160 } ctx; 3161 3162 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3163 port_id == (portid_t)RTE_PORT_ALL) 3164 return; 3165 port = &ports[port_id]; 3166 if (queue_id >= port->queue_nb) { 3167 printf("Error: queue #%u is invalid\n", queue_id); 3168 return; 3169 } 3170 total = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error); 3171 if (total < 0) { 3172 port_flow_complain(&error); 3173 return; 3174 } 3175 printf("Port %u queue %u total aged flows: %d\n", 3176 port_id, queue_id, total); 3177 if (total == 0) 3178 return; 3179 contexts = calloc(total, sizeof(void *)); 3180 if (contexts == NULL) { 3181 printf("Cannot allocate contexts for aged flow\n"); 3182 return; 3183 } 3184 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3185 nb_context = rte_flow_get_q_aged_flows(port_id, queue_id, contexts, 3186 total, &error); 3187 if (nb_context > total) { 3188 printf("Port %u queue %u get aged flows count(%d) > total(%d)\n", 3189 port_id, queue_id, nb_context, total); 3190 free(contexts); 3191 return; 3192 } 3193 if (destroy) { 3194 rules = malloc(sizeof(uint32_t) * nb_context); 3195 if (rules == NULL) 3196 printf("Cannot allocate memory for destroy aged flow\n"); 3197 } 3198 total = 0; 3199 for (idx = 0; idx < nb_context; idx++) { 3200 if (!contexts[idx]) { 3201 printf("Error: get Null context in port %u queue %u\n", 3202 port_id, queue_id); 3203 continue; 3204 } 3205 type = (enum age_action_context_type *)contexts[idx]; 3206 switch (*type) { 3207 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3208 ctx.pf = container_of(type, struct port_flow, age_type); 3209 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 3210 "\t%c%c%c\t\n", 3211 "Flow", 3212 ctx.pf->id, 3213 ctx.pf->rule.attr->group, 3214 ctx.pf->rule.attr->priority, 3215 ctx.pf->rule.attr->ingress ? 'i' : '-', 3216 ctx.pf->rule.attr->egress ? 'e' : '-', 3217 ctx.pf->rule.attr->transfer ? 't' : '-'); 3218 if (rules != NULL) { 3219 rules[total] = ctx.pf->id; 3220 total++; 3221 } 3222 break; 3223 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3224 ctx.pia = container_of(type, 3225 struct port_indirect_action, 3226 age_type); 3227 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3228 ctx.pia->id); 3229 break; 3230 default: 3231 printf("Error: invalid context type %u\n", port_id); 3232 break; 3233 } 3234 } 3235 if (rules != NULL) { 3236 port_queue_aged_flow_destroy(port_id, queue_id, rules, total); 3237 free(rules); 3238 } 3239 printf("\n%d flows destroyed\n", total); 3240 free(contexts); 3241 } 3242 3243 /** Pull queue operation results from the queue. */ 3244 int 3245 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 3246 { 3247 struct rte_port *port; 3248 struct rte_flow_op_result *res; 3249 struct rte_flow_error error; 3250 int ret = 0; 3251 int success = 0; 3252 int i; 3253 struct queue_job *job; 3254 3255 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3256 port_id == (portid_t)RTE_PORT_ALL) 3257 return -EINVAL; 3258 port = &ports[port_id]; 3259 3260 if (queue_id >= port->queue_nb) { 3261 printf("Queue #%u is invalid\n", queue_id); 3262 return -EINVAL; 3263 } 3264 3265 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 3266 if (!res) { 3267 printf("Failed to allocate memory for pulled results\n"); 3268 return -ENOMEM; 3269 } 3270 3271 memset(&error, 0x66, sizeof(error)); 3272 ret = rte_flow_pull(port_id, queue_id, res, 3273 port->queue_sz, &error); 3274 if (ret < 0) { 3275 printf("Failed to pull a operation results\n"); 3276 free(res); 3277 return -EINVAL; 3278 } 3279 3280 for (i = 0; i < ret; i++) { 3281 if (res[i].status == RTE_FLOW_OP_SUCCESS) 3282 success++; 3283 job = (struct queue_job *)res[i].user_data; 3284 if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY) 3285 free(job->pf); 3286 else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY) 3287 free(job->pia); 3288 else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY) 3289 port_action_handle_query_dump(port_id, job->pia, 3290 &job->query); 3291 free(job); 3292 } 3293 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 3294 queue_id, ret, ret - success, success); 3295 free(res); 3296 return ret; 3297 } 3298 3299 /** Create flow rule. */ 3300 int 3301 port_flow_create(portid_t port_id, 3302 const struct rte_flow_attr *attr, 3303 const struct rte_flow_item *pattern, 3304 const struct rte_flow_action *actions, 3305 const struct tunnel_ops *tunnel_ops) 3306 { 3307 struct rte_flow *flow; 3308 struct rte_port *port; 3309 struct port_flow *pf; 3310 uint32_t id = 0; 3311 struct rte_flow_error error; 3312 struct port_flow_tunnel *pft = NULL; 3313 struct rte_flow_action_age *age = age_action_get(actions); 3314 3315 port = &ports[port_id]; 3316 if (port->flow_list) { 3317 if (port->flow_list->id == UINT32_MAX) { 3318 fprintf(stderr, 3319 "Highest rule ID is already assigned, delete it first"); 3320 return -ENOMEM; 3321 } 3322 id = port->flow_list->id + 1; 3323 } 3324 if (tunnel_ops->enabled) { 3325 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 3326 actions, tunnel_ops); 3327 if (!pft) 3328 return -ENOENT; 3329 if (pft->items) 3330 pattern = pft->items; 3331 if (pft->actions) 3332 actions = pft->actions; 3333 } 3334 pf = port_flow_new(attr, pattern, actions, &error); 3335 if (!pf) 3336 return port_flow_complain(&error); 3337 if (age) { 3338 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 3339 age->context = &pf->age_type; 3340 } 3341 /* Poisoning to make sure PMDs update it in case of error. */ 3342 memset(&error, 0x22, sizeof(error)); 3343 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 3344 if (!flow) { 3345 if (tunnel_ops->enabled) 3346 port_flow_tunnel_offload_cmd_release(port_id, 3347 tunnel_ops, pft); 3348 free(pf); 3349 return port_flow_complain(&error); 3350 } 3351 pf->next = port->flow_list; 3352 pf->id = id; 3353 pf->flow = flow; 3354 port->flow_list = pf; 3355 if (tunnel_ops->enabled) 3356 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 3357 printf("Flow rule #%u created\n", pf->id); 3358 return 0; 3359 } 3360 3361 /** Destroy a number of flow rules. */ 3362 int 3363 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 3364 { 3365 struct rte_port *port; 3366 struct port_flow **tmp; 3367 int ret = 0; 3368 3369 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3370 port_id == (portid_t)RTE_PORT_ALL) 3371 return -EINVAL; 3372 port = &ports[port_id]; 3373 tmp = &port->flow_list; 3374 while (*tmp) { 3375 uint32_t i; 3376 3377 for (i = 0; i != n; ++i) { 3378 struct rte_flow_error error; 3379 struct port_flow *pf = *tmp; 3380 3381 if (rule[i] != pf->id) 3382 continue; 3383 /* 3384 * Poisoning to make sure PMDs update it in case 3385 * of error. 3386 */ 3387 memset(&error, 0x33, sizeof(error)); 3388 if (rte_flow_destroy(port_id, pf->flow, &error)) { 3389 ret = port_flow_complain(&error); 3390 continue; 3391 } 3392 printf("Flow rule #%u destroyed\n", pf->id); 3393 *tmp = pf->next; 3394 free(pf); 3395 break; 3396 } 3397 if (i == n) 3398 tmp = &(*tmp)->next; 3399 } 3400 return ret; 3401 } 3402 3403 /** Remove all flow rules. */ 3404 int 3405 port_flow_flush(portid_t port_id) 3406 { 3407 struct rte_flow_error error; 3408 struct rte_port *port; 3409 int ret = 0; 3410 3411 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3412 port_id == (portid_t)RTE_PORT_ALL) 3413 return -EINVAL; 3414 3415 port = &ports[port_id]; 3416 3417 if (port->flow_list == NULL) 3418 return ret; 3419 3420 /* Poisoning to make sure PMDs update it in case of error. */ 3421 memset(&error, 0x44, sizeof(error)); 3422 if (rte_flow_flush(port_id, &error)) { 3423 port_flow_complain(&error); 3424 } 3425 3426 while (port->flow_list) { 3427 struct port_flow *pf = port->flow_list->next; 3428 3429 free(port->flow_list); 3430 port->flow_list = pf; 3431 } 3432 return ret; 3433 } 3434 3435 /** Dump flow rules. */ 3436 int 3437 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 3438 const char *file_name) 3439 { 3440 int ret = 0; 3441 FILE *file = stdout; 3442 struct rte_flow_error error; 3443 struct rte_port *port; 3444 struct port_flow *pflow; 3445 struct rte_flow *tmpFlow = NULL; 3446 bool found = false; 3447 3448 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3449 port_id == (portid_t)RTE_PORT_ALL) 3450 return -EINVAL; 3451 3452 if (!dump_all) { 3453 port = &ports[port_id]; 3454 pflow = port->flow_list; 3455 while (pflow) { 3456 if (rule_id != pflow->id) { 3457 pflow = pflow->next; 3458 } else { 3459 tmpFlow = pflow->flow; 3460 if (tmpFlow) 3461 found = true; 3462 break; 3463 } 3464 } 3465 if (found == false) { 3466 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 3467 return -EINVAL; 3468 } 3469 } 3470 3471 if (file_name && strlen(file_name)) { 3472 file = fopen(file_name, "w"); 3473 if (!file) { 3474 fprintf(stderr, "Failed to create file %s: %s\n", 3475 file_name, strerror(errno)); 3476 return -errno; 3477 } 3478 } 3479 3480 if (!dump_all) 3481 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3482 else 3483 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3484 if (ret) { 3485 port_flow_complain(&error); 3486 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3487 } else 3488 printf("Flow dump finished\n"); 3489 if (file_name && strlen(file_name)) 3490 fclose(file); 3491 return ret; 3492 } 3493 3494 /** Query a flow rule. */ 3495 int 3496 port_flow_query(portid_t port_id, uint32_t rule, 3497 const struct rte_flow_action *action) 3498 { 3499 struct rte_flow_error error; 3500 struct rte_port *port; 3501 struct port_flow *pf; 3502 const char *name; 3503 union { 3504 struct rte_flow_query_count count; 3505 struct rte_flow_action_rss rss_conf; 3506 struct rte_flow_query_age age; 3507 } query; 3508 int ret; 3509 3510 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3511 port_id == (portid_t)RTE_PORT_ALL) 3512 return -EINVAL; 3513 port = &ports[port_id]; 3514 for (pf = port->flow_list; pf; pf = pf->next) 3515 if (pf->id == rule) 3516 break; 3517 if (!pf) { 3518 fprintf(stderr, "Flow rule #%u not found\n", rule); 3519 return -ENOENT; 3520 } 3521 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3522 &name, sizeof(name), 3523 (void *)(uintptr_t)action->type, &error); 3524 if (ret < 0) 3525 return port_flow_complain(&error); 3526 switch (action->type) { 3527 case RTE_FLOW_ACTION_TYPE_COUNT: 3528 case RTE_FLOW_ACTION_TYPE_RSS: 3529 case RTE_FLOW_ACTION_TYPE_AGE: 3530 break; 3531 default: 3532 fprintf(stderr, "Cannot query action type %d (%s)\n", 3533 action->type, name); 3534 return -ENOTSUP; 3535 } 3536 /* Poisoning to make sure PMDs update it in case of error. */ 3537 memset(&error, 0x55, sizeof(error)); 3538 memset(&query, 0, sizeof(query)); 3539 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3540 return port_flow_complain(&error); 3541 switch (action->type) { 3542 case RTE_FLOW_ACTION_TYPE_COUNT: 3543 printf("%s:\n" 3544 " hits_set: %u\n" 3545 " bytes_set: %u\n" 3546 " hits: %" PRIu64 "\n" 3547 " bytes: %" PRIu64 "\n", 3548 name, 3549 query.count.hits_set, 3550 query.count.bytes_set, 3551 query.count.hits, 3552 query.count.bytes); 3553 break; 3554 case RTE_FLOW_ACTION_TYPE_RSS: 3555 rss_config_display(&query.rss_conf); 3556 break; 3557 case RTE_FLOW_ACTION_TYPE_AGE: 3558 printf("%s:\n" 3559 " aged: %u\n" 3560 " sec_since_last_hit_valid: %u\n" 3561 " sec_since_last_hit: %" PRIu32 "\n", 3562 name, 3563 query.age.aged, 3564 query.age.sec_since_last_hit_valid, 3565 query.age.sec_since_last_hit); 3566 break; 3567 default: 3568 fprintf(stderr, 3569 "Cannot display result for action type %d (%s)\n", 3570 action->type, name); 3571 break; 3572 } 3573 return 0; 3574 } 3575 3576 /** List simply and destroy all aged flows. */ 3577 void 3578 port_flow_aged(portid_t port_id, uint8_t destroy) 3579 { 3580 void **contexts; 3581 int nb_context, total = 0, idx; 3582 struct rte_flow_error error; 3583 enum age_action_context_type *type; 3584 union { 3585 struct port_flow *pf; 3586 struct port_indirect_action *pia; 3587 } ctx; 3588 3589 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3590 port_id == (portid_t)RTE_PORT_ALL) 3591 return; 3592 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3593 printf("Port %u total aged flows: %d\n", port_id, total); 3594 if (total < 0) { 3595 port_flow_complain(&error); 3596 return; 3597 } 3598 if (total == 0) 3599 return; 3600 contexts = malloc(sizeof(void *) * total); 3601 if (contexts == NULL) { 3602 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3603 return; 3604 } 3605 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3606 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3607 if (nb_context != total) { 3608 fprintf(stderr, 3609 "Port:%d get aged flows count(%d) != total(%d)\n", 3610 port_id, nb_context, total); 3611 free(contexts); 3612 return; 3613 } 3614 total = 0; 3615 for (idx = 0; idx < nb_context; idx++) { 3616 if (!contexts[idx]) { 3617 fprintf(stderr, "Error: get Null context in port %u\n", 3618 port_id); 3619 continue; 3620 } 3621 type = (enum age_action_context_type *)contexts[idx]; 3622 switch (*type) { 3623 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3624 ctx.pf = container_of(type, struct port_flow, age_type); 3625 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 3626 "\t%c%c%c\t\n", 3627 "Flow", 3628 ctx.pf->id, 3629 ctx.pf->rule.attr->group, 3630 ctx.pf->rule.attr->priority, 3631 ctx.pf->rule.attr->ingress ? 'i' : '-', 3632 ctx.pf->rule.attr->egress ? 'e' : '-', 3633 ctx.pf->rule.attr->transfer ? 't' : '-'); 3634 if (destroy && !port_flow_destroy(port_id, 1, 3635 &ctx.pf->id)) 3636 total++; 3637 break; 3638 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3639 ctx.pia = container_of(type, 3640 struct port_indirect_action, age_type); 3641 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3642 ctx.pia->id); 3643 break; 3644 default: 3645 fprintf(stderr, "Error: invalid context type %u\n", 3646 port_id); 3647 break; 3648 } 3649 } 3650 printf("\n%d flows destroyed\n", total); 3651 free(contexts); 3652 } 3653 3654 /** List flow rules. */ 3655 void 3656 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3657 { 3658 struct rte_port *port; 3659 struct port_flow *pf; 3660 struct port_flow *list = NULL; 3661 uint32_t i; 3662 3663 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3664 port_id == (portid_t)RTE_PORT_ALL) 3665 return; 3666 port = &ports[port_id]; 3667 if (!port->flow_list) 3668 return; 3669 /* Sort flows by group, priority and ID. */ 3670 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3671 struct port_flow **tmp; 3672 const struct rte_flow_attr *curr = pf->rule.attr; 3673 3674 if (n) { 3675 /* Filter out unwanted groups. */ 3676 for (i = 0; i != n; ++i) 3677 if (curr->group == group[i]) 3678 break; 3679 if (i == n) 3680 continue; 3681 } 3682 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3683 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3684 3685 if (curr->group > comp->group || 3686 (curr->group == comp->group && 3687 curr->priority > comp->priority) || 3688 (curr->group == comp->group && 3689 curr->priority == comp->priority && 3690 pf->id > (*tmp)->id)) 3691 continue; 3692 break; 3693 } 3694 pf->tmp = *tmp; 3695 *tmp = pf; 3696 } 3697 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3698 for (pf = list; pf != NULL; pf = pf->tmp) { 3699 const struct rte_flow_item *item = pf->rule.pattern; 3700 const struct rte_flow_action *action = pf->rule.actions; 3701 const char *name; 3702 3703 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3704 pf->id, 3705 pf->rule.attr->group, 3706 pf->rule.attr->priority, 3707 pf->rule.attr->ingress ? 'i' : '-', 3708 pf->rule.attr->egress ? 'e' : '-', 3709 pf->rule.attr->transfer ? 't' : '-'); 3710 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3711 if ((uint32_t)item->type > INT_MAX) 3712 name = "PMD_INTERNAL"; 3713 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3714 &name, sizeof(name), 3715 (void *)(uintptr_t)item->type, 3716 NULL) <= 0) 3717 name = "[UNKNOWN]"; 3718 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3719 printf("%s ", name); 3720 ++item; 3721 } 3722 printf("=>"); 3723 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3724 if ((uint32_t)action->type > INT_MAX) 3725 name = "PMD_INTERNAL"; 3726 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3727 &name, sizeof(name), 3728 (void *)(uintptr_t)action->type, 3729 NULL) <= 0) 3730 name = "[UNKNOWN]"; 3731 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3732 printf(" %s", name); 3733 ++action; 3734 } 3735 printf("\n"); 3736 } 3737 } 3738 3739 /** Restrict ingress traffic to the defined flow rules. */ 3740 int 3741 port_flow_isolate(portid_t port_id, int set) 3742 { 3743 struct rte_flow_error error; 3744 3745 /* Poisoning to make sure PMDs update it in case of error. */ 3746 memset(&error, 0x66, sizeof(error)); 3747 if (rte_flow_isolate(port_id, set, &error)) 3748 return port_flow_complain(&error); 3749 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3750 port_id, 3751 set ? "now restricted" : "not restricted anymore"); 3752 return 0; 3753 } 3754 3755 /* 3756 * RX/TX ring descriptors display functions. 3757 */ 3758 int 3759 rx_queue_id_is_invalid(queueid_t rxq_id) 3760 { 3761 if (rxq_id < nb_rxq) 3762 return 0; 3763 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3764 rxq_id, nb_rxq); 3765 return 1; 3766 } 3767 3768 int 3769 tx_queue_id_is_invalid(queueid_t txq_id) 3770 { 3771 if (txq_id < nb_txq) 3772 return 0; 3773 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 3774 txq_id, nb_txq); 3775 return 1; 3776 } 3777 3778 static int 3779 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 3780 { 3781 struct rte_port *port = &ports[port_id]; 3782 struct rte_eth_rxq_info rx_qinfo; 3783 int ret; 3784 3785 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 3786 if (ret == 0) { 3787 *ring_size = rx_qinfo.nb_desc; 3788 return ret; 3789 } 3790 3791 if (ret != -ENOTSUP) 3792 return ret; 3793 /* 3794 * If the rte_eth_rx_queue_info_get is not support for this PMD, 3795 * ring_size stored in testpmd will be used for validity verification. 3796 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 3797 * being 0, it will use a default value provided by PMDs to setup this 3798 * rxq. If the default value is 0, it will use the 3799 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 3800 */ 3801 if (port->nb_rx_desc[rxq_id]) 3802 *ring_size = port->nb_rx_desc[rxq_id]; 3803 else if (port->dev_info.default_rxportconf.ring_size) 3804 *ring_size = port->dev_info.default_rxportconf.ring_size; 3805 else 3806 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 3807 return 0; 3808 } 3809 3810 static int 3811 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 3812 { 3813 struct rte_port *port = &ports[port_id]; 3814 struct rte_eth_txq_info tx_qinfo; 3815 int ret; 3816 3817 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 3818 if (ret == 0) { 3819 *ring_size = tx_qinfo.nb_desc; 3820 return ret; 3821 } 3822 3823 if (ret != -ENOTSUP) 3824 return ret; 3825 /* 3826 * If the rte_eth_tx_queue_info_get is not support for this PMD, 3827 * ring_size stored in testpmd will be used for validity verification. 3828 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 3829 * being 0, it will use a default value provided by PMDs to setup this 3830 * txq. If the default value is 0, it will use the 3831 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 3832 */ 3833 if (port->nb_tx_desc[txq_id]) 3834 *ring_size = port->nb_tx_desc[txq_id]; 3835 else if (port->dev_info.default_txportconf.ring_size) 3836 *ring_size = port->dev_info.default_txportconf.ring_size; 3837 else 3838 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 3839 return 0; 3840 } 3841 3842 static int 3843 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 3844 { 3845 uint16_t ring_size; 3846 int ret; 3847 3848 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 3849 if (ret) 3850 return 1; 3851 3852 if (rxdesc_id < ring_size) 3853 return 0; 3854 3855 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 3856 rxdesc_id, ring_size); 3857 return 1; 3858 } 3859 3860 static int 3861 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 3862 { 3863 uint16_t ring_size; 3864 int ret; 3865 3866 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 3867 if (ret) 3868 return 1; 3869 3870 if (txdesc_id < ring_size) 3871 return 0; 3872 3873 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 3874 txdesc_id, ring_size); 3875 return 1; 3876 } 3877 3878 static const struct rte_memzone * 3879 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 3880 { 3881 char mz_name[RTE_MEMZONE_NAMESIZE]; 3882 const struct rte_memzone *mz; 3883 3884 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 3885 port_id, q_id, ring_name); 3886 mz = rte_memzone_lookup(mz_name); 3887 if (mz == NULL) 3888 fprintf(stderr, 3889 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 3890 ring_name, port_id, q_id, mz_name); 3891 return mz; 3892 } 3893 3894 union igb_ring_dword { 3895 uint64_t dword; 3896 struct { 3897 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3898 uint32_t lo; 3899 uint32_t hi; 3900 #else 3901 uint32_t hi; 3902 uint32_t lo; 3903 #endif 3904 } words; 3905 }; 3906 3907 struct igb_ring_desc_32_bytes { 3908 union igb_ring_dword lo_dword; 3909 union igb_ring_dword hi_dword; 3910 union igb_ring_dword resv1; 3911 union igb_ring_dword resv2; 3912 }; 3913 3914 struct igb_ring_desc_16_bytes { 3915 union igb_ring_dword lo_dword; 3916 union igb_ring_dword hi_dword; 3917 }; 3918 3919 static void 3920 ring_rxd_display_dword(union igb_ring_dword dword) 3921 { 3922 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 3923 (unsigned)dword.words.hi); 3924 } 3925 3926 static void 3927 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 3928 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3929 portid_t port_id, 3930 #else 3931 __rte_unused portid_t port_id, 3932 #endif 3933 uint16_t desc_id) 3934 { 3935 struct igb_ring_desc_16_bytes *ring = 3936 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3937 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3938 int ret; 3939 struct rte_eth_dev_info dev_info; 3940 3941 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3942 if (ret != 0) 3943 return; 3944 3945 if (strstr(dev_info.driver_name, "i40e") != NULL) { 3946 /* 32 bytes RX descriptor, i40e only */ 3947 struct igb_ring_desc_32_bytes *ring = 3948 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 3949 ring[desc_id].lo_dword.dword = 3950 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3951 ring_rxd_display_dword(ring[desc_id].lo_dword); 3952 ring[desc_id].hi_dword.dword = 3953 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3954 ring_rxd_display_dword(ring[desc_id].hi_dword); 3955 ring[desc_id].resv1.dword = 3956 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 3957 ring_rxd_display_dword(ring[desc_id].resv1); 3958 ring[desc_id].resv2.dword = 3959 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 3960 ring_rxd_display_dword(ring[desc_id].resv2); 3961 3962 return; 3963 } 3964 #endif 3965 /* 16 bytes RX descriptor */ 3966 ring[desc_id].lo_dword.dword = 3967 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3968 ring_rxd_display_dword(ring[desc_id].lo_dword); 3969 ring[desc_id].hi_dword.dword = 3970 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3971 ring_rxd_display_dword(ring[desc_id].hi_dword); 3972 } 3973 3974 static void 3975 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 3976 { 3977 struct igb_ring_desc_16_bytes *ring; 3978 struct igb_ring_desc_16_bytes txd; 3979 3980 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3981 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3982 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3983 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 3984 (unsigned)txd.lo_dword.words.lo, 3985 (unsigned)txd.lo_dword.words.hi, 3986 (unsigned)txd.hi_dword.words.lo, 3987 (unsigned)txd.hi_dword.words.hi); 3988 } 3989 3990 void 3991 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 3992 { 3993 const struct rte_memzone *rx_mz; 3994 3995 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 3996 return; 3997 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 3998 if (rx_mz == NULL) 3999 return; 4000 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 4001 } 4002 4003 void 4004 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 4005 { 4006 const struct rte_memzone *tx_mz; 4007 4008 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 4009 return; 4010 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 4011 if (tx_mz == NULL) 4012 return; 4013 ring_tx_descriptor_display(tx_mz, txd_id); 4014 } 4015 4016 void 4017 fwd_lcores_config_display(void) 4018 { 4019 lcoreid_t lc_id; 4020 4021 printf("List of forwarding lcores:"); 4022 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 4023 printf(" %2u", fwd_lcores_cpuids[lc_id]); 4024 printf("\n"); 4025 } 4026 void 4027 rxtx_config_display(void) 4028 { 4029 portid_t pid; 4030 queueid_t qid; 4031 4032 printf(" %s packet forwarding%s packets/burst=%d\n", 4033 cur_fwd_eng->fwd_mode_name, 4034 retry_enabled == 0 ? "" : " with retry", 4035 nb_pkt_per_burst); 4036 4037 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 4038 printf(" packet len=%u - nb packet segments=%d\n", 4039 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 4040 4041 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 4042 nb_fwd_lcores, nb_fwd_ports); 4043 4044 RTE_ETH_FOREACH_DEV(pid) { 4045 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 4046 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 4047 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 4048 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 4049 struct rte_eth_rxq_info rx_qinfo; 4050 struct rte_eth_txq_info tx_qinfo; 4051 uint16_t rx_free_thresh_tmp; 4052 uint16_t tx_free_thresh_tmp; 4053 uint16_t tx_rs_thresh_tmp; 4054 uint16_t nb_rx_desc_tmp; 4055 uint16_t nb_tx_desc_tmp; 4056 uint64_t offloads_tmp; 4057 uint8_t pthresh_tmp; 4058 uint8_t hthresh_tmp; 4059 uint8_t wthresh_tmp; 4060 int32_t rc; 4061 4062 /* per port config */ 4063 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 4064 (unsigned int)pid, nb_rxq, nb_txq); 4065 4066 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 4067 ports[pid].dev_conf.rxmode.offloads, 4068 ports[pid].dev_conf.txmode.offloads); 4069 4070 /* per rx queue config only for first queue to be less verbose */ 4071 for (qid = 0; qid < 1; qid++) { 4072 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 4073 if (rc) { 4074 nb_rx_desc_tmp = nb_rx_desc[qid]; 4075 rx_free_thresh_tmp = 4076 rx_conf[qid].rx_free_thresh; 4077 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 4078 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 4079 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 4080 offloads_tmp = rx_conf[qid].offloads; 4081 } else { 4082 nb_rx_desc_tmp = rx_qinfo.nb_desc; 4083 rx_free_thresh_tmp = 4084 rx_qinfo.conf.rx_free_thresh; 4085 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 4086 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 4087 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 4088 offloads_tmp = rx_qinfo.conf.offloads; 4089 } 4090 4091 printf(" RX queue: %d\n", qid); 4092 printf(" RX desc=%d - RX free threshold=%d\n", 4093 nb_rx_desc_tmp, rx_free_thresh_tmp); 4094 printf(" RX threshold registers: pthresh=%d hthresh=%d " 4095 " wthresh=%d\n", 4096 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4097 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 4098 if (rx_conf->share_group > 0) 4099 printf(" share_group=%u share_qid=%u", 4100 rx_conf->share_group, 4101 rx_conf->share_qid); 4102 printf("\n"); 4103 } 4104 4105 /* per tx queue config only for first queue to be less verbose */ 4106 for (qid = 0; qid < 1; qid++) { 4107 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 4108 if (rc) { 4109 nb_tx_desc_tmp = nb_tx_desc[qid]; 4110 tx_free_thresh_tmp = 4111 tx_conf[qid].tx_free_thresh; 4112 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 4113 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 4114 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 4115 offloads_tmp = tx_conf[qid].offloads; 4116 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 4117 } else { 4118 nb_tx_desc_tmp = tx_qinfo.nb_desc; 4119 tx_free_thresh_tmp = 4120 tx_qinfo.conf.tx_free_thresh; 4121 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 4122 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 4123 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 4124 offloads_tmp = tx_qinfo.conf.offloads; 4125 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 4126 } 4127 4128 printf(" TX queue: %d\n", qid); 4129 printf(" TX desc=%d - TX free threshold=%d\n", 4130 nb_tx_desc_tmp, tx_free_thresh_tmp); 4131 printf(" TX threshold registers: pthresh=%d hthresh=%d " 4132 " wthresh=%d\n", 4133 pthresh_tmp, hthresh_tmp, wthresh_tmp); 4134 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 4135 offloads_tmp, tx_rs_thresh_tmp); 4136 } 4137 } 4138 } 4139 4140 void 4141 port_rss_reta_info(portid_t port_id, 4142 struct rte_eth_rss_reta_entry64 *reta_conf, 4143 uint16_t nb_entries) 4144 { 4145 uint16_t i, idx, shift; 4146 int ret; 4147 4148 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4149 return; 4150 4151 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 4152 if (ret != 0) { 4153 fprintf(stderr, 4154 "Failed to get RSS RETA info, return code = %d\n", 4155 ret); 4156 return; 4157 } 4158 4159 for (i = 0; i < nb_entries; i++) { 4160 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4161 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4162 if (!(reta_conf[idx].mask & (1ULL << shift))) 4163 continue; 4164 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 4165 i, reta_conf[idx].reta[shift]); 4166 } 4167 } 4168 4169 /* 4170 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 4171 * key of the port. 4172 */ 4173 void 4174 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 4175 { 4176 struct rte_eth_rss_conf rss_conf = {0}; 4177 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 4178 uint64_t rss_hf; 4179 uint8_t i; 4180 int diag; 4181 struct rte_eth_dev_info dev_info; 4182 uint8_t hash_key_size; 4183 int ret; 4184 4185 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4186 return; 4187 4188 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4189 if (ret != 0) 4190 return; 4191 4192 if (dev_info.hash_key_size > 0 && 4193 dev_info.hash_key_size <= sizeof(rss_key)) 4194 hash_key_size = dev_info.hash_key_size; 4195 else { 4196 fprintf(stderr, 4197 "dev_info did not provide a valid hash key size\n"); 4198 return; 4199 } 4200 4201 /* Get RSS hash key if asked to display it */ 4202 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 4203 rss_conf.rss_key_len = hash_key_size; 4204 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4205 if (diag != 0) { 4206 switch (diag) { 4207 case -ENODEV: 4208 fprintf(stderr, "port index %d invalid\n", port_id); 4209 break; 4210 case -ENOTSUP: 4211 fprintf(stderr, "operation not supported by device\n"); 4212 break; 4213 default: 4214 fprintf(stderr, "operation failed - diag=%d\n", diag); 4215 break; 4216 } 4217 return; 4218 } 4219 rss_hf = rss_conf.rss_hf; 4220 if (rss_hf == 0) { 4221 printf("RSS disabled\n"); 4222 return; 4223 } 4224 printf("RSS functions:\n"); 4225 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 4226 if (!show_rss_key) 4227 return; 4228 printf("RSS key:\n"); 4229 for (i = 0; i < hash_key_size; i++) 4230 printf("%02X", rss_key[i]); 4231 printf("\n"); 4232 } 4233 4234 void 4235 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 4236 uint8_t hash_key_len) 4237 { 4238 struct rte_eth_rss_conf rss_conf; 4239 int diag; 4240 4241 rss_conf.rss_key = NULL; 4242 rss_conf.rss_key_len = 0; 4243 rss_conf.rss_hf = str_to_rsstypes(rss_type); 4244 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 4245 if (diag == 0) { 4246 rss_conf.rss_key = hash_key; 4247 rss_conf.rss_key_len = hash_key_len; 4248 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 4249 } 4250 if (diag == 0) 4251 return; 4252 4253 switch (diag) { 4254 case -ENODEV: 4255 fprintf(stderr, "port index %d invalid\n", port_id); 4256 break; 4257 case -ENOTSUP: 4258 fprintf(stderr, "operation not supported by device\n"); 4259 break; 4260 default: 4261 fprintf(stderr, "operation failed - diag=%d\n", diag); 4262 break; 4263 } 4264 } 4265 4266 /* 4267 * Check whether a shared rxq scheduled on other lcores. 4268 */ 4269 static bool 4270 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 4271 portid_t src_port, queueid_t src_rxq, 4272 uint32_t share_group, queueid_t share_rxq) 4273 { 4274 streamid_t sm_id; 4275 streamid_t nb_fs_per_lcore; 4276 lcoreid_t nb_fc; 4277 lcoreid_t lc_id; 4278 struct fwd_stream *fs; 4279 struct rte_port *port; 4280 struct rte_eth_dev_info *dev_info; 4281 struct rte_eth_rxconf *rxq_conf; 4282 4283 nb_fc = cur_fwd_config.nb_fwd_lcores; 4284 /* Check remaining cores. */ 4285 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 4286 sm_id = fwd_lcores[lc_id]->stream_idx; 4287 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4288 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4289 sm_id++) { 4290 fs = fwd_streams[sm_id]; 4291 port = &ports[fs->rx_port]; 4292 dev_info = &port->dev_info; 4293 rxq_conf = &port->rxq[fs->rx_queue].conf; 4294 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4295 == 0 || rxq_conf->share_group == 0) 4296 /* Not shared rxq. */ 4297 continue; 4298 if (domain_id != port->dev_info.switch_info.domain_id) 4299 continue; 4300 if (rxq_conf->share_group != share_group) 4301 continue; 4302 if (rxq_conf->share_qid != share_rxq) 4303 continue; 4304 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 4305 share_group, share_rxq); 4306 printf(" lcore %hhu Port %hu queue %hu\n", 4307 src_lc, src_port, src_rxq); 4308 printf(" lcore %hhu Port %hu queue %hu\n", 4309 lc_id, fs->rx_port, fs->rx_queue); 4310 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 4311 nb_rxq); 4312 return true; 4313 } 4314 } 4315 return false; 4316 } 4317 4318 /* 4319 * Check shared rxq configuration. 4320 * 4321 * Shared group must not being scheduled on different core. 4322 */ 4323 bool 4324 pkt_fwd_shared_rxq_check(void) 4325 { 4326 streamid_t sm_id; 4327 streamid_t nb_fs_per_lcore; 4328 lcoreid_t nb_fc; 4329 lcoreid_t lc_id; 4330 struct fwd_stream *fs; 4331 uint16_t domain_id; 4332 struct rte_port *port; 4333 struct rte_eth_dev_info *dev_info; 4334 struct rte_eth_rxconf *rxq_conf; 4335 4336 if (rxq_share == 0) 4337 return true; 4338 nb_fc = cur_fwd_config.nb_fwd_lcores; 4339 /* 4340 * Check streams on each core, make sure the same switch domain + 4341 * group + queue doesn't get scheduled on other cores. 4342 */ 4343 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 4344 sm_id = fwd_lcores[lc_id]->stream_idx; 4345 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4346 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4347 sm_id++) { 4348 fs = fwd_streams[sm_id]; 4349 /* Update lcore info stream being scheduled. */ 4350 fs->lcore = fwd_lcores[lc_id]; 4351 port = &ports[fs->rx_port]; 4352 dev_info = &port->dev_info; 4353 rxq_conf = &port->rxq[fs->rx_queue].conf; 4354 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4355 == 0 || rxq_conf->share_group == 0) 4356 /* Not shared rxq. */ 4357 continue; 4358 /* Check shared rxq not scheduled on remaining cores. */ 4359 domain_id = port->dev_info.switch_info.domain_id; 4360 if (fwd_stream_on_other_lcores(domain_id, lc_id, 4361 fs->rx_port, 4362 fs->rx_queue, 4363 rxq_conf->share_group, 4364 rxq_conf->share_qid)) 4365 return false; 4366 } 4367 } 4368 return true; 4369 } 4370 4371 /* 4372 * Setup forwarding configuration for each logical core. 4373 */ 4374 static void 4375 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 4376 { 4377 streamid_t nb_fs_per_lcore; 4378 streamid_t nb_fs; 4379 streamid_t sm_id; 4380 lcoreid_t nb_extra; 4381 lcoreid_t nb_fc; 4382 lcoreid_t nb_lc; 4383 lcoreid_t lc_id; 4384 4385 nb_fs = cfg->nb_fwd_streams; 4386 nb_fc = cfg->nb_fwd_lcores; 4387 if (nb_fs <= nb_fc) { 4388 nb_fs_per_lcore = 1; 4389 nb_extra = 0; 4390 } else { 4391 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 4392 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 4393 } 4394 4395 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 4396 sm_id = 0; 4397 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 4398 fwd_lcores[lc_id]->stream_idx = sm_id; 4399 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 4400 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4401 } 4402 4403 /* 4404 * Assign extra remaining streams, if any. 4405 */ 4406 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 4407 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 4408 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 4409 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 4410 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4411 } 4412 } 4413 4414 static portid_t 4415 fwd_topology_tx_port_get(portid_t rxp) 4416 { 4417 static int warning_once = 1; 4418 4419 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 4420 4421 switch (port_topology) { 4422 default: 4423 case PORT_TOPOLOGY_PAIRED: 4424 if ((rxp & 0x1) == 0) { 4425 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 4426 return rxp + 1; 4427 if (warning_once) { 4428 fprintf(stderr, 4429 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 4430 warning_once = 0; 4431 } 4432 return rxp; 4433 } 4434 return rxp - 1; 4435 case PORT_TOPOLOGY_CHAINED: 4436 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 4437 case PORT_TOPOLOGY_LOOP: 4438 return rxp; 4439 } 4440 } 4441 4442 static void 4443 simple_fwd_config_setup(void) 4444 { 4445 portid_t i; 4446 4447 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 4448 cur_fwd_config.nb_fwd_streams = 4449 (streamid_t) cur_fwd_config.nb_fwd_ports; 4450 4451 /* reinitialize forwarding streams */ 4452 init_fwd_streams(); 4453 4454 /* 4455 * In the simple forwarding test, the number of forwarding cores 4456 * must be lower or equal to the number of forwarding ports. 4457 */ 4458 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4459 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 4460 cur_fwd_config.nb_fwd_lcores = 4461 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 4462 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4463 4464 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 4465 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 4466 fwd_streams[i]->rx_queue = 0; 4467 fwd_streams[i]->tx_port = 4468 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 4469 fwd_streams[i]->tx_queue = 0; 4470 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 4471 fwd_streams[i]->retry_enabled = retry_enabled; 4472 } 4473 } 4474 4475 /** 4476 * For the RSS forwarding test all streams distributed over lcores. Each stream 4477 * being composed of a RX queue to poll on a RX port for input messages, 4478 * associated with a TX queue of a TX port where to send forwarded packets. 4479 */ 4480 static void 4481 rss_fwd_config_setup(void) 4482 { 4483 portid_t rxp; 4484 portid_t txp; 4485 queueid_t rxq; 4486 queueid_t nb_q; 4487 streamid_t sm_id; 4488 int start; 4489 int end; 4490 4491 nb_q = nb_rxq; 4492 if (nb_q > nb_txq) 4493 nb_q = nb_txq; 4494 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4495 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4496 cur_fwd_config.nb_fwd_streams = 4497 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4498 4499 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4500 cur_fwd_config.nb_fwd_lcores = 4501 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4502 4503 /* reinitialize forwarding streams */ 4504 init_fwd_streams(); 4505 4506 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4507 4508 if (proc_id > 0 && nb_q % num_procs != 0) 4509 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4510 4511 /** 4512 * In multi-process, All queues are allocated to different 4513 * processes based on num_procs and proc_id. For example: 4514 * if supports 4 queues(nb_q), 2 processes(num_procs), 4515 * the 0~1 queue for primary process. 4516 * the 2~3 queue for secondary process. 4517 */ 4518 start = proc_id * nb_q / num_procs; 4519 end = start + nb_q / num_procs; 4520 rxp = 0; 4521 rxq = start; 4522 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4523 struct fwd_stream *fs; 4524 4525 fs = fwd_streams[sm_id]; 4526 txp = fwd_topology_tx_port_get(rxp); 4527 fs->rx_port = fwd_ports_ids[rxp]; 4528 fs->rx_queue = rxq; 4529 fs->tx_port = fwd_ports_ids[txp]; 4530 fs->tx_queue = rxq; 4531 fs->peer_addr = fs->tx_port; 4532 fs->retry_enabled = retry_enabled; 4533 rxp++; 4534 if (rxp < nb_fwd_ports) 4535 continue; 4536 rxp = 0; 4537 rxq++; 4538 if (rxq >= end) 4539 rxq = start; 4540 } 4541 } 4542 4543 static uint16_t 4544 get_fwd_port_total_tc_num(void) 4545 { 4546 struct rte_eth_dcb_info dcb_info; 4547 uint16_t total_tc_num = 0; 4548 unsigned int i; 4549 4550 for (i = 0; i < nb_fwd_ports; i++) { 4551 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4552 total_tc_num += dcb_info.nb_tcs; 4553 } 4554 4555 return total_tc_num; 4556 } 4557 4558 /** 4559 * For the DCB forwarding test, each core is assigned on each traffic class. 4560 * 4561 * Each core is assigned a multi-stream, each stream being composed of 4562 * a RX queue to poll on a RX port for input messages, associated with 4563 * a TX queue of a TX port where to send forwarded packets. All RX and 4564 * TX queues are mapping to the same traffic class. 4565 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4566 * the same core 4567 */ 4568 static void 4569 dcb_fwd_config_setup(void) 4570 { 4571 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4572 portid_t txp, rxp = 0; 4573 queueid_t txq, rxq = 0; 4574 lcoreid_t lc_id; 4575 uint16_t nb_rx_queue, nb_tx_queue; 4576 uint16_t i, j, k, sm_id = 0; 4577 uint16_t total_tc_num; 4578 struct rte_port *port; 4579 uint8_t tc = 0; 4580 portid_t pid; 4581 int ret; 4582 4583 /* 4584 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4585 * or RTE_PORT_STOPPED. 4586 * 4587 * Re-configure ports to get updated mapping between tc and queue in 4588 * case the queue number of the port is changed. Skip for started ports 4589 * since modifying queue number and calling dev_configure need to stop 4590 * ports first. 4591 */ 4592 for (pid = 0; pid < nb_fwd_ports; pid++) { 4593 if (port_is_started(pid) == 1) 4594 continue; 4595 4596 port = &ports[pid]; 4597 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4598 &port->dev_conf); 4599 if (ret < 0) { 4600 fprintf(stderr, 4601 "Failed to re-configure port %d, ret = %d.\n", 4602 pid, ret); 4603 return; 4604 } 4605 } 4606 4607 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4608 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4609 cur_fwd_config.nb_fwd_streams = 4610 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4611 total_tc_num = get_fwd_port_total_tc_num(); 4612 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4613 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4614 4615 /* reinitialize forwarding streams */ 4616 init_fwd_streams(); 4617 sm_id = 0; 4618 txp = 1; 4619 /* get the dcb info on the first RX and TX ports */ 4620 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4621 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4622 4623 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4624 fwd_lcores[lc_id]->stream_nb = 0; 4625 fwd_lcores[lc_id]->stream_idx = sm_id; 4626 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4627 /* if the nb_queue is zero, means this tc is 4628 * not enabled on the POOL 4629 */ 4630 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4631 break; 4632 k = fwd_lcores[lc_id]->stream_nb + 4633 fwd_lcores[lc_id]->stream_idx; 4634 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4635 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4636 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4637 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4638 for (j = 0; j < nb_rx_queue; j++) { 4639 struct fwd_stream *fs; 4640 4641 fs = fwd_streams[k + j]; 4642 fs->rx_port = fwd_ports_ids[rxp]; 4643 fs->rx_queue = rxq + j; 4644 fs->tx_port = fwd_ports_ids[txp]; 4645 fs->tx_queue = txq + j % nb_tx_queue; 4646 fs->peer_addr = fs->tx_port; 4647 fs->retry_enabled = retry_enabled; 4648 } 4649 fwd_lcores[lc_id]->stream_nb += 4650 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4651 } 4652 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4653 4654 tc++; 4655 if (tc < rxp_dcb_info.nb_tcs) 4656 continue; 4657 /* Restart from TC 0 on next RX port */ 4658 tc = 0; 4659 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4660 rxp = (portid_t) 4661 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4662 else 4663 rxp++; 4664 if (rxp >= nb_fwd_ports) 4665 return; 4666 /* get the dcb information on next RX and TX ports */ 4667 if ((rxp & 0x1) == 0) 4668 txp = (portid_t) (rxp + 1); 4669 else 4670 txp = (portid_t) (rxp - 1); 4671 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4672 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4673 } 4674 } 4675 4676 static void 4677 icmp_echo_config_setup(void) 4678 { 4679 portid_t rxp; 4680 queueid_t rxq; 4681 lcoreid_t lc_id; 4682 uint16_t sm_id; 4683 4684 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4685 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4686 (nb_txq * nb_fwd_ports); 4687 else 4688 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4689 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4690 cur_fwd_config.nb_fwd_streams = 4691 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4692 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4693 cur_fwd_config.nb_fwd_lcores = 4694 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4695 if (verbose_level > 0) { 4696 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4697 __FUNCTION__, 4698 cur_fwd_config.nb_fwd_lcores, 4699 cur_fwd_config.nb_fwd_ports, 4700 cur_fwd_config.nb_fwd_streams); 4701 } 4702 4703 /* reinitialize forwarding streams */ 4704 init_fwd_streams(); 4705 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4706 rxp = 0; rxq = 0; 4707 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4708 if (verbose_level > 0) 4709 printf(" core=%d: \n", lc_id); 4710 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4711 struct fwd_stream *fs; 4712 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4713 fs->rx_port = fwd_ports_ids[rxp]; 4714 fs->rx_queue = rxq; 4715 fs->tx_port = fs->rx_port; 4716 fs->tx_queue = rxq; 4717 fs->peer_addr = fs->tx_port; 4718 fs->retry_enabled = retry_enabled; 4719 if (verbose_level > 0) 4720 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4721 sm_id, fs->rx_port, fs->rx_queue, 4722 fs->tx_queue); 4723 rxq = (queueid_t) (rxq + 1); 4724 if (rxq == nb_rxq) { 4725 rxq = 0; 4726 rxp = (portid_t) (rxp + 1); 4727 } 4728 } 4729 } 4730 } 4731 4732 void 4733 fwd_config_setup(void) 4734 { 4735 struct rte_port *port; 4736 portid_t pt_id; 4737 unsigned int i; 4738 4739 cur_fwd_config.fwd_eng = cur_fwd_eng; 4740 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4741 icmp_echo_config_setup(); 4742 return; 4743 } 4744 4745 if ((nb_rxq > 1) && (nb_txq > 1)){ 4746 if (dcb_config) { 4747 for (i = 0; i < nb_fwd_ports; i++) { 4748 pt_id = fwd_ports_ids[i]; 4749 port = &ports[pt_id]; 4750 if (!port->dcb_flag) { 4751 fprintf(stderr, 4752 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4753 return; 4754 } 4755 } 4756 if (nb_fwd_lcores == 1) { 4757 fprintf(stderr, 4758 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4759 return; 4760 } 4761 4762 dcb_fwd_config_setup(); 4763 } else 4764 rss_fwd_config_setup(); 4765 } 4766 else 4767 simple_fwd_config_setup(); 4768 } 4769 4770 static const char * 4771 mp_alloc_to_str(uint8_t mode) 4772 { 4773 switch (mode) { 4774 case MP_ALLOC_NATIVE: 4775 return "native"; 4776 case MP_ALLOC_ANON: 4777 return "anon"; 4778 case MP_ALLOC_XMEM: 4779 return "xmem"; 4780 case MP_ALLOC_XMEM_HUGE: 4781 return "xmemhuge"; 4782 case MP_ALLOC_XBUF: 4783 return "xbuf"; 4784 default: 4785 return "invalid"; 4786 } 4787 } 4788 4789 void 4790 pkt_fwd_config_display(struct fwd_config *cfg) 4791 { 4792 struct fwd_stream *fs; 4793 lcoreid_t lc_id; 4794 streamid_t sm_id; 4795 4796 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 4797 "NUMA support %s, MP allocation mode: %s\n", 4798 cfg->fwd_eng->fwd_mode_name, 4799 retry_enabled == 0 ? "" : " with retry", 4800 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 4801 numa_support == 1 ? "enabled" : "disabled", 4802 mp_alloc_to_str(mp_alloc_type)); 4803 4804 if (retry_enabled) 4805 printf("TX retry num: %u, delay between TX retries: %uus\n", 4806 burst_tx_retry_num, burst_tx_delay_time); 4807 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 4808 printf("Logical Core %u (socket %u) forwards packets on " 4809 "%d streams:", 4810 fwd_lcores_cpuids[lc_id], 4811 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 4812 fwd_lcores[lc_id]->stream_nb); 4813 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4814 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4815 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 4816 "P=%d/Q=%d (socket %u) ", 4817 fs->rx_port, fs->rx_queue, 4818 ports[fs->rx_port].socket_id, 4819 fs->tx_port, fs->tx_queue, 4820 ports[fs->tx_port].socket_id); 4821 print_ethaddr("peer=", 4822 &peer_eth_addrs[fs->peer_addr]); 4823 } 4824 printf("\n"); 4825 } 4826 printf("\n"); 4827 } 4828 4829 void 4830 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 4831 { 4832 struct rte_ether_addr new_peer_addr; 4833 if (!rte_eth_dev_is_valid_port(port_id)) { 4834 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 4835 return; 4836 } 4837 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 4838 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 4839 peer_addr); 4840 return; 4841 } 4842 peer_eth_addrs[port_id] = new_peer_addr; 4843 } 4844 4845 int 4846 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 4847 { 4848 unsigned int i; 4849 unsigned int lcore_cpuid; 4850 int record_now; 4851 4852 record_now = 0; 4853 again: 4854 for (i = 0; i < nb_lc; i++) { 4855 lcore_cpuid = lcorelist[i]; 4856 if (! rte_lcore_is_enabled(lcore_cpuid)) { 4857 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 4858 return -1; 4859 } 4860 if (lcore_cpuid == rte_get_main_lcore()) { 4861 fprintf(stderr, 4862 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 4863 lcore_cpuid); 4864 return -1; 4865 } 4866 if (record_now) 4867 fwd_lcores_cpuids[i] = lcore_cpuid; 4868 } 4869 if (record_now == 0) { 4870 record_now = 1; 4871 goto again; 4872 } 4873 nb_cfg_lcores = (lcoreid_t) nb_lc; 4874 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 4875 printf("previous number of forwarding cores %u - changed to " 4876 "number of configured cores %u\n", 4877 (unsigned int) nb_fwd_lcores, nb_lc); 4878 nb_fwd_lcores = (lcoreid_t) nb_lc; 4879 } 4880 4881 return 0; 4882 } 4883 4884 int 4885 set_fwd_lcores_mask(uint64_t lcoremask) 4886 { 4887 unsigned int lcorelist[64]; 4888 unsigned int nb_lc; 4889 unsigned int i; 4890 4891 if (lcoremask == 0) { 4892 fprintf(stderr, "Invalid NULL mask of cores\n"); 4893 return -1; 4894 } 4895 nb_lc = 0; 4896 for (i = 0; i < 64; i++) { 4897 if (! ((uint64_t)(1ULL << i) & lcoremask)) 4898 continue; 4899 lcorelist[nb_lc++] = i; 4900 } 4901 return set_fwd_lcores_list(lcorelist, nb_lc); 4902 } 4903 4904 void 4905 set_fwd_lcores_number(uint16_t nb_lc) 4906 { 4907 if (test_done == 0) { 4908 fprintf(stderr, "Please stop forwarding first\n"); 4909 return; 4910 } 4911 if (nb_lc > nb_cfg_lcores) { 4912 fprintf(stderr, 4913 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 4914 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 4915 return; 4916 } 4917 nb_fwd_lcores = (lcoreid_t) nb_lc; 4918 printf("Number of forwarding cores set to %u\n", 4919 (unsigned int) nb_fwd_lcores); 4920 } 4921 4922 void 4923 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 4924 { 4925 unsigned int i; 4926 portid_t port_id; 4927 int record_now; 4928 4929 record_now = 0; 4930 again: 4931 for (i = 0; i < nb_pt; i++) { 4932 port_id = (portid_t) portlist[i]; 4933 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4934 return; 4935 if (record_now) 4936 fwd_ports_ids[i] = port_id; 4937 } 4938 if (record_now == 0) { 4939 record_now = 1; 4940 goto again; 4941 } 4942 nb_cfg_ports = (portid_t) nb_pt; 4943 if (nb_fwd_ports != (portid_t) nb_pt) { 4944 printf("previous number of forwarding ports %u - changed to " 4945 "number of configured ports %u\n", 4946 (unsigned int) nb_fwd_ports, nb_pt); 4947 nb_fwd_ports = (portid_t) nb_pt; 4948 } 4949 } 4950 4951 /** 4952 * Parse the user input and obtain the list of forwarding ports 4953 * 4954 * @param[in] list 4955 * String containing the user input. User can specify 4956 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 4957 * For example, if the user wants to use all the available 4958 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 4959 * If the user wants to use only the ports 1,2 then the input 4960 * is 1,2. 4961 * valid characters are '-' and ',' 4962 * @param[out] values 4963 * This array will be filled with a list of port IDs 4964 * based on the user input 4965 * Note that duplicate entries are discarded and only the first 4966 * count entries in this array are port IDs and all the rest 4967 * will contain default values 4968 * @param[in] maxsize 4969 * This parameter denotes 2 things 4970 * 1) Number of elements in the values array 4971 * 2) Maximum value of each element in the values array 4972 * @return 4973 * On success, returns total count of parsed port IDs 4974 * On failure, returns 0 4975 */ 4976 static unsigned int 4977 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 4978 { 4979 unsigned int count = 0; 4980 char *end = NULL; 4981 int min, max; 4982 int value, i; 4983 unsigned int marked[maxsize]; 4984 4985 if (list == NULL || values == NULL) 4986 return 0; 4987 4988 for (i = 0; i < (int)maxsize; i++) 4989 marked[i] = 0; 4990 4991 min = INT_MAX; 4992 4993 do { 4994 /*Remove the blank spaces if any*/ 4995 while (isblank(*list)) 4996 list++; 4997 if (*list == '\0') 4998 break; 4999 errno = 0; 5000 value = strtol(list, &end, 10); 5001 if (errno || end == NULL) 5002 return 0; 5003 if (value < 0 || value >= (int)maxsize) 5004 return 0; 5005 while (isblank(*end)) 5006 end++; 5007 if (*end == '-' && min == INT_MAX) { 5008 min = value; 5009 } else if ((*end == ',') || (*end == '\0')) { 5010 max = value; 5011 if (min == INT_MAX) 5012 min = value; 5013 for (i = min; i <= max; i++) { 5014 if (count < maxsize) { 5015 if (marked[i]) 5016 continue; 5017 values[count] = i; 5018 marked[i] = 1; 5019 count++; 5020 } 5021 } 5022 min = INT_MAX; 5023 } else 5024 return 0; 5025 list = end + 1; 5026 } while (*end != '\0'); 5027 5028 return count; 5029 } 5030 5031 void 5032 parse_fwd_portlist(const char *portlist) 5033 { 5034 unsigned int portcount; 5035 unsigned int portindex[RTE_MAX_ETHPORTS]; 5036 unsigned int i, valid_port_count = 0; 5037 5038 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 5039 if (!portcount) 5040 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 5041 5042 /* 5043 * Here we verify the validity of the ports 5044 * and thereby calculate the total number of 5045 * valid ports 5046 */ 5047 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 5048 if (rte_eth_dev_is_valid_port(portindex[i])) { 5049 portindex[valid_port_count] = portindex[i]; 5050 valid_port_count++; 5051 } 5052 } 5053 5054 set_fwd_ports_list(portindex, valid_port_count); 5055 } 5056 5057 void 5058 set_fwd_ports_mask(uint64_t portmask) 5059 { 5060 unsigned int portlist[64]; 5061 unsigned int nb_pt; 5062 unsigned int i; 5063 5064 if (portmask == 0) { 5065 fprintf(stderr, "Invalid NULL mask of ports\n"); 5066 return; 5067 } 5068 nb_pt = 0; 5069 RTE_ETH_FOREACH_DEV(i) { 5070 if (! ((uint64_t)(1ULL << i) & portmask)) 5071 continue; 5072 portlist[nb_pt++] = i; 5073 } 5074 set_fwd_ports_list(portlist, nb_pt); 5075 } 5076 5077 void 5078 set_fwd_ports_number(uint16_t nb_pt) 5079 { 5080 if (nb_pt > nb_cfg_ports) { 5081 fprintf(stderr, 5082 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 5083 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 5084 return; 5085 } 5086 nb_fwd_ports = (portid_t) nb_pt; 5087 printf("Number of forwarding ports set to %u\n", 5088 (unsigned int) nb_fwd_ports); 5089 } 5090 5091 int 5092 port_is_forwarding(portid_t port_id) 5093 { 5094 unsigned int i; 5095 5096 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5097 return -1; 5098 5099 for (i = 0; i < nb_fwd_ports; i++) { 5100 if (fwd_ports_ids[i] == port_id) 5101 return 1; 5102 } 5103 5104 return 0; 5105 } 5106 5107 void 5108 set_nb_pkt_per_burst(uint16_t nb) 5109 { 5110 if (nb > MAX_PKT_BURST) { 5111 fprintf(stderr, 5112 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 5113 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 5114 return; 5115 } 5116 nb_pkt_per_burst = nb; 5117 printf("Number of packets per burst set to %u\n", 5118 (unsigned int) nb_pkt_per_burst); 5119 } 5120 5121 static const char * 5122 tx_split_get_name(enum tx_pkt_split split) 5123 { 5124 uint32_t i; 5125 5126 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5127 if (tx_split_name[i].split == split) 5128 return tx_split_name[i].name; 5129 } 5130 return NULL; 5131 } 5132 5133 void 5134 set_tx_pkt_split(const char *name) 5135 { 5136 uint32_t i; 5137 5138 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 5139 if (strcmp(tx_split_name[i].name, name) == 0) { 5140 tx_pkt_split = tx_split_name[i].split; 5141 return; 5142 } 5143 } 5144 fprintf(stderr, "unknown value: \"%s\"\n", name); 5145 } 5146 5147 int 5148 parse_fec_mode(const char *name, uint32_t *fec_capa) 5149 { 5150 uint8_t i; 5151 5152 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 5153 if (strcmp(fec_mode_name[i].name, name) == 0) { 5154 *fec_capa = 5155 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 5156 return 0; 5157 } 5158 } 5159 return -1; 5160 } 5161 5162 void 5163 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 5164 { 5165 unsigned int i, j; 5166 5167 printf("FEC capabilities:\n"); 5168 5169 for (i = 0; i < num; i++) { 5170 printf("%s : ", 5171 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 5172 5173 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 5174 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 5175 speed_fec_capa[i].capa) 5176 printf("%s ", fec_mode_name[j].name); 5177 } 5178 printf("\n"); 5179 } 5180 } 5181 5182 void 5183 show_rx_pkt_offsets(void) 5184 { 5185 uint32_t i, n; 5186 5187 n = rx_pkt_nb_offs; 5188 printf("Number of offsets: %u\n", n); 5189 if (n) { 5190 printf("Segment offsets: "); 5191 for (i = 0; i != n - 1; i++) 5192 printf("%hu,", rx_pkt_seg_offsets[i]); 5193 printf("%hu\n", rx_pkt_seg_lengths[i]); 5194 } 5195 } 5196 5197 void 5198 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 5199 { 5200 unsigned int i; 5201 5202 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 5203 printf("nb segments per RX packets=%u >= " 5204 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 5205 return; 5206 } 5207 5208 /* 5209 * No extra check here, the segment length will be checked by PMD 5210 * in the extended queue setup. 5211 */ 5212 for (i = 0; i < nb_offs; i++) { 5213 if (seg_offsets[i] >= UINT16_MAX) { 5214 printf("offset[%u]=%u > UINT16_MAX - give up\n", 5215 i, seg_offsets[i]); 5216 return; 5217 } 5218 } 5219 5220 for (i = 0; i < nb_offs; i++) 5221 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 5222 5223 rx_pkt_nb_offs = (uint8_t) nb_offs; 5224 } 5225 5226 void 5227 show_rx_pkt_segments(void) 5228 { 5229 uint32_t i, n; 5230 5231 n = rx_pkt_nb_segs; 5232 printf("Number of segments: %u\n", n); 5233 if (n) { 5234 printf("Segment sizes: "); 5235 for (i = 0; i != n - 1; i++) 5236 printf("%hu,", rx_pkt_seg_lengths[i]); 5237 printf("%hu\n", rx_pkt_seg_lengths[i]); 5238 } 5239 } 5240 5241 static const char *get_ptype_str(uint32_t ptype) 5242 { 5243 const char *str; 5244 5245 switch (ptype) { 5246 case RTE_PTYPE_L2_ETHER: 5247 str = "eth"; 5248 break; 5249 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 5250 str = "ipv4"; 5251 break; 5252 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 5253 str = "ipv6"; 5254 break; 5255 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5256 str = "ipv4-tcp"; 5257 break; 5258 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5259 str = "ipv4-udp"; 5260 break; 5261 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5262 str = "ipv4-sctp"; 5263 break; 5264 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP: 5265 str = "ipv6-tcp"; 5266 break; 5267 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP: 5268 str = "ipv6-udp"; 5269 break; 5270 case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP: 5271 str = "ipv6-sctp"; 5272 break; 5273 case RTE_PTYPE_TUNNEL_GRENAT: 5274 str = "grenat"; 5275 break; 5276 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER: 5277 str = "inner-eth"; 5278 break; 5279 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5280 | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 5281 str = "inner-ipv4"; 5282 break; 5283 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 5284 | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 5285 str = "inner-ipv6"; 5286 break; 5287 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5288 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5289 str = "inner-ipv4-tcp"; 5290 break; 5291 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5292 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5293 str = "inner-ipv4-udp"; 5294 break; 5295 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5296 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5297 str = "inner-ipv4-sctp"; 5298 break; 5299 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5300 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP: 5301 str = "inner-ipv6-tcp"; 5302 break; 5303 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5304 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP: 5305 str = "inner-ipv6-udp"; 5306 break; 5307 case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 5308 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP: 5309 str = "inner-ipv6-sctp"; 5310 break; 5311 default: 5312 str = "unsupported"; 5313 } 5314 5315 return str; 5316 } 5317 5318 void 5319 show_rx_pkt_hdrs(void) 5320 { 5321 uint32_t i, n; 5322 5323 n = rx_pkt_nb_segs; 5324 printf("Number of segments: %u\n", n); 5325 if (n) { 5326 printf("Packet segs: "); 5327 for (i = 0; i < n - 1; i++) 5328 printf("%s, ", get_ptype_str(rx_pkt_hdr_protos[i])); 5329 printf("payload\n"); 5330 } 5331 } 5332 5333 void 5334 set_rx_pkt_hdrs(unsigned int *seg_hdrs, unsigned int nb_segs) 5335 { 5336 unsigned int i; 5337 5338 if (nb_segs + 1 > MAX_SEGS_BUFFER_SPLIT) { 5339 printf("nb segments per RX packets=%u > " 5340 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs + 1); 5341 return; 5342 } 5343 5344 memset(rx_pkt_hdr_protos, 0, sizeof(rx_pkt_hdr_protos)); 5345 5346 for (i = 0; i < nb_segs; i++) 5347 rx_pkt_hdr_protos[i] = (uint32_t)seg_hdrs[i]; 5348 /* 5349 * We calculate the number of hdrs, but payload is not included, 5350 * so rx_pkt_nb_segs would increase 1. 5351 */ 5352 rx_pkt_nb_segs = nb_segs + 1; 5353 } 5354 5355 void 5356 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5357 { 5358 unsigned int i; 5359 5360 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 5361 printf("nb segments per RX packets=%u >= " 5362 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 5363 return; 5364 } 5365 5366 /* 5367 * No extra check here, the segment length will be checked by PMD 5368 * in the extended queue setup. 5369 */ 5370 for (i = 0; i < nb_segs; i++) { 5371 if (seg_lengths[i] >= UINT16_MAX) { 5372 printf("length[%u]=%u > UINT16_MAX - give up\n", 5373 i, seg_lengths[i]); 5374 return; 5375 } 5376 } 5377 5378 for (i = 0; i < nb_segs; i++) 5379 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5380 5381 rx_pkt_nb_segs = (uint8_t) nb_segs; 5382 } 5383 5384 void 5385 show_tx_pkt_segments(void) 5386 { 5387 uint32_t i, n; 5388 const char *split; 5389 5390 n = tx_pkt_nb_segs; 5391 split = tx_split_get_name(tx_pkt_split); 5392 5393 printf("Number of segments: %u\n", n); 5394 printf("Segment sizes: "); 5395 for (i = 0; i != n - 1; i++) 5396 printf("%hu,", tx_pkt_seg_lengths[i]); 5397 printf("%hu\n", tx_pkt_seg_lengths[i]); 5398 printf("Split packet: %s\n", split); 5399 } 5400 5401 static bool 5402 nb_segs_is_invalid(unsigned int nb_segs) 5403 { 5404 uint16_t ring_size; 5405 uint16_t queue_id; 5406 uint16_t port_id; 5407 int ret; 5408 5409 RTE_ETH_FOREACH_DEV(port_id) { 5410 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 5411 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 5412 if (ret) { 5413 /* Port may not be initialized yet, can't say 5414 * the port is invalid in this stage. 5415 */ 5416 continue; 5417 } 5418 if (ring_size < nb_segs) { 5419 printf("nb segments per TX packets=%u >= TX " 5420 "queue(%u) ring_size=%u - txpkts ignored\n", 5421 nb_segs, queue_id, ring_size); 5422 return true; 5423 } 5424 } 5425 } 5426 5427 return false; 5428 } 5429 5430 void 5431 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5432 { 5433 uint16_t tx_pkt_len; 5434 unsigned int i; 5435 5436 /* 5437 * For single segment settings failed check is ignored. 5438 * It is a very basic capability to send the single segment 5439 * packets, suppose it is always supported. 5440 */ 5441 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 5442 fprintf(stderr, 5443 "Tx segment size(%u) is not supported - txpkts ignored\n", 5444 nb_segs); 5445 return; 5446 } 5447 5448 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 5449 fprintf(stderr, 5450 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 5451 nb_segs, RTE_MAX_SEGS_PER_PKT); 5452 return; 5453 } 5454 5455 /* 5456 * Check that each segment length is greater or equal than 5457 * the mbuf data size. 5458 * Check also that the total packet length is greater or equal than the 5459 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 5460 * 20 + 8). 5461 */ 5462 tx_pkt_len = 0; 5463 for (i = 0; i < nb_segs; i++) { 5464 if (seg_lengths[i] > mbuf_data_size[0]) { 5465 fprintf(stderr, 5466 "length[%u]=%u > mbuf_data_size=%u - give up\n", 5467 i, seg_lengths[i], mbuf_data_size[0]); 5468 return; 5469 } 5470 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 5471 } 5472 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 5473 fprintf(stderr, "total packet length=%u < %d - give up\n", 5474 (unsigned) tx_pkt_len, 5475 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 5476 return; 5477 } 5478 5479 for (i = 0; i < nb_segs; i++) 5480 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5481 5482 tx_pkt_length = tx_pkt_len; 5483 tx_pkt_nb_segs = (uint8_t) nb_segs; 5484 } 5485 5486 void 5487 show_tx_pkt_times(void) 5488 { 5489 printf("Interburst gap: %u\n", tx_pkt_times_inter); 5490 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 5491 } 5492 5493 void 5494 set_tx_pkt_times(unsigned int *tx_times) 5495 { 5496 tx_pkt_times_inter = tx_times[0]; 5497 tx_pkt_times_intra = tx_times[1]; 5498 } 5499 5500 #ifdef RTE_LIB_GRO 5501 void 5502 setup_gro(const char *onoff, portid_t port_id) 5503 { 5504 if (!rte_eth_dev_is_valid_port(port_id)) { 5505 fprintf(stderr, "invalid port id %u\n", port_id); 5506 return; 5507 } 5508 if (test_done == 0) { 5509 fprintf(stderr, 5510 "Before enable/disable GRO, please stop forwarding first\n"); 5511 return; 5512 } 5513 if (strcmp(onoff, "on") == 0) { 5514 if (gro_ports[port_id].enable != 0) { 5515 fprintf(stderr, 5516 "Port %u has enabled GRO. Please disable GRO first\n", 5517 port_id); 5518 return; 5519 } 5520 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5521 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 5522 gro_ports[port_id].param.max_flow_num = 5523 GRO_DEFAULT_FLOW_NUM; 5524 gro_ports[port_id].param.max_item_per_flow = 5525 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 5526 } 5527 gro_ports[port_id].enable = 1; 5528 } else { 5529 if (gro_ports[port_id].enable == 0) { 5530 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 5531 return; 5532 } 5533 gro_ports[port_id].enable = 0; 5534 } 5535 } 5536 5537 void 5538 setup_gro_flush_cycles(uint8_t cycles) 5539 { 5540 if (test_done == 0) { 5541 fprintf(stderr, 5542 "Before change flush interval for GRO, please stop forwarding first.\n"); 5543 return; 5544 } 5545 5546 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 5547 GRO_DEFAULT_FLUSH_CYCLES) { 5548 fprintf(stderr, 5549 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 5550 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 5551 cycles = GRO_DEFAULT_FLUSH_CYCLES; 5552 } 5553 5554 gro_flush_cycles = cycles; 5555 } 5556 5557 void 5558 show_gro(portid_t port_id) 5559 { 5560 struct rte_gro_param *param; 5561 uint32_t max_pkts_num; 5562 5563 param = &gro_ports[port_id].param; 5564 5565 if (!rte_eth_dev_is_valid_port(port_id)) { 5566 fprintf(stderr, "Invalid port id %u.\n", port_id); 5567 return; 5568 } 5569 if (gro_ports[port_id].enable) { 5570 printf("GRO type: TCP/IPv4\n"); 5571 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5572 max_pkts_num = param->max_flow_num * 5573 param->max_item_per_flow; 5574 } else 5575 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 5576 printf("Max number of packets to perform GRO: %u\n", 5577 max_pkts_num); 5578 printf("Flushing cycles: %u\n", gro_flush_cycles); 5579 } else 5580 printf("Port %u doesn't enable GRO.\n", port_id); 5581 } 5582 #endif /* RTE_LIB_GRO */ 5583 5584 #ifdef RTE_LIB_GSO 5585 void 5586 setup_gso(const char *mode, portid_t port_id) 5587 { 5588 if (!rte_eth_dev_is_valid_port(port_id)) { 5589 fprintf(stderr, "invalid port id %u\n", port_id); 5590 return; 5591 } 5592 if (strcmp(mode, "on") == 0) { 5593 if (test_done == 0) { 5594 fprintf(stderr, 5595 "before enabling GSO, please stop forwarding first\n"); 5596 return; 5597 } 5598 gso_ports[port_id].enable = 1; 5599 } else if (strcmp(mode, "off") == 0) { 5600 if (test_done == 0) { 5601 fprintf(stderr, 5602 "before disabling GSO, please stop forwarding first\n"); 5603 return; 5604 } 5605 gso_ports[port_id].enable = 0; 5606 } 5607 } 5608 #endif /* RTE_LIB_GSO */ 5609 5610 char* 5611 list_pkt_forwarding_modes(void) 5612 { 5613 static char fwd_modes[128] = ""; 5614 const char *separator = "|"; 5615 struct fwd_engine *fwd_eng; 5616 unsigned i = 0; 5617 5618 if (strlen (fwd_modes) == 0) { 5619 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5620 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5621 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5622 strncat(fwd_modes, separator, 5623 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5624 } 5625 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5626 } 5627 5628 return fwd_modes; 5629 } 5630 5631 char* 5632 list_pkt_forwarding_retry_modes(void) 5633 { 5634 static char fwd_modes[128] = ""; 5635 const char *separator = "|"; 5636 struct fwd_engine *fwd_eng; 5637 unsigned i = 0; 5638 5639 if (strlen(fwd_modes) == 0) { 5640 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5641 if (fwd_eng == &rx_only_engine) 5642 continue; 5643 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5644 sizeof(fwd_modes) - 5645 strlen(fwd_modes) - 1); 5646 strncat(fwd_modes, separator, 5647 sizeof(fwd_modes) - 5648 strlen(fwd_modes) - 1); 5649 } 5650 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5651 } 5652 5653 return fwd_modes; 5654 } 5655 5656 void 5657 set_pkt_forwarding_mode(const char *fwd_mode_name) 5658 { 5659 struct fwd_engine *fwd_eng; 5660 unsigned i; 5661 5662 i = 0; 5663 while ((fwd_eng = fwd_engines[i]) != NULL) { 5664 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5665 printf("Set %s packet forwarding mode%s\n", 5666 fwd_mode_name, 5667 retry_enabled == 0 ? "" : " with retry"); 5668 cur_fwd_eng = fwd_eng; 5669 return; 5670 } 5671 i++; 5672 } 5673 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5674 } 5675 5676 void 5677 add_rx_dump_callbacks(portid_t portid) 5678 { 5679 struct rte_eth_dev_info dev_info; 5680 uint16_t queue; 5681 int ret; 5682 5683 if (port_id_is_invalid(portid, ENABLED_WARN)) 5684 return; 5685 5686 ret = eth_dev_info_get_print_err(portid, &dev_info); 5687 if (ret != 0) 5688 return; 5689 5690 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5691 if (!ports[portid].rx_dump_cb[queue]) 5692 ports[portid].rx_dump_cb[queue] = 5693 rte_eth_add_rx_callback(portid, queue, 5694 dump_rx_pkts, NULL); 5695 } 5696 5697 void 5698 add_tx_dump_callbacks(portid_t portid) 5699 { 5700 struct rte_eth_dev_info dev_info; 5701 uint16_t queue; 5702 int ret; 5703 5704 if (port_id_is_invalid(portid, ENABLED_WARN)) 5705 return; 5706 5707 ret = eth_dev_info_get_print_err(portid, &dev_info); 5708 if (ret != 0) 5709 return; 5710 5711 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5712 if (!ports[portid].tx_dump_cb[queue]) 5713 ports[portid].tx_dump_cb[queue] = 5714 rte_eth_add_tx_callback(portid, queue, 5715 dump_tx_pkts, NULL); 5716 } 5717 5718 void 5719 remove_rx_dump_callbacks(portid_t portid) 5720 { 5721 struct rte_eth_dev_info dev_info; 5722 uint16_t queue; 5723 int ret; 5724 5725 if (port_id_is_invalid(portid, ENABLED_WARN)) 5726 return; 5727 5728 ret = eth_dev_info_get_print_err(portid, &dev_info); 5729 if (ret != 0) 5730 return; 5731 5732 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5733 if (ports[portid].rx_dump_cb[queue]) { 5734 rte_eth_remove_rx_callback(portid, queue, 5735 ports[portid].rx_dump_cb[queue]); 5736 ports[portid].rx_dump_cb[queue] = NULL; 5737 } 5738 } 5739 5740 void 5741 remove_tx_dump_callbacks(portid_t portid) 5742 { 5743 struct rte_eth_dev_info dev_info; 5744 uint16_t queue; 5745 int ret; 5746 5747 if (port_id_is_invalid(portid, ENABLED_WARN)) 5748 return; 5749 5750 ret = eth_dev_info_get_print_err(portid, &dev_info); 5751 if (ret != 0) 5752 return; 5753 5754 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5755 if (ports[portid].tx_dump_cb[queue]) { 5756 rte_eth_remove_tx_callback(portid, queue, 5757 ports[portid].tx_dump_cb[queue]); 5758 ports[portid].tx_dump_cb[queue] = NULL; 5759 } 5760 } 5761 5762 void 5763 configure_rxtx_dump_callbacks(uint16_t verbose) 5764 { 5765 portid_t portid; 5766 5767 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5768 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 5769 return; 5770 #endif 5771 5772 RTE_ETH_FOREACH_DEV(portid) 5773 { 5774 if (verbose == 1 || verbose > 2) 5775 add_rx_dump_callbacks(portid); 5776 else 5777 remove_rx_dump_callbacks(portid); 5778 if (verbose >= 2) 5779 add_tx_dump_callbacks(portid); 5780 else 5781 remove_tx_dump_callbacks(portid); 5782 } 5783 } 5784 5785 void 5786 set_verbose_level(uint16_t vb_level) 5787 { 5788 printf("Change verbose level from %u to %u\n", 5789 (unsigned int) verbose_level, (unsigned int) vb_level); 5790 verbose_level = vb_level; 5791 configure_rxtx_dump_callbacks(verbose_level); 5792 } 5793 5794 void 5795 vlan_extend_set(portid_t port_id, int on) 5796 { 5797 int diag; 5798 int vlan_offload; 5799 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5800 5801 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5802 return; 5803 5804 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5805 5806 if (on) { 5807 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 5808 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5809 } else { 5810 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 5811 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5812 } 5813 5814 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5815 if (diag < 0) { 5816 fprintf(stderr, 5817 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 5818 port_id, on, diag); 5819 return; 5820 } 5821 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5822 } 5823 5824 void 5825 rx_vlan_strip_set(portid_t port_id, int on) 5826 { 5827 int diag; 5828 int vlan_offload; 5829 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5830 5831 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5832 return; 5833 5834 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5835 5836 if (on) { 5837 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 5838 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5839 } else { 5840 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 5841 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5842 } 5843 5844 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5845 if (diag < 0) { 5846 fprintf(stderr, 5847 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5848 __func__, port_id, on, diag); 5849 return; 5850 } 5851 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5852 } 5853 5854 void 5855 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 5856 { 5857 int diag; 5858 5859 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5860 return; 5861 5862 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 5863 if (diag < 0) 5864 fprintf(stderr, 5865 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 5866 __func__, port_id, queue_id, on, diag); 5867 } 5868 5869 void 5870 rx_vlan_filter_set(portid_t port_id, int on) 5871 { 5872 int diag; 5873 int vlan_offload; 5874 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5875 5876 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5877 return; 5878 5879 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5880 5881 if (on) { 5882 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 5883 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5884 } else { 5885 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 5886 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5887 } 5888 5889 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5890 if (diag < 0) { 5891 fprintf(stderr, 5892 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5893 __func__, port_id, on, diag); 5894 return; 5895 } 5896 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5897 } 5898 5899 void 5900 rx_vlan_qinq_strip_set(portid_t port_id, int on) 5901 { 5902 int diag; 5903 int vlan_offload; 5904 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5905 5906 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5907 return; 5908 5909 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5910 5911 if (on) { 5912 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 5913 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5914 } else { 5915 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 5916 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5917 } 5918 5919 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5920 if (diag < 0) { 5921 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 5922 __func__, port_id, on, diag); 5923 return; 5924 } 5925 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5926 } 5927 5928 int 5929 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 5930 { 5931 int diag; 5932 5933 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5934 return 1; 5935 if (vlan_id_is_invalid(vlan_id)) 5936 return 1; 5937 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 5938 if (diag == 0) 5939 return 0; 5940 fprintf(stderr, 5941 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 5942 port_id, vlan_id, on, diag); 5943 return -1; 5944 } 5945 5946 void 5947 rx_vlan_all_filter_set(portid_t port_id, int on) 5948 { 5949 uint16_t vlan_id; 5950 5951 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5952 return; 5953 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 5954 if (rx_vft_set(port_id, vlan_id, on)) 5955 break; 5956 } 5957 } 5958 5959 void 5960 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 5961 { 5962 int diag; 5963 5964 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5965 return; 5966 5967 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 5968 if (diag == 0) 5969 return; 5970 5971 fprintf(stderr, 5972 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 5973 port_id, vlan_type, tp_id, diag); 5974 } 5975 5976 void 5977 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 5978 { 5979 struct rte_eth_dev_info dev_info; 5980 int ret; 5981 5982 if (vlan_id_is_invalid(vlan_id)) 5983 return; 5984 5985 if (ports[port_id].dev_conf.txmode.offloads & 5986 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 5987 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 5988 return; 5989 } 5990 5991 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5992 if (ret != 0) 5993 return; 5994 5995 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 5996 fprintf(stderr, 5997 "Error: vlan insert is not supported by port %d\n", 5998 port_id); 5999 return; 6000 } 6001 6002 tx_vlan_reset(port_id); 6003 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 6004 ports[port_id].tx_vlan_id = vlan_id; 6005 } 6006 6007 void 6008 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 6009 { 6010 struct rte_eth_dev_info dev_info; 6011 int ret; 6012 6013 if (vlan_id_is_invalid(vlan_id)) 6014 return; 6015 if (vlan_id_is_invalid(vlan_id_outer)) 6016 return; 6017 6018 ret = eth_dev_info_get_print_err(port_id, &dev_info); 6019 if (ret != 0) 6020 return; 6021 6022 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 6023 fprintf(stderr, 6024 "Error: qinq insert not supported by port %d\n", 6025 port_id); 6026 return; 6027 } 6028 6029 tx_vlan_reset(port_id); 6030 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6031 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6032 ports[port_id].tx_vlan_id = vlan_id; 6033 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 6034 } 6035 6036 void 6037 tx_vlan_reset(portid_t port_id) 6038 { 6039 ports[port_id].dev_conf.txmode.offloads &= 6040 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 6041 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 6042 ports[port_id].tx_vlan_id = 0; 6043 ports[port_id].tx_vlan_id_outer = 0; 6044 } 6045 6046 void 6047 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 6048 { 6049 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6050 return; 6051 6052 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 6053 } 6054 6055 void 6056 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 6057 { 6058 int ret; 6059 6060 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6061 return; 6062 6063 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 6064 return; 6065 6066 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 6067 fprintf(stderr, "map_value not in required range 0..%d\n", 6068 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 6069 return; 6070 } 6071 6072 if (!is_rx) { /* tx */ 6073 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 6074 map_value); 6075 if (ret) { 6076 fprintf(stderr, 6077 "failed to set tx queue stats mapping.\n"); 6078 return; 6079 } 6080 } else { /* rx */ 6081 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 6082 map_value); 6083 if (ret) { 6084 fprintf(stderr, 6085 "failed to set rx queue stats mapping.\n"); 6086 return; 6087 } 6088 } 6089 } 6090 6091 void 6092 set_xstats_hide_zero(uint8_t on_off) 6093 { 6094 xstats_hide_zero = on_off; 6095 } 6096 6097 void 6098 set_record_core_cycles(uint8_t on_off) 6099 { 6100 record_core_cycles = on_off; 6101 } 6102 6103 void 6104 set_record_burst_stats(uint8_t on_off) 6105 { 6106 record_burst_stats = on_off; 6107 } 6108 6109 uint16_t 6110 str_to_flowtype(const char *string) 6111 { 6112 uint8_t i; 6113 6114 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6115 if (!strcmp(flowtype_str_table[i].str, string)) 6116 return flowtype_str_table[i].ftype; 6117 } 6118 6119 if (isdigit(string[0])) { 6120 int val = atoi(string); 6121 if (val > 0 && val < 64) 6122 return (uint16_t)val; 6123 } 6124 6125 return RTE_ETH_FLOW_UNKNOWN; 6126 } 6127 6128 const char* 6129 flowtype_to_str(uint16_t flow_type) 6130 { 6131 uint8_t i; 6132 6133 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 6134 if (flowtype_str_table[i].ftype == flow_type) 6135 return flowtype_str_table[i].str; 6136 } 6137 6138 return NULL; 6139 } 6140 6141 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 6142 6143 static inline void 6144 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6145 { 6146 struct rte_eth_flex_payload_cfg *cfg; 6147 uint32_t i, j; 6148 6149 for (i = 0; i < flex_conf->nb_payloads; i++) { 6150 cfg = &flex_conf->flex_set[i]; 6151 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 6152 printf("\n RAW: "); 6153 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 6154 printf("\n L2_PAYLOAD: "); 6155 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 6156 printf("\n L3_PAYLOAD: "); 6157 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 6158 printf("\n L4_PAYLOAD: "); 6159 else 6160 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 6161 for (j = 0; j < num; j++) 6162 printf(" %-5u", cfg->src_offset[j]); 6163 } 6164 printf("\n"); 6165 } 6166 6167 static inline void 6168 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 6169 { 6170 struct rte_eth_fdir_flex_mask *mask; 6171 uint32_t i, j; 6172 const char *p; 6173 6174 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 6175 mask = &flex_conf->flex_mask[i]; 6176 p = flowtype_to_str(mask->flow_type); 6177 printf("\n %s:\t", p ? p : "unknown"); 6178 for (j = 0; j < num; j++) 6179 printf(" %02x", mask->mask[j]); 6180 } 6181 printf("\n"); 6182 } 6183 6184 static inline void 6185 print_fdir_flow_type(uint32_t flow_types_mask) 6186 { 6187 int i; 6188 const char *p; 6189 6190 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 6191 if (!(flow_types_mask & (1 << i))) 6192 continue; 6193 p = flowtype_to_str(i); 6194 if (p) 6195 printf(" %s", p); 6196 else 6197 printf(" unknown"); 6198 } 6199 printf("\n"); 6200 } 6201 6202 static int 6203 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 6204 struct rte_eth_fdir_stats *fdir_stat) 6205 { 6206 int ret = -ENOTSUP; 6207 6208 #ifdef RTE_NET_I40E 6209 if (ret == -ENOTSUP) { 6210 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 6211 if (!ret) 6212 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 6213 } 6214 #endif 6215 #ifdef RTE_NET_IXGBE 6216 if (ret == -ENOTSUP) { 6217 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 6218 if (!ret) 6219 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 6220 } 6221 #endif 6222 switch (ret) { 6223 case 0: 6224 break; 6225 case -ENOTSUP: 6226 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 6227 port_id); 6228 break; 6229 default: 6230 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 6231 break; 6232 } 6233 return ret; 6234 } 6235 6236 void 6237 fdir_get_infos(portid_t port_id) 6238 { 6239 struct rte_eth_fdir_stats fdir_stat; 6240 struct rte_eth_fdir_info fdir_info; 6241 6242 static const char *fdir_stats_border = "########################"; 6243 6244 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6245 return; 6246 6247 memset(&fdir_info, 0, sizeof(fdir_info)); 6248 memset(&fdir_stat, 0, sizeof(fdir_stat)); 6249 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 6250 return; 6251 6252 printf("\n %s FDIR infos for port %-2d %s\n", 6253 fdir_stats_border, port_id, fdir_stats_border); 6254 printf(" MODE: "); 6255 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 6256 printf(" PERFECT\n"); 6257 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 6258 printf(" PERFECT-MAC-VLAN\n"); 6259 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 6260 printf(" PERFECT-TUNNEL\n"); 6261 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 6262 printf(" SIGNATURE\n"); 6263 else 6264 printf(" DISABLE\n"); 6265 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 6266 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 6267 printf(" SUPPORTED FLOW TYPE: "); 6268 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 6269 } 6270 printf(" FLEX PAYLOAD INFO:\n"); 6271 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 6272 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 6273 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 6274 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 6275 fdir_info.flex_payload_unit, 6276 fdir_info.max_flex_payload_segment_num, 6277 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 6278 if (fdir_info.flex_conf.nb_payloads > 0) { 6279 printf(" FLEX PAYLOAD SRC OFFSET:"); 6280 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6281 } 6282 if (fdir_info.flex_conf.nb_flexmasks > 0) { 6283 printf(" FLEX MASK CFG:"); 6284 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 6285 } 6286 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 6287 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 6288 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 6289 fdir_info.guarant_spc, fdir_info.best_spc); 6290 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 6291 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 6292 " add: %-10"PRIu64" remove: %"PRIu64"\n" 6293 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 6294 fdir_stat.collision, fdir_stat.free, 6295 fdir_stat.maxhash, fdir_stat.maxlen, 6296 fdir_stat.add, fdir_stat.remove, 6297 fdir_stat.f_add, fdir_stat.f_remove); 6298 printf(" %s############################%s\n", 6299 fdir_stats_border, fdir_stats_border); 6300 } 6301 6302 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 6303 6304 void 6305 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 6306 { 6307 #ifdef RTE_NET_IXGBE 6308 int diag; 6309 6310 if (is_rx) 6311 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 6312 else 6313 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 6314 6315 if (diag == 0) 6316 return; 6317 fprintf(stderr, 6318 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 6319 is_rx ? "rx" : "tx", port_id, diag); 6320 return; 6321 #endif 6322 fprintf(stderr, "VF %s setting not supported for port %d\n", 6323 is_rx ? "Rx" : "Tx", port_id); 6324 RTE_SET_USED(vf); 6325 RTE_SET_USED(on); 6326 } 6327 6328 int 6329 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate) 6330 { 6331 int diag; 6332 struct rte_eth_link link; 6333 int ret; 6334 6335 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6336 return 1; 6337 ret = eth_link_get_nowait_print_err(port_id, &link); 6338 if (ret < 0) 6339 return 1; 6340 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 6341 rate > link.link_speed) { 6342 fprintf(stderr, 6343 "Invalid rate value:%u bigger than link speed: %u\n", 6344 rate, link.link_speed); 6345 return 1; 6346 } 6347 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 6348 if (diag == 0) 6349 return diag; 6350 fprintf(stderr, 6351 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 6352 port_id, diag); 6353 return diag; 6354 } 6355 6356 int 6357 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk) 6358 { 6359 int diag = -ENOTSUP; 6360 6361 RTE_SET_USED(vf); 6362 RTE_SET_USED(rate); 6363 RTE_SET_USED(q_msk); 6364 6365 #ifdef RTE_NET_IXGBE 6366 if (diag == -ENOTSUP) 6367 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 6368 q_msk); 6369 #endif 6370 #ifdef RTE_NET_BNXT 6371 if (diag == -ENOTSUP) 6372 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 6373 #endif 6374 if (diag == 0) 6375 return diag; 6376 6377 fprintf(stderr, 6378 "%s for port_id=%d failed diag=%d\n", 6379 __func__, port_id, diag); 6380 return diag; 6381 } 6382 6383 int 6384 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh) 6385 { 6386 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6387 return -EINVAL; 6388 6389 return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh); 6390 } 6391 6392 /* 6393 * Functions to manage the set of filtered Multicast MAC addresses. 6394 * 6395 * A pool of filtered multicast MAC addresses is associated with each port. 6396 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 6397 * The address of the pool and the number of valid multicast MAC addresses 6398 * recorded in the pool are stored in the fields "mc_addr_pool" and 6399 * "mc_addr_nb" of the "rte_port" data structure. 6400 * 6401 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 6402 * to be supplied a contiguous array of multicast MAC addresses. 6403 * To comply with this constraint, the set of multicast addresses recorded 6404 * into the pool are systematically compacted at the beginning of the pool. 6405 * Hence, when a multicast address is removed from the pool, all following 6406 * addresses, if any, are copied back to keep the set contiguous. 6407 */ 6408 #define MCAST_POOL_INC 32 6409 6410 static int 6411 mcast_addr_pool_extend(struct rte_port *port) 6412 { 6413 struct rte_ether_addr *mc_pool; 6414 size_t mc_pool_size; 6415 6416 /* 6417 * If a free entry is available at the end of the pool, just 6418 * increment the number of recorded multicast addresses. 6419 */ 6420 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 6421 port->mc_addr_nb++; 6422 return 0; 6423 } 6424 6425 /* 6426 * [re]allocate a pool with MCAST_POOL_INC more entries. 6427 * The previous test guarantees that port->mc_addr_nb is a multiple 6428 * of MCAST_POOL_INC. 6429 */ 6430 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 6431 MCAST_POOL_INC); 6432 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 6433 mc_pool_size); 6434 if (mc_pool == NULL) { 6435 fprintf(stderr, 6436 "allocation of pool of %u multicast addresses failed\n", 6437 port->mc_addr_nb + MCAST_POOL_INC); 6438 return -ENOMEM; 6439 } 6440 6441 port->mc_addr_pool = mc_pool; 6442 port->mc_addr_nb++; 6443 return 0; 6444 6445 } 6446 6447 static void 6448 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 6449 { 6450 if (mcast_addr_pool_extend(port) != 0) 6451 return; 6452 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 6453 } 6454 6455 static void 6456 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 6457 { 6458 port->mc_addr_nb--; 6459 if (addr_idx == port->mc_addr_nb) { 6460 /* No need to recompact the set of multicast addresses. */ 6461 if (port->mc_addr_nb == 0) { 6462 /* free the pool of multicast addresses. */ 6463 free(port->mc_addr_pool); 6464 port->mc_addr_pool = NULL; 6465 } 6466 return; 6467 } 6468 memmove(&port->mc_addr_pool[addr_idx], 6469 &port->mc_addr_pool[addr_idx + 1], 6470 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 6471 } 6472 6473 int 6474 mcast_addr_pool_destroy(portid_t port_id) 6475 { 6476 struct rte_port *port; 6477 6478 if (port_id_is_invalid(port_id, ENABLED_WARN) || 6479 port_id == (portid_t)RTE_PORT_ALL) 6480 return -EINVAL; 6481 port = &ports[port_id]; 6482 6483 if (port->mc_addr_nb != 0) { 6484 /* free the pool of multicast addresses. */ 6485 free(port->mc_addr_pool); 6486 port->mc_addr_pool = NULL; 6487 port->mc_addr_nb = 0; 6488 } 6489 return 0; 6490 } 6491 6492 static int 6493 eth_port_multicast_addr_list_set(portid_t port_id) 6494 { 6495 struct rte_port *port; 6496 int diag; 6497 6498 port = &ports[port_id]; 6499 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 6500 port->mc_addr_nb); 6501 if (diag < 0) 6502 fprintf(stderr, 6503 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 6504 port_id, port->mc_addr_nb, diag); 6505 6506 return diag; 6507 } 6508 6509 void 6510 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6511 { 6512 struct rte_port *port; 6513 uint32_t i; 6514 6515 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6516 return; 6517 6518 port = &ports[port_id]; 6519 6520 /* 6521 * Check that the added multicast MAC address is not already recorded 6522 * in the pool of multicast addresses. 6523 */ 6524 for (i = 0; i < port->mc_addr_nb; i++) { 6525 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6526 fprintf(stderr, 6527 "multicast address already filtered by port\n"); 6528 return; 6529 } 6530 } 6531 6532 mcast_addr_pool_append(port, mc_addr); 6533 if (eth_port_multicast_addr_list_set(port_id) < 0) 6534 /* Rollback on failure, remove the address from the pool */ 6535 mcast_addr_pool_remove(port, i); 6536 } 6537 6538 void 6539 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6540 { 6541 struct rte_port *port; 6542 uint32_t i; 6543 6544 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6545 return; 6546 6547 port = &ports[port_id]; 6548 6549 /* 6550 * Search the pool of multicast MAC addresses for the removed address. 6551 */ 6552 for (i = 0; i < port->mc_addr_nb; i++) { 6553 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6554 break; 6555 } 6556 if (i == port->mc_addr_nb) { 6557 fprintf(stderr, "multicast address not filtered by port %d\n", 6558 port_id); 6559 return; 6560 } 6561 6562 mcast_addr_pool_remove(port, i); 6563 if (eth_port_multicast_addr_list_set(port_id) < 0) 6564 /* Rollback on failure, add the address back into the pool */ 6565 mcast_addr_pool_append(port, mc_addr); 6566 } 6567 6568 void 6569 port_dcb_info_display(portid_t port_id) 6570 { 6571 struct rte_eth_dcb_info dcb_info; 6572 uint16_t i; 6573 int ret; 6574 static const char *border = "================"; 6575 6576 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6577 return; 6578 6579 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6580 if (ret) { 6581 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6582 port_id); 6583 return; 6584 } 6585 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6586 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6587 printf("\n TC : "); 6588 for (i = 0; i < dcb_info.nb_tcs; i++) 6589 printf("\t%4d", i); 6590 printf("\n Priority : "); 6591 for (i = 0; i < dcb_info.nb_tcs; i++) 6592 printf("\t%4d", dcb_info.prio_tc[i]); 6593 printf("\n BW percent :"); 6594 for (i = 0; i < dcb_info.nb_tcs; i++) 6595 printf("\t%4d%%", dcb_info.tc_bws[i]); 6596 printf("\n RXQ base : "); 6597 for (i = 0; i < dcb_info.nb_tcs; i++) 6598 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6599 printf("\n RXQ number :"); 6600 for (i = 0; i < dcb_info.nb_tcs; i++) 6601 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6602 printf("\n TXQ base : "); 6603 for (i = 0; i < dcb_info.nb_tcs; i++) 6604 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6605 printf("\n TXQ number :"); 6606 for (i = 0; i < dcb_info.nb_tcs; i++) 6607 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6608 printf("\n"); 6609 } 6610 6611 uint8_t * 6612 open_file(const char *file_path, uint32_t *size) 6613 { 6614 int fd = open(file_path, O_RDONLY); 6615 off_t pkg_size; 6616 uint8_t *buf = NULL; 6617 int ret = 0; 6618 struct stat st_buf; 6619 6620 if (size) 6621 *size = 0; 6622 6623 if (fd == -1) { 6624 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6625 return buf; 6626 } 6627 6628 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6629 close(fd); 6630 fprintf(stderr, "%s: File operations failed\n", __func__); 6631 return buf; 6632 } 6633 6634 pkg_size = st_buf.st_size; 6635 if (pkg_size < 0) { 6636 close(fd); 6637 fprintf(stderr, "%s: File operations failed\n", __func__); 6638 return buf; 6639 } 6640 6641 buf = (uint8_t *)malloc(pkg_size); 6642 if (!buf) { 6643 close(fd); 6644 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6645 return buf; 6646 } 6647 6648 ret = read(fd, buf, pkg_size); 6649 if (ret < 0) { 6650 close(fd); 6651 fprintf(stderr, "%s: File read operation failed\n", __func__); 6652 close_file(buf); 6653 return NULL; 6654 } 6655 6656 if (size) 6657 *size = pkg_size; 6658 6659 close(fd); 6660 6661 return buf; 6662 } 6663 6664 int 6665 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6666 { 6667 FILE *fh = fopen(file_path, "wb"); 6668 6669 if (fh == NULL) { 6670 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6671 return -1; 6672 } 6673 6674 if (fwrite(buf, 1, size, fh) != size) { 6675 fclose(fh); 6676 fprintf(stderr, "%s: File write operation failed\n", __func__); 6677 return -1; 6678 } 6679 6680 fclose(fh); 6681 6682 return 0; 6683 } 6684 6685 int 6686 close_file(uint8_t *buf) 6687 { 6688 if (buf) { 6689 free((void *)buf); 6690 return 0; 6691 } 6692 6693 return -1; 6694 } 6695 6696 void 6697 show_macs(portid_t port_id) 6698 { 6699 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6700 struct rte_eth_dev_info dev_info; 6701 int32_t i, rc, num_macs = 0; 6702 6703 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6704 return; 6705 6706 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6707 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6708 if (rc < 0) 6709 return; 6710 6711 for (i = 0; i < rc; i++) { 6712 6713 /* skip zero address */ 6714 if (rte_is_zero_ether_addr(&addr[i])) 6715 continue; 6716 6717 num_macs++; 6718 } 6719 6720 printf("Number of MAC address added: %d\n", num_macs); 6721 6722 for (i = 0; i < rc; i++) { 6723 6724 /* skip zero address */ 6725 if (rte_is_zero_ether_addr(&addr[i])) 6726 continue; 6727 6728 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6729 printf(" %s\n", buf); 6730 } 6731 } 6732 6733 void 6734 show_mcast_macs(portid_t port_id) 6735 { 6736 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6737 struct rte_ether_addr *addr; 6738 struct rte_port *port; 6739 uint32_t i; 6740 6741 port = &ports[port_id]; 6742 6743 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6744 6745 for (i = 0; i < port->mc_addr_nb; i++) { 6746 addr = &port->mc_addr_pool[i]; 6747 6748 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6749 printf(" %s\n", buf); 6750 } 6751 } 6752