1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_branch_prediction.h> 31 #include <rte_mempool.h> 32 #include <rte_mbuf.h> 33 #include <rte_interrupts.h> 34 #include <rte_pci.h> 35 #include <rte_ether.h> 36 #include <rte_ethdev.h> 37 #include <rte_string_fns.h> 38 #include <rte_cycles.h> 39 #include <rte_flow.h> 40 #include <rte_mtr.h> 41 #include <rte_errno.h> 42 #ifdef RTE_NET_IXGBE 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_NET_I40E 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_NET_BNXT 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #ifdef RTE_LIB_GRO 52 #include <rte_gro.h> 53 #endif 54 #include <rte_hexdump.h> 55 56 #include "testpmd.h" 57 #include "cmdline_mtr.h" 58 59 #define ETHDEV_FWVERS_LEN 32 60 61 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 62 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 63 #else 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 65 #endif 66 67 #define NS_PER_SEC 1E9 68 69 static const struct { 70 enum tx_pkt_split split; 71 const char *name; 72 } tx_split_name[] = { 73 { 74 .split = TX_PKT_SPLIT_OFF, 75 .name = "off", 76 }, 77 { 78 .split = TX_PKT_SPLIT_ON, 79 .name = "on", 80 }, 81 { 82 .split = TX_PKT_SPLIT_RND, 83 .name = "rand", 84 }, 85 }; 86 87 const struct rss_type_info rss_type_table[] = { 88 /* Group types */ 89 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 90 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 91 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 92 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 93 { "none", 0 }, 94 { "ip", RTE_ETH_RSS_IP }, 95 { "udp", RTE_ETH_RSS_UDP }, 96 { "tcp", RTE_ETH_RSS_TCP }, 97 { "sctp", RTE_ETH_RSS_SCTP }, 98 { "tunnel", RTE_ETH_RSS_TUNNEL }, 99 { "vlan", RTE_ETH_RSS_VLAN }, 100 101 /* Individual type */ 102 { "ipv4", RTE_ETH_RSS_IPV4 }, 103 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 104 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 105 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 106 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 107 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 108 { "ipv6", RTE_ETH_RSS_IPV6 }, 109 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 110 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 111 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 112 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 113 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 114 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 115 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 116 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 117 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 118 { "port", RTE_ETH_RSS_PORT }, 119 { "vxlan", RTE_ETH_RSS_VXLAN }, 120 { "geneve", RTE_ETH_RSS_GENEVE }, 121 { "nvgre", RTE_ETH_RSS_NVGRE }, 122 { "gtpu", RTE_ETH_RSS_GTPU }, 123 { "eth", RTE_ETH_RSS_ETH }, 124 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 125 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 126 { "esp", RTE_ETH_RSS_ESP }, 127 { "ah", RTE_ETH_RSS_AH }, 128 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 129 { "pfcp", RTE_ETH_RSS_PFCP }, 130 { "pppoe", RTE_ETH_RSS_PPPOE }, 131 { "ecpri", RTE_ETH_RSS_ECPRI }, 132 { "mpls", RTE_ETH_RSS_MPLS }, 133 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 134 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 135 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 136 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 137 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 138 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 139 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 140 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 141 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 142 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 143 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 144 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 145 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 146 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 147 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 148 { NULL, 0}, 149 }; 150 151 static const struct { 152 enum rte_eth_fec_mode mode; 153 const char *name; 154 } fec_mode_name[] = { 155 { 156 .mode = RTE_ETH_FEC_NOFEC, 157 .name = "off", 158 }, 159 { 160 .mode = RTE_ETH_FEC_AUTO, 161 .name = "auto", 162 }, 163 { 164 .mode = RTE_ETH_FEC_BASER, 165 .name = "baser", 166 }, 167 { 168 .mode = RTE_ETH_FEC_RS, 169 .name = "rs", 170 }, 171 }; 172 173 static const struct { 174 char str[32]; 175 uint16_t ftype; 176 } flowtype_str_table[] = { 177 {"raw", RTE_ETH_FLOW_RAW}, 178 {"ipv4", RTE_ETH_FLOW_IPV4}, 179 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 180 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 181 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 182 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 183 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 184 {"ipv6", RTE_ETH_FLOW_IPV6}, 185 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 186 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 187 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 188 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 189 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 190 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 191 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 192 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 193 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 194 {"port", RTE_ETH_FLOW_PORT}, 195 {"vxlan", RTE_ETH_FLOW_VXLAN}, 196 {"geneve", RTE_ETH_FLOW_GENEVE}, 197 {"nvgre", RTE_ETH_FLOW_NVGRE}, 198 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 199 {"gtpu", RTE_ETH_FLOW_GTPU}, 200 }; 201 202 static void 203 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 204 { 205 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 206 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 207 printf("%s%s", name, buf); 208 } 209 210 static void 211 nic_xstats_display_periodic(portid_t port_id) 212 { 213 struct xstat_display_info *xstats_info; 214 uint64_t *prev_values, *curr_values; 215 uint64_t diff_value, value_rate; 216 struct timespec cur_time; 217 uint64_t *ids_supp; 218 size_t ids_supp_sz; 219 uint64_t diff_ns; 220 unsigned int i; 221 int rc; 222 223 xstats_info = &ports[port_id].xstats_info; 224 225 ids_supp_sz = xstats_info->ids_supp_sz; 226 if (ids_supp_sz == 0) 227 return; 228 229 printf("\n"); 230 231 ids_supp = xstats_info->ids_supp; 232 prev_values = xstats_info->prev_values; 233 curr_values = xstats_info->curr_values; 234 235 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 236 ids_supp_sz); 237 if (rc != (int)ids_supp_sz) { 238 fprintf(stderr, 239 "Failed to get values of %zu xstats for port %u - return code %d\n", 240 ids_supp_sz, port_id, rc); 241 return; 242 } 243 244 diff_ns = 0; 245 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 246 uint64_t ns; 247 248 ns = cur_time.tv_sec * NS_PER_SEC; 249 ns += cur_time.tv_nsec; 250 251 if (xstats_info->prev_ns != 0) 252 diff_ns = ns - xstats_info->prev_ns; 253 xstats_info->prev_ns = ns; 254 } 255 256 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 257 for (i = 0; i < ids_supp_sz; i++) { 258 diff_value = (curr_values[i] > prev_values[i]) ? 259 (curr_values[i] - prev_values[i]) : 0; 260 prev_values[i] = curr_values[i]; 261 value_rate = diff_ns > 0 ? 262 (double)diff_value / diff_ns * NS_PER_SEC : 0; 263 264 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 265 xstats_display[i].name, curr_values[i], value_rate); 266 } 267 } 268 269 void 270 nic_stats_display(portid_t port_id) 271 { 272 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 273 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 274 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 275 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 276 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 277 struct timespec cur_time; 278 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 279 diff_ns; 280 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 281 struct rte_eth_stats stats; 282 static const char *nic_stats_border = "########################"; 283 int ret; 284 285 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 286 print_valid_ports(); 287 return; 288 } 289 ret = rte_eth_stats_get(port_id, &stats); 290 if (ret != 0) { 291 fprintf(stderr, 292 "%s: Error: failed to get stats (port %u): %d", 293 __func__, port_id, ret); 294 return; 295 } 296 printf("\n %s NIC statistics for port %-2d %s\n", 297 nic_stats_border, port_id, nic_stats_border); 298 299 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 300 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 301 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 302 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 303 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 304 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 305 306 diff_ns = 0; 307 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 308 uint64_t ns; 309 310 ns = cur_time.tv_sec * NS_PER_SEC; 311 ns += cur_time.tv_nsec; 312 313 if (prev_ns[port_id] != 0) 314 diff_ns = ns - prev_ns[port_id]; 315 prev_ns[port_id] = ns; 316 } 317 318 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 319 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 320 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 321 (stats.opackets - prev_pkts_tx[port_id]) : 0; 322 prev_pkts_rx[port_id] = stats.ipackets; 323 prev_pkts_tx[port_id] = stats.opackets; 324 mpps_rx = diff_ns > 0 ? 325 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 326 mpps_tx = diff_ns > 0 ? 327 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 328 329 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 330 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 331 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 332 (stats.obytes - prev_bytes_tx[port_id]) : 0; 333 prev_bytes_rx[port_id] = stats.ibytes; 334 prev_bytes_tx[port_id] = stats.obytes; 335 mbps_rx = diff_ns > 0 ? 336 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 337 mbps_tx = diff_ns > 0 ? 338 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 339 340 printf("\n Throughput (since last show)\n"); 341 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 342 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 343 mpps_tx, mbps_tx * 8); 344 345 if (xstats_display_num > 0) 346 nic_xstats_display_periodic(port_id); 347 348 printf(" %s############################%s\n", 349 nic_stats_border, nic_stats_border); 350 } 351 352 void 353 nic_stats_clear(portid_t port_id) 354 { 355 int ret; 356 357 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 358 print_valid_ports(); 359 return; 360 } 361 362 ret = rte_eth_stats_reset(port_id); 363 if (ret != 0) { 364 fprintf(stderr, 365 "%s: Error: failed to reset stats (port %u): %s", 366 __func__, port_id, strerror(-ret)); 367 return; 368 } 369 370 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 371 if (ret != 0) { 372 if (ret < 0) 373 ret = -ret; 374 fprintf(stderr, 375 "%s: Error: failed to get stats (port %u): %s", 376 __func__, port_id, strerror(ret)); 377 return; 378 } 379 printf("\n NIC statistics for port %d cleared\n", port_id); 380 } 381 382 void 383 nic_xstats_display(portid_t port_id) 384 { 385 struct rte_eth_xstat *xstats; 386 int cnt_xstats, idx_xstat; 387 struct rte_eth_xstat_name *xstats_names; 388 389 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 390 print_valid_ports(); 391 return; 392 } 393 printf("###### NIC extended statistics for port %-2d\n", port_id); 394 if (!rte_eth_dev_is_valid_port(port_id)) { 395 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 396 return; 397 } 398 399 /* Get count */ 400 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 401 if (cnt_xstats < 0) { 402 fprintf(stderr, "Error: Cannot get count of xstats\n"); 403 return; 404 } 405 406 /* Get id-name lookup table */ 407 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 408 if (xstats_names == NULL) { 409 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 410 return; 411 } 412 if (cnt_xstats != rte_eth_xstats_get_names( 413 port_id, xstats_names, cnt_xstats)) { 414 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 415 free(xstats_names); 416 return; 417 } 418 419 /* Get stats themselves */ 420 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 421 if (xstats == NULL) { 422 fprintf(stderr, "Cannot allocate memory for xstats\n"); 423 free(xstats_names); 424 return; 425 } 426 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 427 fprintf(stderr, "Error: Unable to get xstats\n"); 428 free(xstats_names); 429 free(xstats); 430 return; 431 } 432 433 /* Display xstats */ 434 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 435 if (xstats_hide_zero && !xstats[idx_xstat].value) 436 continue; 437 printf("%s: %"PRIu64"\n", 438 xstats_names[idx_xstat].name, 439 xstats[idx_xstat].value); 440 } 441 free(xstats_names); 442 free(xstats); 443 } 444 445 void 446 nic_xstats_clear(portid_t port_id) 447 { 448 int ret; 449 450 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 451 print_valid_ports(); 452 return; 453 } 454 455 ret = rte_eth_xstats_reset(port_id); 456 if (ret != 0) { 457 fprintf(stderr, 458 "%s: Error: failed to reset xstats (port %u): %s\n", 459 __func__, port_id, strerror(-ret)); 460 return; 461 } 462 463 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 464 if (ret != 0) { 465 if (ret < 0) 466 ret = -ret; 467 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 468 __func__, port_id, strerror(ret)); 469 return; 470 } 471 } 472 473 static const char * 474 get_queue_state_name(uint8_t queue_state) 475 { 476 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 477 return "stopped"; 478 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 479 return "started"; 480 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 481 return "hairpin"; 482 else 483 return "unknown"; 484 } 485 486 void 487 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 488 { 489 struct rte_eth_burst_mode mode; 490 struct rte_eth_rxq_info qinfo; 491 int32_t rc; 492 static const char *info_border = "*********************"; 493 494 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 495 if (rc != 0) { 496 fprintf(stderr, 497 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 498 port_id, queue_id, strerror(-rc), rc); 499 return; 500 } 501 502 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 503 info_border, port_id, queue_id, info_border); 504 505 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 506 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 507 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 508 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 509 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 510 printf("\nRX drop packets: %s", 511 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 512 printf("\nRX deferred start: %s", 513 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 514 printf("\nRX scattered packets: %s", 515 (qinfo.scattered_rx != 0) ? "on" : "off"); 516 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 517 if (qinfo.rx_buf_size != 0) 518 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 519 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 520 521 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 522 printf("\nBurst mode: %s%s", 523 mode.info, 524 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 525 " (per queue)" : ""); 526 527 printf("\n"); 528 } 529 530 void 531 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 532 { 533 struct rte_eth_burst_mode mode; 534 struct rte_eth_txq_info qinfo; 535 int32_t rc; 536 static const char *info_border = "*********************"; 537 538 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 539 if (rc != 0) { 540 fprintf(stderr, 541 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 542 port_id, queue_id, strerror(-rc), rc); 543 return; 544 } 545 546 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 547 info_border, port_id, queue_id, info_border); 548 549 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 550 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 551 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 552 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 553 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 554 printf("\nTX deferred start: %s", 555 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 556 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 557 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 558 559 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 560 printf("\nBurst mode: %s%s", 561 mode.info, 562 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 563 " (per queue)" : ""); 564 565 printf("\n"); 566 } 567 568 static int bus_match_all(const struct rte_bus *bus, const void *data) 569 { 570 RTE_SET_USED(bus); 571 RTE_SET_USED(data); 572 return 0; 573 } 574 575 static void 576 device_infos_display_speeds(uint32_t speed_capa) 577 { 578 printf("\n\tDevice speed capability:"); 579 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 580 printf(" Autonegotiate (all speeds)"); 581 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 582 printf(" Disable autonegotiate (fixed speed) "); 583 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 584 printf(" 10 Mbps half-duplex "); 585 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 586 printf(" 10 Mbps full-duplex "); 587 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 588 printf(" 100 Mbps half-duplex "); 589 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 590 printf(" 100 Mbps full-duplex "); 591 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 592 printf(" 1 Gbps "); 593 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 594 printf(" 2.5 Gbps "); 595 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 596 printf(" 5 Gbps "); 597 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 598 printf(" 10 Gbps "); 599 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 600 printf(" 20 Gbps "); 601 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 602 printf(" 25 Gbps "); 603 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 604 printf(" 40 Gbps "); 605 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 606 printf(" 50 Gbps "); 607 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 608 printf(" 56 Gbps "); 609 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 610 printf(" 100 Gbps "); 611 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 612 printf(" 200 Gbps "); 613 } 614 615 void 616 device_infos_display(const char *identifier) 617 { 618 static const char *info_border = "*********************"; 619 struct rte_bus *start = NULL, *next; 620 struct rte_dev_iterator dev_iter; 621 char name[RTE_ETH_NAME_MAX_LEN]; 622 struct rte_ether_addr mac_addr; 623 struct rte_device *dev; 624 struct rte_devargs da; 625 portid_t port_id; 626 struct rte_eth_dev_info dev_info; 627 char devstr[128]; 628 629 memset(&da, 0, sizeof(da)); 630 if (!identifier) 631 goto skip_parse; 632 633 if (rte_devargs_parsef(&da, "%s", identifier)) { 634 fprintf(stderr, "cannot parse identifier\n"); 635 return; 636 } 637 638 skip_parse: 639 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 640 641 start = next; 642 if (identifier && da.bus != next) 643 continue; 644 645 /* Skip buses that don't have iterate method */ 646 if (!next->dev_iterate) 647 continue; 648 649 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 650 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 651 652 if (!dev->driver) 653 continue; 654 /* Check for matching device if identifier is present */ 655 if (identifier && 656 strncmp(da.name, dev->name, strlen(dev->name))) 657 continue; 658 printf("\n%s Infos for device %s %s\n", 659 info_border, dev->name, info_border); 660 printf("Bus name: %s", dev->bus->name); 661 printf("\nDriver name: %s", dev->driver->name); 662 printf("\nDevargs: %s", 663 dev->devargs ? dev->devargs->args : ""); 664 printf("\nConnect to socket: %d", dev->numa_node); 665 printf("\n"); 666 667 /* List ports with matching device name */ 668 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 669 printf("\n\tPort id: %-2d", port_id); 670 if (eth_macaddr_get_print_err(port_id, 671 &mac_addr) == 0) 672 print_ethaddr("\n\tMAC address: ", 673 &mac_addr); 674 rte_eth_dev_get_name_by_port(port_id, name); 675 printf("\n\tDevice name: %s", name); 676 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 677 device_infos_display_speeds(dev_info.speed_capa); 678 printf("\n"); 679 } 680 } 681 }; 682 rte_devargs_reset(&da); 683 } 684 685 static void 686 print_dev_capabilities(uint64_t capabilities) 687 { 688 uint64_t single_capa; 689 int begin; 690 int end; 691 int bit; 692 693 if (capabilities == 0) 694 return; 695 696 begin = __builtin_ctzll(capabilities); 697 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 698 699 single_capa = 1ULL << begin; 700 for (bit = begin; bit < end; bit++) { 701 if (capabilities & single_capa) 702 printf(" %s", 703 rte_eth_dev_capability_name(single_capa)); 704 single_capa <<= 1; 705 } 706 } 707 708 uint64_t 709 str_to_rsstypes(const char *str) 710 { 711 uint16_t i; 712 713 for (i = 0; rss_type_table[i].str != NULL; i++) { 714 if (strcmp(rss_type_table[i].str, str) == 0) 715 return rss_type_table[i].rss_type; 716 } 717 718 return 0; 719 } 720 721 const char * 722 rsstypes_to_str(uint64_t rss_type) 723 { 724 uint16_t i; 725 726 for (i = 0; rss_type_table[i].str != NULL; i++) { 727 if (rss_type_table[i].rss_type == rss_type) 728 return rss_type_table[i].str; 729 } 730 731 return NULL; 732 } 733 734 static void 735 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line) 736 { 737 uint16_t user_defined_str_len; 738 uint16_t total_len = 0; 739 uint16_t str_len = 0; 740 uint64_t rss_offload; 741 uint16_t i; 742 743 for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) { 744 rss_offload = RTE_BIT64(i); 745 if ((offload_types & rss_offload) != 0) { 746 const char *p = rsstypes_to_str(rss_offload); 747 748 user_defined_str_len = 749 strlen("user-defined-") + (i / 10 + 1); 750 str_len = p ? strlen(p) : user_defined_str_len; 751 str_len += 2; /* add two spaces */ 752 if (total_len + str_len >= char_num_per_line) { 753 total_len = 0; 754 printf("\n"); 755 } 756 757 if (p) 758 printf(" %s", p); 759 else 760 printf(" user-defined-%u", i); 761 total_len += str_len; 762 } 763 } 764 printf("\n"); 765 } 766 767 void 768 port_infos_display(portid_t port_id) 769 { 770 struct rte_port *port; 771 struct rte_ether_addr mac_addr; 772 struct rte_eth_link link; 773 struct rte_eth_dev_info dev_info; 774 int vlan_offload; 775 struct rte_mempool * mp; 776 static const char *info_border = "*********************"; 777 uint16_t mtu; 778 char name[RTE_ETH_NAME_MAX_LEN]; 779 int ret; 780 char fw_version[ETHDEV_FWVERS_LEN]; 781 782 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 783 print_valid_ports(); 784 return; 785 } 786 port = &ports[port_id]; 787 ret = eth_link_get_nowait_print_err(port_id, &link); 788 if (ret < 0) 789 return; 790 791 ret = eth_dev_info_get_print_err(port_id, &dev_info); 792 if (ret != 0) 793 return; 794 795 printf("\n%s Infos for port %-2d %s\n", 796 info_border, port_id, info_border); 797 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 798 print_ethaddr("MAC address: ", &mac_addr); 799 rte_eth_dev_get_name_by_port(port_id, name); 800 printf("\nDevice name: %s", name); 801 printf("\nDriver name: %s", dev_info.driver_name); 802 803 if (rte_eth_dev_fw_version_get(port_id, fw_version, 804 ETHDEV_FWVERS_LEN) == 0) 805 printf("\nFirmware-version: %s", fw_version); 806 else 807 printf("\nFirmware-version: %s", "not available"); 808 809 if (dev_info.device->devargs && dev_info.device->devargs->args) 810 printf("\nDevargs: %s", dev_info.device->devargs->args); 811 printf("\nConnect to socket: %u", port->socket_id); 812 813 if (port_numa[port_id] != NUMA_NO_CONFIG) { 814 mp = mbuf_pool_find(port_numa[port_id], 0); 815 if (mp) 816 printf("\nmemory allocation on the socket: %d", 817 port_numa[port_id]); 818 } else 819 printf("\nmemory allocation on the socket: %u",port->socket_id); 820 821 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 822 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 823 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 824 ("full-duplex") : ("half-duplex")); 825 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 826 ("On") : ("Off")); 827 828 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 829 printf("MTU: %u\n", mtu); 830 831 printf("Promiscuous mode: %s\n", 832 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 833 printf("Allmulticast mode: %s\n", 834 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 835 printf("Maximum number of MAC addresses: %u\n", 836 (unsigned int)(port->dev_info.max_mac_addrs)); 837 printf("Maximum number of MAC addresses of hash filtering: %u\n", 838 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 839 840 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 841 if (vlan_offload >= 0){ 842 printf("VLAN offload: \n"); 843 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 844 printf(" strip on, "); 845 else 846 printf(" strip off, "); 847 848 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 849 printf("filter on, "); 850 else 851 printf("filter off, "); 852 853 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 854 printf("extend on, "); 855 else 856 printf("extend off, "); 857 858 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 859 printf("qinq strip on\n"); 860 else 861 printf("qinq strip off\n"); 862 } 863 864 if (dev_info.hash_key_size > 0) 865 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 866 if (dev_info.reta_size > 0) 867 printf("Redirection table size: %u\n", dev_info.reta_size); 868 if (!dev_info.flow_type_rss_offloads) 869 printf("No RSS offload flow type is supported.\n"); 870 else { 871 printf("Supported RSS offload flow types:\n"); 872 rss_offload_types_display(dev_info.flow_type_rss_offloads, 873 TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 874 } 875 876 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 877 printf("Maximum configurable length of RX packet: %u\n", 878 dev_info.max_rx_pktlen); 879 printf("Maximum configurable size of LRO aggregated packet: %u\n", 880 dev_info.max_lro_pkt_size); 881 if (dev_info.max_vfs) 882 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 883 if (dev_info.max_vmdq_pools) 884 printf("Maximum number of VMDq pools: %u\n", 885 dev_info.max_vmdq_pools); 886 887 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 888 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 889 printf("Max possible number of RXDs per queue: %hu\n", 890 dev_info.rx_desc_lim.nb_max); 891 printf("Min possible number of RXDs per queue: %hu\n", 892 dev_info.rx_desc_lim.nb_min); 893 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 894 895 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 896 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 897 printf("Max possible number of TXDs per queue: %hu\n", 898 dev_info.tx_desc_lim.nb_max); 899 printf("Min possible number of TXDs per queue: %hu\n", 900 dev_info.tx_desc_lim.nb_min); 901 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 902 printf("Max segment number per packet: %hu\n", 903 dev_info.tx_desc_lim.nb_seg_max); 904 printf("Max segment number per MTU/TSO: %hu\n", 905 dev_info.tx_desc_lim.nb_mtu_seg_max); 906 907 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 908 print_dev_capabilities(dev_info.dev_capa); 909 printf(" )\n"); 910 /* Show switch info only if valid switch domain and port id is set */ 911 if (dev_info.switch_info.domain_id != 912 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 913 if (dev_info.switch_info.name) 914 printf("Switch name: %s\n", dev_info.switch_info.name); 915 916 printf("Switch domain Id: %u\n", 917 dev_info.switch_info.domain_id); 918 printf("Switch Port Id: %u\n", 919 dev_info.switch_info.port_id); 920 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 921 printf("Switch Rx domain: %u\n", 922 dev_info.switch_info.rx_domain); 923 } 924 } 925 926 void 927 port_summary_header_display(void) 928 { 929 uint16_t port_number; 930 931 port_number = rte_eth_dev_count_avail(); 932 printf("Number of available ports: %i\n", port_number); 933 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 934 "Driver", "Status", "Link"); 935 } 936 937 void 938 port_summary_display(portid_t port_id) 939 { 940 struct rte_ether_addr mac_addr; 941 struct rte_eth_link link; 942 struct rte_eth_dev_info dev_info; 943 char name[RTE_ETH_NAME_MAX_LEN]; 944 int ret; 945 946 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 947 print_valid_ports(); 948 return; 949 } 950 951 ret = eth_link_get_nowait_print_err(port_id, &link); 952 if (ret < 0) 953 return; 954 955 ret = eth_dev_info_get_print_err(port_id, &dev_info); 956 if (ret != 0) 957 return; 958 959 rte_eth_dev_get_name_by_port(port_id, name); 960 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 961 if (ret != 0) 962 return; 963 964 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 965 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 966 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 967 rte_eth_link_speed_to_str(link.link_speed)); 968 } 969 970 void 971 port_eeprom_display(portid_t port_id) 972 { 973 struct rte_dev_eeprom_info einfo; 974 int ret; 975 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 976 print_valid_ports(); 977 return; 978 } 979 980 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 981 if (len_eeprom < 0) { 982 switch (len_eeprom) { 983 case -ENODEV: 984 fprintf(stderr, "port index %d invalid\n", port_id); 985 break; 986 case -ENOTSUP: 987 fprintf(stderr, "operation not supported by device\n"); 988 break; 989 case -EIO: 990 fprintf(stderr, "device is removed\n"); 991 break; 992 default: 993 fprintf(stderr, "Unable to get EEPROM: %d\n", 994 len_eeprom); 995 break; 996 } 997 return; 998 } 999 1000 einfo.offset = 0; 1001 einfo.length = len_eeprom; 1002 einfo.data = calloc(1, len_eeprom); 1003 if (!einfo.data) { 1004 fprintf(stderr, 1005 "Allocation of port %u eeprom data failed\n", 1006 port_id); 1007 return; 1008 } 1009 1010 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 1011 if (ret != 0) { 1012 switch (ret) { 1013 case -ENODEV: 1014 fprintf(stderr, "port index %d invalid\n", port_id); 1015 break; 1016 case -ENOTSUP: 1017 fprintf(stderr, "operation not supported by device\n"); 1018 break; 1019 case -EIO: 1020 fprintf(stderr, "device is removed\n"); 1021 break; 1022 default: 1023 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 1024 break; 1025 } 1026 free(einfo.data); 1027 return; 1028 } 1029 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1030 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 1031 free(einfo.data); 1032 } 1033 1034 void 1035 port_module_eeprom_display(portid_t port_id) 1036 { 1037 struct rte_eth_dev_module_info minfo; 1038 struct rte_dev_eeprom_info einfo; 1039 int ret; 1040 1041 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 1042 print_valid_ports(); 1043 return; 1044 } 1045 1046 1047 ret = rte_eth_dev_get_module_info(port_id, &minfo); 1048 if (ret != 0) { 1049 switch (ret) { 1050 case -ENODEV: 1051 fprintf(stderr, "port index %d invalid\n", port_id); 1052 break; 1053 case -ENOTSUP: 1054 fprintf(stderr, "operation not supported by device\n"); 1055 break; 1056 case -EIO: 1057 fprintf(stderr, "device is removed\n"); 1058 break; 1059 default: 1060 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1061 ret); 1062 break; 1063 } 1064 return; 1065 } 1066 1067 einfo.offset = 0; 1068 einfo.length = minfo.eeprom_len; 1069 einfo.data = calloc(1, minfo.eeprom_len); 1070 if (!einfo.data) { 1071 fprintf(stderr, 1072 "Allocation of port %u eeprom data failed\n", 1073 port_id); 1074 return; 1075 } 1076 1077 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 1078 if (ret != 0) { 1079 switch (ret) { 1080 case -ENODEV: 1081 fprintf(stderr, "port index %d invalid\n", port_id); 1082 break; 1083 case -ENOTSUP: 1084 fprintf(stderr, "operation not supported by device\n"); 1085 break; 1086 case -EIO: 1087 fprintf(stderr, "device is removed\n"); 1088 break; 1089 default: 1090 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1091 ret); 1092 break; 1093 } 1094 free(einfo.data); 1095 return; 1096 } 1097 1098 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1099 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1100 free(einfo.data); 1101 } 1102 1103 int 1104 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1105 { 1106 uint16_t pid; 1107 1108 if (port_id == (portid_t)RTE_PORT_ALL) 1109 return 0; 1110 1111 RTE_ETH_FOREACH_DEV(pid) 1112 if (port_id == pid) 1113 return 0; 1114 1115 if (warning == ENABLED_WARN) 1116 fprintf(stderr, "Invalid port %d\n", port_id); 1117 1118 return 1; 1119 } 1120 1121 void print_valid_ports(void) 1122 { 1123 portid_t pid; 1124 1125 printf("The valid ports array is ["); 1126 RTE_ETH_FOREACH_DEV(pid) { 1127 printf(" %d", pid); 1128 } 1129 printf(" ]\n"); 1130 } 1131 1132 static int 1133 vlan_id_is_invalid(uint16_t vlan_id) 1134 { 1135 if (vlan_id < 4096) 1136 return 0; 1137 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1138 return 1; 1139 } 1140 1141 static int 1142 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1143 { 1144 const struct rte_pci_device *pci_dev; 1145 const struct rte_bus *bus; 1146 uint64_t pci_len; 1147 1148 if (reg_off & 0x3) { 1149 fprintf(stderr, 1150 "Port register offset 0x%X not aligned on a 4-byte boundary\n", 1151 (unsigned int)reg_off); 1152 return 1; 1153 } 1154 1155 if (!ports[port_id].dev_info.device) { 1156 fprintf(stderr, "Invalid device\n"); 1157 return 0; 1158 } 1159 1160 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1161 if (bus && !strcmp(bus->name, "pci")) { 1162 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1163 } else { 1164 fprintf(stderr, "Not a PCI device\n"); 1165 return 1; 1166 } 1167 1168 pci_len = pci_dev->mem_resource[0].len; 1169 if (reg_off >= pci_len) { 1170 fprintf(stderr, 1171 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", 1172 port_id, (unsigned int)reg_off, (unsigned int)reg_off, 1173 pci_len); 1174 return 1; 1175 } 1176 return 0; 1177 } 1178 1179 static int 1180 reg_bit_pos_is_invalid(uint8_t bit_pos) 1181 { 1182 if (bit_pos <= 31) 1183 return 0; 1184 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); 1185 return 1; 1186 } 1187 1188 #define display_port_and_reg_off(port_id, reg_off) \ 1189 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1190 1191 static inline void 1192 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1193 { 1194 display_port_and_reg_off(port_id, (unsigned)reg_off); 1195 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1196 } 1197 1198 void 1199 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1200 { 1201 uint32_t reg_v; 1202 1203 1204 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1205 return; 1206 if (port_reg_off_is_invalid(port_id, reg_off)) 1207 return; 1208 if (reg_bit_pos_is_invalid(bit_x)) 1209 return; 1210 reg_v = port_id_pci_reg_read(port_id, reg_off); 1211 display_port_and_reg_off(port_id, (unsigned)reg_off); 1212 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1213 } 1214 1215 void 1216 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1217 uint8_t bit1_pos, uint8_t bit2_pos) 1218 { 1219 uint32_t reg_v; 1220 uint8_t l_bit; 1221 uint8_t h_bit; 1222 1223 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1224 return; 1225 if (port_reg_off_is_invalid(port_id, reg_off)) 1226 return; 1227 if (reg_bit_pos_is_invalid(bit1_pos)) 1228 return; 1229 if (reg_bit_pos_is_invalid(bit2_pos)) 1230 return; 1231 if (bit1_pos > bit2_pos) 1232 l_bit = bit2_pos, h_bit = bit1_pos; 1233 else 1234 l_bit = bit1_pos, h_bit = bit2_pos; 1235 1236 reg_v = port_id_pci_reg_read(port_id, reg_off); 1237 reg_v >>= l_bit; 1238 if (h_bit < 31) 1239 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1240 display_port_and_reg_off(port_id, (unsigned)reg_off); 1241 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1242 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1243 } 1244 1245 void 1246 port_reg_display(portid_t port_id, uint32_t reg_off) 1247 { 1248 uint32_t reg_v; 1249 1250 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1251 return; 1252 if (port_reg_off_is_invalid(port_id, reg_off)) 1253 return; 1254 reg_v = port_id_pci_reg_read(port_id, reg_off); 1255 display_port_reg_value(port_id, reg_off, reg_v); 1256 } 1257 1258 void 1259 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1260 uint8_t bit_v) 1261 { 1262 uint32_t reg_v; 1263 1264 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1265 return; 1266 if (port_reg_off_is_invalid(port_id, reg_off)) 1267 return; 1268 if (reg_bit_pos_is_invalid(bit_pos)) 1269 return; 1270 if (bit_v > 1) { 1271 fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n", 1272 (int) bit_v); 1273 return; 1274 } 1275 reg_v = port_id_pci_reg_read(port_id, reg_off); 1276 if (bit_v == 0) 1277 reg_v &= ~(1 << bit_pos); 1278 else 1279 reg_v |= (1 << bit_pos); 1280 port_id_pci_reg_write(port_id, reg_off, reg_v); 1281 display_port_reg_value(port_id, reg_off, reg_v); 1282 } 1283 1284 void 1285 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1286 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1287 { 1288 uint32_t max_v; 1289 uint32_t reg_v; 1290 uint8_t l_bit; 1291 uint8_t h_bit; 1292 1293 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1294 return; 1295 if (port_reg_off_is_invalid(port_id, reg_off)) 1296 return; 1297 if (reg_bit_pos_is_invalid(bit1_pos)) 1298 return; 1299 if (reg_bit_pos_is_invalid(bit2_pos)) 1300 return; 1301 if (bit1_pos > bit2_pos) 1302 l_bit = bit2_pos, h_bit = bit1_pos; 1303 else 1304 l_bit = bit1_pos, h_bit = bit2_pos; 1305 1306 if ((h_bit - l_bit) < 31) 1307 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1308 else 1309 max_v = 0xFFFFFFFF; 1310 1311 if (value > max_v) { 1312 fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n", 1313 (unsigned)value, (unsigned)value, 1314 (unsigned)max_v, (unsigned)max_v); 1315 return; 1316 } 1317 reg_v = port_id_pci_reg_read(port_id, reg_off); 1318 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1319 reg_v |= (value << l_bit); /* Set changed bits */ 1320 port_id_pci_reg_write(port_id, reg_off, reg_v); 1321 display_port_reg_value(port_id, reg_off, reg_v); 1322 } 1323 1324 void 1325 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1326 { 1327 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1328 return; 1329 if (port_reg_off_is_invalid(port_id, reg_off)) 1330 return; 1331 port_id_pci_reg_write(port_id, reg_off, reg_v); 1332 display_port_reg_value(port_id, reg_off, reg_v); 1333 } 1334 1335 static uint32_t 1336 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1337 { 1338 uint32_t overhead_len; 1339 1340 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1341 overhead_len = max_rx_pktlen - max_mtu; 1342 else 1343 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1344 1345 return overhead_len; 1346 } 1347 1348 static int 1349 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) 1350 { 1351 struct rte_eth_dev_info dev_info; 1352 uint32_t overhead_len; 1353 uint32_t frame_size; 1354 int ret; 1355 1356 ret = rte_eth_dev_info_get(port_id, &dev_info); 1357 if (ret != 0) 1358 return ret; 1359 1360 if (mtu < dev_info.min_mtu) { 1361 fprintf(stderr, 1362 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1363 mtu, dev_info.min_mtu, port_id); 1364 return -EINVAL; 1365 } 1366 if (mtu > dev_info.max_mtu) { 1367 fprintf(stderr, 1368 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1369 mtu, dev_info.max_mtu, port_id); 1370 return -EINVAL; 1371 } 1372 1373 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1374 dev_info.max_mtu); 1375 frame_size = mtu + overhead_len; 1376 if (frame_size > dev_info.max_rx_pktlen) { 1377 fprintf(stderr, 1378 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1379 frame_size, dev_info.max_rx_pktlen, port_id); 1380 return -EINVAL; 1381 } 1382 1383 return 0; 1384 } 1385 1386 void 1387 port_mtu_set(portid_t port_id, uint16_t mtu) 1388 { 1389 struct rte_port *port = &ports[port_id]; 1390 int diag; 1391 1392 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1393 return; 1394 1395 diag = eth_dev_validate_mtu(port_id, mtu); 1396 if (diag != 0) 1397 return; 1398 1399 if (port->need_reconfig == 0) { 1400 diag = rte_eth_dev_set_mtu(port_id, mtu); 1401 if (diag != 0) { 1402 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1403 return; 1404 } 1405 } 1406 1407 port->dev_conf.rxmode.mtu = mtu; 1408 } 1409 1410 /* Generic flow management functions. */ 1411 1412 static struct port_flow_tunnel * 1413 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1414 { 1415 struct port_flow_tunnel *flow_tunnel; 1416 1417 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1418 if (flow_tunnel->id == port_tunnel_id) 1419 goto out; 1420 } 1421 flow_tunnel = NULL; 1422 1423 out: 1424 return flow_tunnel; 1425 } 1426 1427 const char * 1428 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1429 { 1430 const char *type; 1431 switch (tunnel->type) { 1432 default: 1433 type = "unknown"; 1434 break; 1435 case RTE_FLOW_ITEM_TYPE_VXLAN: 1436 type = "vxlan"; 1437 break; 1438 case RTE_FLOW_ITEM_TYPE_GRE: 1439 type = "gre"; 1440 break; 1441 case RTE_FLOW_ITEM_TYPE_NVGRE: 1442 type = "nvgre"; 1443 break; 1444 case RTE_FLOW_ITEM_TYPE_GENEVE: 1445 type = "geneve"; 1446 break; 1447 } 1448 1449 return type; 1450 } 1451 1452 struct port_flow_tunnel * 1453 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1454 { 1455 struct rte_port *port = &ports[port_id]; 1456 struct port_flow_tunnel *flow_tunnel; 1457 1458 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1459 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1460 goto out; 1461 } 1462 flow_tunnel = NULL; 1463 1464 out: 1465 return flow_tunnel; 1466 } 1467 1468 void port_flow_tunnel_list(portid_t port_id) 1469 { 1470 struct rte_port *port = &ports[port_id]; 1471 struct port_flow_tunnel *flt; 1472 1473 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1474 printf("port %u tunnel #%u type=%s", 1475 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1476 if (flt->tunnel.tun_id) 1477 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1478 printf("\n"); 1479 } 1480 } 1481 1482 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1483 { 1484 struct rte_port *port = &ports[port_id]; 1485 struct port_flow_tunnel *flt; 1486 1487 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1488 if (flt->id == tunnel_id) 1489 break; 1490 } 1491 if (flt) { 1492 LIST_REMOVE(flt, chain); 1493 free(flt); 1494 printf("port %u: flow tunnel #%u destroyed\n", 1495 port_id, tunnel_id); 1496 } 1497 } 1498 1499 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1500 { 1501 struct rte_port *port = &ports[port_id]; 1502 enum rte_flow_item_type type; 1503 struct port_flow_tunnel *flt; 1504 1505 if (!strcmp(ops->type, "vxlan")) 1506 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1507 else if (!strcmp(ops->type, "gre")) 1508 type = RTE_FLOW_ITEM_TYPE_GRE; 1509 else if (!strcmp(ops->type, "nvgre")) 1510 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1511 else if (!strcmp(ops->type, "geneve")) 1512 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1513 else { 1514 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1515 ops->type); 1516 return; 1517 } 1518 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1519 if (flt->tunnel.type == type) 1520 break; 1521 } 1522 if (!flt) { 1523 flt = calloc(1, sizeof(*flt)); 1524 if (!flt) { 1525 fprintf(stderr, "failed to allocate port flt object\n"); 1526 return; 1527 } 1528 flt->tunnel.type = type; 1529 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1530 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1531 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1532 } 1533 printf("port %d: flow tunnel #%u type %s\n", 1534 port_id, flt->id, ops->type); 1535 } 1536 1537 /** Generate a port_flow entry from attributes/pattern/actions. */ 1538 static struct port_flow * 1539 port_flow_new(const struct rte_flow_attr *attr, 1540 const struct rte_flow_item *pattern, 1541 const struct rte_flow_action *actions, 1542 struct rte_flow_error *error) 1543 { 1544 const struct rte_flow_conv_rule rule = { 1545 .attr_ro = attr, 1546 .pattern_ro = pattern, 1547 .actions_ro = actions, 1548 }; 1549 struct port_flow *pf; 1550 int ret; 1551 1552 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1553 if (ret < 0) 1554 return NULL; 1555 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1556 if (!pf) { 1557 rte_flow_error_set 1558 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1559 "calloc() failed"); 1560 return NULL; 1561 } 1562 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1563 error) >= 0) 1564 return pf; 1565 free(pf); 1566 return NULL; 1567 } 1568 1569 /** Print a message out of a flow error. */ 1570 static int 1571 port_flow_complain(struct rte_flow_error *error) 1572 { 1573 static const char *const errstrlist[] = { 1574 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1575 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1576 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1577 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1578 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1579 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1580 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1581 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1582 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1583 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1584 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1585 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1586 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1587 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1588 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1589 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1590 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1591 }; 1592 const char *errstr; 1593 char buf[32]; 1594 int err = rte_errno; 1595 1596 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1597 !errstrlist[error->type]) 1598 errstr = "unknown type"; 1599 else 1600 errstr = errstrlist[error->type]; 1601 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1602 __func__, error->type, errstr, 1603 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1604 error->cause), buf) : "", 1605 error->message ? error->message : "(no stated reason)", 1606 rte_strerror(err)); 1607 1608 switch (error->type) { 1609 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1610 fprintf(stderr, "The status suggests the use of \"transfer\" " 1611 "as the possible cause of the failure. Make " 1612 "sure that the flow in question and its " 1613 "indirect components (if any) are managed " 1614 "via \"transfer\" proxy port. Use command " 1615 "\"show port (port_id) flow transfer proxy\" " 1616 "to figure out the proxy port ID\n"); 1617 break; 1618 default: 1619 break; 1620 } 1621 1622 return -err; 1623 } 1624 1625 static void 1626 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line) 1627 { 1628 uint16_t total_len = 0; 1629 uint16_t str_len; 1630 uint16_t i; 1631 1632 if (rss_types == 0) 1633 return; 1634 1635 for (i = 0; rss_type_table[i].str; i++) { 1636 if (rss_type_table[i].rss_type == 0) 1637 continue; 1638 1639 if ((rss_types & rss_type_table[i].rss_type) == 1640 rss_type_table[i].rss_type) { 1641 /* Contain two spaces */ 1642 str_len = strlen(rss_type_table[i].str) + 2; 1643 if (total_len + str_len > char_num_per_line) { 1644 printf("\n"); 1645 total_len = 0; 1646 } 1647 printf(" %s", rss_type_table[i].str); 1648 total_len += str_len; 1649 } 1650 } 1651 printf("\n"); 1652 } 1653 1654 static void 1655 rss_config_display(struct rte_flow_action_rss *rss_conf) 1656 { 1657 uint8_t i; 1658 1659 if (rss_conf == NULL) { 1660 fprintf(stderr, "Invalid rule\n"); 1661 return; 1662 } 1663 1664 printf("RSS:\n" 1665 " queues:"); 1666 if (rss_conf->queue_num == 0) 1667 printf(" none"); 1668 for (i = 0; i < rss_conf->queue_num; i++) 1669 printf(" %d", rss_conf->queue[i]); 1670 printf("\n"); 1671 1672 printf(" function: "); 1673 switch (rss_conf->func) { 1674 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1675 printf("default\n"); 1676 break; 1677 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1678 printf("toeplitz\n"); 1679 break; 1680 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1681 printf("simple_xor\n"); 1682 break; 1683 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1684 printf("symmetric_toeplitz\n"); 1685 break; 1686 default: 1687 printf("Unknown function\n"); 1688 return; 1689 } 1690 1691 printf(" types:\n"); 1692 if (rss_conf->types == 0) { 1693 printf(" none\n"); 1694 return; 1695 } 1696 rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 1697 } 1698 1699 static struct port_indirect_action * 1700 action_get_by_id(portid_t port_id, uint32_t id) 1701 { 1702 struct rte_port *port; 1703 struct port_indirect_action **ppia; 1704 struct port_indirect_action *pia = NULL; 1705 1706 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1707 port_id == (portid_t)RTE_PORT_ALL) 1708 return NULL; 1709 port = &ports[port_id]; 1710 ppia = &port->actions_list; 1711 while (*ppia) { 1712 if ((*ppia)->id == id) { 1713 pia = *ppia; 1714 break; 1715 } 1716 ppia = &(*ppia)->next; 1717 } 1718 if (!pia) 1719 fprintf(stderr, 1720 "Failed to find indirect action #%u on port %u\n", 1721 id, port_id); 1722 return pia; 1723 } 1724 1725 static int 1726 action_alloc(portid_t port_id, uint32_t id, 1727 struct port_indirect_action **action) 1728 { 1729 struct rte_port *port; 1730 struct port_indirect_action **ppia; 1731 struct port_indirect_action *pia = NULL; 1732 1733 *action = NULL; 1734 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1735 port_id == (portid_t)RTE_PORT_ALL) 1736 return -EINVAL; 1737 port = &ports[port_id]; 1738 if (id == UINT32_MAX) { 1739 /* taking first available ID */ 1740 if (port->actions_list) { 1741 if (port->actions_list->id == UINT32_MAX - 1) { 1742 fprintf(stderr, 1743 "Highest indirect action ID is already assigned, delete it first\n"); 1744 return -ENOMEM; 1745 } 1746 id = port->actions_list->id + 1; 1747 } else { 1748 id = 0; 1749 } 1750 } 1751 pia = calloc(1, sizeof(*pia)); 1752 if (!pia) { 1753 fprintf(stderr, 1754 "Allocation of port %u indirect action failed\n", 1755 port_id); 1756 return -ENOMEM; 1757 } 1758 ppia = &port->actions_list; 1759 while (*ppia && (*ppia)->id > id) 1760 ppia = &(*ppia)->next; 1761 if (*ppia && (*ppia)->id == id) { 1762 fprintf(stderr, 1763 "Indirect action #%u is already assigned, delete it first\n", 1764 id); 1765 free(pia); 1766 return -EINVAL; 1767 } 1768 pia->next = *ppia; 1769 pia->id = id; 1770 *ppia = pia; 1771 *action = pia; 1772 return 0; 1773 } 1774 1775 static int 1776 template_alloc(uint32_t id, struct port_template **template, 1777 struct port_template **list) 1778 { 1779 struct port_template *lst = *list; 1780 struct port_template **ppt; 1781 struct port_template *pt = NULL; 1782 1783 *template = NULL; 1784 if (id == UINT32_MAX) { 1785 /* taking first available ID */ 1786 if (lst) { 1787 if (lst->id == UINT32_MAX - 1) { 1788 printf("Highest template ID is already" 1789 " assigned, delete it first\n"); 1790 return -ENOMEM; 1791 } 1792 id = lst->id + 1; 1793 } else { 1794 id = 0; 1795 } 1796 } 1797 pt = calloc(1, sizeof(*pt)); 1798 if (!pt) { 1799 printf("Allocation of port template failed\n"); 1800 return -ENOMEM; 1801 } 1802 ppt = list; 1803 while (*ppt && (*ppt)->id > id) 1804 ppt = &(*ppt)->next; 1805 if (*ppt && (*ppt)->id == id) { 1806 printf("Template #%u is already assigned," 1807 " delete it first\n", id); 1808 free(pt); 1809 return -EINVAL; 1810 } 1811 pt->next = *ppt; 1812 pt->id = id; 1813 *ppt = pt; 1814 *template = pt; 1815 return 0; 1816 } 1817 1818 static int 1819 table_alloc(uint32_t id, struct port_table **table, 1820 struct port_table **list) 1821 { 1822 struct port_table *lst = *list; 1823 struct port_table **ppt; 1824 struct port_table *pt = NULL; 1825 1826 *table = NULL; 1827 if (id == UINT32_MAX) { 1828 /* taking first available ID */ 1829 if (lst) { 1830 if (lst->id == UINT32_MAX - 1) { 1831 printf("Highest table ID is already" 1832 " assigned, delete it first\n"); 1833 return -ENOMEM; 1834 } 1835 id = lst->id + 1; 1836 } else { 1837 id = 0; 1838 } 1839 } 1840 pt = calloc(1, sizeof(*pt)); 1841 if (!pt) { 1842 printf("Allocation of table failed\n"); 1843 return -ENOMEM; 1844 } 1845 ppt = list; 1846 while (*ppt && (*ppt)->id > id) 1847 ppt = &(*ppt)->next; 1848 if (*ppt && (*ppt)->id == id) { 1849 printf("Table #%u is already assigned," 1850 " delete it first\n", id); 1851 free(pt); 1852 return -EINVAL; 1853 } 1854 pt->next = *ppt; 1855 pt->id = id; 1856 *ppt = pt; 1857 *table = pt; 1858 return 0; 1859 } 1860 1861 /** Get info about flow management resources. */ 1862 int 1863 port_flow_get_info(portid_t port_id) 1864 { 1865 struct rte_flow_port_info port_info; 1866 struct rte_flow_queue_info queue_info; 1867 struct rte_flow_error error; 1868 1869 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1870 port_id == (portid_t)RTE_PORT_ALL) 1871 return -EINVAL; 1872 /* Poisoning to make sure PMDs update it in case of error. */ 1873 memset(&error, 0x99, sizeof(error)); 1874 memset(&port_info, 0, sizeof(port_info)); 1875 memset(&queue_info, 0, sizeof(queue_info)); 1876 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1877 return port_flow_complain(&error); 1878 printf("Flow engine resources on port %u:\n" 1879 "Number of queues: %d\n" 1880 "Size of queues: %d\n" 1881 "Number of counters: %d\n" 1882 "Number of aging objects: %d\n" 1883 "Number of meter actions: %d\n", 1884 port_id, port_info.max_nb_queues, 1885 queue_info.max_size, 1886 port_info.max_nb_counters, 1887 port_info.max_nb_aging_objects, 1888 port_info.max_nb_meters); 1889 return 0; 1890 } 1891 1892 /** Configure flow management resources. */ 1893 int 1894 port_flow_configure(portid_t port_id, 1895 const struct rte_flow_port_attr *port_attr, 1896 uint16_t nb_queue, 1897 const struct rte_flow_queue_attr *queue_attr) 1898 { 1899 struct rte_port *port; 1900 struct rte_flow_error error; 1901 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1902 int std_queue; 1903 1904 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1905 port_id == (portid_t)RTE_PORT_ALL) 1906 return -EINVAL; 1907 port = &ports[port_id]; 1908 port->queue_nb = nb_queue; 1909 port->queue_sz = queue_attr->size; 1910 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1911 attr_list[std_queue] = queue_attr; 1912 /* Poisoning to make sure PMDs update it in case of error. */ 1913 memset(&error, 0x66, sizeof(error)); 1914 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1915 return port_flow_complain(&error); 1916 printf("Configure flows on port %u: " 1917 "number of queues %d with %d elements\n", 1918 port_id, nb_queue, queue_attr->size); 1919 return 0; 1920 } 1921 1922 /** Create indirect action */ 1923 int 1924 port_action_handle_create(portid_t port_id, uint32_t id, 1925 const struct rte_flow_indir_action_conf *conf, 1926 const struct rte_flow_action *action) 1927 { 1928 struct port_indirect_action *pia; 1929 int ret; 1930 struct rte_flow_error error; 1931 1932 ret = action_alloc(port_id, id, &pia); 1933 if (ret) 1934 return ret; 1935 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1936 struct rte_flow_action_age *age = 1937 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1938 1939 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1940 age->context = &pia->age_type; 1941 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1942 struct rte_flow_action_conntrack *ct = 1943 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1944 1945 memcpy(ct, &conntrack_context, sizeof(*ct)); 1946 } 1947 /* Poisoning to make sure PMDs update it in case of error. */ 1948 memset(&error, 0x22, sizeof(error)); 1949 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1950 &error); 1951 if (!pia->handle) { 1952 uint32_t destroy_id = pia->id; 1953 port_action_handle_destroy(port_id, 1, &destroy_id); 1954 return port_flow_complain(&error); 1955 } 1956 pia->type = action->type; 1957 printf("Indirect action #%u created\n", pia->id); 1958 return 0; 1959 } 1960 1961 /** Destroy indirect action */ 1962 int 1963 port_action_handle_destroy(portid_t port_id, 1964 uint32_t n, 1965 const uint32_t *actions) 1966 { 1967 struct rte_port *port; 1968 struct port_indirect_action **tmp; 1969 uint32_t c = 0; 1970 int ret = 0; 1971 1972 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1973 port_id == (portid_t)RTE_PORT_ALL) 1974 return -EINVAL; 1975 port = &ports[port_id]; 1976 tmp = &port->actions_list; 1977 while (*tmp) { 1978 uint32_t i; 1979 1980 for (i = 0; i != n; ++i) { 1981 struct rte_flow_error error; 1982 struct port_indirect_action *pia = *tmp; 1983 1984 if (actions[i] != pia->id) 1985 continue; 1986 /* 1987 * Poisoning to make sure PMDs update it in case 1988 * of error. 1989 */ 1990 memset(&error, 0x33, sizeof(error)); 1991 1992 if (pia->handle && rte_flow_action_handle_destroy( 1993 port_id, pia->handle, &error)) { 1994 ret = port_flow_complain(&error); 1995 continue; 1996 } 1997 *tmp = pia->next; 1998 printf("Indirect action #%u destroyed\n", pia->id); 1999 free(pia); 2000 break; 2001 } 2002 if (i == n) 2003 tmp = &(*tmp)->next; 2004 ++c; 2005 } 2006 return ret; 2007 } 2008 2009 int 2010 port_action_handle_flush(portid_t port_id) 2011 { 2012 struct rte_port *port; 2013 struct port_indirect_action **tmp; 2014 int ret = 0; 2015 2016 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2017 port_id == (portid_t)RTE_PORT_ALL) 2018 return -EINVAL; 2019 port = &ports[port_id]; 2020 tmp = &port->actions_list; 2021 while (*tmp != NULL) { 2022 struct rte_flow_error error; 2023 struct port_indirect_action *pia = *tmp; 2024 2025 /* Poisoning to make sure PMDs update it in case of error. */ 2026 memset(&error, 0x44, sizeof(error)); 2027 if (pia->handle != NULL && 2028 rte_flow_action_handle_destroy 2029 (port_id, pia->handle, &error) != 0) { 2030 printf("Indirect action #%u not destroyed\n", pia->id); 2031 ret = port_flow_complain(&error); 2032 tmp = &pia->next; 2033 } else { 2034 *tmp = pia->next; 2035 free(pia); 2036 } 2037 } 2038 return ret; 2039 } 2040 2041 /** Get indirect action by port + id */ 2042 struct rte_flow_action_handle * 2043 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 2044 { 2045 2046 struct port_indirect_action *pia = action_get_by_id(port_id, id); 2047 2048 return (pia) ? pia->handle : NULL; 2049 } 2050 2051 /** Update indirect action */ 2052 int 2053 port_action_handle_update(portid_t port_id, uint32_t id, 2054 const struct rte_flow_action *action) 2055 { 2056 struct rte_flow_error error; 2057 struct rte_flow_action_handle *action_handle; 2058 struct port_indirect_action *pia; 2059 const void *update; 2060 2061 action_handle = port_action_handle_get_by_id(port_id, id); 2062 if (!action_handle) 2063 return -EINVAL; 2064 pia = action_get_by_id(port_id, id); 2065 if (!pia) 2066 return -EINVAL; 2067 switch (pia->type) { 2068 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 2069 update = action->conf; 2070 break; 2071 default: 2072 update = action; 2073 break; 2074 } 2075 if (rte_flow_action_handle_update(port_id, action_handle, update, 2076 &error)) { 2077 return port_flow_complain(&error); 2078 } 2079 printf("Indirect action #%u updated\n", id); 2080 return 0; 2081 } 2082 2083 int 2084 port_action_handle_query(portid_t port_id, uint32_t id) 2085 { 2086 struct rte_flow_error error; 2087 struct port_indirect_action *pia; 2088 union { 2089 struct rte_flow_query_count count; 2090 struct rte_flow_query_age age; 2091 struct rte_flow_action_conntrack ct; 2092 } query; 2093 2094 pia = action_get_by_id(port_id, id); 2095 if (!pia) 2096 return -EINVAL; 2097 switch (pia->type) { 2098 case RTE_FLOW_ACTION_TYPE_AGE: 2099 case RTE_FLOW_ACTION_TYPE_COUNT: 2100 break; 2101 default: 2102 fprintf(stderr, 2103 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2104 id, pia->type, port_id); 2105 return -ENOTSUP; 2106 } 2107 /* Poisoning to make sure PMDs update it in case of error. */ 2108 memset(&error, 0x55, sizeof(error)); 2109 memset(&query, 0, sizeof(query)); 2110 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 2111 return port_flow_complain(&error); 2112 switch (pia->type) { 2113 case RTE_FLOW_ACTION_TYPE_AGE: 2114 printf("Indirect AGE action:\n" 2115 " aged: %u\n" 2116 " sec_since_last_hit_valid: %u\n" 2117 " sec_since_last_hit: %" PRIu32 "\n", 2118 query.age.aged, 2119 query.age.sec_since_last_hit_valid, 2120 query.age.sec_since_last_hit); 2121 break; 2122 case RTE_FLOW_ACTION_TYPE_COUNT: 2123 printf("Indirect COUNT action:\n" 2124 " hits_set: %u\n" 2125 " bytes_set: %u\n" 2126 " hits: %" PRIu64 "\n" 2127 " bytes: %" PRIu64 "\n", 2128 query.count.hits_set, 2129 query.count.bytes_set, 2130 query.count.hits, 2131 query.count.bytes); 2132 break; 2133 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 2134 printf("Conntrack Context:\n" 2135 " Peer: %u, Flow dir: %s, Enable: %u\n" 2136 " Live: %u, SACK: %u, CACK: %u\n" 2137 " Packet dir: %s, Liberal: %u, State: %u\n" 2138 " Factor: %u, Retrans: %u, TCP flags: %u\n" 2139 " Last Seq: %u, Last ACK: %u\n" 2140 " Last Win: %u, Last End: %u\n", 2141 query.ct.peer_port, 2142 query.ct.is_original_dir ? "Original" : "Reply", 2143 query.ct.enable, query.ct.live_connection, 2144 query.ct.selective_ack, query.ct.challenge_ack_passed, 2145 query.ct.last_direction ? "Original" : "Reply", 2146 query.ct.liberal_mode, query.ct.state, 2147 query.ct.max_ack_window, query.ct.retransmission_limit, 2148 query.ct.last_index, query.ct.last_seq, 2149 query.ct.last_ack, query.ct.last_window, 2150 query.ct.last_end); 2151 printf(" Original Dir:\n" 2152 " scale: %u, fin: %u, ack seen: %u\n" 2153 " unacked data: %u\n Sent end: %u," 2154 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2155 query.ct.original_dir.scale, 2156 query.ct.original_dir.close_initiated, 2157 query.ct.original_dir.last_ack_seen, 2158 query.ct.original_dir.data_unacked, 2159 query.ct.original_dir.sent_end, 2160 query.ct.original_dir.reply_end, 2161 query.ct.original_dir.max_win, 2162 query.ct.original_dir.max_ack); 2163 printf(" Reply Dir:\n" 2164 " scale: %u, fin: %u, ack seen: %u\n" 2165 " unacked data: %u\n Sent end: %u," 2166 " Reply end: %u, Max win: %u, Max ACK: %u\n", 2167 query.ct.reply_dir.scale, 2168 query.ct.reply_dir.close_initiated, 2169 query.ct.reply_dir.last_ack_seen, 2170 query.ct.reply_dir.data_unacked, 2171 query.ct.reply_dir.sent_end, 2172 query.ct.reply_dir.reply_end, 2173 query.ct.reply_dir.max_win, 2174 query.ct.reply_dir.max_ack); 2175 break; 2176 default: 2177 fprintf(stderr, 2178 "Indirect action %u (type: %d) on port %u doesn't support query\n", 2179 id, pia->type, port_id); 2180 break; 2181 } 2182 return 0; 2183 } 2184 2185 static struct port_flow_tunnel * 2186 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 2187 const struct rte_flow_item *pattern, 2188 const struct rte_flow_action *actions, 2189 const struct tunnel_ops *tunnel_ops) 2190 { 2191 int ret; 2192 struct rte_port *port; 2193 struct port_flow_tunnel *pft; 2194 struct rte_flow_error error; 2195 2196 port = &ports[port_id]; 2197 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2198 if (!pft) { 2199 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2200 tunnel_ops->id); 2201 return NULL; 2202 } 2203 if (tunnel_ops->actions) { 2204 uint32_t num_actions; 2205 const struct rte_flow_action *aptr; 2206 2207 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2208 &pft->pmd_actions, 2209 &pft->num_pmd_actions, 2210 &error); 2211 if (ret) { 2212 port_flow_complain(&error); 2213 return NULL; 2214 } 2215 for (aptr = actions, num_actions = 1; 2216 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2217 aptr++, num_actions++); 2218 pft->actions = malloc( 2219 (num_actions + pft->num_pmd_actions) * 2220 sizeof(actions[0])); 2221 if (!pft->actions) { 2222 rte_flow_tunnel_action_decap_release( 2223 port_id, pft->actions, 2224 pft->num_pmd_actions, &error); 2225 return NULL; 2226 } 2227 rte_memcpy(pft->actions, pft->pmd_actions, 2228 pft->num_pmd_actions * sizeof(actions[0])); 2229 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2230 num_actions * sizeof(actions[0])); 2231 } 2232 if (tunnel_ops->items) { 2233 uint32_t num_items; 2234 const struct rte_flow_item *iptr; 2235 2236 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2237 &pft->pmd_items, 2238 &pft->num_pmd_items, 2239 &error); 2240 if (ret) { 2241 port_flow_complain(&error); 2242 return NULL; 2243 } 2244 for (iptr = pattern, num_items = 1; 2245 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2246 iptr++, num_items++); 2247 pft->items = malloc((num_items + pft->num_pmd_items) * 2248 sizeof(pattern[0])); 2249 if (!pft->items) { 2250 rte_flow_tunnel_item_release( 2251 port_id, pft->pmd_items, 2252 pft->num_pmd_items, &error); 2253 return NULL; 2254 } 2255 rte_memcpy(pft->items, pft->pmd_items, 2256 pft->num_pmd_items * sizeof(pattern[0])); 2257 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2258 num_items * sizeof(pattern[0])); 2259 } 2260 2261 return pft; 2262 } 2263 2264 static void 2265 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2266 const struct tunnel_ops *tunnel_ops, 2267 struct port_flow_tunnel *pft) 2268 { 2269 struct rte_flow_error error; 2270 2271 if (tunnel_ops->actions) { 2272 free(pft->actions); 2273 rte_flow_tunnel_action_decap_release( 2274 port_id, pft->pmd_actions, 2275 pft->num_pmd_actions, &error); 2276 pft->actions = NULL; 2277 pft->pmd_actions = NULL; 2278 } 2279 if (tunnel_ops->items) { 2280 free(pft->items); 2281 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2282 pft->num_pmd_items, 2283 &error); 2284 pft->items = NULL; 2285 pft->pmd_items = NULL; 2286 } 2287 } 2288 2289 /** Add port meter policy */ 2290 int 2291 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2292 const struct rte_flow_action *actions) 2293 { 2294 struct rte_mtr_error error; 2295 const struct rte_flow_action *act = actions; 2296 const struct rte_flow_action *start; 2297 struct rte_mtr_meter_policy_params policy; 2298 uint32_t i = 0, act_n; 2299 int ret; 2300 2301 for (i = 0; i < RTE_COLORS; i++) { 2302 for (act_n = 0, start = act; 2303 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2304 act_n++; 2305 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2306 policy.actions[i] = start; 2307 else 2308 policy.actions[i] = NULL; 2309 act++; 2310 } 2311 ret = rte_mtr_meter_policy_add(port_id, 2312 policy_id, 2313 &policy, &error); 2314 if (ret) 2315 print_mtr_err_msg(&error); 2316 return ret; 2317 } 2318 2319 /** Validate flow rule. */ 2320 int 2321 port_flow_validate(portid_t port_id, 2322 const struct rte_flow_attr *attr, 2323 const struct rte_flow_item *pattern, 2324 const struct rte_flow_action *actions, 2325 const struct tunnel_ops *tunnel_ops) 2326 { 2327 struct rte_flow_error error; 2328 struct port_flow_tunnel *pft = NULL; 2329 int ret; 2330 2331 /* Poisoning to make sure PMDs update it in case of error. */ 2332 memset(&error, 0x11, sizeof(error)); 2333 if (tunnel_ops->enabled) { 2334 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2335 actions, tunnel_ops); 2336 if (!pft) 2337 return -ENOENT; 2338 if (pft->items) 2339 pattern = pft->items; 2340 if (pft->actions) 2341 actions = pft->actions; 2342 } 2343 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2344 if (tunnel_ops->enabled) 2345 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2346 if (ret) 2347 return port_flow_complain(&error); 2348 printf("Flow rule validated\n"); 2349 return 0; 2350 } 2351 2352 /** Return age action structure if exists, otherwise NULL. */ 2353 static struct rte_flow_action_age * 2354 age_action_get(const struct rte_flow_action *actions) 2355 { 2356 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2357 switch (actions->type) { 2358 case RTE_FLOW_ACTION_TYPE_AGE: 2359 return (struct rte_flow_action_age *) 2360 (uintptr_t)actions->conf; 2361 default: 2362 break; 2363 } 2364 } 2365 return NULL; 2366 } 2367 2368 /** Create pattern template */ 2369 int 2370 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2371 const struct rte_flow_pattern_template_attr *attr, 2372 const struct rte_flow_item *pattern) 2373 { 2374 struct rte_port *port; 2375 struct port_template *pit; 2376 int ret; 2377 struct rte_flow_error error; 2378 2379 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2380 port_id == (portid_t)RTE_PORT_ALL) 2381 return -EINVAL; 2382 port = &ports[port_id]; 2383 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2384 if (ret) 2385 return ret; 2386 /* Poisoning to make sure PMDs update it in case of error. */ 2387 memset(&error, 0x22, sizeof(error)); 2388 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2389 attr, pattern, &error); 2390 if (!pit->template.pattern_template) { 2391 uint32_t destroy_id = pit->id; 2392 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2393 return port_flow_complain(&error); 2394 } 2395 printf("Pattern template #%u created\n", pit->id); 2396 return 0; 2397 } 2398 2399 /** Destroy pattern template */ 2400 int 2401 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2402 const uint32_t *template) 2403 { 2404 struct rte_port *port; 2405 struct port_template **tmp; 2406 uint32_t c = 0; 2407 int ret = 0; 2408 2409 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2410 port_id == (portid_t)RTE_PORT_ALL) 2411 return -EINVAL; 2412 port = &ports[port_id]; 2413 tmp = &port->pattern_templ_list; 2414 while (*tmp) { 2415 uint32_t i; 2416 2417 for (i = 0; i != n; ++i) { 2418 struct rte_flow_error error; 2419 struct port_template *pit = *tmp; 2420 2421 if (template[i] != pit->id) 2422 continue; 2423 /* 2424 * Poisoning to make sure PMDs update it in case 2425 * of error. 2426 */ 2427 memset(&error, 0x33, sizeof(error)); 2428 2429 if (pit->template.pattern_template && 2430 rte_flow_pattern_template_destroy(port_id, 2431 pit->template.pattern_template, 2432 &error)) { 2433 ret = port_flow_complain(&error); 2434 continue; 2435 } 2436 *tmp = pit->next; 2437 printf("Pattern template #%u destroyed\n", pit->id); 2438 free(pit); 2439 break; 2440 } 2441 if (i == n) 2442 tmp = &(*tmp)->next; 2443 ++c; 2444 } 2445 return ret; 2446 } 2447 2448 /** Create actions template */ 2449 int 2450 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2451 const struct rte_flow_actions_template_attr *attr, 2452 const struct rte_flow_action *actions, 2453 const struct rte_flow_action *masks) 2454 { 2455 struct rte_port *port; 2456 struct port_template *pat; 2457 int ret; 2458 struct rte_flow_error error; 2459 2460 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2461 port_id == (portid_t)RTE_PORT_ALL) 2462 return -EINVAL; 2463 port = &ports[port_id]; 2464 ret = template_alloc(id, &pat, &port->actions_templ_list); 2465 if (ret) 2466 return ret; 2467 /* Poisoning to make sure PMDs update it in case of error. */ 2468 memset(&error, 0x22, sizeof(error)); 2469 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2470 attr, actions, masks, &error); 2471 if (!pat->template.actions_template) { 2472 uint32_t destroy_id = pat->id; 2473 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2474 return port_flow_complain(&error); 2475 } 2476 printf("Actions template #%u created\n", pat->id); 2477 return 0; 2478 } 2479 2480 /** Destroy actions template */ 2481 int 2482 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2483 const uint32_t *template) 2484 { 2485 struct rte_port *port; 2486 struct port_template **tmp; 2487 uint32_t c = 0; 2488 int ret = 0; 2489 2490 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2491 port_id == (portid_t)RTE_PORT_ALL) 2492 return -EINVAL; 2493 port = &ports[port_id]; 2494 tmp = &port->actions_templ_list; 2495 while (*tmp) { 2496 uint32_t i; 2497 2498 for (i = 0; i != n; ++i) { 2499 struct rte_flow_error error; 2500 struct port_template *pat = *tmp; 2501 2502 if (template[i] != pat->id) 2503 continue; 2504 /* 2505 * Poisoning to make sure PMDs update it in case 2506 * of error. 2507 */ 2508 memset(&error, 0x33, sizeof(error)); 2509 2510 if (pat->template.actions_template && 2511 rte_flow_actions_template_destroy(port_id, 2512 pat->template.actions_template, &error)) { 2513 ret = port_flow_complain(&error); 2514 continue; 2515 } 2516 *tmp = pat->next; 2517 printf("Actions template #%u destroyed\n", pat->id); 2518 free(pat); 2519 break; 2520 } 2521 if (i == n) 2522 tmp = &(*tmp)->next; 2523 ++c; 2524 } 2525 return ret; 2526 } 2527 2528 /** Create table */ 2529 int 2530 port_flow_template_table_create(portid_t port_id, uint32_t id, 2531 const struct rte_flow_template_table_attr *table_attr, 2532 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2533 uint32_t nb_actions_templates, uint32_t *actions_templates) 2534 { 2535 struct rte_port *port; 2536 struct port_table *pt; 2537 struct port_template *temp = NULL; 2538 int ret; 2539 uint32_t i; 2540 struct rte_flow_error error; 2541 struct rte_flow_pattern_template 2542 *flow_pattern_templates[nb_pattern_templates]; 2543 struct rte_flow_actions_template 2544 *flow_actions_templates[nb_actions_templates]; 2545 2546 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2547 port_id == (portid_t)RTE_PORT_ALL) 2548 return -EINVAL; 2549 port = &ports[port_id]; 2550 for (i = 0; i < nb_pattern_templates; ++i) { 2551 bool found = false; 2552 temp = port->pattern_templ_list; 2553 while (temp) { 2554 if (pattern_templates[i] == temp->id) { 2555 flow_pattern_templates[i] = 2556 temp->template.pattern_template; 2557 found = true; 2558 break; 2559 } 2560 temp = temp->next; 2561 } 2562 if (!found) { 2563 printf("Pattern template #%u is invalid\n", 2564 pattern_templates[i]); 2565 return -EINVAL; 2566 } 2567 } 2568 for (i = 0; i < nb_actions_templates; ++i) { 2569 bool found = false; 2570 temp = port->actions_templ_list; 2571 while (temp) { 2572 if (actions_templates[i] == temp->id) { 2573 flow_actions_templates[i] = 2574 temp->template.actions_template; 2575 found = true; 2576 break; 2577 } 2578 temp = temp->next; 2579 } 2580 if (!found) { 2581 printf("Actions template #%u is invalid\n", 2582 actions_templates[i]); 2583 return -EINVAL; 2584 } 2585 } 2586 ret = table_alloc(id, &pt, &port->table_list); 2587 if (ret) 2588 return ret; 2589 /* Poisoning to make sure PMDs update it in case of error. */ 2590 memset(&error, 0x22, sizeof(error)); 2591 pt->table = rte_flow_template_table_create(port_id, table_attr, 2592 flow_pattern_templates, nb_pattern_templates, 2593 flow_actions_templates, nb_actions_templates, 2594 &error); 2595 2596 if (!pt->table) { 2597 uint32_t destroy_id = pt->id; 2598 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2599 return port_flow_complain(&error); 2600 } 2601 pt->nb_pattern_templates = nb_pattern_templates; 2602 pt->nb_actions_templates = nb_actions_templates; 2603 printf("Template table #%u created\n", pt->id); 2604 return 0; 2605 } 2606 2607 /** Destroy table */ 2608 int 2609 port_flow_template_table_destroy(portid_t port_id, 2610 uint32_t n, const uint32_t *table) 2611 { 2612 struct rte_port *port; 2613 struct port_table **tmp; 2614 uint32_t c = 0; 2615 int ret = 0; 2616 2617 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2618 port_id == (portid_t)RTE_PORT_ALL) 2619 return -EINVAL; 2620 port = &ports[port_id]; 2621 tmp = &port->table_list; 2622 while (*tmp) { 2623 uint32_t i; 2624 2625 for (i = 0; i != n; ++i) { 2626 struct rte_flow_error error; 2627 struct port_table *pt = *tmp; 2628 2629 if (table[i] != pt->id) 2630 continue; 2631 /* 2632 * Poisoning to make sure PMDs update it in case 2633 * of error. 2634 */ 2635 memset(&error, 0x33, sizeof(error)); 2636 2637 if (pt->table && 2638 rte_flow_template_table_destroy(port_id, 2639 pt->table, 2640 &error)) { 2641 ret = port_flow_complain(&error); 2642 continue; 2643 } 2644 *tmp = pt->next; 2645 printf("Template table #%u destroyed\n", pt->id); 2646 free(pt); 2647 break; 2648 } 2649 if (i == n) 2650 tmp = &(*tmp)->next; 2651 ++c; 2652 } 2653 return ret; 2654 } 2655 2656 /** Enqueue create flow rule operation. */ 2657 int 2658 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2659 bool postpone, uint32_t table_id, 2660 uint32_t pattern_idx, uint32_t actions_idx, 2661 const struct rte_flow_item *pattern, 2662 const struct rte_flow_action *actions) 2663 { 2664 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2665 struct rte_flow *flow; 2666 struct rte_port *port; 2667 struct port_flow *pf; 2668 struct port_table *pt; 2669 uint32_t id = 0; 2670 bool found; 2671 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2672 struct rte_flow_action_age *age = age_action_get(actions); 2673 2674 port = &ports[port_id]; 2675 if (port->flow_list) { 2676 if (port->flow_list->id == UINT32_MAX) { 2677 printf("Highest rule ID is already assigned," 2678 " delete it first"); 2679 return -ENOMEM; 2680 } 2681 id = port->flow_list->id + 1; 2682 } 2683 2684 if (queue_id >= port->queue_nb) { 2685 printf("Queue #%u is invalid\n", queue_id); 2686 return -EINVAL; 2687 } 2688 2689 found = false; 2690 pt = port->table_list; 2691 while (pt) { 2692 if (table_id == pt->id) { 2693 found = true; 2694 break; 2695 } 2696 pt = pt->next; 2697 } 2698 if (!found) { 2699 printf("Table #%u is invalid\n", table_id); 2700 return -EINVAL; 2701 } 2702 2703 if (pattern_idx >= pt->nb_pattern_templates) { 2704 printf("Pattern template index #%u is invalid," 2705 " %u templates present in the table\n", 2706 pattern_idx, pt->nb_pattern_templates); 2707 return -EINVAL; 2708 } 2709 if (actions_idx >= pt->nb_actions_templates) { 2710 printf("Actions template index #%u is invalid," 2711 " %u templates present in the table\n", 2712 actions_idx, pt->nb_actions_templates); 2713 return -EINVAL; 2714 } 2715 2716 pf = port_flow_new(NULL, pattern, actions, &error); 2717 if (!pf) 2718 return port_flow_complain(&error); 2719 if (age) { 2720 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2721 age->context = &pf->age_type; 2722 } 2723 /* Poisoning to make sure PMDs update it in case of error. */ 2724 memset(&error, 0x11, sizeof(error)); 2725 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2726 pattern, pattern_idx, actions, actions_idx, NULL, &error); 2727 if (!flow) { 2728 uint32_t flow_id = pf->id; 2729 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2730 return port_flow_complain(&error); 2731 } 2732 2733 pf->next = port->flow_list; 2734 pf->id = id; 2735 pf->flow = flow; 2736 port->flow_list = pf; 2737 printf("Flow rule #%u creation enqueued\n", pf->id); 2738 return 0; 2739 } 2740 2741 /** Enqueue number of destroy flow rules operations. */ 2742 int 2743 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2744 bool postpone, uint32_t n, const uint32_t *rule) 2745 { 2746 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2747 struct rte_port *port; 2748 struct port_flow **tmp; 2749 uint32_t c = 0; 2750 int ret = 0; 2751 2752 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2753 port_id == (portid_t)RTE_PORT_ALL) 2754 return -EINVAL; 2755 port = &ports[port_id]; 2756 2757 if (queue_id >= port->queue_nb) { 2758 printf("Queue #%u is invalid\n", queue_id); 2759 return -EINVAL; 2760 } 2761 2762 tmp = &port->flow_list; 2763 while (*tmp) { 2764 uint32_t i; 2765 2766 for (i = 0; i != n; ++i) { 2767 struct rte_flow_error error; 2768 struct port_flow *pf = *tmp; 2769 2770 if (rule[i] != pf->id) 2771 continue; 2772 /* 2773 * Poisoning to make sure PMD 2774 * update it in case of error. 2775 */ 2776 memset(&error, 0x33, sizeof(error)); 2777 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2778 pf->flow, NULL, &error)) { 2779 ret = port_flow_complain(&error); 2780 continue; 2781 } 2782 printf("Flow rule #%u destruction enqueued\n", pf->id); 2783 *tmp = pf->next; 2784 free(pf); 2785 break; 2786 } 2787 if (i == n) 2788 tmp = &(*tmp)->next; 2789 ++c; 2790 } 2791 return ret; 2792 } 2793 2794 /** Enqueue indirect action create operation. */ 2795 int 2796 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2797 bool postpone, uint32_t id, 2798 const struct rte_flow_indir_action_conf *conf, 2799 const struct rte_flow_action *action) 2800 { 2801 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2802 struct rte_port *port; 2803 struct port_indirect_action *pia; 2804 int ret; 2805 struct rte_flow_error error; 2806 2807 ret = action_alloc(port_id, id, &pia); 2808 if (ret) 2809 return ret; 2810 2811 port = &ports[port_id]; 2812 if (queue_id >= port->queue_nb) { 2813 printf("Queue #%u is invalid\n", queue_id); 2814 return -EINVAL; 2815 } 2816 2817 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2818 struct rte_flow_action_age *age = 2819 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2820 2821 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2822 age->context = &pia->age_type; 2823 } 2824 /* Poisoning to make sure PMDs update it in case of error. */ 2825 memset(&error, 0x88, sizeof(error)); 2826 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2827 &attr, conf, action, NULL, &error); 2828 if (!pia->handle) { 2829 uint32_t destroy_id = pia->id; 2830 port_queue_action_handle_destroy(port_id, queue_id, 2831 postpone, 1, &destroy_id); 2832 return port_flow_complain(&error); 2833 } 2834 pia->type = action->type; 2835 printf("Indirect action #%u creation queued\n", pia->id); 2836 return 0; 2837 } 2838 2839 /** Enqueue indirect action destroy operation. */ 2840 int 2841 port_queue_action_handle_destroy(portid_t port_id, 2842 uint32_t queue_id, bool postpone, 2843 uint32_t n, const uint32_t *actions) 2844 { 2845 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2846 struct rte_port *port; 2847 struct port_indirect_action **tmp; 2848 uint32_t c = 0; 2849 int ret = 0; 2850 2851 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2852 port_id == (portid_t)RTE_PORT_ALL) 2853 return -EINVAL; 2854 port = &ports[port_id]; 2855 2856 if (queue_id >= port->queue_nb) { 2857 printf("Queue #%u is invalid\n", queue_id); 2858 return -EINVAL; 2859 } 2860 2861 tmp = &port->actions_list; 2862 while (*tmp) { 2863 uint32_t i; 2864 2865 for (i = 0; i != n; ++i) { 2866 struct rte_flow_error error; 2867 struct port_indirect_action *pia = *tmp; 2868 2869 if (actions[i] != pia->id) 2870 continue; 2871 /* 2872 * Poisoning to make sure PMDs update it in case 2873 * of error. 2874 */ 2875 memset(&error, 0x99, sizeof(error)); 2876 2877 if (pia->handle && 2878 rte_flow_async_action_handle_destroy(port_id, 2879 queue_id, &attr, pia->handle, NULL, &error)) { 2880 ret = port_flow_complain(&error); 2881 continue; 2882 } 2883 *tmp = pia->next; 2884 printf("Indirect action #%u destruction queued\n", 2885 pia->id); 2886 free(pia); 2887 break; 2888 } 2889 if (i == n) 2890 tmp = &(*tmp)->next; 2891 ++c; 2892 } 2893 return ret; 2894 } 2895 2896 /** Enqueue indirect action update operation. */ 2897 int 2898 port_queue_action_handle_update(portid_t port_id, 2899 uint32_t queue_id, bool postpone, uint32_t id, 2900 const struct rte_flow_action *action) 2901 { 2902 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2903 struct rte_port *port; 2904 struct rte_flow_error error; 2905 struct rte_flow_action_handle *action_handle; 2906 2907 action_handle = port_action_handle_get_by_id(port_id, id); 2908 if (!action_handle) 2909 return -EINVAL; 2910 2911 port = &ports[port_id]; 2912 if (queue_id >= port->queue_nb) { 2913 printf("Queue #%u is invalid\n", queue_id); 2914 return -EINVAL; 2915 } 2916 2917 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 2918 action_handle, action, NULL, &error)) { 2919 return port_flow_complain(&error); 2920 } 2921 printf("Indirect action #%u update queued\n", id); 2922 return 0; 2923 } 2924 2925 /** Push all the queue operations in the queue to the NIC. */ 2926 int 2927 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 2928 { 2929 struct rte_port *port; 2930 struct rte_flow_error error; 2931 int ret = 0; 2932 2933 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2934 port_id == (portid_t)RTE_PORT_ALL) 2935 return -EINVAL; 2936 port = &ports[port_id]; 2937 2938 if (queue_id >= port->queue_nb) { 2939 printf("Queue #%u is invalid\n", queue_id); 2940 return -EINVAL; 2941 } 2942 2943 memset(&error, 0x55, sizeof(error)); 2944 ret = rte_flow_push(port_id, queue_id, &error); 2945 if (ret < 0) { 2946 printf("Failed to push operations in the queue\n"); 2947 return -EINVAL; 2948 } 2949 printf("Queue #%u operations pushed\n", queue_id); 2950 return ret; 2951 } 2952 2953 /** Pull queue operation results from the queue. */ 2954 int 2955 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 2956 { 2957 struct rte_port *port; 2958 struct rte_flow_op_result *res; 2959 struct rte_flow_error error; 2960 int ret = 0; 2961 int success = 0; 2962 int i; 2963 2964 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2965 port_id == (portid_t)RTE_PORT_ALL) 2966 return -EINVAL; 2967 port = &ports[port_id]; 2968 2969 if (queue_id >= port->queue_nb) { 2970 printf("Queue #%u is invalid\n", queue_id); 2971 return -EINVAL; 2972 } 2973 2974 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 2975 if (!res) { 2976 printf("Failed to allocate memory for pulled results\n"); 2977 return -ENOMEM; 2978 } 2979 2980 memset(&error, 0x66, sizeof(error)); 2981 ret = rte_flow_pull(port_id, queue_id, res, 2982 port->queue_sz, &error); 2983 if (ret < 0) { 2984 printf("Failed to pull a operation results\n"); 2985 free(res); 2986 return -EINVAL; 2987 } 2988 2989 for (i = 0; i < ret; i++) { 2990 if (res[i].status == RTE_FLOW_OP_SUCCESS) 2991 success++; 2992 } 2993 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 2994 queue_id, ret, ret - success, success); 2995 free(res); 2996 return ret; 2997 } 2998 2999 /** Create flow rule. */ 3000 int 3001 port_flow_create(portid_t port_id, 3002 const struct rte_flow_attr *attr, 3003 const struct rte_flow_item *pattern, 3004 const struct rte_flow_action *actions, 3005 const struct tunnel_ops *tunnel_ops) 3006 { 3007 struct rte_flow *flow; 3008 struct rte_port *port; 3009 struct port_flow *pf; 3010 uint32_t id = 0; 3011 struct rte_flow_error error; 3012 struct port_flow_tunnel *pft = NULL; 3013 struct rte_flow_action_age *age = age_action_get(actions); 3014 3015 port = &ports[port_id]; 3016 if (port->flow_list) { 3017 if (port->flow_list->id == UINT32_MAX) { 3018 fprintf(stderr, 3019 "Highest rule ID is already assigned, delete it first"); 3020 return -ENOMEM; 3021 } 3022 id = port->flow_list->id + 1; 3023 } 3024 if (tunnel_ops->enabled) { 3025 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 3026 actions, tunnel_ops); 3027 if (!pft) 3028 return -ENOENT; 3029 if (pft->items) 3030 pattern = pft->items; 3031 if (pft->actions) 3032 actions = pft->actions; 3033 } 3034 pf = port_flow_new(attr, pattern, actions, &error); 3035 if (!pf) 3036 return port_flow_complain(&error); 3037 if (age) { 3038 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 3039 age->context = &pf->age_type; 3040 } 3041 /* Poisoning to make sure PMDs update it in case of error. */ 3042 memset(&error, 0x22, sizeof(error)); 3043 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 3044 if (!flow) { 3045 if (tunnel_ops->enabled) 3046 port_flow_tunnel_offload_cmd_release(port_id, 3047 tunnel_ops, pft); 3048 free(pf); 3049 return port_flow_complain(&error); 3050 } 3051 pf->next = port->flow_list; 3052 pf->id = id; 3053 pf->flow = flow; 3054 port->flow_list = pf; 3055 if (tunnel_ops->enabled) 3056 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 3057 printf("Flow rule #%u created\n", pf->id); 3058 return 0; 3059 } 3060 3061 /** Destroy a number of flow rules. */ 3062 int 3063 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 3064 { 3065 struct rte_port *port; 3066 struct port_flow **tmp; 3067 uint32_t c = 0; 3068 int ret = 0; 3069 3070 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3071 port_id == (portid_t)RTE_PORT_ALL) 3072 return -EINVAL; 3073 port = &ports[port_id]; 3074 tmp = &port->flow_list; 3075 while (*tmp) { 3076 uint32_t i; 3077 3078 for (i = 0; i != n; ++i) { 3079 struct rte_flow_error error; 3080 struct port_flow *pf = *tmp; 3081 3082 if (rule[i] != pf->id) 3083 continue; 3084 /* 3085 * Poisoning to make sure PMDs update it in case 3086 * of error. 3087 */ 3088 memset(&error, 0x33, sizeof(error)); 3089 if (rte_flow_destroy(port_id, pf->flow, &error)) { 3090 ret = port_flow_complain(&error); 3091 continue; 3092 } 3093 printf("Flow rule #%u destroyed\n", pf->id); 3094 *tmp = pf->next; 3095 free(pf); 3096 break; 3097 } 3098 if (i == n) 3099 tmp = &(*tmp)->next; 3100 ++c; 3101 } 3102 return ret; 3103 } 3104 3105 /** Remove all flow rules. */ 3106 int 3107 port_flow_flush(portid_t port_id) 3108 { 3109 struct rte_flow_error error; 3110 struct rte_port *port; 3111 int ret = 0; 3112 3113 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3114 port_id == (portid_t)RTE_PORT_ALL) 3115 return -EINVAL; 3116 3117 port = &ports[port_id]; 3118 3119 if (port->flow_list == NULL) 3120 return ret; 3121 3122 /* Poisoning to make sure PMDs update it in case of error. */ 3123 memset(&error, 0x44, sizeof(error)); 3124 if (rte_flow_flush(port_id, &error)) { 3125 port_flow_complain(&error); 3126 } 3127 3128 while (port->flow_list) { 3129 struct port_flow *pf = port->flow_list->next; 3130 3131 free(port->flow_list); 3132 port->flow_list = pf; 3133 } 3134 return ret; 3135 } 3136 3137 /** Dump flow rules. */ 3138 int 3139 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 3140 const char *file_name) 3141 { 3142 int ret = 0; 3143 FILE *file = stdout; 3144 struct rte_flow_error error; 3145 struct rte_port *port; 3146 struct port_flow *pflow; 3147 struct rte_flow *tmpFlow = NULL; 3148 bool found = false; 3149 3150 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3151 port_id == (portid_t)RTE_PORT_ALL) 3152 return -EINVAL; 3153 3154 if (!dump_all) { 3155 port = &ports[port_id]; 3156 pflow = port->flow_list; 3157 while (pflow) { 3158 if (rule_id != pflow->id) { 3159 pflow = pflow->next; 3160 } else { 3161 tmpFlow = pflow->flow; 3162 if (tmpFlow) 3163 found = true; 3164 break; 3165 } 3166 } 3167 if (found == false) { 3168 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 3169 return -EINVAL; 3170 } 3171 } 3172 3173 if (file_name && strlen(file_name)) { 3174 file = fopen(file_name, "w"); 3175 if (!file) { 3176 fprintf(stderr, "Failed to create file %s: %s\n", 3177 file_name, strerror(errno)); 3178 return -errno; 3179 } 3180 } 3181 3182 if (!dump_all) 3183 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 3184 else 3185 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 3186 if (ret) { 3187 port_flow_complain(&error); 3188 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 3189 } else 3190 printf("Flow dump finished\n"); 3191 if (file_name && strlen(file_name)) 3192 fclose(file); 3193 return ret; 3194 } 3195 3196 /** Query a flow rule. */ 3197 int 3198 port_flow_query(portid_t port_id, uint32_t rule, 3199 const struct rte_flow_action *action) 3200 { 3201 struct rte_flow_error error; 3202 struct rte_port *port; 3203 struct port_flow *pf; 3204 const char *name; 3205 union { 3206 struct rte_flow_query_count count; 3207 struct rte_flow_action_rss rss_conf; 3208 struct rte_flow_query_age age; 3209 } query; 3210 int ret; 3211 3212 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3213 port_id == (portid_t)RTE_PORT_ALL) 3214 return -EINVAL; 3215 port = &ports[port_id]; 3216 for (pf = port->flow_list; pf; pf = pf->next) 3217 if (pf->id == rule) 3218 break; 3219 if (!pf) { 3220 fprintf(stderr, "Flow rule #%u not found\n", rule); 3221 return -ENOENT; 3222 } 3223 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3224 &name, sizeof(name), 3225 (void *)(uintptr_t)action->type, &error); 3226 if (ret < 0) 3227 return port_flow_complain(&error); 3228 switch (action->type) { 3229 case RTE_FLOW_ACTION_TYPE_COUNT: 3230 case RTE_FLOW_ACTION_TYPE_RSS: 3231 case RTE_FLOW_ACTION_TYPE_AGE: 3232 break; 3233 default: 3234 fprintf(stderr, "Cannot query action type %d (%s)\n", 3235 action->type, name); 3236 return -ENOTSUP; 3237 } 3238 /* Poisoning to make sure PMDs update it in case of error. */ 3239 memset(&error, 0x55, sizeof(error)); 3240 memset(&query, 0, sizeof(query)); 3241 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3242 return port_flow_complain(&error); 3243 switch (action->type) { 3244 case RTE_FLOW_ACTION_TYPE_COUNT: 3245 printf("%s:\n" 3246 " hits_set: %u\n" 3247 " bytes_set: %u\n" 3248 " hits: %" PRIu64 "\n" 3249 " bytes: %" PRIu64 "\n", 3250 name, 3251 query.count.hits_set, 3252 query.count.bytes_set, 3253 query.count.hits, 3254 query.count.bytes); 3255 break; 3256 case RTE_FLOW_ACTION_TYPE_RSS: 3257 rss_config_display(&query.rss_conf); 3258 break; 3259 case RTE_FLOW_ACTION_TYPE_AGE: 3260 printf("%s:\n" 3261 " aged: %u\n" 3262 " sec_since_last_hit_valid: %u\n" 3263 " sec_since_last_hit: %" PRIu32 "\n", 3264 name, 3265 query.age.aged, 3266 query.age.sec_since_last_hit_valid, 3267 query.age.sec_since_last_hit); 3268 break; 3269 default: 3270 fprintf(stderr, 3271 "Cannot display result for action type %d (%s)\n", 3272 action->type, name); 3273 break; 3274 } 3275 return 0; 3276 } 3277 3278 /** List simply and destroy all aged flows. */ 3279 void 3280 port_flow_aged(portid_t port_id, uint8_t destroy) 3281 { 3282 void **contexts; 3283 int nb_context, total = 0, idx; 3284 struct rte_flow_error error; 3285 enum age_action_context_type *type; 3286 union { 3287 struct port_flow *pf; 3288 struct port_indirect_action *pia; 3289 } ctx; 3290 3291 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3292 port_id == (portid_t)RTE_PORT_ALL) 3293 return; 3294 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3295 printf("Port %u total aged flows: %d\n", port_id, total); 3296 if (total < 0) { 3297 port_flow_complain(&error); 3298 return; 3299 } 3300 if (total == 0) 3301 return; 3302 contexts = malloc(sizeof(void *) * total); 3303 if (contexts == NULL) { 3304 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3305 return; 3306 } 3307 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3308 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3309 if (nb_context != total) { 3310 fprintf(stderr, 3311 "Port:%d get aged flows count(%d) != total(%d)\n", 3312 port_id, nb_context, total); 3313 free(contexts); 3314 return; 3315 } 3316 total = 0; 3317 for (idx = 0; idx < nb_context; idx++) { 3318 if (!contexts[idx]) { 3319 fprintf(stderr, "Error: get Null context in port %u\n", 3320 port_id); 3321 continue; 3322 } 3323 type = (enum age_action_context_type *)contexts[idx]; 3324 switch (*type) { 3325 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3326 ctx.pf = container_of(type, struct port_flow, age_type); 3327 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 3328 "\t%c%c%c\t\n", 3329 "Flow", 3330 ctx.pf->id, 3331 ctx.pf->rule.attr->group, 3332 ctx.pf->rule.attr->priority, 3333 ctx.pf->rule.attr->ingress ? 'i' : '-', 3334 ctx.pf->rule.attr->egress ? 'e' : '-', 3335 ctx.pf->rule.attr->transfer ? 't' : '-'); 3336 if (destroy && !port_flow_destroy(port_id, 1, 3337 &ctx.pf->id)) 3338 total++; 3339 break; 3340 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3341 ctx.pia = container_of(type, 3342 struct port_indirect_action, age_type); 3343 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3344 ctx.pia->id); 3345 break; 3346 default: 3347 fprintf(stderr, "Error: invalid context type %u\n", 3348 port_id); 3349 break; 3350 } 3351 } 3352 printf("\n%d flows destroyed\n", total); 3353 free(contexts); 3354 } 3355 3356 /** List flow rules. */ 3357 void 3358 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3359 { 3360 struct rte_port *port; 3361 struct port_flow *pf; 3362 struct port_flow *list = NULL; 3363 uint32_t i; 3364 3365 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3366 port_id == (portid_t)RTE_PORT_ALL) 3367 return; 3368 port = &ports[port_id]; 3369 if (!port->flow_list) 3370 return; 3371 /* Sort flows by group, priority and ID. */ 3372 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3373 struct port_flow **tmp; 3374 const struct rte_flow_attr *curr = pf->rule.attr; 3375 3376 if (n) { 3377 /* Filter out unwanted groups. */ 3378 for (i = 0; i != n; ++i) 3379 if (curr->group == group[i]) 3380 break; 3381 if (i == n) 3382 continue; 3383 } 3384 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3385 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3386 3387 if (curr->group > comp->group || 3388 (curr->group == comp->group && 3389 curr->priority > comp->priority) || 3390 (curr->group == comp->group && 3391 curr->priority == comp->priority && 3392 pf->id > (*tmp)->id)) 3393 continue; 3394 break; 3395 } 3396 pf->tmp = *tmp; 3397 *tmp = pf; 3398 } 3399 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3400 for (pf = list; pf != NULL; pf = pf->tmp) { 3401 const struct rte_flow_item *item = pf->rule.pattern; 3402 const struct rte_flow_action *action = pf->rule.actions; 3403 const char *name; 3404 3405 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3406 pf->id, 3407 pf->rule.attr->group, 3408 pf->rule.attr->priority, 3409 pf->rule.attr->ingress ? 'i' : '-', 3410 pf->rule.attr->egress ? 'e' : '-', 3411 pf->rule.attr->transfer ? 't' : '-'); 3412 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3413 if ((uint32_t)item->type > INT_MAX) 3414 name = "PMD_INTERNAL"; 3415 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3416 &name, sizeof(name), 3417 (void *)(uintptr_t)item->type, 3418 NULL) <= 0) 3419 name = "[UNKNOWN]"; 3420 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3421 printf("%s ", name); 3422 ++item; 3423 } 3424 printf("=>"); 3425 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3426 if ((uint32_t)action->type > INT_MAX) 3427 name = "PMD_INTERNAL"; 3428 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3429 &name, sizeof(name), 3430 (void *)(uintptr_t)action->type, 3431 NULL) <= 0) 3432 name = "[UNKNOWN]"; 3433 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3434 printf(" %s", name); 3435 ++action; 3436 } 3437 printf("\n"); 3438 } 3439 } 3440 3441 /** Restrict ingress traffic to the defined flow rules. */ 3442 int 3443 port_flow_isolate(portid_t port_id, int set) 3444 { 3445 struct rte_flow_error error; 3446 3447 /* Poisoning to make sure PMDs update it in case of error. */ 3448 memset(&error, 0x66, sizeof(error)); 3449 if (rte_flow_isolate(port_id, set, &error)) 3450 return port_flow_complain(&error); 3451 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3452 port_id, 3453 set ? "now restricted" : "not restricted anymore"); 3454 return 0; 3455 } 3456 3457 /* 3458 * RX/TX ring descriptors display functions. 3459 */ 3460 int 3461 rx_queue_id_is_invalid(queueid_t rxq_id) 3462 { 3463 if (rxq_id < nb_rxq) 3464 return 0; 3465 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3466 rxq_id, nb_rxq); 3467 return 1; 3468 } 3469 3470 int 3471 tx_queue_id_is_invalid(queueid_t txq_id) 3472 { 3473 if (txq_id < nb_txq) 3474 return 0; 3475 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 3476 txq_id, nb_txq); 3477 return 1; 3478 } 3479 3480 static int 3481 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 3482 { 3483 struct rte_port *port = &ports[port_id]; 3484 struct rte_eth_rxq_info rx_qinfo; 3485 int ret; 3486 3487 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 3488 if (ret == 0) { 3489 *ring_size = rx_qinfo.nb_desc; 3490 return ret; 3491 } 3492 3493 if (ret != -ENOTSUP) 3494 return ret; 3495 /* 3496 * If the rte_eth_rx_queue_info_get is not support for this PMD, 3497 * ring_size stored in testpmd will be used for validity verification. 3498 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 3499 * being 0, it will use a default value provided by PMDs to setup this 3500 * rxq. If the default value is 0, it will use the 3501 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 3502 */ 3503 if (port->nb_rx_desc[rxq_id]) 3504 *ring_size = port->nb_rx_desc[rxq_id]; 3505 else if (port->dev_info.default_rxportconf.ring_size) 3506 *ring_size = port->dev_info.default_rxportconf.ring_size; 3507 else 3508 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 3509 return 0; 3510 } 3511 3512 static int 3513 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 3514 { 3515 struct rte_port *port = &ports[port_id]; 3516 struct rte_eth_txq_info tx_qinfo; 3517 int ret; 3518 3519 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 3520 if (ret == 0) { 3521 *ring_size = tx_qinfo.nb_desc; 3522 return ret; 3523 } 3524 3525 if (ret != -ENOTSUP) 3526 return ret; 3527 /* 3528 * If the rte_eth_tx_queue_info_get is not support for this PMD, 3529 * ring_size stored in testpmd will be used for validity verification. 3530 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 3531 * being 0, it will use a default value provided by PMDs to setup this 3532 * txq. If the default value is 0, it will use the 3533 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 3534 */ 3535 if (port->nb_tx_desc[txq_id]) 3536 *ring_size = port->nb_tx_desc[txq_id]; 3537 else if (port->dev_info.default_txportconf.ring_size) 3538 *ring_size = port->dev_info.default_txportconf.ring_size; 3539 else 3540 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 3541 return 0; 3542 } 3543 3544 static int 3545 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 3546 { 3547 uint16_t ring_size; 3548 int ret; 3549 3550 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 3551 if (ret) 3552 return 1; 3553 3554 if (rxdesc_id < ring_size) 3555 return 0; 3556 3557 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 3558 rxdesc_id, ring_size); 3559 return 1; 3560 } 3561 3562 static int 3563 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 3564 { 3565 uint16_t ring_size; 3566 int ret; 3567 3568 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 3569 if (ret) 3570 return 1; 3571 3572 if (txdesc_id < ring_size) 3573 return 0; 3574 3575 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 3576 txdesc_id, ring_size); 3577 return 1; 3578 } 3579 3580 static const struct rte_memzone * 3581 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 3582 { 3583 char mz_name[RTE_MEMZONE_NAMESIZE]; 3584 const struct rte_memzone *mz; 3585 3586 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 3587 port_id, q_id, ring_name); 3588 mz = rte_memzone_lookup(mz_name); 3589 if (mz == NULL) 3590 fprintf(stderr, 3591 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 3592 ring_name, port_id, q_id, mz_name); 3593 return mz; 3594 } 3595 3596 union igb_ring_dword { 3597 uint64_t dword; 3598 struct { 3599 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3600 uint32_t lo; 3601 uint32_t hi; 3602 #else 3603 uint32_t hi; 3604 uint32_t lo; 3605 #endif 3606 } words; 3607 }; 3608 3609 struct igb_ring_desc_32_bytes { 3610 union igb_ring_dword lo_dword; 3611 union igb_ring_dword hi_dword; 3612 union igb_ring_dword resv1; 3613 union igb_ring_dword resv2; 3614 }; 3615 3616 struct igb_ring_desc_16_bytes { 3617 union igb_ring_dword lo_dword; 3618 union igb_ring_dword hi_dword; 3619 }; 3620 3621 static void 3622 ring_rxd_display_dword(union igb_ring_dword dword) 3623 { 3624 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 3625 (unsigned)dword.words.hi); 3626 } 3627 3628 static void 3629 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 3630 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3631 portid_t port_id, 3632 #else 3633 __rte_unused portid_t port_id, 3634 #endif 3635 uint16_t desc_id) 3636 { 3637 struct igb_ring_desc_16_bytes *ring = 3638 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3639 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3640 int ret; 3641 struct rte_eth_dev_info dev_info; 3642 3643 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3644 if (ret != 0) 3645 return; 3646 3647 if (strstr(dev_info.driver_name, "i40e") != NULL) { 3648 /* 32 bytes RX descriptor, i40e only */ 3649 struct igb_ring_desc_32_bytes *ring = 3650 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 3651 ring[desc_id].lo_dword.dword = 3652 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3653 ring_rxd_display_dword(ring[desc_id].lo_dword); 3654 ring[desc_id].hi_dword.dword = 3655 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3656 ring_rxd_display_dword(ring[desc_id].hi_dword); 3657 ring[desc_id].resv1.dword = 3658 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 3659 ring_rxd_display_dword(ring[desc_id].resv1); 3660 ring[desc_id].resv2.dword = 3661 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 3662 ring_rxd_display_dword(ring[desc_id].resv2); 3663 3664 return; 3665 } 3666 #endif 3667 /* 16 bytes RX descriptor */ 3668 ring[desc_id].lo_dword.dword = 3669 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3670 ring_rxd_display_dword(ring[desc_id].lo_dword); 3671 ring[desc_id].hi_dword.dword = 3672 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3673 ring_rxd_display_dword(ring[desc_id].hi_dword); 3674 } 3675 3676 static void 3677 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 3678 { 3679 struct igb_ring_desc_16_bytes *ring; 3680 struct igb_ring_desc_16_bytes txd; 3681 3682 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3683 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3684 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3685 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 3686 (unsigned)txd.lo_dword.words.lo, 3687 (unsigned)txd.lo_dword.words.hi, 3688 (unsigned)txd.hi_dword.words.lo, 3689 (unsigned)txd.hi_dword.words.hi); 3690 } 3691 3692 void 3693 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 3694 { 3695 const struct rte_memzone *rx_mz; 3696 3697 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 3698 return; 3699 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 3700 if (rx_mz == NULL) 3701 return; 3702 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 3703 } 3704 3705 void 3706 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 3707 { 3708 const struct rte_memzone *tx_mz; 3709 3710 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 3711 return; 3712 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 3713 if (tx_mz == NULL) 3714 return; 3715 ring_tx_descriptor_display(tx_mz, txd_id); 3716 } 3717 3718 void 3719 fwd_lcores_config_display(void) 3720 { 3721 lcoreid_t lc_id; 3722 3723 printf("List of forwarding lcores:"); 3724 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 3725 printf(" %2u", fwd_lcores_cpuids[lc_id]); 3726 printf("\n"); 3727 } 3728 void 3729 rxtx_config_display(void) 3730 { 3731 portid_t pid; 3732 queueid_t qid; 3733 3734 printf(" %s packet forwarding%s packets/burst=%d\n", 3735 cur_fwd_eng->fwd_mode_name, 3736 retry_enabled == 0 ? "" : " with retry", 3737 nb_pkt_per_burst); 3738 3739 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 3740 printf(" packet len=%u - nb packet segments=%d\n", 3741 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 3742 3743 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 3744 nb_fwd_lcores, nb_fwd_ports); 3745 3746 RTE_ETH_FOREACH_DEV(pid) { 3747 struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; 3748 struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; 3749 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 3750 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 3751 struct rte_eth_rxq_info rx_qinfo; 3752 struct rte_eth_txq_info tx_qinfo; 3753 uint16_t rx_free_thresh_tmp; 3754 uint16_t tx_free_thresh_tmp; 3755 uint16_t tx_rs_thresh_tmp; 3756 uint16_t nb_rx_desc_tmp; 3757 uint16_t nb_tx_desc_tmp; 3758 uint64_t offloads_tmp; 3759 uint8_t pthresh_tmp; 3760 uint8_t hthresh_tmp; 3761 uint8_t wthresh_tmp; 3762 int32_t rc; 3763 3764 /* per port config */ 3765 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 3766 (unsigned int)pid, nb_rxq, nb_txq); 3767 3768 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 3769 ports[pid].dev_conf.rxmode.offloads, 3770 ports[pid].dev_conf.txmode.offloads); 3771 3772 /* per rx queue config only for first queue to be less verbose */ 3773 for (qid = 0; qid < 1; qid++) { 3774 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 3775 if (rc) { 3776 nb_rx_desc_tmp = nb_rx_desc[qid]; 3777 rx_free_thresh_tmp = 3778 rx_conf[qid].rx_free_thresh; 3779 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 3780 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 3781 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 3782 offloads_tmp = rx_conf[qid].offloads; 3783 } else { 3784 nb_rx_desc_tmp = rx_qinfo.nb_desc; 3785 rx_free_thresh_tmp = 3786 rx_qinfo.conf.rx_free_thresh; 3787 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 3788 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 3789 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 3790 offloads_tmp = rx_qinfo.conf.offloads; 3791 } 3792 3793 printf(" RX queue: %d\n", qid); 3794 printf(" RX desc=%d - RX free threshold=%d\n", 3795 nb_rx_desc_tmp, rx_free_thresh_tmp); 3796 printf(" RX threshold registers: pthresh=%d hthresh=%d " 3797 " wthresh=%d\n", 3798 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3799 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 3800 if (rx_conf->share_group > 0) 3801 printf(" share_group=%u share_qid=%u", 3802 rx_conf->share_group, 3803 rx_conf->share_qid); 3804 printf("\n"); 3805 } 3806 3807 /* per tx queue config only for first queue to be less verbose */ 3808 for (qid = 0; qid < 1; qid++) { 3809 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 3810 if (rc) { 3811 nb_tx_desc_tmp = nb_tx_desc[qid]; 3812 tx_free_thresh_tmp = 3813 tx_conf[qid].tx_free_thresh; 3814 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 3815 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 3816 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 3817 offloads_tmp = tx_conf[qid].offloads; 3818 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 3819 } else { 3820 nb_tx_desc_tmp = tx_qinfo.nb_desc; 3821 tx_free_thresh_tmp = 3822 tx_qinfo.conf.tx_free_thresh; 3823 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 3824 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 3825 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 3826 offloads_tmp = tx_qinfo.conf.offloads; 3827 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 3828 } 3829 3830 printf(" TX queue: %d\n", qid); 3831 printf(" TX desc=%d - TX free threshold=%d\n", 3832 nb_tx_desc_tmp, tx_free_thresh_tmp); 3833 printf(" TX threshold registers: pthresh=%d hthresh=%d " 3834 " wthresh=%d\n", 3835 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3836 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 3837 offloads_tmp, tx_rs_thresh_tmp); 3838 } 3839 } 3840 } 3841 3842 void 3843 port_rss_reta_info(portid_t port_id, 3844 struct rte_eth_rss_reta_entry64 *reta_conf, 3845 uint16_t nb_entries) 3846 { 3847 uint16_t i, idx, shift; 3848 int ret; 3849 3850 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3851 return; 3852 3853 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 3854 if (ret != 0) { 3855 fprintf(stderr, 3856 "Failed to get RSS RETA info, return code = %d\n", 3857 ret); 3858 return; 3859 } 3860 3861 for (i = 0; i < nb_entries; i++) { 3862 idx = i / RTE_ETH_RETA_GROUP_SIZE; 3863 shift = i % RTE_ETH_RETA_GROUP_SIZE; 3864 if (!(reta_conf[idx].mask & (1ULL << shift))) 3865 continue; 3866 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 3867 i, reta_conf[idx].reta[shift]); 3868 } 3869 } 3870 3871 /* 3872 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 3873 * key of the port. 3874 */ 3875 void 3876 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 3877 { 3878 struct rte_eth_rss_conf rss_conf = {0}; 3879 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 3880 uint64_t rss_hf; 3881 uint8_t i; 3882 int diag; 3883 struct rte_eth_dev_info dev_info; 3884 uint8_t hash_key_size; 3885 int ret; 3886 3887 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3888 return; 3889 3890 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3891 if (ret != 0) 3892 return; 3893 3894 if (dev_info.hash_key_size > 0 && 3895 dev_info.hash_key_size <= sizeof(rss_key)) 3896 hash_key_size = dev_info.hash_key_size; 3897 else { 3898 fprintf(stderr, 3899 "dev_info did not provide a valid hash key size\n"); 3900 return; 3901 } 3902 3903 /* Get RSS hash key if asked to display it */ 3904 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 3905 rss_conf.rss_key_len = hash_key_size; 3906 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3907 if (diag != 0) { 3908 switch (diag) { 3909 case -ENODEV: 3910 fprintf(stderr, "port index %d invalid\n", port_id); 3911 break; 3912 case -ENOTSUP: 3913 fprintf(stderr, "operation not supported by device\n"); 3914 break; 3915 default: 3916 fprintf(stderr, "operation failed - diag=%d\n", diag); 3917 break; 3918 } 3919 return; 3920 } 3921 rss_hf = rss_conf.rss_hf; 3922 if (rss_hf == 0) { 3923 printf("RSS disabled\n"); 3924 return; 3925 } 3926 printf("RSS functions:\n"); 3927 rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE); 3928 if (!show_rss_key) 3929 return; 3930 printf("RSS key:\n"); 3931 for (i = 0; i < hash_key_size; i++) 3932 printf("%02X", rss_key[i]); 3933 printf("\n"); 3934 } 3935 3936 void 3937 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3938 uint8_t hash_key_len) 3939 { 3940 struct rte_eth_rss_conf rss_conf; 3941 int diag; 3942 3943 rss_conf.rss_key = NULL; 3944 rss_conf.rss_key_len = 0; 3945 rss_conf.rss_hf = str_to_rsstypes(rss_type); 3946 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3947 if (diag == 0) { 3948 rss_conf.rss_key = hash_key; 3949 rss_conf.rss_key_len = hash_key_len; 3950 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3951 } 3952 if (diag == 0) 3953 return; 3954 3955 switch (diag) { 3956 case -ENODEV: 3957 fprintf(stderr, "port index %d invalid\n", port_id); 3958 break; 3959 case -ENOTSUP: 3960 fprintf(stderr, "operation not supported by device\n"); 3961 break; 3962 default: 3963 fprintf(stderr, "operation failed - diag=%d\n", diag); 3964 break; 3965 } 3966 } 3967 3968 /* 3969 * Check whether a shared rxq scheduled on other lcores. 3970 */ 3971 static bool 3972 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 3973 portid_t src_port, queueid_t src_rxq, 3974 uint32_t share_group, queueid_t share_rxq) 3975 { 3976 streamid_t sm_id; 3977 streamid_t nb_fs_per_lcore; 3978 lcoreid_t nb_fc; 3979 lcoreid_t lc_id; 3980 struct fwd_stream *fs; 3981 struct rte_port *port; 3982 struct rte_eth_dev_info *dev_info; 3983 struct rte_eth_rxconf *rxq_conf; 3984 3985 nb_fc = cur_fwd_config.nb_fwd_lcores; 3986 /* Check remaining cores. */ 3987 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 3988 sm_id = fwd_lcores[lc_id]->stream_idx; 3989 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3990 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3991 sm_id++) { 3992 fs = fwd_streams[sm_id]; 3993 port = &ports[fs->rx_port]; 3994 dev_info = &port->dev_info; 3995 rxq_conf = &port->rxq[fs->rx_queue].conf; 3996 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3997 == 0 || rxq_conf->share_group == 0) 3998 /* Not shared rxq. */ 3999 continue; 4000 if (domain_id != port->dev_info.switch_info.domain_id) 4001 continue; 4002 if (rxq_conf->share_group != share_group) 4003 continue; 4004 if (rxq_conf->share_qid != share_rxq) 4005 continue; 4006 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 4007 share_group, share_rxq); 4008 printf(" lcore %hhu Port %hu queue %hu\n", 4009 src_lc, src_port, src_rxq); 4010 printf(" lcore %hhu Port %hu queue %hu\n", 4011 lc_id, fs->rx_port, fs->rx_queue); 4012 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 4013 nb_rxq); 4014 return true; 4015 } 4016 } 4017 return false; 4018 } 4019 4020 /* 4021 * Check shared rxq configuration. 4022 * 4023 * Shared group must not being scheduled on different core. 4024 */ 4025 bool 4026 pkt_fwd_shared_rxq_check(void) 4027 { 4028 streamid_t sm_id; 4029 streamid_t nb_fs_per_lcore; 4030 lcoreid_t nb_fc; 4031 lcoreid_t lc_id; 4032 struct fwd_stream *fs; 4033 uint16_t domain_id; 4034 struct rte_port *port; 4035 struct rte_eth_dev_info *dev_info; 4036 struct rte_eth_rxconf *rxq_conf; 4037 4038 if (rxq_share == 0) 4039 return true; 4040 nb_fc = cur_fwd_config.nb_fwd_lcores; 4041 /* 4042 * Check streams on each core, make sure the same switch domain + 4043 * group + queue doesn't get scheduled on other cores. 4044 */ 4045 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 4046 sm_id = fwd_lcores[lc_id]->stream_idx; 4047 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 4048 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 4049 sm_id++) { 4050 fs = fwd_streams[sm_id]; 4051 /* Update lcore info stream being scheduled. */ 4052 fs->lcore = fwd_lcores[lc_id]; 4053 port = &ports[fs->rx_port]; 4054 dev_info = &port->dev_info; 4055 rxq_conf = &port->rxq[fs->rx_queue].conf; 4056 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 4057 == 0 || rxq_conf->share_group == 0) 4058 /* Not shared rxq. */ 4059 continue; 4060 /* Check shared rxq not scheduled on remaining cores. */ 4061 domain_id = port->dev_info.switch_info.domain_id; 4062 if (fwd_stream_on_other_lcores(domain_id, lc_id, 4063 fs->rx_port, 4064 fs->rx_queue, 4065 rxq_conf->share_group, 4066 rxq_conf->share_qid)) 4067 return false; 4068 } 4069 } 4070 return true; 4071 } 4072 4073 /* 4074 * Setup forwarding configuration for each logical core. 4075 */ 4076 static void 4077 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 4078 { 4079 streamid_t nb_fs_per_lcore; 4080 streamid_t nb_fs; 4081 streamid_t sm_id; 4082 lcoreid_t nb_extra; 4083 lcoreid_t nb_fc; 4084 lcoreid_t nb_lc; 4085 lcoreid_t lc_id; 4086 4087 nb_fs = cfg->nb_fwd_streams; 4088 nb_fc = cfg->nb_fwd_lcores; 4089 if (nb_fs <= nb_fc) { 4090 nb_fs_per_lcore = 1; 4091 nb_extra = 0; 4092 } else { 4093 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 4094 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 4095 } 4096 4097 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 4098 sm_id = 0; 4099 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 4100 fwd_lcores[lc_id]->stream_idx = sm_id; 4101 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 4102 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4103 } 4104 4105 /* 4106 * Assign extra remaining streams, if any. 4107 */ 4108 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 4109 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 4110 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 4111 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 4112 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 4113 } 4114 } 4115 4116 static portid_t 4117 fwd_topology_tx_port_get(portid_t rxp) 4118 { 4119 static int warning_once = 1; 4120 4121 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 4122 4123 switch (port_topology) { 4124 default: 4125 case PORT_TOPOLOGY_PAIRED: 4126 if ((rxp & 0x1) == 0) { 4127 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 4128 return rxp + 1; 4129 if (warning_once) { 4130 fprintf(stderr, 4131 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 4132 warning_once = 0; 4133 } 4134 return rxp; 4135 } 4136 return rxp - 1; 4137 case PORT_TOPOLOGY_CHAINED: 4138 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 4139 case PORT_TOPOLOGY_LOOP: 4140 return rxp; 4141 } 4142 } 4143 4144 static void 4145 simple_fwd_config_setup(void) 4146 { 4147 portid_t i; 4148 4149 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 4150 cur_fwd_config.nb_fwd_streams = 4151 (streamid_t) cur_fwd_config.nb_fwd_ports; 4152 4153 /* reinitialize forwarding streams */ 4154 init_fwd_streams(); 4155 4156 /* 4157 * In the simple forwarding test, the number of forwarding cores 4158 * must be lower or equal to the number of forwarding ports. 4159 */ 4160 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4161 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 4162 cur_fwd_config.nb_fwd_lcores = 4163 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 4164 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4165 4166 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 4167 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 4168 fwd_streams[i]->rx_queue = 0; 4169 fwd_streams[i]->tx_port = 4170 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 4171 fwd_streams[i]->tx_queue = 0; 4172 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 4173 fwd_streams[i]->retry_enabled = retry_enabled; 4174 } 4175 } 4176 4177 /** 4178 * For the RSS forwarding test all streams distributed over lcores. Each stream 4179 * being composed of a RX queue to poll on a RX port for input messages, 4180 * associated with a TX queue of a TX port where to send forwarded packets. 4181 */ 4182 static void 4183 rss_fwd_config_setup(void) 4184 { 4185 portid_t rxp; 4186 portid_t txp; 4187 queueid_t rxq; 4188 queueid_t nb_q; 4189 streamid_t sm_id; 4190 int start; 4191 int end; 4192 4193 nb_q = nb_rxq; 4194 if (nb_q > nb_txq) 4195 nb_q = nb_txq; 4196 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4197 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4198 cur_fwd_config.nb_fwd_streams = 4199 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4200 4201 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4202 cur_fwd_config.nb_fwd_lcores = 4203 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4204 4205 /* reinitialize forwarding streams */ 4206 init_fwd_streams(); 4207 4208 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4209 4210 if (proc_id > 0 && nb_q % num_procs != 0) 4211 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4212 4213 /** 4214 * In multi-process, All queues are allocated to different 4215 * processes based on num_procs and proc_id. For example: 4216 * if supports 4 queues(nb_q), 2 processes(num_procs), 4217 * the 0~1 queue for primary process. 4218 * the 2~3 queue for secondary process. 4219 */ 4220 start = proc_id * nb_q / num_procs; 4221 end = start + nb_q / num_procs; 4222 rxp = 0; 4223 rxq = start; 4224 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4225 struct fwd_stream *fs; 4226 4227 fs = fwd_streams[sm_id]; 4228 txp = fwd_topology_tx_port_get(rxp); 4229 fs->rx_port = fwd_ports_ids[rxp]; 4230 fs->rx_queue = rxq; 4231 fs->tx_port = fwd_ports_ids[txp]; 4232 fs->tx_queue = rxq; 4233 fs->peer_addr = fs->tx_port; 4234 fs->retry_enabled = retry_enabled; 4235 rxp++; 4236 if (rxp < nb_fwd_ports) 4237 continue; 4238 rxp = 0; 4239 rxq++; 4240 if (rxq >= end) 4241 rxq = start; 4242 } 4243 } 4244 4245 static uint16_t 4246 get_fwd_port_total_tc_num(void) 4247 { 4248 struct rte_eth_dcb_info dcb_info; 4249 uint16_t total_tc_num = 0; 4250 unsigned int i; 4251 4252 for (i = 0; i < nb_fwd_ports; i++) { 4253 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4254 total_tc_num += dcb_info.nb_tcs; 4255 } 4256 4257 return total_tc_num; 4258 } 4259 4260 /** 4261 * For the DCB forwarding test, each core is assigned on each traffic class. 4262 * 4263 * Each core is assigned a multi-stream, each stream being composed of 4264 * a RX queue to poll on a RX port for input messages, associated with 4265 * a TX queue of a TX port where to send forwarded packets. All RX and 4266 * TX queues are mapping to the same traffic class. 4267 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4268 * the same core 4269 */ 4270 static void 4271 dcb_fwd_config_setup(void) 4272 { 4273 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4274 portid_t txp, rxp = 0; 4275 queueid_t txq, rxq = 0; 4276 lcoreid_t lc_id; 4277 uint16_t nb_rx_queue, nb_tx_queue; 4278 uint16_t i, j, k, sm_id = 0; 4279 uint16_t total_tc_num; 4280 struct rte_port *port; 4281 uint8_t tc = 0; 4282 portid_t pid; 4283 int ret; 4284 4285 /* 4286 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4287 * or RTE_PORT_STOPPED. 4288 * 4289 * Re-configure ports to get updated mapping between tc and queue in 4290 * case the queue number of the port is changed. Skip for started ports 4291 * since modifying queue number and calling dev_configure need to stop 4292 * ports first. 4293 */ 4294 for (pid = 0; pid < nb_fwd_ports; pid++) { 4295 if (port_is_started(pid) == 1) 4296 continue; 4297 4298 port = &ports[pid]; 4299 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4300 &port->dev_conf); 4301 if (ret < 0) { 4302 fprintf(stderr, 4303 "Failed to re-configure port %d, ret = %d.\n", 4304 pid, ret); 4305 return; 4306 } 4307 } 4308 4309 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4310 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4311 cur_fwd_config.nb_fwd_streams = 4312 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4313 total_tc_num = get_fwd_port_total_tc_num(); 4314 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4315 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4316 4317 /* reinitialize forwarding streams */ 4318 init_fwd_streams(); 4319 sm_id = 0; 4320 txp = 1; 4321 /* get the dcb info on the first RX and TX ports */ 4322 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4323 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4324 4325 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4326 fwd_lcores[lc_id]->stream_nb = 0; 4327 fwd_lcores[lc_id]->stream_idx = sm_id; 4328 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4329 /* if the nb_queue is zero, means this tc is 4330 * not enabled on the POOL 4331 */ 4332 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4333 break; 4334 k = fwd_lcores[lc_id]->stream_nb + 4335 fwd_lcores[lc_id]->stream_idx; 4336 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4337 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4338 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4339 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4340 for (j = 0; j < nb_rx_queue; j++) { 4341 struct fwd_stream *fs; 4342 4343 fs = fwd_streams[k + j]; 4344 fs->rx_port = fwd_ports_ids[rxp]; 4345 fs->rx_queue = rxq + j; 4346 fs->tx_port = fwd_ports_ids[txp]; 4347 fs->tx_queue = txq + j % nb_tx_queue; 4348 fs->peer_addr = fs->tx_port; 4349 fs->retry_enabled = retry_enabled; 4350 } 4351 fwd_lcores[lc_id]->stream_nb += 4352 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4353 } 4354 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4355 4356 tc++; 4357 if (tc < rxp_dcb_info.nb_tcs) 4358 continue; 4359 /* Restart from TC 0 on next RX port */ 4360 tc = 0; 4361 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4362 rxp = (portid_t) 4363 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4364 else 4365 rxp++; 4366 if (rxp >= nb_fwd_ports) 4367 return; 4368 /* get the dcb information on next RX and TX ports */ 4369 if ((rxp & 0x1) == 0) 4370 txp = (portid_t) (rxp + 1); 4371 else 4372 txp = (portid_t) (rxp - 1); 4373 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4374 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4375 } 4376 } 4377 4378 static void 4379 icmp_echo_config_setup(void) 4380 { 4381 portid_t rxp; 4382 queueid_t rxq; 4383 lcoreid_t lc_id; 4384 uint16_t sm_id; 4385 4386 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4387 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4388 (nb_txq * nb_fwd_ports); 4389 else 4390 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4391 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4392 cur_fwd_config.nb_fwd_streams = 4393 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4394 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4395 cur_fwd_config.nb_fwd_lcores = 4396 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4397 if (verbose_level > 0) { 4398 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4399 __FUNCTION__, 4400 cur_fwd_config.nb_fwd_lcores, 4401 cur_fwd_config.nb_fwd_ports, 4402 cur_fwd_config.nb_fwd_streams); 4403 } 4404 4405 /* reinitialize forwarding streams */ 4406 init_fwd_streams(); 4407 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4408 rxp = 0; rxq = 0; 4409 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4410 if (verbose_level > 0) 4411 printf(" core=%d: \n", lc_id); 4412 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4413 struct fwd_stream *fs; 4414 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4415 fs->rx_port = fwd_ports_ids[rxp]; 4416 fs->rx_queue = rxq; 4417 fs->tx_port = fs->rx_port; 4418 fs->tx_queue = rxq; 4419 fs->peer_addr = fs->tx_port; 4420 fs->retry_enabled = retry_enabled; 4421 if (verbose_level > 0) 4422 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4423 sm_id, fs->rx_port, fs->rx_queue, 4424 fs->tx_queue); 4425 rxq = (queueid_t) (rxq + 1); 4426 if (rxq == nb_rxq) { 4427 rxq = 0; 4428 rxp = (portid_t) (rxp + 1); 4429 } 4430 } 4431 } 4432 } 4433 4434 void 4435 fwd_config_setup(void) 4436 { 4437 struct rte_port *port; 4438 portid_t pt_id; 4439 unsigned int i; 4440 4441 cur_fwd_config.fwd_eng = cur_fwd_eng; 4442 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4443 icmp_echo_config_setup(); 4444 return; 4445 } 4446 4447 if ((nb_rxq > 1) && (nb_txq > 1)){ 4448 if (dcb_config) { 4449 for (i = 0; i < nb_fwd_ports; i++) { 4450 pt_id = fwd_ports_ids[i]; 4451 port = &ports[pt_id]; 4452 if (!port->dcb_flag) { 4453 fprintf(stderr, 4454 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4455 return; 4456 } 4457 } 4458 if (nb_fwd_lcores == 1) { 4459 fprintf(stderr, 4460 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4461 return; 4462 } 4463 4464 dcb_fwd_config_setup(); 4465 } else 4466 rss_fwd_config_setup(); 4467 } 4468 else 4469 simple_fwd_config_setup(); 4470 } 4471 4472 static const char * 4473 mp_alloc_to_str(uint8_t mode) 4474 { 4475 switch (mode) { 4476 case MP_ALLOC_NATIVE: 4477 return "native"; 4478 case MP_ALLOC_ANON: 4479 return "anon"; 4480 case MP_ALLOC_XMEM: 4481 return "xmem"; 4482 case MP_ALLOC_XMEM_HUGE: 4483 return "xmemhuge"; 4484 case MP_ALLOC_XBUF: 4485 return "xbuf"; 4486 default: 4487 return "invalid"; 4488 } 4489 } 4490 4491 void 4492 pkt_fwd_config_display(struct fwd_config *cfg) 4493 { 4494 struct fwd_stream *fs; 4495 lcoreid_t lc_id; 4496 streamid_t sm_id; 4497 4498 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 4499 "NUMA support %s, MP allocation mode: %s\n", 4500 cfg->fwd_eng->fwd_mode_name, 4501 retry_enabled == 0 ? "" : " with retry", 4502 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 4503 numa_support == 1 ? "enabled" : "disabled", 4504 mp_alloc_to_str(mp_alloc_type)); 4505 4506 if (retry_enabled) 4507 printf("TX retry num: %u, delay between TX retries: %uus\n", 4508 burst_tx_retry_num, burst_tx_delay_time); 4509 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 4510 printf("Logical Core %u (socket %u) forwards packets on " 4511 "%d streams:", 4512 fwd_lcores_cpuids[lc_id], 4513 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 4514 fwd_lcores[lc_id]->stream_nb); 4515 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4516 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4517 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 4518 "P=%d/Q=%d (socket %u) ", 4519 fs->rx_port, fs->rx_queue, 4520 ports[fs->rx_port].socket_id, 4521 fs->tx_port, fs->tx_queue, 4522 ports[fs->tx_port].socket_id); 4523 print_ethaddr("peer=", 4524 &peer_eth_addrs[fs->peer_addr]); 4525 } 4526 printf("\n"); 4527 } 4528 printf("\n"); 4529 } 4530 4531 void 4532 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 4533 { 4534 struct rte_ether_addr new_peer_addr; 4535 if (!rte_eth_dev_is_valid_port(port_id)) { 4536 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 4537 return; 4538 } 4539 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 4540 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 4541 peer_addr); 4542 return; 4543 } 4544 peer_eth_addrs[port_id] = new_peer_addr; 4545 } 4546 4547 int 4548 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 4549 { 4550 unsigned int i; 4551 unsigned int lcore_cpuid; 4552 int record_now; 4553 4554 record_now = 0; 4555 again: 4556 for (i = 0; i < nb_lc; i++) { 4557 lcore_cpuid = lcorelist[i]; 4558 if (! rte_lcore_is_enabled(lcore_cpuid)) { 4559 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 4560 return -1; 4561 } 4562 if (lcore_cpuid == rte_get_main_lcore()) { 4563 fprintf(stderr, 4564 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 4565 lcore_cpuid); 4566 return -1; 4567 } 4568 if (record_now) 4569 fwd_lcores_cpuids[i] = lcore_cpuid; 4570 } 4571 if (record_now == 0) { 4572 record_now = 1; 4573 goto again; 4574 } 4575 nb_cfg_lcores = (lcoreid_t) nb_lc; 4576 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 4577 printf("previous number of forwarding cores %u - changed to " 4578 "number of configured cores %u\n", 4579 (unsigned int) nb_fwd_lcores, nb_lc); 4580 nb_fwd_lcores = (lcoreid_t) nb_lc; 4581 } 4582 4583 return 0; 4584 } 4585 4586 int 4587 set_fwd_lcores_mask(uint64_t lcoremask) 4588 { 4589 unsigned int lcorelist[64]; 4590 unsigned int nb_lc; 4591 unsigned int i; 4592 4593 if (lcoremask == 0) { 4594 fprintf(stderr, "Invalid NULL mask of cores\n"); 4595 return -1; 4596 } 4597 nb_lc = 0; 4598 for (i = 0; i < 64; i++) { 4599 if (! ((uint64_t)(1ULL << i) & lcoremask)) 4600 continue; 4601 lcorelist[nb_lc++] = i; 4602 } 4603 return set_fwd_lcores_list(lcorelist, nb_lc); 4604 } 4605 4606 void 4607 set_fwd_lcores_number(uint16_t nb_lc) 4608 { 4609 if (test_done == 0) { 4610 fprintf(stderr, "Please stop forwarding first\n"); 4611 return; 4612 } 4613 if (nb_lc > nb_cfg_lcores) { 4614 fprintf(stderr, 4615 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 4616 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 4617 return; 4618 } 4619 nb_fwd_lcores = (lcoreid_t) nb_lc; 4620 printf("Number of forwarding cores set to %u\n", 4621 (unsigned int) nb_fwd_lcores); 4622 } 4623 4624 void 4625 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 4626 { 4627 unsigned int i; 4628 portid_t port_id; 4629 int record_now; 4630 4631 record_now = 0; 4632 again: 4633 for (i = 0; i < nb_pt; i++) { 4634 port_id = (portid_t) portlist[i]; 4635 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4636 return; 4637 if (record_now) 4638 fwd_ports_ids[i] = port_id; 4639 } 4640 if (record_now == 0) { 4641 record_now = 1; 4642 goto again; 4643 } 4644 nb_cfg_ports = (portid_t) nb_pt; 4645 if (nb_fwd_ports != (portid_t) nb_pt) { 4646 printf("previous number of forwarding ports %u - changed to " 4647 "number of configured ports %u\n", 4648 (unsigned int) nb_fwd_ports, nb_pt); 4649 nb_fwd_ports = (portid_t) nb_pt; 4650 } 4651 } 4652 4653 /** 4654 * Parse the user input and obtain the list of forwarding ports 4655 * 4656 * @param[in] list 4657 * String containing the user input. User can specify 4658 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 4659 * For example, if the user wants to use all the available 4660 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 4661 * If the user wants to use only the ports 1,2 then the input 4662 * is 1,2. 4663 * valid characters are '-' and ',' 4664 * @param[out] values 4665 * This array will be filled with a list of port IDs 4666 * based on the user input 4667 * Note that duplicate entries are discarded and only the first 4668 * count entries in this array are port IDs and all the rest 4669 * will contain default values 4670 * @param[in] maxsize 4671 * This parameter denotes 2 things 4672 * 1) Number of elements in the values array 4673 * 2) Maximum value of each element in the values array 4674 * @return 4675 * On success, returns total count of parsed port IDs 4676 * On failure, returns 0 4677 */ 4678 static unsigned int 4679 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 4680 { 4681 unsigned int count = 0; 4682 char *end = NULL; 4683 int min, max; 4684 int value, i; 4685 unsigned int marked[maxsize]; 4686 4687 if (list == NULL || values == NULL) 4688 return 0; 4689 4690 for (i = 0; i < (int)maxsize; i++) 4691 marked[i] = 0; 4692 4693 min = INT_MAX; 4694 4695 do { 4696 /*Remove the blank spaces if any*/ 4697 while (isblank(*list)) 4698 list++; 4699 if (*list == '\0') 4700 break; 4701 errno = 0; 4702 value = strtol(list, &end, 10); 4703 if (errno || end == NULL) 4704 return 0; 4705 if (value < 0 || value >= (int)maxsize) 4706 return 0; 4707 while (isblank(*end)) 4708 end++; 4709 if (*end == '-' && min == INT_MAX) { 4710 min = value; 4711 } else if ((*end == ',') || (*end == '\0')) { 4712 max = value; 4713 if (min == INT_MAX) 4714 min = value; 4715 for (i = min; i <= max; i++) { 4716 if (count < maxsize) { 4717 if (marked[i]) 4718 continue; 4719 values[count] = i; 4720 marked[i] = 1; 4721 count++; 4722 } 4723 } 4724 min = INT_MAX; 4725 } else 4726 return 0; 4727 list = end + 1; 4728 } while (*end != '\0'); 4729 4730 return count; 4731 } 4732 4733 void 4734 parse_fwd_portlist(const char *portlist) 4735 { 4736 unsigned int portcount; 4737 unsigned int portindex[RTE_MAX_ETHPORTS]; 4738 unsigned int i, valid_port_count = 0; 4739 4740 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 4741 if (!portcount) 4742 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 4743 4744 /* 4745 * Here we verify the validity of the ports 4746 * and thereby calculate the total number of 4747 * valid ports 4748 */ 4749 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 4750 if (rte_eth_dev_is_valid_port(portindex[i])) { 4751 portindex[valid_port_count] = portindex[i]; 4752 valid_port_count++; 4753 } 4754 } 4755 4756 set_fwd_ports_list(portindex, valid_port_count); 4757 } 4758 4759 void 4760 set_fwd_ports_mask(uint64_t portmask) 4761 { 4762 unsigned int portlist[64]; 4763 unsigned int nb_pt; 4764 unsigned int i; 4765 4766 if (portmask == 0) { 4767 fprintf(stderr, "Invalid NULL mask of ports\n"); 4768 return; 4769 } 4770 nb_pt = 0; 4771 RTE_ETH_FOREACH_DEV(i) { 4772 if (! ((uint64_t)(1ULL << i) & portmask)) 4773 continue; 4774 portlist[nb_pt++] = i; 4775 } 4776 set_fwd_ports_list(portlist, nb_pt); 4777 } 4778 4779 void 4780 set_fwd_ports_number(uint16_t nb_pt) 4781 { 4782 if (nb_pt > nb_cfg_ports) { 4783 fprintf(stderr, 4784 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 4785 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 4786 return; 4787 } 4788 nb_fwd_ports = (portid_t) nb_pt; 4789 printf("Number of forwarding ports set to %u\n", 4790 (unsigned int) nb_fwd_ports); 4791 } 4792 4793 int 4794 port_is_forwarding(portid_t port_id) 4795 { 4796 unsigned int i; 4797 4798 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4799 return -1; 4800 4801 for (i = 0; i < nb_fwd_ports; i++) { 4802 if (fwd_ports_ids[i] == port_id) 4803 return 1; 4804 } 4805 4806 return 0; 4807 } 4808 4809 void 4810 set_nb_pkt_per_burst(uint16_t nb) 4811 { 4812 if (nb > MAX_PKT_BURST) { 4813 fprintf(stderr, 4814 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 4815 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 4816 return; 4817 } 4818 nb_pkt_per_burst = nb; 4819 printf("Number of packets per burst set to %u\n", 4820 (unsigned int) nb_pkt_per_burst); 4821 } 4822 4823 static const char * 4824 tx_split_get_name(enum tx_pkt_split split) 4825 { 4826 uint32_t i; 4827 4828 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4829 if (tx_split_name[i].split == split) 4830 return tx_split_name[i].name; 4831 } 4832 return NULL; 4833 } 4834 4835 void 4836 set_tx_pkt_split(const char *name) 4837 { 4838 uint32_t i; 4839 4840 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4841 if (strcmp(tx_split_name[i].name, name) == 0) { 4842 tx_pkt_split = tx_split_name[i].split; 4843 return; 4844 } 4845 } 4846 fprintf(stderr, "unknown value: \"%s\"\n", name); 4847 } 4848 4849 int 4850 parse_fec_mode(const char *name, uint32_t *fec_capa) 4851 { 4852 uint8_t i; 4853 4854 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 4855 if (strcmp(fec_mode_name[i].name, name) == 0) { 4856 *fec_capa = 4857 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 4858 return 0; 4859 } 4860 } 4861 return -1; 4862 } 4863 4864 void 4865 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 4866 { 4867 unsigned int i, j; 4868 4869 printf("FEC capabilities:\n"); 4870 4871 for (i = 0; i < num; i++) { 4872 printf("%s : ", 4873 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 4874 4875 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 4876 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 4877 speed_fec_capa[i].capa) 4878 printf("%s ", fec_mode_name[j].name); 4879 } 4880 printf("\n"); 4881 } 4882 } 4883 4884 void 4885 show_rx_pkt_offsets(void) 4886 { 4887 uint32_t i, n; 4888 4889 n = rx_pkt_nb_offs; 4890 printf("Number of offsets: %u\n", n); 4891 if (n) { 4892 printf("Segment offsets: "); 4893 for (i = 0; i != n - 1; i++) 4894 printf("%hu,", rx_pkt_seg_offsets[i]); 4895 printf("%hu\n", rx_pkt_seg_lengths[i]); 4896 } 4897 } 4898 4899 void 4900 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 4901 { 4902 unsigned int i; 4903 4904 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 4905 printf("nb segments per RX packets=%u >= " 4906 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 4907 return; 4908 } 4909 4910 /* 4911 * No extra check here, the segment length will be checked by PMD 4912 * in the extended queue setup. 4913 */ 4914 for (i = 0; i < nb_offs; i++) { 4915 if (seg_offsets[i] >= UINT16_MAX) { 4916 printf("offset[%u]=%u > UINT16_MAX - give up\n", 4917 i, seg_offsets[i]); 4918 return; 4919 } 4920 } 4921 4922 for (i = 0; i < nb_offs; i++) 4923 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 4924 4925 rx_pkt_nb_offs = (uint8_t) nb_offs; 4926 } 4927 4928 void 4929 show_rx_pkt_segments(void) 4930 { 4931 uint32_t i, n; 4932 4933 n = rx_pkt_nb_segs; 4934 printf("Number of segments: %u\n", n); 4935 if (n) { 4936 printf("Segment sizes: "); 4937 for (i = 0; i != n - 1; i++) 4938 printf("%hu,", rx_pkt_seg_lengths[i]); 4939 printf("%hu\n", rx_pkt_seg_lengths[i]); 4940 } 4941 } 4942 4943 void 4944 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4945 { 4946 unsigned int i; 4947 4948 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 4949 printf("nb segments per RX packets=%u >= " 4950 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 4951 return; 4952 } 4953 4954 /* 4955 * No extra check here, the segment length will be checked by PMD 4956 * in the extended queue setup. 4957 */ 4958 for (i = 0; i < nb_segs; i++) { 4959 if (seg_lengths[i] >= UINT16_MAX) { 4960 printf("length[%u]=%u > UINT16_MAX - give up\n", 4961 i, seg_lengths[i]); 4962 return; 4963 } 4964 } 4965 4966 for (i = 0; i < nb_segs; i++) 4967 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4968 4969 rx_pkt_nb_segs = (uint8_t) nb_segs; 4970 } 4971 4972 void 4973 show_tx_pkt_segments(void) 4974 { 4975 uint32_t i, n; 4976 const char *split; 4977 4978 n = tx_pkt_nb_segs; 4979 split = tx_split_get_name(tx_pkt_split); 4980 4981 printf("Number of segments: %u\n", n); 4982 printf("Segment sizes: "); 4983 for (i = 0; i != n - 1; i++) 4984 printf("%hu,", tx_pkt_seg_lengths[i]); 4985 printf("%hu\n", tx_pkt_seg_lengths[i]); 4986 printf("Split packet: %s\n", split); 4987 } 4988 4989 static bool 4990 nb_segs_is_invalid(unsigned int nb_segs) 4991 { 4992 uint16_t ring_size; 4993 uint16_t queue_id; 4994 uint16_t port_id; 4995 int ret; 4996 4997 RTE_ETH_FOREACH_DEV(port_id) { 4998 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 4999 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 5000 if (ret) { 5001 /* Port may not be initialized yet, can't say 5002 * the port is invalid in this stage. 5003 */ 5004 continue; 5005 } 5006 if (ring_size < nb_segs) { 5007 printf("nb segments per TX packets=%u >= TX " 5008 "queue(%u) ring_size=%u - txpkts ignored\n", 5009 nb_segs, queue_id, ring_size); 5010 return true; 5011 } 5012 } 5013 } 5014 5015 return false; 5016 } 5017 5018 void 5019 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 5020 { 5021 uint16_t tx_pkt_len; 5022 unsigned int i; 5023 5024 /* 5025 * For single segment settings failed check is ignored. 5026 * It is a very basic capability to send the single segment 5027 * packets, suppose it is always supported. 5028 */ 5029 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 5030 fprintf(stderr, 5031 "Tx segment size(%u) is not supported - txpkts ignored\n", 5032 nb_segs); 5033 return; 5034 } 5035 5036 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 5037 fprintf(stderr, 5038 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 5039 nb_segs, RTE_MAX_SEGS_PER_PKT); 5040 return; 5041 } 5042 5043 /* 5044 * Check that each segment length is greater or equal than 5045 * the mbuf data size. 5046 * Check also that the total packet length is greater or equal than the 5047 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 5048 * 20 + 8). 5049 */ 5050 tx_pkt_len = 0; 5051 for (i = 0; i < nb_segs; i++) { 5052 if (seg_lengths[i] > mbuf_data_size[0]) { 5053 fprintf(stderr, 5054 "length[%u]=%u > mbuf_data_size=%u - give up\n", 5055 i, seg_lengths[i], mbuf_data_size[0]); 5056 return; 5057 } 5058 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 5059 } 5060 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 5061 fprintf(stderr, "total packet length=%u < %d - give up\n", 5062 (unsigned) tx_pkt_len, 5063 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 5064 return; 5065 } 5066 5067 for (i = 0; i < nb_segs; i++) 5068 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 5069 5070 tx_pkt_length = tx_pkt_len; 5071 tx_pkt_nb_segs = (uint8_t) nb_segs; 5072 } 5073 5074 void 5075 show_tx_pkt_times(void) 5076 { 5077 printf("Interburst gap: %u\n", tx_pkt_times_inter); 5078 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 5079 } 5080 5081 void 5082 set_tx_pkt_times(unsigned int *tx_times) 5083 { 5084 tx_pkt_times_inter = tx_times[0]; 5085 tx_pkt_times_intra = tx_times[1]; 5086 } 5087 5088 #ifdef RTE_LIB_GRO 5089 void 5090 setup_gro(const char *onoff, portid_t port_id) 5091 { 5092 if (!rte_eth_dev_is_valid_port(port_id)) { 5093 fprintf(stderr, "invalid port id %u\n", port_id); 5094 return; 5095 } 5096 if (test_done == 0) { 5097 fprintf(stderr, 5098 "Before enable/disable GRO, please stop forwarding first\n"); 5099 return; 5100 } 5101 if (strcmp(onoff, "on") == 0) { 5102 if (gro_ports[port_id].enable != 0) { 5103 fprintf(stderr, 5104 "Port %u has enabled GRO. Please disable GRO first\n", 5105 port_id); 5106 return; 5107 } 5108 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5109 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 5110 gro_ports[port_id].param.max_flow_num = 5111 GRO_DEFAULT_FLOW_NUM; 5112 gro_ports[port_id].param.max_item_per_flow = 5113 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 5114 } 5115 gro_ports[port_id].enable = 1; 5116 } else { 5117 if (gro_ports[port_id].enable == 0) { 5118 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 5119 return; 5120 } 5121 gro_ports[port_id].enable = 0; 5122 } 5123 } 5124 5125 void 5126 setup_gro_flush_cycles(uint8_t cycles) 5127 { 5128 if (test_done == 0) { 5129 fprintf(stderr, 5130 "Before change flush interval for GRO, please stop forwarding first.\n"); 5131 return; 5132 } 5133 5134 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 5135 GRO_DEFAULT_FLUSH_CYCLES) { 5136 fprintf(stderr, 5137 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 5138 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 5139 cycles = GRO_DEFAULT_FLUSH_CYCLES; 5140 } 5141 5142 gro_flush_cycles = cycles; 5143 } 5144 5145 void 5146 show_gro(portid_t port_id) 5147 { 5148 struct rte_gro_param *param; 5149 uint32_t max_pkts_num; 5150 5151 param = &gro_ports[port_id].param; 5152 5153 if (!rte_eth_dev_is_valid_port(port_id)) { 5154 fprintf(stderr, "Invalid port id %u.\n", port_id); 5155 return; 5156 } 5157 if (gro_ports[port_id].enable) { 5158 printf("GRO type: TCP/IPv4\n"); 5159 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 5160 max_pkts_num = param->max_flow_num * 5161 param->max_item_per_flow; 5162 } else 5163 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 5164 printf("Max number of packets to perform GRO: %u\n", 5165 max_pkts_num); 5166 printf("Flushing cycles: %u\n", gro_flush_cycles); 5167 } else 5168 printf("Port %u doesn't enable GRO.\n", port_id); 5169 } 5170 #endif /* RTE_LIB_GRO */ 5171 5172 #ifdef RTE_LIB_GSO 5173 void 5174 setup_gso(const char *mode, portid_t port_id) 5175 { 5176 if (!rte_eth_dev_is_valid_port(port_id)) { 5177 fprintf(stderr, "invalid port id %u\n", port_id); 5178 return; 5179 } 5180 if (strcmp(mode, "on") == 0) { 5181 if (test_done == 0) { 5182 fprintf(stderr, 5183 "before enabling GSO, please stop forwarding first\n"); 5184 return; 5185 } 5186 gso_ports[port_id].enable = 1; 5187 } else if (strcmp(mode, "off") == 0) { 5188 if (test_done == 0) { 5189 fprintf(stderr, 5190 "before disabling GSO, please stop forwarding first\n"); 5191 return; 5192 } 5193 gso_ports[port_id].enable = 0; 5194 } 5195 } 5196 #endif /* RTE_LIB_GSO */ 5197 5198 char* 5199 list_pkt_forwarding_modes(void) 5200 { 5201 static char fwd_modes[128] = ""; 5202 const char *separator = "|"; 5203 struct fwd_engine *fwd_eng; 5204 unsigned i = 0; 5205 5206 if (strlen (fwd_modes) == 0) { 5207 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5208 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5209 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5210 strncat(fwd_modes, separator, 5211 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5212 } 5213 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5214 } 5215 5216 return fwd_modes; 5217 } 5218 5219 char* 5220 list_pkt_forwarding_retry_modes(void) 5221 { 5222 static char fwd_modes[128] = ""; 5223 const char *separator = "|"; 5224 struct fwd_engine *fwd_eng; 5225 unsigned i = 0; 5226 5227 if (strlen(fwd_modes) == 0) { 5228 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5229 if (fwd_eng == &rx_only_engine) 5230 continue; 5231 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5232 sizeof(fwd_modes) - 5233 strlen(fwd_modes) - 1); 5234 strncat(fwd_modes, separator, 5235 sizeof(fwd_modes) - 5236 strlen(fwd_modes) - 1); 5237 } 5238 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5239 } 5240 5241 return fwd_modes; 5242 } 5243 5244 void 5245 set_pkt_forwarding_mode(const char *fwd_mode_name) 5246 { 5247 struct fwd_engine *fwd_eng; 5248 unsigned i; 5249 5250 i = 0; 5251 while ((fwd_eng = fwd_engines[i]) != NULL) { 5252 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5253 printf("Set %s packet forwarding mode%s\n", 5254 fwd_mode_name, 5255 retry_enabled == 0 ? "" : " with retry"); 5256 cur_fwd_eng = fwd_eng; 5257 return; 5258 } 5259 i++; 5260 } 5261 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5262 } 5263 5264 void 5265 add_rx_dump_callbacks(portid_t portid) 5266 { 5267 struct rte_eth_dev_info dev_info; 5268 uint16_t queue; 5269 int ret; 5270 5271 if (port_id_is_invalid(portid, ENABLED_WARN)) 5272 return; 5273 5274 ret = eth_dev_info_get_print_err(portid, &dev_info); 5275 if (ret != 0) 5276 return; 5277 5278 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5279 if (!ports[portid].rx_dump_cb[queue]) 5280 ports[portid].rx_dump_cb[queue] = 5281 rte_eth_add_rx_callback(portid, queue, 5282 dump_rx_pkts, NULL); 5283 } 5284 5285 void 5286 add_tx_dump_callbacks(portid_t portid) 5287 { 5288 struct rte_eth_dev_info dev_info; 5289 uint16_t queue; 5290 int ret; 5291 5292 if (port_id_is_invalid(portid, ENABLED_WARN)) 5293 return; 5294 5295 ret = eth_dev_info_get_print_err(portid, &dev_info); 5296 if (ret != 0) 5297 return; 5298 5299 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5300 if (!ports[portid].tx_dump_cb[queue]) 5301 ports[portid].tx_dump_cb[queue] = 5302 rte_eth_add_tx_callback(portid, queue, 5303 dump_tx_pkts, NULL); 5304 } 5305 5306 void 5307 remove_rx_dump_callbacks(portid_t portid) 5308 { 5309 struct rte_eth_dev_info dev_info; 5310 uint16_t queue; 5311 int ret; 5312 5313 if (port_id_is_invalid(portid, ENABLED_WARN)) 5314 return; 5315 5316 ret = eth_dev_info_get_print_err(portid, &dev_info); 5317 if (ret != 0) 5318 return; 5319 5320 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5321 if (ports[portid].rx_dump_cb[queue]) { 5322 rte_eth_remove_rx_callback(portid, queue, 5323 ports[portid].rx_dump_cb[queue]); 5324 ports[portid].rx_dump_cb[queue] = NULL; 5325 } 5326 } 5327 5328 void 5329 remove_tx_dump_callbacks(portid_t portid) 5330 { 5331 struct rte_eth_dev_info dev_info; 5332 uint16_t queue; 5333 int ret; 5334 5335 if (port_id_is_invalid(portid, ENABLED_WARN)) 5336 return; 5337 5338 ret = eth_dev_info_get_print_err(portid, &dev_info); 5339 if (ret != 0) 5340 return; 5341 5342 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5343 if (ports[portid].tx_dump_cb[queue]) { 5344 rte_eth_remove_tx_callback(portid, queue, 5345 ports[portid].tx_dump_cb[queue]); 5346 ports[portid].tx_dump_cb[queue] = NULL; 5347 } 5348 } 5349 5350 void 5351 configure_rxtx_dump_callbacks(uint16_t verbose) 5352 { 5353 portid_t portid; 5354 5355 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5356 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 5357 return; 5358 #endif 5359 5360 RTE_ETH_FOREACH_DEV(portid) 5361 { 5362 if (verbose == 1 || verbose > 2) 5363 add_rx_dump_callbacks(portid); 5364 else 5365 remove_rx_dump_callbacks(portid); 5366 if (verbose >= 2) 5367 add_tx_dump_callbacks(portid); 5368 else 5369 remove_tx_dump_callbacks(portid); 5370 } 5371 } 5372 5373 void 5374 set_verbose_level(uint16_t vb_level) 5375 { 5376 printf("Change verbose level from %u to %u\n", 5377 (unsigned int) verbose_level, (unsigned int) vb_level); 5378 verbose_level = vb_level; 5379 configure_rxtx_dump_callbacks(verbose_level); 5380 } 5381 5382 void 5383 vlan_extend_set(portid_t port_id, int on) 5384 { 5385 int diag; 5386 int vlan_offload; 5387 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5388 5389 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5390 return; 5391 5392 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5393 5394 if (on) { 5395 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 5396 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5397 } else { 5398 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 5399 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5400 } 5401 5402 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5403 if (diag < 0) { 5404 fprintf(stderr, 5405 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 5406 port_id, on, diag); 5407 return; 5408 } 5409 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5410 } 5411 5412 void 5413 rx_vlan_strip_set(portid_t port_id, int on) 5414 { 5415 int diag; 5416 int vlan_offload; 5417 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5418 5419 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5420 return; 5421 5422 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5423 5424 if (on) { 5425 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 5426 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5427 } else { 5428 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 5429 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5430 } 5431 5432 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5433 if (diag < 0) { 5434 fprintf(stderr, 5435 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5436 __func__, port_id, on, diag); 5437 return; 5438 } 5439 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5440 } 5441 5442 void 5443 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 5444 { 5445 int diag; 5446 5447 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5448 return; 5449 5450 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 5451 if (diag < 0) 5452 fprintf(stderr, 5453 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 5454 __func__, port_id, queue_id, on, diag); 5455 } 5456 5457 void 5458 rx_vlan_filter_set(portid_t port_id, int on) 5459 { 5460 int diag; 5461 int vlan_offload; 5462 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5463 5464 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5465 return; 5466 5467 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5468 5469 if (on) { 5470 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 5471 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5472 } else { 5473 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 5474 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5475 } 5476 5477 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5478 if (diag < 0) { 5479 fprintf(stderr, 5480 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5481 __func__, port_id, on, diag); 5482 return; 5483 } 5484 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5485 } 5486 5487 void 5488 rx_vlan_qinq_strip_set(portid_t port_id, int on) 5489 { 5490 int diag; 5491 int vlan_offload; 5492 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5493 5494 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5495 return; 5496 5497 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5498 5499 if (on) { 5500 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 5501 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5502 } else { 5503 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 5504 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5505 } 5506 5507 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5508 if (diag < 0) { 5509 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 5510 __func__, port_id, on, diag); 5511 return; 5512 } 5513 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5514 } 5515 5516 int 5517 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 5518 { 5519 int diag; 5520 5521 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5522 return 1; 5523 if (vlan_id_is_invalid(vlan_id)) 5524 return 1; 5525 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 5526 if (diag == 0) 5527 return 0; 5528 fprintf(stderr, 5529 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 5530 port_id, vlan_id, on, diag); 5531 return -1; 5532 } 5533 5534 void 5535 rx_vlan_all_filter_set(portid_t port_id, int on) 5536 { 5537 uint16_t vlan_id; 5538 5539 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5540 return; 5541 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 5542 if (rx_vft_set(port_id, vlan_id, on)) 5543 break; 5544 } 5545 } 5546 5547 void 5548 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 5549 { 5550 int diag; 5551 5552 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5553 return; 5554 5555 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 5556 if (diag == 0) 5557 return; 5558 5559 fprintf(stderr, 5560 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 5561 port_id, vlan_type, tp_id, diag); 5562 } 5563 5564 void 5565 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 5566 { 5567 struct rte_eth_dev_info dev_info; 5568 int ret; 5569 5570 if (vlan_id_is_invalid(vlan_id)) 5571 return; 5572 5573 if (ports[port_id].dev_conf.txmode.offloads & 5574 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 5575 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 5576 return; 5577 } 5578 5579 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5580 if (ret != 0) 5581 return; 5582 5583 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 5584 fprintf(stderr, 5585 "Error: vlan insert is not supported by port %d\n", 5586 port_id); 5587 return; 5588 } 5589 5590 tx_vlan_reset(port_id); 5591 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 5592 ports[port_id].tx_vlan_id = vlan_id; 5593 } 5594 5595 void 5596 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 5597 { 5598 struct rte_eth_dev_info dev_info; 5599 int ret; 5600 5601 if (vlan_id_is_invalid(vlan_id)) 5602 return; 5603 if (vlan_id_is_invalid(vlan_id_outer)) 5604 return; 5605 5606 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5607 if (ret != 0) 5608 return; 5609 5610 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 5611 fprintf(stderr, 5612 "Error: qinq insert not supported by port %d\n", 5613 port_id); 5614 return; 5615 } 5616 5617 tx_vlan_reset(port_id); 5618 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5619 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5620 ports[port_id].tx_vlan_id = vlan_id; 5621 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 5622 } 5623 5624 void 5625 tx_vlan_reset(portid_t port_id) 5626 { 5627 ports[port_id].dev_conf.txmode.offloads &= 5628 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5629 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5630 ports[port_id].tx_vlan_id = 0; 5631 ports[port_id].tx_vlan_id_outer = 0; 5632 } 5633 5634 void 5635 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 5636 { 5637 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5638 return; 5639 5640 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 5641 } 5642 5643 void 5644 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 5645 { 5646 int ret; 5647 5648 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5649 return; 5650 5651 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 5652 return; 5653 5654 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 5655 fprintf(stderr, "map_value not in required range 0..%d\n", 5656 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 5657 return; 5658 } 5659 5660 if (!is_rx) { /* tx */ 5661 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 5662 map_value); 5663 if (ret) { 5664 fprintf(stderr, 5665 "failed to set tx queue stats mapping.\n"); 5666 return; 5667 } 5668 } else { /* rx */ 5669 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 5670 map_value); 5671 if (ret) { 5672 fprintf(stderr, 5673 "failed to set rx queue stats mapping.\n"); 5674 return; 5675 } 5676 } 5677 } 5678 5679 void 5680 set_xstats_hide_zero(uint8_t on_off) 5681 { 5682 xstats_hide_zero = on_off; 5683 } 5684 5685 void 5686 set_record_core_cycles(uint8_t on_off) 5687 { 5688 record_core_cycles = on_off; 5689 } 5690 5691 void 5692 set_record_burst_stats(uint8_t on_off) 5693 { 5694 record_burst_stats = on_off; 5695 } 5696 5697 uint16_t 5698 str_to_flowtype(const char *string) 5699 { 5700 uint8_t i; 5701 5702 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 5703 if (!strcmp(flowtype_str_table[i].str, string)) 5704 return flowtype_str_table[i].ftype; 5705 } 5706 5707 if (isdigit(string[0])) { 5708 int val = atoi(string); 5709 if (val > 0 && val < 64) 5710 return (uint16_t)val; 5711 } 5712 5713 return RTE_ETH_FLOW_UNKNOWN; 5714 } 5715 5716 const char* 5717 flowtype_to_str(uint16_t flow_type) 5718 { 5719 uint8_t i; 5720 5721 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 5722 if (flowtype_str_table[i].ftype == flow_type) 5723 return flowtype_str_table[i].str; 5724 } 5725 5726 return NULL; 5727 } 5728 5729 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 5730 5731 static inline void 5732 print_fdir_mask(struct rte_eth_fdir_masks *mask) 5733 { 5734 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 5735 5736 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 5737 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 5738 " tunnel_id: 0x%08x", 5739 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 5740 rte_be_to_cpu_32(mask->tunnel_id_mask)); 5741 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5742 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 5743 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 5744 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 5745 5746 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 5747 rte_be_to_cpu_16(mask->src_port_mask), 5748 rte_be_to_cpu_16(mask->dst_port_mask)); 5749 5750 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 5751 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 5752 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 5753 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 5754 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 5755 5756 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 5757 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 5758 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 5759 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 5760 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 5761 } 5762 5763 printf("\n"); 5764 } 5765 5766 static inline void 5767 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5768 { 5769 struct rte_eth_flex_payload_cfg *cfg; 5770 uint32_t i, j; 5771 5772 for (i = 0; i < flex_conf->nb_payloads; i++) { 5773 cfg = &flex_conf->flex_set[i]; 5774 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 5775 printf("\n RAW: "); 5776 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 5777 printf("\n L2_PAYLOAD: "); 5778 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 5779 printf("\n L3_PAYLOAD: "); 5780 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 5781 printf("\n L4_PAYLOAD: "); 5782 else 5783 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 5784 for (j = 0; j < num; j++) 5785 printf(" %-5u", cfg->src_offset[j]); 5786 } 5787 printf("\n"); 5788 } 5789 5790 static inline void 5791 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5792 { 5793 struct rte_eth_fdir_flex_mask *mask; 5794 uint32_t i, j; 5795 const char *p; 5796 5797 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 5798 mask = &flex_conf->flex_mask[i]; 5799 p = flowtype_to_str(mask->flow_type); 5800 printf("\n %s:\t", p ? p : "unknown"); 5801 for (j = 0; j < num; j++) 5802 printf(" %02x", mask->mask[j]); 5803 } 5804 printf("\n"); 5805 } 5806 5807 static inline void 5808 print_fdir_flow_type(uint32_t flow_types_mask) 5809 { 5810 int i; 5811 const char *p; 5812 5813 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 5814 if (!(flow_types_mask & (1 << i))) 5815 continue; 5816 p = flowtype_to_str(i); 5817 if (p) 5818 printf(" %s", p); 5819 else 5820 printf(" unknown"); 5821 } 5822 printf("\n"); 5823 } 5824 5825 static int 5826 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 5827 struct rte_eth_fdir_stats *fdir_stat) 5828 { 5829 int ret = -ENOTSUP; 5830 5831 #ifdef RTE_NET_I40E 5832 if (ret == -ENOTSUP) { 5833 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 5834 if (!ret) 5835 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 5836 } 5837 #endif 5838 #ifdef RTE_NET_IXGBE 5839 if (ret == -ENOTSUP) { 5840 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 5841 if (!ret) 5842 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 5843 } 5844 #endif 5845 switch (ret) { 5846 case 0: 5847 break; 5848 case -ENOTSUP: 5849 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 5850 port_id); 5851 break; 5852 default: 5853 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 5854 break; 5855 } 5856 return ret; 5857 } 5858 5859 void 5860 fdir_get_infos(portid_t port_id) 5861 { 5862 struct rte_eth_fdir_stats fdir_stat; 5863 struct rte_eth_fdir_info fdir_info; 5864 5865 static const char *fdir_stats_border = "########################"; 5866 5867 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5868 return; 5869 5870 memset(&fdir_info, 0, sizeof(fdir_info)); 5871 memset(&fdir_stat, 0, sizeof(fdir_stat)); 5872 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 5873 return; 5874 5875 printf("\n %s FDIR infos for port %-2d %s\n", 5876 fdir_stats_border, port_id, fdir_stats_border); 5877 printf(" MODE: "); 5878 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 5879 printf(" PERFECT\n"); 5880 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 5881 printf(" PERFECT-MAC-VLAN\n"); 5882 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 5883 printf(" PERFECT-TUNNEL\n"); 5884 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 5885 printf(" SIGNATURE\n"); 5886 else 5887 printf(" DISABLE\n"); 5888 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 5889 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 5890 printf(" SUPPORTED FLOW TYPE: "); 5891 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 5892 } 5893 printf(" FLEX PAYLOAD INFO:\n"); 5894 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 5895 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 5896 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 5897 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 5898 fdir_info.flex_payload_unit, 5899 fdir_info.max_flex_payload_segment_num, 5900 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 5901 printf(" MASK: "); 5902 print_fdir_mask(&fdir_info.mask); 5903 if (fdir_info.flex_conf.nb_payloads > 0) { 5904 printf(" FLEX PAYLOAD SRC OFFSET:"); 5905 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5906 } 5907 if (fdir_info.flex_conf.nb_flexmasks > 0) { 5908 printf(" FLEX MASK CFG:"); 5909 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5910 } 5911 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 5912 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 5913 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 5914 fdir_info.guarant_spc, fdir_info.best_spc); 5915 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 5916 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 5917 " add: %-10"PRIu64" remove: %"PRIu64"\n" 5918 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 5919 fdir_stat.collision, fdir_stat.free, 5920 fdir_stat.maxhash, fdir_stat.maxlen, 5921 fdir_stat.add, fdir_stat.remove, 5922 fdir_stat.f_add, fdir_stat.f_remove); 5923 printf(" %s############################%s\n", 5924 fdir_stats_border, fdir_stats_border); 5925 } 5926 5927 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 5928 5929 void 5930 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 5931 { 5932 struct rte_port *port; 5933 struct rte_eth_fdir_flex_conf *flex_conf; 5934 int i, idx = 0; 5935 5936 port = &ports[port_id]; 5937 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5938 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 5939 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 5940 idx = i; 5941 break; 5942 } 5943 } 5944 if (i >= RTE_ETH_FLOW_MAX) { 5945 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 5946 idx = flex_conf->nb_flexmasks; 5947 flex_conf->nb_flexmasks++; 5948 } else { 5949 fprintf(stderr, 5950 "The flex mask table is full. Can not set flex mask for flow_type(%u).", 5951 cfg->flow_type); 5952 return; 5953 } 5954 } 5955 rte_memcpy(&flex_conf->flex_mask[idx], 5956 cfg, 5957 sizeof(struct rte_eth_fdir_flex_mask)); 5958 } 5959 5960 void 5961 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 5962 { 5963 struct rte_port *port; 5964 struct rte_eth_fdir_flex_conf *flex_conf; 5965 int i, idx = 0; 5966 5967 port = &ports[port_id]; 5968 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5969 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 5970 if (cfg->type == flex_conf->flex_set[i].type) { 5971 idx = i; 5972 break; 5973 } 5974 } 5975 if (i >= RTE_ETH_PAYLOAD_MAX) { 5976 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 5977 idx = flex_conf->nb_payloads; 5978 flex_conf->nb_payloads++; 5979 } else { 5980 fprintf(stderr, 5981 "The flex payload table is full. Can not set flex payload for type(%u).", 5982 cfg->type); 5983 return; 5984 } 5985 } 5986 rte_memcpy(&flex_conf->flex_set[idx], 5987 cfg, 5988 sizeof(struct rte_eth_flex_payload_cfg)); 5989 5990 } 5991 5992 void 5993 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 5994 { 5995 #ifdef RTE_NET_IXGBE 5996 int diag; 5997 5998 if (is_rx) 5999 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 6000 else 6001 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 6002 6003 if (diag == 0) 6004 return; 6005 fprintf(stderr, 6006 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 6007 is_rx ? "rx" : "tx", port_id, diag); 6008 return; 6009 #endif 6010 fprintf(stderr, "VF %s setting not supported for port %d\n", 6011 is_rx ? "Rx" : "Tx", port_id); 6012 RTE_SET_USED(vf); 6013 RTE_SET_USED(on); 6014 } 6015 6016 int 6017 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 6018 { 6019 int diag; 6020 struct rte_eth_link link; 6021 int ret; 6022 6023 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6024 return 1; 6025 ret = eth_link_get_nowait_print_err(port_id, &link); 6026 if (ret < 0) 6027 return 1; 6028 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 6029 rate > link.link_speed) { 6030 fprintf(stderr, 6031 "Invalid rate value:%u bigger than link speed: %u\n", 6032 rate, link.link_speed); 6033 return 1; 6034 } 6035 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 6036 if (diag == 0) 6037 return diag; 6038 fprintf(stderr, 6039 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 6040 port_id, diag); 6041 return diag; 6042 } 6043 6044 int 6045 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 6046 { 6047 int diag = -ENOTSUP; 6048 6049 RTE_SET_USED(vf); 6050 RTE_SET_USED(rate); 6051 RTE_SET_USED(q_msk); 6052 6053 #ifdef RTE_NET_IXGBE 6054 if (diag == -ENOTSUP) 6055 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 6056 q_msk); 6057 #endif 6058 #ifdef RTE_NET_BNXT 6059 if (diag == -ENOTSUP) 6060 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 6061 #endif 6062 if (diag == 0) 6063 return diag; 6064 6065 fprintf(stderr, 6066 "%s for port_id=%d failed diag=%d\n", 6067 __func__, port_id, diag); 6068 return diag; 6069 } 6070 6071 int 6072 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh) 6073 { 6074 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6075 return -EINVAL; 6076 6077 return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh); 6078 } 6079 6080 /* 6081 * Functions to manage the set of filtered Multicast MAC addresses. 6082 * 6083 * A pool of filtered multicast MAC addresses is associated with each port. 6084 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 6085 * The address of the pool and the number of valid multicast MAC addresses 6086 * recorded in the pool are stored in the fields "mc_addr_pool" and 6087 * "mc_addr_nb" of the "rte_port" data structure. 6088 * 6089 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 6090 * to be supplied a contiguous array of multicast MAC addresses. 6091 * To comply with this constraint, the set of multicast addresses recorded 6092 * into the pool are systematically compacted at the beginning of the pool. 6093 * Hence, when a multicast address is removed from the pool, all following 6094 * addresses, if any, are copied back to keep the set contiguous. 6095 */ 6096 #define MCAST_POOL_INC 32 6097 6098 static int 6099 mcast_addr_pool_extend(struct rte_port *port) 6100 { 6101 struct rte_ether_addr *mc_pool; 6102 size_t mc_pool_size; 6103 6104 /* 6105 * If a free entry is available at the end of the pool, just 6106 * increment the number of recorded multicast addresses. 6107 */ 6108 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 6109 port->mc_addr_nb++; 6110 return 0; 6111 } 6112 6113 /* 6114 * [re]allocate a pool with MCAST_POOL_INC more entries. 6115 * The previous test guarantees that port->mc_addr_nb is a multiple 6116 * of MCAST_POOL_INC. 6117 */ 6118 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 6119 MCAST_POOL_INC); 6120 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 6121 mc_pool_size); 6122 if (mc_pool == NULL) { 6123 fprintf(stderr, 6124 "allocation of pool of %u multicast addresses failed\n", 6125 port->mc_addr_nb + MCAST_POOL_INC); 6126 return -ENOMEM; 6127 } 6128 6129 port->mc_addr_pool = mc_pool; 6130 port->mc_addr_nb++; 6131 return 0; 6132 6133 } 6134 6135 static void 6136 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 6137 { 6138 if (mcast_addr_pool_extend(port) != 0) 6139 return; 6140 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 6141 } 6142 6143 static void 6144 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 6145 { 6146 port->mc_addr_nb--; 6147 if (addr_idx == port->mc_addr_nb) { 6148 /* No need to recompact the set of multicast addresses. */ 6149 if (port->mc_addr_nb == 0) { 6150 /* free the pool of multicast addresses. */ 6151 free(port->mc_addr_pool); 6152 port->mc_addr_pool = NULL; 6153 } 6154 return; 6155 } 6156 memmove(&port->mc_addr_pool[addr_idx], 6157 &port->mc_addr_pool[addr_idx + 1], 6158 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 6159 } 6160 6161 int 6162 mcast_addr_pool_destroy(portid_t port_id) 6163 { 6164 struct rte_port *port; 6165 6166 if (port_id_is_invalid(port_id, ENABLED_WARN) || 6167 port_id == (portid_t)RTE_PORT_ALL) 6168 return -EINVAL; 6169 port = &ports[port_id]; 6170 6171 if (port->mc_addr_nb != 0) { 6172 /* free the pool of multicast addresses. */ 6173 free(port->mc_addr_pool); 6174 port->mc_addr_pool = NULL; 6175 port->mc_addr_nb = 0; 6176 } 6177 return 0; 6178 } 6179 6180 static int 6181 eth_port_multicast_addr_list_set(portid_t port_id) 6182 { 6183 struct rte_port *port; 6184 int diag; 6185 6186 port = &ports[port_id]; 6187 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 6188 port->mc_addr_nb); 6189 if (diag < 0) 6190 fprintf(stderr, 6191 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 6192 port_id, port->mc_addr_nb, diag); 6193 6194 return diag; 6195 } 6196 6197 void 6198 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6199 { 6200 struct rte_port *port; 6201 uint32_t i; 6202 6203 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6204 return; 6205 6206 port = &ports[port_id]; 6207 6208 /* 6209 * Check that the added multicast MAC address is not already recorded 6210 * in the pool of multicast addresses. 6211 */ 6212 for (i = 0; i < port->mc_addr_nb; i++) { 6213 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6214 fprintf(stderr, 6215 "multicast address already filtered by port\n"); 6216 return; 6217 } 6218 } 6219 6220 mcast_addr_pool_append(port, mc_addr); 6221 if (eth_port_multicast_addr_list_set(port_id) < 0) 6222 /* Rollback on failure, remove the address from the pool */ 6223 mcast_addr_pool_remove(port, i); 6224 } 6225 6226 void 6227 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6228 { 6229 struct rte_port *port; 6230 uint32_t i; 6231 6232 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6233 return; 6234 6235 port = &ports[port_id]; 6236 6237 /* 6238 * Search the pool of multicast MAC addresses for the removed address. 6239 */ 6240 for (i = 0; i < port->mc_addr_nb; i++) { 6241 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6242 break; 6243 } 6244 if (i == port->mc_addr_nb) { 6245 fprintf(stderr, "multicast address not filtered by port %d\n", 6246 port_id); 6247 return; 6248 } 6249 6250 mcast_addr_pool_remove(port, i); 6251 if (eth_port_multicast_addr_list_set(port_id) < 0) 6252 /* Rollback on failure, add the address back into the pool */ 6253 mcast_addr_pool_append(port, mc_addr); 6254 } 6255 6256 void 6257 port_dcb_info_display(portid_t port_id) 6258 { 6259 struct rte_eth_dcb_info dcb_info; 6260 uint16_t i; 6261 int ret; 6262 static const char *border = "================"; 6263 6264 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6265 return; 6266 6267 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6268 if (ret) { 6269 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6270 port_id); 6271 return; 6272 } 6273 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6274 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6275 printf("\n TC : "); 6276 for (i = 0; i < dcb_info.nb_tcs; i++) 6277 printf("\t%4d", i); 6278 printf("\n Priority : "); 6279 for (i = 0; i < dcb_info.nb_tcs; i++) 6280 printf("\t%4d", dcb_info.prio_tc[i]); 6281 printf("\n BW percent :"); 6282 for (i = 0; i < dcb_info.nb_tcs; i++) 6283 printf("\t%4d%%", dcb_info.tc_bws[i]); 6284 printf("\n RXQ base : "); 6285 for (i = 0; i < dcb_info.nb_tcs; i++) 6286 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6287 printf("\n RXQ number :"); 6288 for (i = 0; i < dcb_info.nb_tcs; i++) 6289 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6290 printf("\n TXQ base : "); 6291 for (i = 0; i < dcb_info.nb_tcs; i++) 6292 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6293 printf("\n TXQ number :"); 6294 for (i = 0; i < dcb_info.nb_tcs; i++) 6295 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6296 printf("\n"); 6297 } 6298 6299 uint8_t * 6300 open_file(const char *file_path, uint32_t *size) 6301 { 6302 int fd = open(file_path, O_RDONLY); 6303 off_t pkg_size; 6304 uint8_t *buf = NULL; 6305 int ret = 0; 6306 struct stat st_buf; 6307 6308 if (size) 6309 *size = 0; 6310 6311 if (fd == -1) { 6312 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6313 return buf; 6314 } 6315 6316 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6317 close(fd); 6318 fprintf(stderr, "%s: File operations failed\n", __func__); 6319 return buf; 6320 } 6321 6322 pkg_size = st_buf.st_size; 6323 if (pkg_size < 0) { 6324 close(fd); 6325 fprintf(stderr, "%s: File operations failed\n", __func__); 6326 return buf; 6327 } 6328 6329 buf = (uint8_t *)malloc(pkg_size); 6330 if (!buf) { 6331 close(fd); 6332 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6333 return buf; 6334 } 6335 6336 ret = read(fd, buf, pkg_size); 6337 if (ret < 0) { 6338 close(fd); 6339 fprintf(stderr, "%s: File read operation failed\n", __func__); 6340 close_file(buf); 6341 return NULL; 6342 } 6343 6344 if (size) 6345 *size = pkg_size; 6346 6347 close(fd); 6348 6349 return buf; 6350 } 6351 6352 int 6353 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6354 { 6355 FILE *fh = fopen(file_path, "wb"); 6356 6357 if (fh == NULL) { 6358 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6359 return -1; 6360 } 6361 6362 if (fwrite(buf, 1, size, fh) != size) { 6363 fclose(fh); 6364 fprintf(stderr, "%s: File write operation failed\n", __func__); 6365 return -1; 6366 } 6367 6368 fclose(fh); 6369 6370 return 0; 6371 } 6372 6373 int 6374 close_file(uint8_t *buf) 6375 { 6376 if (buf) { 6377 free((void *)buf); 6378 return 0; 6379 } 6380 6381 return -1; 6382 } 6383 6384 void 6385 show_macs(portid_t port_id) 6386 { 6387 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6388 struct rte_eth_dev_info dev_info; 6389 int32_t i, rc, num_macs = 0; 6390 6391 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6392 return; 6393 6394 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6395 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6396 if (rc < 0) 6397 return; 6398 6399 for (i = 0; i < rc; i++) { 6400 6401 /* skip zero address */ 6402 if (rte_is_zero_ether_addr(&addr[i])) 6403 continue; 6404 6405 num_macs++; 6406 } 6407 6408 printf("Number of MAC address added: %d\n", num_macs); 6409 6410 for (i = 0; i < rc; i++) { 6411 6412 /* skip zero address */ 6413 if (rte_is_zero_ether_addr(&addr[i])) 6414 continue; 6415 6416 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6417 printf(" %s\n", buf); 6418 } 6419 } 6420 6421 void 6422 show_mcast_macs(portid_t port_id) 6423 { 6424 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6425 struct rte_ether_addr *addr; 6426 struct rte_port *port; 6427 uint32_t i; 6428 6429 port = &ports[port_id]; 6430 6431 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6432 6433 for (i = 0; i < port->mc_addr_nb; i++) { 6434 addr = &port->mc_addr_pool[i]; 6435 6436 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6437 printf(" %s\n", buf); 6438 } 6439 } 6440