/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <rte_hexdump.h>

#include "testpmd.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
    enum tx_pkt_split split;
    const char *name;
} tx_split_name[] = {
    {
        .split = TX_PKT_SPLIT_OFF,
        .name = "off",
    },
    {
        .split = TX_PKT_SPLIT_ON,
        .name = "on",
    },
    {
        .split = TX_PKT_SPLIT_RND,
        .name = "rand",
    },
};

const struct rss_type_info rss_type_table[] = {
    { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
        ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
        ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
        ETH_RSS_GTPU},
    { "none", 0 },
    { "eth", ETH_RSS_ETH },
    { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
    { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
    { "vlan", ETH_RSS_VLAN },
    { "s-vlan", ETH_RSS_S_VLAN },
    { "c-vlan", ETH_RSS_C_VLAN },
    { "ipv4", ETH_RSS_IPV4 },
    { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
    { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
    { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
    { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
    { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
    { "ipv6", ETH_RSS_IPV6 },
    { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
    { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
    { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
    { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
    { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
    { "l2-payload", ETH_RSS_L2_PAYLOAD },
    { "ipv6-ex", ETH_RSS_IPV6_EX },
    { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
    { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
    { "port", ETH_RSS_PORT },
    { "vxlan", ETH_RSS_VXLAN },
    { "geneve", ETH_RSS_GENEVE },
    { "nvgre", ETH_RSS_NVGRE },
    { "ip", ETH_RSS_IP },
    { "udp", ETH_RSS_UDP },
    { "tcp", ETH_RSS_TCP },
    { "sctp", ETH_RSS_SCTP },
    { "tunnel", ETH_RSS_TUNNEL },
    { "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
    { "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
    { "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
    { "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
    { "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
    { "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
    { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
    { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
    { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
    { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
    { "esp", ETH_RSS_ESP },
    { "ah", ETH_RSS_AH },
    { "l2tpv3", ETH_RSS_L2TPV3 },
    { "pfcp", ETH_RSS_PFCP },
    { "pppoe", ETH_RSS_PPPOE },
    { "gtpu", ETH_RSS_GTPU },
    { NULL, 0 },
};

static const struct {
    enum rte_eth_fec_mode mode;
    const char *name;
} fec_mode_name[] = {
    {
        .mode = RTE_ETH_FEC_NOFEC,
        .name = "off",
    },
    {
        .mode = RTE_ETH_FEC_AUTO,
        .name = "auto",
    },
    {
        .mode = RTE_ETH_FEC_BASER,
        .name = "baser",
    },
    {
        .mode = RTE_ETH_FEC_RS,
        .name = "rs",
    },
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
    char buf[RTE_ETHER_ADDR_FMT_SIZE];
    rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
    printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
    static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
    static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
    static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
    static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
    static uint64_t prev_ns[RTE_MAX_ETHPORTS];
    struct timespec cur_time;
    uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
            diff_ns;
    uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
    struct rte_eth_stats stats;
    struct rte_port *port = &ports[port_id];
    uint8_t i;

    static const char *nic_stats_border = "########################";

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }
    rte_eth_stats_get(port_id, &stats);
    printf("\n %s NIC statistics for port %-2d %s\n",
           nic_stats_border, port_id, nic_stats_border);

    if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
        printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
               "%-"PRIu64"\n",
               stats.ipackets, stats.imissed, stats.ibytes);
        printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
        printf(" RX-nombuf: %-10"PRIu64"\n",
               stats.rx_nombuf);
        printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
               "%-"PRIu64"\n",
               stats.opackets, stats.oerrors, stats.obytes);
    } else {
        printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
               " RX-bytes: %10"PRIu64"\n",
               stats.ipackets, stats.ierrors, stats.ibytes);
        printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
        printf(" RX-nombuf: %10"PRIu64"\n",
               stats.rx_nombuf);
        printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
               " TX-bytes: %10"PRIu64"\n",
               stats.opackets, stats.oerrors, stats.obytes);
    }

    if (port->rx_queue_stats_mapping_enabled) {
        printf("\n");
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
            printf(" Stats reg %2d RX-packets: %10"PRIu64
                   " RX-errors: %10"PRIu64
                   " RX-bytes: %10"PRIu64"\n",
                   i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
        }
    }
    if (port->tx_queue_stats_mapping_enabled) {
        printf("\n");
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
            printf(" Stats reg %2d TX-packets: %10"PRIu64
                   " TX-bytes: %10"PRIu64"\n",
                   i, stats.q_opackets[i], stats.q_obytes[i]);
        }
    }

    diff_ns = 0;
    if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
        uint64_t ns;

        ns = cur_time.tv_sec * NS_PER_SEC;
        ns += cur_time.tv_nsec;

        if (prev_ns[port_id] != 0)
            diff_ns = ns - prev_ns[port_id];
        prev_ns[port_id] = ns;
    }
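
    /*
     * The rates printed below are averaged over the interval since the
     * previous call: pps = delta_pkts / delta_ns * NS_PER_SEC. As a
     * hypothetical worked example, 1500000 packets received over 0.5 s
     * (5e8 ns) yields 3000000 Rx-pps; Rx-bps is derived from the byte
     * delta the same way, then multiplied by 8 at print time.
     */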

    diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
        (stats.ipackets - prev_pkts_rx[port_id]) : 0;
    diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
        (stats.opackets - prev_pkts_tx[port_id]) : 0;
    prev_pkts_rx[port_id] = stats.ipackets;
    prev_pkts_tx[port_id] = stats.opackets;
    mpps_rx = diff_ns > 0 ?
        (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
    mpps_tx = diff_ns > 0 ?
        (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

    diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
        (stats.ibytes - prev_bytes_rx[port_id]) : 0;
    diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
        (stats.obytes - prev_bytes_tx[port_id]) : 0;
    prev_bytes_rx[port_id] = stats.ibytes;
    prev_bytes_tx[port_id] = stats.obytes;
    mbps_rx = diff_ns > 0 ?
        (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
    mbps_tx = diff_ns > 0 ?
        (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

    printf("\n Throughput (since last show)\n");
    printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
           PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
           mpps_tx, mbps_tx * 8);

    printf(" %s############################%s\n",
           nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    ret = rte_eth_stats_reset(port_id);
    if (ret != 0) {
        printf("%s: Error: failed to reset stats (port %u): %s",
               __func__, port_id, strerror(-ret));
        return;
    }

    ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
    if (ret != 0) {
        if (ret < 0)
            ret = -ret;
        printf("%s: Error: failed to get stats (port %u): %s",
               __func__, port_id, strerror(ret));
        return;
    }
    printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
    struct rte_eth_xstat *xstats;
    int cnt_xstats, idx_xstat;
    struct rte_eth_xstat_name *xstats_names;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }
    printf("###### NIC extended statistics for port %-2d\n", port_id);
    if (!rte_eth_dev_is_valid_port(port_id)) {
        printf("Error: Invalid port number %i\n", port_id);
        return;
    }

    /* Get count */
    cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
    if (cnt_xstats < 0) {
        printf("Error: Cannot get count of xstats\n");
        return;
    }

    /* Get id-name lookup table */
    xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
    if (xstats_names == NULL) {
        printf("Cannot allocate memory for xstats lookup\n");
        return;
    }
    if (cnt_xstats != rte_eth_xstats_get_names(
            port_id, xstats_names, cnt_xstats)) {
        printf("Error: Cannot get xstats lookup\n");
        free(xstats_names);
        return;
    }

    /* Get stats themselves */
    xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
    if (xstats == NULL) {
        printf("Cannot allocate memory for xstats\n");
        free(xstats_names);
        return;
    }
    if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
        printf("Error: Unable to get xstats\n");
        free(xstats_names);
        free(xstats);
        return;
    }

    /* Display xstats */
    for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
        if (xstats_hide_zero && !xstats[idx_xstat].value)
            continue;
        printf("%s: %"PRIu64"\n",
               xstats_names[idx_xstat].name,
               xstats[idx_xstat].value);
    }
    free(xstats_names);
    free(xstats);
}
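
/*
 * nic_xstats_display() above follows the usual two-pass ethdev idiom:
 * rte_eth_xstats_get_names(port, NULL, 0) only returns the number of
 * entries, which sizes the buffers for the second pass. A minimal sketch
 * of the same pattern (error handling elided, hypothetical port "pid"):
 *
 *    int n = rte_eth_xstats_get_names(pid, NULL, 0);
 *    struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *    struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *    rte_eth_xstats_get_names(pid, names, n);
 *    rte_eth_xstats_get(pid, vals, n);
 */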

void
nic_xstats_clear(portid_t port_id)
{
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    ret = rte_eth_xstats_reset(port_id);
    if (ret != 0) {
        printf("%s: Error: failed to reset xstats (port %u): %s",
               __func__, port_id, strerror(-ret));
        return;
    }

    ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
    if (ret != 0) {
        if (ret < 0)
            ret = -ret;
        printf("%s: Error: failed to get stats (port %u): %s",
               __func__, port_id, strerror(ret));
        return;
    }
}

void
nic_stats_mapping_display(portid_t port_id)
{
    struct rte_port *port = &ports[port_id];
    uint16_t i;

    static const char *nic_stats_mapping_border = "########################";

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
        printf("Port id %d - either does not support queue statistic mapping or"
               " no queue statistic mapping set\n", port_id);
        return;
    }

    printf("\n %s NIC statistics mapping for port %-2d %s\n",
           nic_stats_mapping_border, port_id, nic_stats_mapping_border);

    if (port->rx_queue_stats_mapping_enabled) {
        for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
            if (rx_queue_stats_mappings[i].port_id == port_id) {
                printf(" RX-queue %2d mapped to Stats Reg %2d\n",
                       rx_queue_stats_mappings[i].queue_id,
                       rx_queue_stats_mappings[i].stats_counter_id);
            }
        }
        printf("\n");
    }

    if (port->tx_queue_stats_mapping_enabled) {
        for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
            if (tx_queue_stats_mappings[i].port_id == port_id) {
                printf(" TX-queue %2d mapped to Stats Reg %2d\n",
                       tx_queue_stats_mappings[i].queue_id,
                       tx_queue_stats_mappings[i].stats_counter_id);
            }
        }
    }

    printf(" %s####################################%s\n",
           nic_stats_mapping_border, nic_stats_mapping_border);
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
    struct rte_eth_burst_mode mode;
    struct rte_eth_rxq_info qinfo;
    int32_t rc;
    static const char *info_border = "*********************";

    rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
    if (rc != 0) {
        printf("Failed to retrieve information for port: %u, "
               "RX queue: %hu\nerror desc: %s(%d)\n",
               port_id, queue_id, strerror(-rc), rc);
        return;
    }

    printf("\n%s Infos for port %-2u, RX queue %-2u %s",
           info_border, port_id, queue_id, info_border);

    printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
    printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
    printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
    printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
    printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
    printf("\nRX drop packets: %s",
           (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
    printf("\nRX deferred start: %s",
           (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
    printf("\nRX scattered packets: %s",
           (qinfo.scattered_rx != 0) ? "on" : "off");
    if (qinfo.rx_buf_size != 0)
        printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
    printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

    if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
        printf("\nBurst mode: %s%s",
               mode.info,
               mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
                   " (per queue)" : "");

    printf("\n");
}
"on" : "off"); 478 if (qinfo.rx_buf_size != 0) 479 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 480 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 481 482 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 483 printf("\nBurst mode: %s%s", 484 mode.info, 485 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 486 " (per queue)" : ""); 487 488 printf("\n"); 489 } 490 491 void 492 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 493 { 494 struct rte_eth_burst_mode mode; 495 struct rte_eth_txq_info qinfo; 496 int32_t rc; 497 static const char *info_border = "*********************"; 498 499 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 500 if (rc != 0) { 501 printf("Failed to retrieve information for port: %u, " 502 "TX queue: %hu\nerror desc: %s(%d)\n", 503 port_id, queue_id, strerror(-rc), rc); 504 return; 505 } 506 507 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 508 info_border, port_id, queue_id, info_border); 509 510 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 511 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 512 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 513 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 514 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 515 printf("\nTX deferred start: %s", 516 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 517 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 518 519 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 520 printf("\nBurst mode: %s%s", 521 mode.info, 522 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 523 " (per queue)" : ""); 524 525 printf("\n"); 526 } 527 528 static int bus_match_all(const struct rte_bus *bus, const void *data) 529 { 530 RTE_SET_USED(bus); 531 RTE_SET_USED(data); 532 return 0; 533 } 534 535 static void 536 device_infos_display_speeds(uint32_t speed_capa) 537 { 538 printf("\n\tDevice speed capability:"); 539 if (speed_capa == ETH_LINK_SPEED_AUTONEG) 540 printf(" Autonegotiate (all speeds)"); 541 if (speed_capa & ETH_LINK_SPEED_FIXED) 542 printf(" Disable autonegotiate (fixed speed) "); 543 if (speed_capa & ETH_LINK_SPEED_10M_HD) 544 printf(" 10 Mbps half-duplex "); 545 if (speed_capa & ETH_LINK_SPEED_10M) 546 printf(" 10 Mbps full-duplex "); 547 if (speed_capa & ETH_LINK_SPEED_100M_HD) 548 printf(" 100 Mbps half-duplex "); 549 if (speed_capa & ETH_LINK_SPEED_100M) 550 printf(" 100 Mbps full-duplex "); 551 if (speed_capa & ETH_LINK_SPEED_1G) 552 printf(" 1 Gbps "); 553 if (speed_capa & ETH_LINK_SPEED_2_5G) 554 printf(" 2.5 Gbps "); 555 if (speed_capa & ETH_LINK_SPEED_5G) 556 printf(" 5 Gbps "); 557 if (speed_capa & ETH_LINK_SPEED_10G) 558 printf(" 10 Gbps "); 559 if (speed_capa & ETH_LINK_SPEED_20G) 560 printf(" 20 Gbps "); 561 if (speed_capa & ETH_LINK_SPEED_25G) 562 printf(" 25 Gbps "); 563 if (speed_capa & ETH_LINK_SPEED_40G) 564 printf(" 40 Gbps "); 565 if (speed_capa & ETH_LINK_SPEED_50G) 566 printf(" 50 Gbps "); 567 if (speed_capa & ETH_LINK_SPEED_56G) 568 printf(" 56 Gbps "); 569 if (speed_capa & ETH_LINK_SPEED_100G) 570 printf(" 100 Gbps "); 571 if (speed_capa & ETH_LINK_SPEED_200G) 572 printf(" 200 Gbps "); 573 } 574 575 void 576 device_infos_display(const char *identifier) 577 { 578 static const char *info_border = "*********************"; 579 struct rte_bus *start = NULL, *next; 580 struct rte_dev_iterator dev_iter; 581 char name[RTE_ETH_NAME_MAX_LEN]; 582 struct rte_ether_addr mac_addr; 583 struct rte_device *dev; 584 struct rte_devargs da; 585 portid_t 

void
device_infos_display(const char *identifier)
{
    static const char *info_border = "*********************";
    struct rte_bus *start = NULL, *next;
    struct rte_dev_iterator dev_iter;
    char name[RTE_ETH_NAME_MAX_LEN];
    struct rte_ether_addr mac_addr;
    struct rte_device *dev;
    struct rte_devargs da;
    portid_t port_id;
    struct rte_eth_dev_info dev_info;
    char devstr[128];

    memset(&da, 0, sizeof(da));
    if (!identifier)
        goto skip_parse;

    if (rte_devargs_parsef(&da, "%s", identifier)) {
        printf("cannot parse identifier\n");
        if (da.args)
            free(da.args);
        return;
    }

skip_parse:
    while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

        start = next;
        if (identifier && da.bus != next)
            continue;

        /* Skip buses that don't have iterate method */
        if (!next->dev_iterate)
            continue;

        snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
        RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

            if (!dev->driver)
                continue;
            /* Check for matching device if identifier is present */
            if (identifier &&
                strncmp(da.name, dev->name, strlen(dev->name)))
                continue;
            printf("\n%s Infos for device %s %s\n",
                   info_border, dev->name, info_border);
            printf("Bus name: %s", dev->bus->name);
            printf("\nDriver name: %s", dev->driver->name);
            printf("\nDevargs: %s",
                   dev->devargs ? dev->devargs->args : "");
            printf("\nConnect to socket: %d", dev->numa_node);
            printf("\n");

            /* List ports with matching device name */
            RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
                printf("\n\tPort id: %-2d", port_id);
                if (eth_macaddr_get_print_err(port_id,
                                              &mac_addr) == 0)
                    print_ethaddr("\n\tMAC address: ",
                                  &mac_addr);
                rte_eth_dev_get_name_by_port(port_id, name);
                printf("\n\tDevice name: %s", name);
                if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
                    device_infos_display_speeds(dev_info.speed_capa);
                printf("\n");
            }
        }
    }
}

void
port_infos_display(portid_t port_id)
{
    struct rte_port *port;
    struct rte_ether_addr mac_addr;
    struct rte_eth_link link;
    struct rte_eth_dev_info dev_info;
    int vlan_offload;
    struct rte_mempool *mp;
    static const char *info_border = "*********************";
    uint16_t mtu;
    char name[RTE_ETH_NAME_MAX_LEN];
    int ret;
    char fw_version[ETHDEV_FWVERS_LEN];

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }
    port = &ports[port_id];
    ret = eth_link_get_nowait_print_err(port_id, &link);
    if (ret < 0)
        return;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);
    if (ret != 0)
        return;

    printf("\n%s Infos for port %-2d %s\n",
           info_border, port_id, info_border);
    if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
        print_ethaddr("MAC address: ", &mac_addr);
    rte_eth_dev_get_name_by_port(port_id, name);
    printf("\nDevice name: %s", name);
    printf("\nDriver name: %s", dev_info.driver_name);

    if (rte_eth_dev_fw_version_get(port_id, fw_version,
                                   ETHDEV_FWVERS_LEN) == 0)
        printf("\nFirmware-version: %s", fw_version);
    else
        printf("\nFirmware-version: %s", "not available");

    if (dev_info.device->devargs && dev_info.device->devargs->args)
        printf("\nDevargs: %s", dev_info.device->devargs->args);
    printf("\nConnect to socket: %u", port->socket_id);

    if (port_numa[port_id] != NUMA_NO_CONFIG) {
        mp = mbuf_pool_find(port_numa[port_id], 0);
        if (mp)
            printf("\nmemory allocation on the socket: %d",
                   port_numa[port_id]);
    } else
        printf("\nmemory allocation on the socket: %u", port->socket_id);

("up") : ("down")); 701 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 702 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 703 ("full-duplex") : ("half-duplex")); 704 705 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 706 printf("MTU: %u\n", mtu); 707 708 printf("Promiscuous mode: %s\n", 709 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 710 printf("Allmulticast mode: %s\n", 711 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 712 printf("Maximum number of MAC addresses: %u\n", 713 (unsigned int)(port->dev_info.max_mac_addrs)); 714 printf("Maximum number of MAC addresses of hash filtering: %u\n", 715 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 716 717 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 718 if (vlan_offload >= 0){ 719 printf("VLAN offload: \n"); 720 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 721 printf(" strip on, "); 722 else 723 printf(" strip off, "); 724 725 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 726 printf("filter on, "); 727 else 728 printf("filter off, "); 729 730 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 731 printf("extend on, "); 732 else 733 printf("extend off, "); 734 735 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 736 printf("qinq strip on\n"); 737 else 738 printf("qinq strip off\n"); 739 } 740 741 if (dev_info.hash_key_size > 0) 742 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 743 if (dev_info.reta_size > 0) 744 printf("Redirection table size: %u\n", dev_info.reta_size); 745 if (!dev_info.flow_type_rss_offloads) 746 printf("No RSS offload flow type is supported.\n"); 747 else { 748 uint16_t i; 749 char *p; 750 751 printf("Supported RSS offload flow types:\n"); 752 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 753 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 754 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 755 continue; 756 p = flowtype_to_str(i); 757 if (p) 758 printf(" %s\n", p); 759 else 760 printf(" user defined %d\n", i); 761 } 762 } 763 764 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 765 printf("Maximum configurable length of RX packet: %u\n", 766 dev_info.max_rx_pktlen); 767 printf("Maximum configurable size of LRO aggregated packet: %u\n", 768 dev_info.max_lro_pkt_size); 769 if (dev_info.max_vfs) 770 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 771 if (dev_info.max_vmdq_pools) 772 printf("Maximum number of VMDq pools: %u\n", 773 dev_info.max_vmdq_pools); 774 775 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 776 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 777 printf("Max possible number of RXDs per queue: %hu\n", 778 dev_info.rx_desc_lim.nb_max); 779 printf("Min possible number of RXDs per queue: %hu\n", 780 dev_info.rx_desc_lim.nb_min); 781 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 782 783 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 784 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 785 printf("Max possible number of TXDs per queue: %hu\n", 786 dev_info.tx_desc_lim.nb_max); 787 printf("Min possible number of TXDs per queue: %hu\n", 788 dev_info.tx_desc_lim.nb_min); 789 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 790 printf("Max segment number per packet: %hu\n", 791 dev_info.tx_desc_lim.nb_seg_max); 792 printf("Max segment number per MTU/TSO: %hu\n", 793 dev_info.tx_desc_lim.nb_mtu_seg_max); 794 795 /* Show switch info only if valid switch domain and port id is set 
    /* Show switch info only if valid switch domain and port id is set */
    if (dev_info.switch_info.domain_id !=
        RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
        if (dev_info.switch_info.name)
            printf("Switch name: %s\n", dev_info.switch_info.name);

        printf("Switch domain Id: %u\n",
               dev_info.switch_info.domain_id);
        printf("Switch Port Id: %u\n",
               dev_info.switch_info.port_id);
    }
}

void
port_summary_header_display(void)
{
    uint16_t port_number;

    port_number = rte_eth_dev_count_avail();
    printf("Number of available ports: %i\n", port_number);
    printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
           "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
    struct rte_ether_addr mac_addr;
    struct rte_eth_link link;
    struct rte_eth_dev_info dev_info;
    char name[RTE_ETH_NAME_MAX_LEN];
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    ret = eth_link_get_nowait_print_err(port_id, &link);
    if (ret < 0)
        return;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);
    if (ret != 0)
        return;

    rte_eth_dev_get_name_by_port(port_id, name);
    ret = eth_macaddr_get_print_err(port_id, &mac_addr);
    if (ret != 0)
        return;

    printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n",
           port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
           mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
           mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
           dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
           rte_eth_link_speed_to_str(link.link_speed));
}

void
port_eeprom_display(portid_t port_id)
{
    struct rte_dev_eeprom_info einfo;
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
    if (len_eeprom < 0) {
        switch (len_eeprom) {
        case -ENODEV:
            printf("port index %d invalid\n", port_id);
            break;
        case -ENOTSUP:
            printf("operation not supported by device\n");
            break;
        case -EIO:
            printf("device is removed\n");
            break;
        default:
            printf("Unable to get EEPROM: %d\n", len_eeprom);
            break;
        }
        return;
    }

    char buf[len_eeprom];
    einfo.offset = 0;
    einfo.length = len_eeprom;
    einfo.data = buf;

    ret = rte_eth_dev_get_eeprom(port_id, &einfo);
    if (ret != 0) {
        switch (ret) {
        case -ENODEV:
            printf("port index %d invalid\n", port_id);
            break;
        case -ENOTSUP:
            printf("operation not supported by device\n");
            break;
        case -EIO:
            printf("device is removed\n");
            break;
        default:
            printf("Unable to get EEPROM: %d\n", ret);
            break;
        }
        return;
    }
    rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
    printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
}

void
port_module_eeprom_display(portid_t port_id)
{
    struct rte_eth_dev_module_info minfo;
    struct rte_dev_eeprom_info einfo;
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    ret = rte_eth_dev_get_module_info(port_id, &minfo);
    if (ret != 0) {
        switch (ret) {
        case -ENODEV:
            printf("port index %d invalid\n", port_id);
            break;
        case -ENOTSUP:
            printf("operation not supported by device\n");
            break;
        case -EIO:
            printf("device is removed\n");
            break;
        default:
            printf("Unable to get module EEPROM: %d\n", ret);
            break;
        }
        return;
    }

    char buf[minfo.eeprom_len];
    einfo.offset = 0;
    einfo.length = minfo.eeprom_len;
    einfo.data = buf;

    ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
    if (ret != 0) {
        switch (ret) {
        case -ENODEV:
            printf("port index %d invalid\n", port_id);
            break;
        case -ENOTSUP:
            printf("operation not supported by device\n");
            break;
        case -EIO:
            printf("device is removed\n");
            break;
        default:
            printf("Unable to get module EEPROM: %d\n", ret);
            break;
        }
        return;
    }

    rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
    printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
}

void
port_offload_cap_display(portid_t port_id)
{
    struct rte_eth_dev_info dev_info;
    static const char *info_border = "************";
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);
    if (ret != 0)
        return;

    printf("\n%s Port %d supported offload features: %s\n",
           info_border, port_id, info_border);

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
        printf("VLAN stripped: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_VLAN_STRIP)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
        printf("Double VLANs stripped: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_QINQ_STRIP)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
        printf("RX IPv4 checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_IPV4_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
        printf("RX UDP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_UDP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
        printf("RX TCP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_TCP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
        printf("RX SCTP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_SCTP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
        printf("RX Outer IPv4 checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
        printf("RX Outer UDP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
        printf("Large receive offload: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_TCP_LRO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
printf("HW timestamp: "); 1070 if (ports[port_id].dev_conf.rxmode.offloads & 1071 DEV_RX_OFFLOAD_TIMESTAMP) 1072 printf("on\n"); 1073 else 1074 printf("off\n"); 1075 } 1076 1077 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 1078 printf("Rx Keep CRC: "); 1079 if (ports[port_id].dev_conf.rxmode.offloads & 1080 DEV_RX_OFFLOAD_KEEP_CRC) 1081 printf("on\n"); 1082 else 1083 printf("off\n"); 1084 } 1085 1086 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 1087 printf("RX offload security: "); 1088 if (ports[port_id].dev_conf.rxmode.offloads & 1089 DEV_RX_OFFLOAD_SECURITY) 1090 printf("on\n"); 1091 else 1092 printf("off\n"); 1093 } 1094 1095 if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 1096 printf("RX offload buffer split: "); 1097 if (ports[port_id].dev_conf.rxmode.offloads & 1098 RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) 1099 printf("on\n"); 1100 else 1101 printf("off\n"); 1102 } 1103 1104 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 1105 printf("VLAN insert: "); 1106 if (ports[port_id].dev_conf.txmode.offloads & 1107 DEV_TX_OFFLOAD_VLAN_INSERT) 1108 printf("on\n"); 1109 else 1110 printf("off\n"); 1111 } 1112 1113 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 1114 printf("Double VLANs insert: "); 1115 if (ports[port_id].dev_conf.txmode.offloads & 1116 DEV_TX_OFFLOAD_QINQ_INSERT) 1117 printf("on\n"); 1118 else 1119 printf("off\n"); 1120 } 1121 1122 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 1123 printf("TX IPv4 checksum: "); 1124 if (ports[port_id].dev_conf.txmode.offloads & 1125 DEV_TX_OFFLOAD_IPV4_CKSUM) 1126 printf("on\n"); 1127 else 1128 printf("off\n"); 1129 } 1130 1131 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 1132 printf("TX UDP checksum: "); 1133 if (ports[port_id].dev_conf.txmode.offloads & 1134 DEV_TX_OFFLOAD_UDP_CKSUM) 1135 printf("on\n"); 1136 else 1137 printf("off\n"); 1138 } 1139 1140 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 1141 printf("TX TCP checksum: "); 1142 if (ports[port_id].dev_conf.txmode.offloads & 1143 DEV_TX_OFFLOAD_TCP_CKSUM) 1144 printf("on\n"); 1145 else 1146 printf("off\n"); 1147 } 1148 1149 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 1150 printf("TX SCTP checksum: "); 1151 if (ports[port_id].dev_conf.txmode.offloads & 1152 DEV_TX_OFFLOAD_SCTP_CKSUM) 1153 printf("on\n"); 1154 else 1155 printf("off\n"); 1156 } 1157 1158 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 1159 printf("TX Outer IPv4 checksum: "); 1160 if (ports[port_id].dev_conf.txmode.offloads & 1161 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 1162 printf("on\n"); 1163 else 1164 printf("off\n"); 1165 } 1166 1167 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 1168 printf("TX TCP segmentation: "); 1169 if (ports[port_id].dev_conf.txmode.offloads & 1170 DEV_TX_OFFLOAD_TCP_TSO) 1171 printf("on\n"); 1172 else 1173 printf("off\n"); 1174 } 1175 1176 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 1177 printf("TX UDP segmentation: "); 1178 if (ports[port_id].dev_conf.txmode.offloads & 1179 DEV_TX_OFFLOAD_UDP_TSO) 1180 printf("on\n"); 1181 else 1182 printf("off\n"); 1183 } 1184 1185 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 1186 printf("TSO for VXLAN tunnel packet: "); 1187 if (ports[port_id].dev_conf.txmode.offloads & 1188 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 1189 printf("on\n"); 1190 else 1191 printf("off\n"); 1192 } 1193 1194 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 1195 printf("TSO for GRE tunnel packet: "); 1196 if 
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_GRE_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
        printf("TSO for IPIP tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_IPIP_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
        printf("TSO for GENEVE tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
        printf("IP tunnel TSO: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_IP_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
        printf("UDP tunnel TSO: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_UDP_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
        printf("TX Outer UDP checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) {
        printf("Tx scheduling on timestamp: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP)
            printf("on\n");
        else
            printf("off\n");
    }

}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
    uint16_t pid;

    if (port_id == (portid_t)RTE_PORT_ALL)
        return 0;

    RTE_ETH_FOREACH_DEV(pid)
        if (port_id == pid)
            return 0;

    if (warning == ENABLED_WARN)
        printf("Invalid port %d\n", port_id);

    return 1;
}

void print_valid_ports(void)
{
    portid_t pid;

    printf("The valid ports array is [");
    RTE_ETH_FOREACH_DEV(pid) {
        printf(" %d", pid);
    }
    printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
    if (vlan_id < 4096)
        return 0;
    printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
    return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
    const struct rte_pci_device *pci_dev;
    const struct rte_bus *bus;
    uint64_t pci_len;

    if (reg_off & 0x3) {
        printf("Port register offset 0x%X not aligned on a 4-byte "
               "boundary\n",
               (unsigned)reg_off);
        return 1;
    }

    if (!ports[port_id].dev_info.device) {
        printf("Invalid device\n");
        return 0;
    }

    bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
    if (bus && !strcmp(bus->name, "pci")) {
        pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
    } else {
        printf("Not a PCI device\n");
        return 1;
    }

    pci_len = pci_dev->mem_resource[0].len;
    if (reg_off >= pci_len) {
        printf("Port %d: register offset %u (0x%X) out of port PCI "
               "resource (length=%"PRIu64")\n",
               port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
        return 1;
    }
    return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
    if (bit_pos <= 31)
        return 0;
    printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
    return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
    printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
    display_port_and_reg_off(port_id, (unsigned)reg_off);
    printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
    uint32_t reg_v;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    if (reg_bit_pos_is_invalid(bit_x))
        return;
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    display_port_and_reg_off(port_id, (unsigned)reg_off);
    printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
               uint8_t bit1_pos, uint8_t bit2_pos)
{
    uint32_t reg_v;
    uint8_t l_bit;
    uint8_t h_bit;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    if (reg_bit_pos_is_invalid(bit1_pos))
        return;
    if (reg_bit_pos_is_invalid(bit2_pos))
        return;
    if (bit1_pos > bit2_pos)
        l_bit = bit2_pos, h_bit = bit1_pos;
    else
        l_bit = bit1_pos, h_bit = bit2_pos;

    reg_v = port_id_pci_reg_read(port_id, reg_off);
    reg_v >>= l_bit;
    if (h_bit < 31)
        reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
    display_port_and_reg_off(port_id, (unsigned)reg_off);
    printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
           ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
    uint32_t reg_v;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
         uint8_t bit_v)
{
    uint32_t reg_v;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    if (reg_bit_pos_is_invalid(bit_pos))
        return;
    if (bit_v > 1) {
        printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
        return;
    }
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    if (bit_v == 0)
        reg_v &= ~(1 << bit_pos);
    else
        reg_v |= (1 << bit_pos);
    port_id_pci_reg_write(port_id, reg_off, reg_v);
    display_port_reg_value(port_id, reg_off, reg_v);
}
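
/*
 * Worked example for the field helper below: for bits [4, 7], l_bit = 4
 * and h_bit = 7, so max_v = (1 << 4) - 1 = 0xF; the register is first
 * cleared with ~(0xF << 4) and the new value is OR-ed in as (value << 4).
 */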

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
               uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
    uint32_t max_v;
    uint32_t reg_v;
    uint8_t l_bit;
    uint8_t h_bit;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    if (reg_bit_pos_is_invalid(bit1_pos))
        return;
    if (reg_bit_pos_is_invalid(bit2_pos))
        return;
    if (bit1_pos > bit2_pos)
        l_bit = bit2_pos, h_bit = bit1_pos;
    else
        l_bit = bit1_pos, h_bit = bit2_pos;

    if ((h_bit - l_bit) < 31)
        max_v = (1 << (h_bit - l_bit + 1)) - 1;
    else
        max_v = 0xFFFFFFFF;

    if (value > max_v) {
        printf("Invalid value %u (0x%x) must be <= %u (0x%x)\n",
               (unsigned)value, (unsigned)value,
               (unsigned)max_v, (unsigned)max_v);
        return;
    }
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
    reg_v |= (value << l_bit); /* Set changed bits */
    port_id_pci_reg_write(port_id, reg_off, reg_v);
    display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    port_id_pci_reg_write(port_id, reg_off, reg_v);
    display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
    int diag;
    struct rte_port *rte_port = &ports[port_id];
    struct rte_eth_dev_info dev_info;
    uint16_t eth_overhead;
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);
    if (ret != 0)
        return;

    if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
        printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
               mtu, dev_info.min_mtu, dev_info.max_mtu);
        return;
    }
    diag = rte_eth_dev_set_mtu(port_id, mtu);
    if (diag)
        printf("Set MTU failed. diag=%d\n", diag);
    else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
        /*
         * Ether overhead in driver is equal to the difference of
         * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
         * device supports jumbo frame.
         */
        eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
        if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
            rte_port->dev_conf.rxmode.offloads |=
                    DEV_RX_OFFLOAD_JUMBO_FRAME;
            rte_port->dev_conf.rxmode.max_rx_pkt_len =
                    mtu + eth_overhead;
        } else
            rte_port->dev_conf.rxmode.offloads &=
                    ~DEV_RX_OFFLOAD_JUMBO_FRAME;
    }
}
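
/*
 * Example of the overhead computation in port_mtu_set() above, using
 * hypothetical driver limits: max_rx_pktlen = 9618 and max_mtu = 9600 give
 * eth_overhead = 18 bytes (Ethernet header, CRC and a VLAN tag), so any
 * MTU above RTE_ETHER_MAX_LEN - 18 = 1500 turns on
 * DEV_RX_OFFLOAD_JUMBO_FRAME and sets max_rx_pkt_len to mtu + 18.
 */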

/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
    struct port_flow_tunnel *flow_tunnel;

    LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
        if (flow_tunnel->id == port_tunnel_id)
            goto out;
    }
    flow_tunnel = NULL;

out:
    return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
    const char *type;
    switch (tunnel->type) {
    default:
        type = "unknown";
        break;
    case RTE_FLOW_ITEM_TYPE_VXLAN:
        type = "vxlan";
        break;
    }

    return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
    struct rte_port *port = &ports[port_id];
    struct port_flow_tunnel *flow_tunnel;

    LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
        if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
            goto out;
    }
    flow_tunnel = NULL;

out:
    return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
    struct rte_port *port = &ports[port_id];
    struct port_flow_tunnel *flt;

    LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
        printf("port %u tunnel #%u type=%s",
               port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
        if (flt->tunnel.tun_id)
            printf(" id=%" PRIu64, flt->tunnel.tun_id);
        printf("\n");
    }
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
    struct rte_port *port = &ports[port_id];
    struct port_flow_tunnel *flt;

    LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
        if (flt->id == tunnel_id)
            break;
    }
    if (flt) {
        LIST_REMOVE(flt, chain);
        free(flt);
        printf("port %u: flow tunnel #%u destroyed\n",
               port_id, tunnel_id);
    }
}

void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
    struct rte_port *port = &ports[port_id];
    enum rte_flow_item_type type;
    struct port_flow_tunnel *flt;

    if (!strcmp(ops->type, "vxlan"))
        type = RTE_FLOW_ITEM_TYPE_VXLAN;
    else {
        printf("cannot offload \"%s\" tunnel type\n", ops->type);
        return;
    }
    LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
        if (flt->tunnel.type == type)
            break;
    }
    if (!flt) {
        flt = calloc(1, sizeof(*flt));
        if (!flt) {
            printf("failed to allocate port flt object\n");
            return;
        }
        flt->tunnel.type = type;
        flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
              LIST_FIRST(&port->flow_tunnel_list)->id + 1;
        LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
    }
    printf("port %d: flow tunnel #%u type %s\n",
           port_id, flt->id, ops->type);
}

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
          const struct rte_flow_item *pattern,
          const struct rte_flow_action *actions,
          struct rte_flow_error *error)
{
    const struct rte_flow_conv_rule rule = {
        .attr_ro = attr,
        .pattern_ro = pattern,
        .actions_ro = actions,
    };
    struct port_flow *pf;
    int ret;

    ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
    if (ret < 0)
        return NULL;
    pf = calloc(1, offsetof(struct port_flow, rule) + ret);
    if (!pf) {
        rte_flow_error_set
            (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
             "calloc() failed");
        return NULL;
    }
    if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
              error) >= 0)
        return pf;
    free(pf);
    return NULL;
}
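
/*
 * port_flow_new() above relies on the size-then-fill contract of
 * rte_flow_conv(): the first RTE_FLOW_CONV_OP_RULE call with a NULL
 * destination returns the byte count needed for the converted rule, and
 * the second call copies it into the flexible tail of struct port_flow.
 */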

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
    static const char *const errstrlist[] = {
        [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
        [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
        [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
        [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
        [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
        [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
        [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
        [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
        [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
        [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
        [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
        [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
        [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
        [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
        [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
        [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
        [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
    };
    const char *errstr;
    char buf[32];
    int err = rte_errno;

    if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
        !errstrlist[error->type])
        errstr = "unknown type";
    else
        errstr = errstrlist[error->type];
    printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
           error->type, errstr,
           error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
                        error->cause), buf) : "",
           error->message ? error->message : "(no stated reason)",
           rte_strerror(err));
    return -err;
}

static void
rss_config_display(struct rte_flow_action_rss *rss_conf)
{
    uint8_t i;

    if (rss_conf == NULL) {
        printf("Invalid rule\n");
        return;
    }

    printf("RSS:\n"
           " queues:");
    if (rss_conf->queue_num == 0)
        printf(" none");
    for (i = 0; i < rss_conf->queue_num; i++)
        printf(" %d", rss_conf->queue[i]);
    printf("\n");

    printf(" function: ");
    switch (rss_conf->func) {
    case RTE_ETH_HASH_FUNCTION_DEFAULT:
        printf("default\n");
        break;
    case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
        printf("toeplitz\n");
        break;
    case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
        printf("simple_xor\n");
        break;
    case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
        printf("symmetric_toeplitz\n");
        break;
    default:
        printf("Unknown function\n");
        return;
    }

    printf(" types:\n");
    if (rss_conf->types == 0) {
        printf(" none\n");
        return;
    }
    for (i = 0; rss_type_table[i].str; i++) {
        if ((rss_conf->types &
             rss_type_table[i].rss_type) ==
            rss_type_table[i].rss_type &&
            rss_type_table[i].rss_type != 0)
            printf(" %s\n", rss_type_table[i].str);
    }
}

static struct port_shared_action *
action_get_by_id(portid_t port_id, uint32_t id)
{
    struct rte_port *port;
    struct port_shared_action **ppsa;
    struct port_shared_action *psa = NULL;

    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
        return NULL;
    port = &ports[port_id];
    ppsa = &port->actions_list;
    while (*ppsa) {
        if ((*ppsa)->id == id) {
            psa = *ppsa;
            break;
        }
        ppsa = &(*ppsa)->next;
    }
    if (!psa)
        printf("Failed to find shared action #%u on port %u\n",
               id, port_id);
    return psa;
}

static int
action_alloc(portid_t port_id, uint32_t id,
         struct port_shared_action **action)
{
    struct rte_port *port;
    struct port_shared_action **ppsa;
    struct port_shared_action *psa = NULL;

    *action = NULL;
    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
        return -EINVAL;
    port = &ports[port_id];
    if (id == UINT32_MAX) {
        /* taking first available ID */
        if (port->actions_list) {
            if (port->actions_list->id == UINT32_MAX - 1) {
                printf("Highest shared action ID is already"
                       " assigned, delete it first\n");
                return -ENOMEM;
            }
            id = port->actions_list->id + 1;
        } else {
            id = 0;
        }
    }
    psa = calloc(1, sizeof(*psa));
    if (!psa) {
        printf("Allocation of port %u shared action failed\n",
               port_id);
        return -ENOMEM;
    }
    ppsa = &port->actions_list;
    while (*ppsa && (*ppsa)->id > id)
        ppsa = &(*ppsa)->next;
    if (*ppsa && (*ppsa)->id == id) {
        printf("Shared action #%u is already assigned,"
               " delete it first\n", id);
        free(psa);
        return -EINVAL;
    }
    psa->next = *ppsa;
    psa->id = id;
    *ppsa = psa;
    *action = psa;
    return 0;
}
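
/*
 * The per-port actions_list above is kept sorted by descending ID, so
 * action_alloc() derives "first available" as the head ID plus one and
 * can detect an already-assigned ID while walking to the insertion point.
 */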

/** Create shared action */
int
port_shared_action_create(portid_t port_id, uint32_t id,
              const struct rte_flow_shared_action_conf *conf,
              const struct rte_flow_action *action)
{
    struct port_shared_action *psa;
    int ret;
    struct rte_flow_error error;

    ret = action_alloc(port_id, id, &psa);
    if (ret)
        return ret;
    if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
        struct rte_flow_action_age *age =
            (struct rte_flow_action_age *)(uintptr_t)(action->conf);

        psa->age_type = ACTION_AGE_CONTEXT_TYPE_SHARED_ACTION;
        age->context = &psa->age_type;
    }
    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x22, sizeof(error));
    psa->action = rte_flow_shared_action_create(port_id, conf, action,
                            &error);
    if (!psa->action) {
        uint32_t destroy_id = psa->id;
        port_shared_action_destroy(port_id, 1, &destroy_id);
        return port_flow_complain(&error);
    }
    psa->type = action->type;
    printf("Shared action #%u created\n", psa->id);
    return 0;
}

/** Destroy shared action */
int
port_shared_action_destroy(portid_t port_id,
               uint32_t n,
               const uint32_t *actions)
{
    struct rte_port *port;
    struct port_shared_action **tmp;
    uint32_t c = 0;
    int ret = 0;

    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
        return -EINVAL;
    port = &ports[port_id];
    tmp = &port->actions_list;
    while (*tmp) {
        uint32_t i;

        for (i = 0; i != n; ++i) {
            struct rte_flow_error error;
            struct port_shared_action *psa = *tmp;

            if (actions[i] != psa->id)
                continue;
            /*
             * Poisoning to make sure PMDs update it in case
             * of error.
             */
            memset(&error, 0x33, sizeof(error));

            if (psa->action && rte_flow_shared_action_destroy(
                    port_id, psa->action, &error)) {
                ret = port_flow_complain(&error);
                continue;
            }
            *tmp = psa->next;
            printf("Shared action #%u destroyed\n", psa->id);
            free(psa);
            break;
        }
        if (i == n)
            tmp = &(*tmp)->next;
        ++c;
    }
    return ret;
}
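
/*
 * The destroy loop above walks the list through a pointer-to-pointer
 * (struct port_shared_action **tmp), which lets it unlink a matched node
 * with a single "*tmp = psa->next" without tracking a previous-node
 * pointer; "tmp" only advances when the current node is kept.
 */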

/** Get shared action by port + id */
struct rte_flow_shared_action *
port_shared_action_get_by_id(portid_t port_id, uint32_t id)
{
    struct port_shared_action *psa = action_get_by_id(port_id, id);

    return (psa) ? psa->action : NULL;
}

/** Update shared action */
int
port_shared_action_update(portid_t port_id, uint32_t id,
              const struct rte_flow_action *action)
{
    struct rte_flow_error error;
    struct rte_flow_shared_action *shared_action;

    shared_action = port_shared_action_get_by_id(port_id, id);
    if (!shared_action)
        return -EINVAL;
    if (rte_flow_shared_action_update(port_id, shared_action, action,
                      &error)) {
        return port_flow_complain(&error);
    }
    printf("Shared action #%u updated\n", id);
    return 0;
}

int
port_shared_action_query(portid_t port_id, uint32_t id)
{
    struct rte_flow_error error;
    struct port_shared_action *psa;
    uint64_t default_data;
    void *data = NULL;
    int ret = 0;

    psa = action_get_by_id(port_id, id);
    if (!psa)
        return -EINVAL;
    switch (psa->type) {
    case RTE_FLOW_ACTION_TYPE_RSS:
        data = &default_data;
        break;
    default:
        printf("Shared action %u (type: %d) on port %u doesn't support"
               " query\n", id, psa->type, port_id);
        return -1;
    }
    if (rte_flow_shared_action_query(port_id, psa->action, data, &error))
        ret = port_flow_complain(&error);
    switch (psa->type) {
    case RTE_FLOW_ACTION_TYPE_RSS:
        if (!ret)
            printf("Shared RSS action:\n\trefs:%u\n",
                   *((uint32_t *)data));
        data = NULL;
        break;
    default:
        printf("Shared action %u (type: %d) on port %u doesn't support"
               " query\n", id, psa->type, port_id);
        ret = -1;
    }
    return ret;
}
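
/*
 * The helper below merges PMD-generated tunnel items/actions with the
 * ones typed on the command line: it counts the user array up to (and
 * including) the END sentinel, allocates room for both sets, and copies
 * the PMD entries first so the merged rule still terminates with the
 * user's END.
 */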
(!pft->items) { 2053 rte_flow_tunnel_item_release( 2054 port_id, pft->pmd_items, 2055 pft->num_pmd_items, &error); 2056 return NULL; 2057 } 2058 rte_memcpy(pft->items, pft->pmd_items, 2059 pft->num_pmd_items * sizeof(pattern[0])); 2060 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2061 num_items * sizeof(pattern[0])); 2062 } 2063 2064 return pft; 2065 } 2066 2067 static void 2068 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2069 const struct tunnel_ops *tunnel_ops, 2070 struct port_flow_tunnel *pft) 2071 { 2072 struct rte_flow_error error; 2073 2074 if (tunnel_ops->actions) { 2075 free(pft->actions); 2076 rte_flow_tunnel_action_decap_release( 2077 port_id, pft->pmd_actions, 2078 pft->num_pmd_actions, &error); 2079 pft->actions = NULL; 2080 pft->pmd_actions = NULL; 2081 } 2082 if (tunnel_ops->items) { 2083 free(pft->items); 2084 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2085 pft->num_pmd_items, 2086 &error); 2087 pft->items = NULL; 2088 pft->pmd_items = NULL; 2089 } 2090 } 2091 2092 /** Validate flow rule. */ 2093 int 2094 port_flow_validate(portid_t port_id, 2095 const struct rte_flow_attr *attr, 2096 const struct rte_flow_item *pattern, 2097 const struct rte_flow_action *actions, 2098 const struct tunnel_ops *tunnel_ops) 2099 { 2100 struct rte_flow_error error; 2101 struct port_flow_tunnel *pft = NULL; 2102 2103 /* Poisoning to make sure PMDs update it in case of error. */ 2104 memset(&error, 0x11, sizeof(error)); 2105 if (tunnel_ops->enabled) { 2106 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2107 actions, tunnel_ops); 2108 if (!pft) 2109 return -ENOENT; 2110 if (pft->items) 2111 pattern = pft->items; 2112 if (pft->actions) 2113 actions = pft->actions; 2114 } 2115 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 2116 return port_flow_complain(&error); 2117 if (tunnel_ops->enabled) 2118 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2119 printf("Flow rule validated\n"); 2120 return 0; 2121 } 2122 2123 /** Return age action structure if exists, otherwise NULL. */ 2124 static struct rte_flow_action_age * 2125 age_action_get(const struct rte_flow_action *actions) 2126 { 2127 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2128 switch (actions->type) { 2129 case RTE_FLOW_ACTION_TYPE_AGE: 2130 return (struct rte_flow_action_age *) 2131 (uintptr_t)actions->conf; 2132 default: 2133 break; 2134 } 2135 } 2136 return NULL; 2137 } 2138 2139 /** Create flow rule. 
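 *
 * The new rule takes the ID of the current list head plus one, so rule IDs
 * grow monotonically and creation is refused once the head holds UINT32_MAX.
 * If the action list carries an AGE action, its context is pointed at the
 * port_flow's age_type field so that port_flow_aged() can later map an aged
 * context back to this rule.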
*/ 2140 int 2141 port_flow_create(portid_t port_id, 2142 const struct rte_flow_attr *attr, 2143 const struct rte_flow_item *pattern, 2144 const struct rte_flow_action *actions, 2145 const struct tunnel_ops *tunnel_ops) 2146 { 2147 struct rte_flow *flow; 2148 struct rte_port *port; 2149 struct port_flow *pf; 2150 uint32_t id = 0; 2151 struct rte_flow_error error; 2152 struct port_flow_tunnel *pft = NULL; 2153 struct rte_flow_action_age *age = age_action_get(actions); 2154 2155 port = &ports[port_id]; 2156 if (port->flow_list) { 2157 if (port->flow_list->id == UINT32_MAX) { 2158 printf("Highest rule ID is already assigned, delete" 2159 " it first"); 2160 return -ENOMEM; 2161 } 2162 id = port->flow_list->id + 1; 2163 } 2164 if (tunnel_ops->enabled) { 2165 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2166 actions, tunnel_ops); 2167 if (!pft) 2168 return -ENOENT; 2169 if (pft->items) 2170 pattern = pft->items; 2171 if (pft->actions) 2172 actions = pft->actions; 2173 } 2174 pf = port_flow_new(attr, pattern, actions, &error); 2175 if (!pf) 2176 return port_flow_complain(&error); 2177 if (age) { 2178 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2179 age->context = &pf->age_type; 2180 } 2181 /* Poisoning to make sure PMDs update it in case of error. */ 2182 memset(&error, 0x22, sizeof(error)); 2183 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2184 if (!flow) { 2185 free(pf); 2186 return port_flow_complain(&error); 2187 } 2188 pf->next = port->flow_list; 2189 pf->id = id; 2190 pf->flow = flow; 2191 port->flow_list = pf; 2192 if (tunnel_ops->enabled) 2193 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2194 printf("Flow rule #%u created\n", pf->id); 2195 return 0; 2196 } 2197 2198 /** Destroy a number of flow rules. */ 2199 int 2200 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2201 { 2202 struct rte_port *port; 2203 struct port_flow **tmp; 2204 uint32_t c = 0; 2205 int ret = 0; 2206 2207 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2208 port_id == (portid_t)RTE_PORT_ALL) 2209 return -EINVAL; 2210 port = &ports[port_id]; 2211 tmp = &port->flow_list; 2212 while (*tmp) { 2213 uint32_t i; 2214 2215 for (i = 0; i != n; ++i) { 2216 struct rte_flow_error error; 2217 struct port_flow *pf = *tmp; 2218 2219 if (rule[i] != pf->id) 2220 continue; 2221 /* 2222 * Poisoning to make sure PMDs update it in case 2223 * of error. 2224 */ 2225 memset(&error, 0x33, sizeof(error)); 2226 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2227 ret = port_flow_complain(&error); 2228 continue; 2229 } 2230 printf("Flow rule #%u destroyed\n", pf->id); 2231 *tmp = pf->next; 2232 free(pf); 2233 break; 2234 } 2235 if (i == n) 2236 tmp = &(*tmp)->next; 2237 ++c; 2238 } 2239 return ret; 2240 } 2241 2242 /** Remove all flow rules. */ 2243 int 2244 port_flow_flush(portid_t port_id) 2245 { 2246 struct rte_flow_error error; 2247 struct rte_port *port; 2248 int ret = 0; 2249 2250 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2251 port_id == (portid_t)RTE_PORT_ALL) 2252 return -EINVAL; 2253 2254 port = &ports[port_id]; 2255 2256 if (port->flow_list == NULL) 2257 return ret; 2258 2259 /* Poisoning to make sure PMDs update it in case of error. 
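 * (The 0x44 byte pattern is arbitrary; poisoning only has to make stale or
 * never-written error fields visibly bogus if the PMD reports failure
 * without filling the structure in.)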
*/ 2260 memset(&error, 0x44, sizeof(error)); 2261 if (rte_flow_flush(port_id, &error)) { 2262 port_flow_complain(&error); 2263 } 2264 2265 while (port->flow_list) { 2266 struct port_flow *pf = port->flow_list->next; 2267 2268 free(port->flow_list); 2269 port->flow_list = pf; 2270 } 2271 return ret; 2272 } 2273 2274 /** Dump all flow rules. */ 2275 int 2276 port_flow_dump(portid_t port_id, const char *file_name) 2277 { 2278 int ret = 0; 2279 FILE *file = stdout; 2280 struct rte_flow_error error; 2281 2282 if (file_name && strlen(file_name)) { 2283 file = fopen(file_name, "w"); 2284 if (!file) { 2285 printf("Failed to create file %s: %s\n", file_name, 2286 strerror(errno)); 2287 return -errno; 2288 } 2289 } 2290 ret = rte_flow_dev_dump(port_id, file, &error); 2291 if (ret) { 2292 port_flow_complain(&error); 2293 printf("Failed to dump flow: %s\n", strerror(-ret)); 2294 } else 2295 printf("Flow dump finished\n"); 2296 if (file_name && strlen(file_name)) 2297 fclose(file); 2298 return ret; 2299 } 2300 2301 /** Query a flow rule. */ 2302 int 2303 port_flow_query(portid_t port_id, uint32_t rule, 2304 const struct rte_flow_action *action) 2305 { 2306 struct rte_flow_error error; 2307 struct rte_port *port; 2308 struct port_flow *pf; 2309 const char *name; 2310 union { 2311 struct rte_flow_query_count count; 2312 struct rte_flow_action_rss rss_conf; 2313 struct rte_flow_query_age age; 2314 } query; 2315 int ret; 2316 2317 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2318 port_id == (portid_t)RTE_PORT_ALL) 2319 return -EINVAL; 2320 port = &ports[port_id]; 2321 for (pf = port->flow_list; pf; pf = pf->next) 2322 if (pf->id == rule) 2323 break; 2324 if (!pf) { 2325 printf("Flow rule #%u not found\n", rule); 2326 return -ENOENT; 2327 } 2328 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2329 &name, sizeof(name), 2330 (void *)(uintptr_t)action->type, &error); 2331 if (ret < 0) 2332 return port_flow_complain(&error); 2333 switch (action->type) { 2334 case RTE_FLOW_ACTION_TYPE_COUNT: 2335 case RTE_FLOW_ACTION_TYPE_RSS: 2336 case RTE_FLOW_ACTION_TYPE_AGE: 2337 break; 2338 default: 2339 printf("Cannot query action type %d (%s)\n", 2340 action->type, name); 2341 return -ENOTSUP; 2342 } 2343 /* Poisoning to make sure PMDs update it in case of error. */ 2344 memset(&error, 0x55, sizeof(error)); 2345 memset(&query, 0, sizeof(query)); 2346 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2347 return port_flow_complain(&error); 2348 switch (action->type) { 2349 case RTE_FLOW_ACTION_TYPE_COUNT: 2350 printf("%s:\n" 2351 " hits_set: %u\n" 2352 " bytes_set: %u\n" 2353 " hits: %" PRIu64 "\n" 2354 " bytes: %" PRIu64 "\n", 2355 name, 2356 query.count.hits_set, 2357 query.count.bytes_set, 2358 query.count.hits, 2359 query.count.bytes); 2360 break; 2361 case RTE_FLOW_ACTION_TYPE_RSS: 2362 rss_config_display(&query.rss_conf); 2363 break; 2364 case RTE_FLOW_ACTION_TYPE_AGE: 2365 printf("%s:\n" 2366 " aged: %u\n" 2367 " sec_since_last_hit_valid: %u\n" 2368 " sec_since_last_hit: %" PRIu32 "\n", 2369 name, 2370 query.age.aged, 2371 query.age.sec_since_last_hit_valid, 2372 query.age.sec_since_last_hit); 2373 break; 2374 default: 2375 printf("Cannot display result for action type %d (%s)\n", 2376 action->type, name); 2377 break; 2378 } 2379 return 0; 2380 } 2381 2382 /** List simply and destroy all aged flows. 
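 *
 * Follows the two-call pattern of rte_flow_get_aged_flows(): a first call
 * with a NULL context array returns only the number of aged flows, then a
 * buffer of that size is allocated and filled by a second call. Each
 * returned context starts with an age_action_context_type tag telling
 * whether it belongs to a flow rule or to a shared action.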
*/ 2383 void 2384 port_flow_aged(portid_t port_id, uint8_t destroy) 2385 { 2386 void **contexts; 2387 int nb_context, total = 0, idx; 2388 struct rte_flow_error error; 2389 enum age_action_context_type *type; 2390 union { 2391 struct port_flow *pf; 2392 struct port_shared_action *psa; 2393 } ctx; 2394 2395 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2396 port_id == (portid_t)RTE_PORT_ALL) 2397 return; 2398 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2399 if (total < 0) { 2400 port_flow_complain(&error); 2401 return; 2402 } 2403 printf("Port %u total aged flows: %d\n", port_id, total); 2404 if (total == 0) 2405 return; 2406 contexts = malloc(sizeof(void *) * total); 2407 if (contexts == NULL) { 2408 printf("Cannot allocate contexts for aged flow\n"); 2409 return; 2410 } 2411 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 2412 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2413 if (nb_context != total) { 2414 printf("Port:%d get aged flows count(%d) != total(%d)\n", 2415 port_id, nb_context, total); 2416 free(contexts); 2417 return; 2418 } 2419 total = 0; 2420 for (idx = 0; idx < nb_context; idx++) { 2421 if (!contexts[idx]) { 2422 printf("Error: got NULL context on port %u\n", port_id); 2423 continue; 2424 } 2425 type = (enum age_action_context_type *)contexts[idx]; 2426 switch (*type) { 2427 case ACTION_AGE_CONTEXT_TYPE_FLOW: 2428 ctx.pf = container_of(type, struct port_flow, age_type); 2429 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 2430 "\t%c%c%c\t\n", 2431 "Flow", 2432 ctx.pf->id, 2433 ctx.pf->rule.attr->group, 2434 ctx.pf->rule.attr->priority, 2435 ctx.pf->rule.attr->ingress ? 'i' : '-', 2436 ctx.pf->rule.attr->egress ? 'e' : '-', 2437 ctx.pf->rule.attr->transfer ? 't' : '-'); 2438 if (destroy && !port_flow_destroy(port_id, 1, 2439 &ctx.pf->id)) 2440 total++; 2441 break; 2442 case ACTION_AGE_CONTEXT_TYPE_SHARED_ACTION: 2443 ctx.psa = container_of(type, struct port_shared_action, 2444 age_type); 2445 printf("%-20s\t%" PRIu32 "\n", "Shared action", 2446 ctx.psa->id); 2447 break; 2448 default: 2449 printf("Error: invalid context type on port %u\n", port_id); 2450 break; 2451 } 2452 } 2453 printf("\n%d flows destroyed\n", total); 2454 free(contexts); 2455 } 2456 2457 /** List flow rules. */ 2458 void 2459 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2460 { 2461 struct rte_port *port; 2462 struct port_flow *pf; 2463 struct port_flow *list = NULL; 2464 uint32_t i; 2465 2466 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2467 port_id == (portid_t)RTE_PORT_ALL) 2468 return; 2469 port = &ports[port_id]; 2470 if (!port->flow_list) 2471 return; 2472 /* Sort flows by group, priority and ID. */ 2473 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2474 struct port_flow **tmp; 2475 const struct rte_flow_attr *curr = pf->rule.attr; 2476 2477 if (n) { 2478 /* Filter out unwanted groups.
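 * (A rule is listed only if its group matches one of the n group IDs
 * supplied by the user; when n is 0 no filter applies and every rule of
 * the port is shown.)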
*/ 2479 for (i = 0; i != n; ++i) 2480 if (curr->group == group[i]) 2481 break; 2482 if (i == n) 2483 continue; 2484 } 2485 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2486 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2487 2488 if (curr->group > comp->group || 2489 (curr->group == comp->group && 2490 curr->priority > comp->priority) || 2491 (curr->group == comp->group && 2492 curr->priority == comp->priority && 2493 pf->id > (*tmp)->id)) 2494 continue; 2495 break; 2496 } 2497 pf->tmp = *tmp; 2498 *tmp = pf; 2499 } 2500 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2501 for (pf = list; pf != NULL; pf = pf->tmp) { 2502 const struct rte_flow_item *item = pf->rule.pattern; 2503 const struct rte_flow_action *action = pf->rule.actions; 2504 const char *name; 2505 2506 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2507 pf->id, 2508 pf->rule.attr->group, 2509 pf->rule.attr->priority, 2510 pf->rule.attr->ingress ? 'i' : '-', 2511 pf->rule.attr->egress ? 'e' : '-', 2512 pf->rule.attr->transfer ? 't' : '-'); 2513 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2514 if ((uint32_t)item->type > INT_MAX) 2515 name = "PMD_INTERNAL"; 2516 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2517 &name, sizeof(name), 2518 (void *)(uintptr_t)item->type, 2519 NULL) <= 0) 2520 name = "[UNKNOWN]"; 2521 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2522 printf("%s ", name); 2523 ++item; 2524 } 2525 printf("=>"); 2526 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2527 if ((uint32_t)action->type > INT_MAX) 2528 name = "PMD_INTERNAL"; 2529 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2530 &name, sizeof(name), 2531 (void *)(uintptr_t)action->type, 2532 NULL) <= 0) 2533 name = "[UNKNOWN]"; 2534 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2535 printf(" %s", name); 2536 ++action; 2537 } 2538 printf("\n"); 2539 } 2540 } 2541 2542 /** Restrict ingress traffic to the defined flow rules. */ 2543 int 2544 port_flow_isolate(portid_t port_id, int set) 2545 { 2546 struct rte_flow_error error; 2547 2548 /* Poisoning to make sure PMDs update it in case of error. */ 2549 memset(&error, 0x66, sizeof(error)); 2550 if (rte_flow_isolate(port_id, set, &error)) 2551 return port_flow_complain(&error); 2552 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2553 port_id, 2554 set ? "now restricted" : "not restricted anymore"); 2555 return 0; 2556 } 2557 2558 /* 2559 * RX/TX ring descriptors display functions. 2560 */ 2561 int 2562 rx_queue_id_is_invalid(queueid_t rxq_id) 2563 { 2564 if (rxq_id < nb_rxq) 2565 return 0; 2566 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 2567 return 1; 2568 } 2569 2570 int 2571 tx_queue_id_is_invalid(queueid_t txq_id) 2572 { 2573 if (txq_id < nb_txq) 2574 return 0; 2575 printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq); 2576 return 1; 2577 } 2578 2579 static int 2580 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2581 { 2582 struct rte_port *port = &ports[port_id]; 2583 struct rte_eth_rxq_info rx_qinfo; 2584 int ret; 2585 2586 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2587 if (ret == 0) { 2588 *ring_size = rx_qinfo.nb_desc; 2589 return ret; 2590 } 2591 2592 if (ret != -ENOTSUP) 2593 return ret; 2594 /* 2595 * If rte_eth_rx_queue_info_get() is not supported by this PMD, the 2596 * ring_size stored in testpmd will be used for validity verification.
2597 * When the rxq is configured by rte_eth_rx_queue_setup() with nb_rx_desc 2598 * equal to 0, a default value provided by the PMD is used to set up this 2599 * rxq. If that default value is also 0, 2600 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE is used to set up this rxq. 2601 */ 2602 if (port->nb_rx_desc[rxq_id]) 2603 *ring_size = port->nb_rx_desc[rxq_id]; 2604 else if (port->dev_info.default_rxportconf.ring_size) 2605 *ring_size = port->dev_info.default_rxportconf.ring_size; 2606 else 2607 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2608 return 0; 2609 } 2610 2611 static int 2612 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2613 { 2614 struct rte_port *port = &ports[port_id]; 2615 struct rte_eth_txq_info tx_qinfo; 2616 int ret; 2617 2618 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2619 if (ret == 0) { 2620 *ring_size = tx_qinfo.nb_desc; 2621 return ret; 2622 } 2623 2624 if (ret != -ENOTSUP) 2625 return ret; 2626 /* 2627 * If rte_eth_tx_queue_info_get() is not supported by this PMD, the 2628 * ring_size stored in testpmd will be used for validity verification. 2629 * When the txq is configured by rte_eth_tx_queue_setup() with nb_tx_desc 2630 * equal to 0, a default value provided by the PMD is used to set up this 2631 * txq. If that default value is also 0, 2632 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE is used to set up this txq. 2633 */ 2634 if (port->nb_tx_desc[txq_id]) 2635 *ring_size = port->nb_tx_desc[txq_id]; 2636 else if (port->dev_info.default_txportconf.ring_size) 2637 *ring_size = port->dev_info.default_txportconf.ring_size; 2638 else 2639 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2640 return 0; 2641 } 2642 2643 static int 2644 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2645 { 2646 uint16_t ring_size; 2647 int ret; 2648 2649 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2650 if (ret) 2651 return 1; 2652 2653 if (rxdesc_id < ring_size) 2654 return 0; 2655 2656 printf("Invalid RX descriptor %u (must be < ring_size=%u)\n", 2657 rxdesc_id, ring_size); 2658 return 1; 2659 } 2660 2661 static int 2662 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2663 { 2664 uint16_t ring_size; 2665 int ret; 2666 2667 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2668 if (ret) 2669 return 1; 2670 2671 if (txdesc_id < ring_size) 2672 return 0; 2673 2674 printf("Invalid TX descriptor %u (must be < ring_size=%u)\n", 2675 txdesc_id, ring_size); 2676 return 1; 2677 } 2678 2679 static const struct rte_memzone * 2680 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2681 { 2682 char mz_name[RTE_MEMZONE_NAMESIZE]; 2683 const struct rte_memzone *mz; 2684 2685 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2686 port_id, q_id, ring_name); 2687 mz = rte_memzone_lookup(mz_name); 2688 if (mz == NULL) 2689 printf("%s ring memory zone (port %d, queue %d) not" 2690 " found (zone name = %s)\n", 2691 ring_name, port_id, q_id, mz_name); 2692 return mz; 2693 } 2694 2695 union igb_ring_dword { 2696 uint64_t dword; 2697 struct { 2698 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2699 uint32_t lo; 2700 uint32_t hi; 2701 #else 2702 uint32_t hi; 2703 uint32_t lo; 2704 #endif 2705 } words; 2706 }; 2707 2708 struct igb_ring_desc_32_bytes { 2709 union igb_ring_dword lo_dword; 2710 union igb_ring_dword hi_dword; 2711 union igb_ring_dword resv1; 2712 union igb_ring_dword resv2; 2713 }; 2714 2715 struct igb_ring_desc_16_bytes { 2716 union igb_ring_dword lo_dword; 2717 union igb_ring_dword hi_dword; 2718 };
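/*
 * Illustrative sketch (not part of the original sources): igb_ring_dword is
 * how the display helpers below split a little-endian 64-bit descriptor word
 * into two host-order 32-bit halves before printing, e.g.:
 *
 *     union igb_ring_dword d;
 *     d.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
 *     printf(" 0x%08X - 0x%08X\n", d.words.lo, d.words.hi);
 *
 * A 16-byte descriptor therefore dumps as two such lines, while the 32-byte
 * i40e layout adds two more for the resv1/resv2 words.
 */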
2719 2720 static void 2721 ring_rxd_display_dword(union igb_ring_dword dword) 2722 { 2723 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2724 (unsigned)dword.words.hi); 2725 } 2726 2727 static void 2728 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2729 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2730 portid_t port_id, 2731 #else 2732 __rte_unused portid_t port_id, 2733 #endif 2734 uint16_t desc_id) 2735 { 2736 struct igb_ring_desc_16_bytes *ring = 2737 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2738 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2739 int ret; 2740 struct rte_eth_dev_info dev_info; 2741 2742 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2743 if (ret != 0) 2744 return; 2745 2746 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2747 /* 32 bytes RX descriptor, i40e only */ 2748 struct igb_ring_desc_32_bytes *ring = 2749 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2750 ring[desc_id].lo_dword.dword = 2751 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2752 ring_rxd_display_dword(ring[desc_id].lo_dword); 2753 ring[desc_id].hi_dword.dword = 2754 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2755 ring_rxd_display_dword(ring[desc_id].hi_dword); 2756 ring[desc_id].resv1.dword = 2757 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2758 ring_rxd_display_dword(ring[desc_id].resv1); 2759 ring[desc_id].resv2.dword = 2760 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2761 ring_rxd_display_dword(ring[desc_id].resv2); 2762 2763 return; 2764 } 2765 #endif 2766 /* 16 bytes RX descriptor */ 2767 ring[desc_id].lo_dword.dword = 2768 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2769 ring_rxd_display_dword(ring[desc_id].lo_dword); 2770 ring[desc_id].hi_dword.dword = 2771 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2772 ring_rxd_display_dword(ring[desc_id].hi_dword); 2773 } 2774 2775 static void 2776 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2777 { 2778 struct igb_ring_desc_16_bytes *ring; 2779 struct igb_ring_desc_16_bytes txd; 2780 2781 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2782 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2783 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2784 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2785 (unsigned)txd.lo_dword.words.lo, 2786 (unsigned)txd.lo_dword.words.hi, 2787 (unsigned)txd.hi_dword.words.lo, 2788 (unsigned)txd.hi_dword.words.hi); 2789 } 2790 2791 void 2792 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2793 { 2794 const struct rte_memzone *rx_mz; 2795 2796 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2797 return; 2798 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2799 if (rx_mz == NULL) 2800 return; 2801 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2802 } 2803 2804 void 2805 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2806 { 2807 const struct rte_memzone *tx_mz; 2808 2809 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2810 return; 2811 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2812 if (tx_mz == NULL) 2813 return; 2814 ring_tx_descriptor_display(tx_mz, txd_id); 2815 } 2816 2817 void 2818 fwd_lcores_config_display(void) 2819 { 2820 lcoreid_t lc_id; 2821 2822 printf("List of forwarding lcores:"); 2823 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2824 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2825 printf("\n"); 2826 } 2827 void 2828 rxtx_config_display(void) 2829 { 2830 portid_t pid; 2831 queueid_t qid; 2832 2833 
printf(" %s packet forwarding%s packets/burst=%d\n", 2834 cur_fwd_eng->fwd_mode_name, 2835 retry_enabled == 0 ? "" : " with retry", 2836 nb_pkt_per_burst); 2837 2838 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2839 printf(" packet len=%u - nb packet segments=%d\n", 2840 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2841 2842 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2843 nb_fwd_lcores, nb_fwd_ports); 2844 2845 RTE_ETH_FOREACH_DEV(pid) { 2846 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2847 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2848 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2849 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2850 struct rte_eth_rxq_info rx_qinfo; 2851 struct rte_eth_txq_info tx_qinfo; 2852 uint16_t rx_free_thresh_tmp; 2853 uint16_t tx_free_thresh_tmp; 2854 uint16_t tx_rs_thresh_tmp; 2855 uint16_t nb_rx_desc_tmp; 2856 uint16_t nb_tx_desc_tmp; 2857 uint64_t offloads_tmp; 2858 uint8_t pthresh_tmp; 2859 uint8_t hthresh_tmp; 2860 uint8_t wthresh_tmp; 2861 int32_t rc; 2862 2863 /* per port config */ 2864 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2865 (unsigned int)pid, nb_rxq, nb_txq); 2866 2867 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2868 ports[pid].dev_conf.rxmode.offloads, 2869 ports[pid].dev_conf.txmode.offloads); 2870 2871 /* per rx queue config only for first queue to be less verbose */ 2872 for (qid = 0; qid < 1; qid++) { 2873 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2874 if (rc) { 2875 nb_rx_desc_tmp = nb_rx_desc[qid]; 2876 rx_free_thresh_tmp = 2877 rx_conf[qid].rx_free_thresh; 2878 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2879 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2880 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2881 offloads_tmp = rx_conf[qid].offloads; 2882 } else { 2883 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2884 rx_free_thresh_tmp = 2885 rx_qinfo.conf.rx_free_thresh; 2886 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2887 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2888 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2889 offloads_tmp = rx_qinfo.conf.offloads; 2890 } 2891 2892 printf(" RX queue: %d\n", qid); 2893 printf(" RX desc=%d - RX free threshold=%d\n", 2894 nb_rx_desc_tmp, rx_free_thresh_tmp); 2895 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2896 " wthresh=%d\n", 2897 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2898 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); 2899 } 2900 2901 /* per tx queue config only for first queue to be less verbose */ 2902 for (qid = 0; qid < 1; qid++) { 2903 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2904 if (rc) { 2905 nb_tx_desc_tmp = nb_tx_desc[qid]; 2906 tx_free_thresh_tmp = 2907 tx_conf[qid].tx_free_thresh; 2908 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2909 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2910 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2911 offloads_tmp = tx_conf[qid].offloads; 2912 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2913 } else { 2914 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2915 tx_free_thresh_tmp = 2916 tx_qinfo.conf.tx_free_thresh; 2917 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2918 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2919 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2920 offloads_tmp = tx_qinfo.conf.offloads; 2921 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2922 } 2923 2924 printf(" TX queue: %d\n", qid); 2925 printf(" TX desc=%d - TX free threshold=%d\n", 2926 nb_tx_desc_tmp, tx_free_thresh_tmp); 
2927 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2928 " wthresh=%d\n", 2929 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2930 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2931 offloads_tmp, tx_rs_thresh_tmp); 2932 } 2933 } 2934 } 2935 2936 void 2937 port_rss_reta_info(portid_t port_id, 2938 struct rte_eth_rss_reta_entry64 *reta_conf, 2939 uint16_t nb_entries) 2940 { 2941 uint16_t i, idx, shift; 2942 int ret; 2943 2944 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2945 return; 2946 2947 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2948 if (ret != 0) { 2949 printf("Failed to get RSS RETA info, return code = %d\n", ret); 2950 return; 2951 } 2952 2953 for (i = 0; i < nb_entries; i++) { 2954 idx = i / RTE_RETA_GROUP_SIZE; 2955 shift = i % RTE_RETA_GROUP_SIZE; 2956 if (!(reta_conf[idx].mask & (1ULL << shift))) 2957 continue; 2958 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2959 i, reta_conf[idx].reta[shift]); 2960 } 2961 } 2962 2963 /* 2964 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 2965 * key of the port. 2966 */ 2967 void 2968 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2969 { 2970 struct rte_eth_rss_conf rss_conf = {0}; 2971 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2972 uint64_t rss_hf; 2973 uint8_t i; 2974 int diag; 2975 struct rte_eth_dev_info dev_info; 2976 uint8_t hash_key_size; 2977 int ret; 2978 2979 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2980 return; 2981 2982 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2983 if (ret != 0) 2984 return; 2985 2986 if (dev_info.hash_key_size > 0 && 2987 dev_info.hash_key_size <= sizeof(rss_key)) 2988 hash_key_size = dev_info.hash_key_size; 2989 else { 2990 printf("dev_info did not provide a valid hash key size\n"); 2991 return; 2992 } 2993 2994 /* Get RSS hash key if asked to display it */ 2995 rss_conf.rss_key = (show_rss_key) ? 
rss_key : NULL; 2996 rss_conf.rss_key_len = hash_key_size; 2997 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2998 if (diag != 0) { 2999 switch (diag) { 3000 case -ENODEV: 3001 printf("port index %d invalid\n", port_id); 3002 break; 3003 case -ENOTSUP: 3004 printf("operation not supported by device\n"); 3005 break; 3006 default: 3007 printf("operation failed - diag=%d\n", diag); 3008 break; 3009 } 3010 return; 3011 } 3012 rss_hf = rss_conf.rss_hf; 3013 if (rss_hf == 0) { 3014 printf("RSS disabled\n"); 3015 return; 3016 } 3017 printf("RSS functions:\n "); 3018 for (i = 0; rss_type_table[i].str; i++) { 3019 if (rss_hf & rss_type_table[i].rss_type) 3020 printf("%s ", rss_type_table[i].str); 3021 } 3022 printf("\n"); 3023 if (!show_rss_key) 3024 return; 3025 printf("RSS key:\n"); 3026 for (i = 0; i < hash_key_size; i++) 3027 printf("%02X", rss_key[i]); 3028 printf("\n"); 3029 } 3030 3031 void 3032 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3033 uint hash_key_len) 3034 { 3035 struct rte_eth_rss_conf rss_conf; 3036 int diag; 3037 unsigned int i; 3038 3039 rss_conf.rss_key = NULL; 3040 rss_conf.rss_key_len = hash_key_len; 3041 rss_conf.rss_hf = 0; 3042 for (i = 0; rss_type_table[i].str; i++) { 3043 if (!strcmp(rss_type_table[i].str, rss_type)) 3044 rss_conf.rss_hf = rss_type_table[i].rss_type; 3045 } 3046 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3047 if (diag == 0) { 3048 rss_conf.rss_key = hash_key; 3049 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3050 } 3051 if (diag == 0) 3052 return; 3053 3054 switch (diag) { 3055 case -ENODEV: 3056 printf("port index %d invalid\n", port_id); 3057 break; 3058 case -ENOTSUP: 3059 printf("operation not supported by device\n"); 3060 break; 3061 default: 3062 printf("operation failed - diag=%d\n", diag); 3063 break; 3064 } 3065 } 3066 3067 /* 3068 * Setup forwarding configuration for each logical core. 3069 */ 3070 static void 3071 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 3072 { 3073 streamid_t nb_fs_per_lcore; 3074 streamid_t nb_fs; 3075 streamid_t sm_id; 3076 lcoreid_t nb_extra; 3077 lcoreid_t nb_fc; 3078 lcoreid_t nb_lc; 3079 lcoreid_t lc_id; 3080 3081 nb_fs = cfg->nb_fwd_streams; 3082 nb_fc = cfg->nb_fwd_lcores; 3083 if (nb_fs <= nb_fc) { 3084 nb_fs_per_lcore = 1; 3085 nb_extra = 0; 3086 } else { 3087 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3088 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3089 } 3090 3091 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3092 sm_id = 0; 3093 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3094 fwd_lcores[lc_id]->stream_idx = sm_id; 3095 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3096 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3097 } 3098 3099 /* 3100 * Assign extra remaining streams, if any. 3101 */ 3102 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3103 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3104 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3105 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3106 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3107 } 3108 } 3109 3110 static portid_t 3111 fwd_topology_tx_port_get(portid_t rxp) 3112 { 3113 static int warning_once = 1; 3114 3115 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 3116 3117 switch (port_topology) { 3118 default: 3119 case PORT_TOPOLOGY_PAIRED: 3120 if ((rxp & 0x1) == 0) { 3121 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 3122 return rxp + 1; 3123 if (warning_once) { 3124 printf("\nWarning! 
port-topology=paired" 3125 " and odd forward ports number," 3126 " the last port will pair with" 3127 " itself.\n\n"); 3128 warning_once = 0; 3129 } 3130 return rxp; 3131 } 3132 return rxp - 1; 3133 case PORT_TOPOLOGY_CHAINED: 3134 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 3135 case PORT_TOPOLOGY_LOOP: 3136 return rxp; 3137 } 3138 } 3139 3140 static void 3141 simple_fwd_config_setup(void) 3142 { 3143 portid_t i; 3144 3145 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 3146 cur_fwd_config.nb_fwd_streams = 3147 (streamid_t) cur_fwd_config.nb_fwd_ports; 3148 3149 /* reinitialize forwarding streams */ 3150 init_fwd_streams(); 3151 3152 /* 3153 * In the simple forwarding test, the number of forwarding cores 3154 * must be lower or equal to the number of forwarding ports. 3155 */ 3156 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3157 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 3158 cur_fwd_config.nb_fwd_lcores = 3159 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 3160 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3161 3162 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 3163 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 3164 fwd_streams[i]->rx_queue = 0; 3165 fwd_streams[i]->tx_port = 3166 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 3167 fwd_streams[i]->tx_queue = 0; 3168 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 3169 fwd_streams[i]->retry_enabled = retry_enabled; 3170 } 3171 } 3172 3173 /** 3174 * For the RSS forwarding test all streams distributed over lcores. Each stream 3175 * being composed of a RX queue to poll on a RX port for input messages, 3176 * associated with a TX queue of a TX port where to send forwarded packets. 3177 */ 3178 static void 3179 rss_fwd_config_setup(void) 3180 { 3181 portid_t rxp; 3182 portid_t txp; 3183 queueid_t rxq; 3184 queueid_t nb_q; 3185 streamid_t sm_id; 3186 3187 nb_q = nb_rxq; 3188 if (nb_q > nb_txq) 3189 nb_q = nb_txq; 3190 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3191 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3192 cur_fwd_config.nb_fwd_streams = 3193 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 3194 3195 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3196 cur_fwd_config.nb_fwd_lcores = 3197 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3198 3199 /* reinitialize forwarding streams */ 3200 init_fwd_streams(); 3201 3202 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3203 rxp = 0; rxq = 0; 3204 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 3205 struct fwd_stream *fs; 3206 3207 fs = fwd_streams[sm_id]; 3208 txp = fwd_topology_tx_port_get(rxp); 3209 fs->rx_port = fwd_ports_ids[rxp]; 3210 fs->rx_queue = rxq; 3211 fs->tx_port = fwd_ports_ids[txp]; 3212 fs->tx_queue = rxq; 3213 fs->peer_addr = fs->tx_port; 3214 fs->retry_enabled = retry_enabled; 3215 rxp++; 3216 if (rxp < nb_fwd_ports) 3217 continue; 3218 rxp = 0; 3219 rxq++; 3220 } 3221 } 3222 3223 /** 3224 * For the DCB forwarding test, each core is assigned on each traffic class. 3225 * 3226 * Each core is assigned a multi-stream, each stream being composed of 3227 * a RX queue to poll on a RX port for input messages, associated with 3228 * a TX queue of a TX port where to send forwarded packets. All RX and 3229 * TX queues are mapping to the same traffic class. 
3230 * If VMDQ and DCB co-exist, each traffic class on the different pools shares 3231 * the same core. 3232 */ 3233 static void 3234 dcb_fwd_config_setup(void) 3235 { 3236 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3237 portid_t txp, rxp = 0; 3238 queueid_t txq, rxq = 0; 3239 lcoreid_t lc_id; 3240 uint16_t nb_rx_queue, nb_tx_queue; 3241 uint16_t i, j, k, sm_id = 0; 3242 uint8_t tc = 0; 3243 3244 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3245 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3246 cur_fwd_config.nb_fwd_streams = 3247 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3248 3249 /* reinitialize forwarding streams */ 3250 init_fwd_streams(); 3251 sm_id = 0; 3252 txp = 1; 3253 /* get the dcb info on the first RX and TX ports */ 3254 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3255 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3256 3257 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3258 fwd_lcores[lc_id]->stream_nb = 0; 3259 fwd_lcores[lc_id]->stream_idx = sm_id; 3260 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 3261 /* if nb_queue is zero, this tc is 3262 * not enabled on the pool 3263 */ 3264 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3265 break; 3266 k = fwd_lcores[lc_id]->stream_nb + 3267 fwd_lcores[lc_id]->stream_idx; 3268 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3269 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3270 nb_rx_queue = rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3271 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3272 for (j = 0; j < nb_rx_queue; j++) { 3273 struct fwd_stream *fs; 3274 3275 fs = fwd_streams[k + j]; 3276 fs->rx_port = fwd_ports_ids[rxp]; 3277 fs->rx_queue = rxq + j; 3278 fs->tx_port = fwd_ports_ids[txp]; 3279 fs->tx_queue = txq + j % nb_tx_queue; 3280 fs->peer_addr = fs->tx_port; 3281 fs->retry_enabled = retry_enabled; 3282 } 3283 fwd_lcores[lc_id]->stream_nb += 3284 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3285 } 3286 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3287 3288 tc++; 3289 if (tc < rxp_dcb_info.nb_tcs) 3290 continue; 3291 /* Restart from TC 0 on next RX port */ 3292 tc = 0; 3293 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3294 rxp = (portid_t) 3295 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3296 else 3297 rxp++; 3298 if (rxp >= nb_fwd_ports) 3299 return; 3300 /* get the dcb information on next RX and TX ports */ 3301 if ((rxp & 0x1) == 0) 3302 txp = (portid_t) (rxp + 1); 3303 else 3304 txp = (portid_t) (rxp - 1); 3305 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3306 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3307 } 3308 } 3309 3310 static void 3311 icmp_echo_config_setup(void) 3312 { 3313 portid_t rxp; 3314 queueid_t rxq; 3315 lcoreid_t lc_id; 3316 uint16_t sm_id; 3317 3318 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3319 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3320 (nb_txq * nb_fwd_ports); 3321 else 3322 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3323 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3324 cur_fwd_config.nb_fwd_streams = 3325 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3326 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3327 cur_fwd_config.nb_fwd_lcores = 3328 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3329 if (verbose_level > 0) { 3330 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 3331 __FUNCTION__, 3332 cur_fwd_config.nb_fwd_lcores, 3333 cur_fwd_config.nb_fwd_ports, 3334
cur_fwd_config.nb_fwd_streams); 3335 } 3336 3337 /* reinitialize forwarding streams */ 3338 init_fwd_streams(); 3339 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3340 rxp = 0; rxq = 0; 3341 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3342 if (verbose_level > 0) 3343 printf(" core=%d: \n", lc_id); 3344 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3345 struct fwd_stream *fs; 3346 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3347 fs->rx_port = fwd_ports_ids[rxp]; 3348 fs->rx_queue = rxq; 3349 fs->tx_port = fs->rx_port; 3350 fs->tx_queue = rxq; 3351 fs->peer_addr = fs->tx_port; 3352 fs->retry_enabled = retry_enabled; 3353 if (verbose_level > 0) 3354 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3355 sm_id, fs->rx_port, fs->rx_queue, 3356 fs->tx_queue); 3357 rxq = (queueid_t) (rxq + 1); 3358 if (rxq == nb_rxq) { 3359 rxq = 0; 3360 rxp = (portid_t) (rxp + 1); 3361 } 3362 } 3363 } 3364 } 3365 3366 void 3367 fwd_config_setup(void) 3368 { 3369 cur_fwd_config.fwd_eng = cur_fwd_eng; 3370 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3371 icmp_echo_config_setup(); 3372 return; 3373 } 3374 3375 if ((nb_rxq > 1) && (nb_txq > 1)){ 3376 if (dcb_config) 3377 dcb_fwd_config_setup(); 3378 else 3379 rss_fwd_config_setup(); 3380 } 3381 else 3382 simple_fwd_config_setup(); 3383 } 3384 3385 static const char * 3386 mp_alloc_to_str(uint8_t mode) 3387 { 3388 switch (mode) { 3389 case MP_ALLOC_NATIVE: 3390 return "native"; 3391 case MP_ALLOC_ANON: 3392 return "anon"; 3393 case MP_ALLOC_XMEM: 3394 return "xmem"; 3395 case MP_ALLOC_XMEM_HUGE: 3396 return "xmemhuge"; 3397 case MP_ALLOC_XBUF: 3398 return "xbuf"; 3399 default: 3400 return "invalid"; 3401 } 3402 } 3403 3404 void 3405 pkt_fwd_config_display(struct fwd_config *cfg) 3406 { 3407 struct fwd_stream *fs; 3408 lcoreid_t lc_id; 3409 streamid_t sm_id; 3410 3411 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3412 "NUMA support %s, MP allocation mode: %s\n", 3413 cfg->fwd_eng->fwd_mode_name, 3414 retry_enabled == 0 ? "" : " with retry", 3415 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3416 numa_support == 1 ? 
"enabled" : "disabled", 3417 mp_alloc_to_str(mp_alloc_type)); 3418 3419 if (retry_enabled) 3420 printf("TX retry num: %u, delay between TX retries: %uus\n", 3421 burst_tx_retry_num, burst_tx_delay_time); 3422 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3423 printf("Logical Core %u (socket %u) forwards packets on " 3424 "%d streams:", 3425 fwd_lcores_cpuids[lc_id], 3426 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3427 fwd_lcores[lc_id]->stream_nb); 3428 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3429 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3430 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3431 "P=%d/Q=%d (socket %u) ", 3432 fs->rx_port, fs->rx_queue, 3433 ports[fs->rx_port].socket_id, 3434 fs->tx_port, fs->tx_queue, 3435 ports[fs->tx_port].socket_id); 3436 print_ethaddr("peer=", 3437 &peer_eth_addrs[fs->peer_addr]); 3438 } 3439 printf("\n"); 3440 } 3441 printf("\n"); 3442 } 3443 3444 void 3445 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3446 { 3447 struct rte_ether_addr new_peer_addr; 3448 if (!rte_eth_dev_is_valid_port(port_id)) { 3449 printf("Error: Invalid port number %i\n", port_id); 3450 return; 3451 } 3452 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3453 printf("Error: Invalid ethernet address: %s\n", peer_addr); 3454 return; 3455 } 3456 peer_eth_addrs[port_id] = new_peer_addr; 3457 } 3458 3459 int 3460 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3461 { 3462 unsigned int i; 3463 unsigned int lcore_cpuid; 3464 int record_now; 3465 3466 record_now = 0; 3467 again: 3468 for (i = 0; i < nb_lc; i++) { 3469 lcore_cpuid = lcorelist[i]; 3470 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3471 printf("lcore %u not enabled\n", lcore_cpuid); 3472 return -1; 3473 } 3474 if (lcore_cpuid == rte_get_main_lcore()) { 3475 printf("lcore %u cannot be masked on for running " 3476 "packet forwarding, which is the main lcore " 3477 "and reserved for command line parsing only\n", 3478 lcore_cpuid); 3479 return -1; 3480 } 3481 if (record_now) 3482 fwd_lcores_cpuids[i] = lcore_cpuid; 3483 } 3484 if (record_now == 0) { 3485 record_now = 1; 3486 goto again; 3487 } 3488 nb_cfg_lcores = (lcoreid_t) nb_lc; 3489 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3490 printf("previous number of forwarding cores %u - changed to " 3491 "number of configured cores %u\n", 3492 (unsigned int) nb_fwd_lcores, nb_lc); 3493 nb_fwd_lcores = (lcoreid_t) nb_lc; 3494 } 3495 3496 return 0; 3497 } 3498 3499 int 3500 set_fwd_lcores_mask(uint64_t lcoremask) 3501 { 3502 unsigned int lcorelist[64]; 3503 unsigned int nb_lc; 3504 unsigned int i; 3505 3506 if (lcoremask == 0) { 3507 printf("Invalid NULL mask of cores\n"); 3508 return -1; 3509 } 3510 nb_lc = 0; 3511 for (i = 0; i < 64; i++) { 3512 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3513 continue; 3514 lcorelist[nb_lc++] = i; 3515 } 3516 return set_fwd_lcores_list(lcorelist, nb_lc); 3517 } 3518 3519 void 3520 set_fwd_lcores_number(uint16_t nb_lc) 3521 { 3522 if (test_done == 0) { 3523 printf("Please stop forwarding first\n"); 3524 return; 3525 } 3526 if (nb_lc > nb_cfg_lcores) { 3527 printf("nb fwd cores %u > %u (max. 
number of configured " 3528 "lcores) - ignored\n", 3529 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3530 return; 3531 } 3532 nb_fwd_lcores = (lcoreid_t) nb_lc; 3533 printf("Number of forwarding cores set to %u\n", 3534 (unsigned int) nb_fwd_lcores); 3535 } 3536 3537 void 3538 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3539 { 3540 unsigned int i; 3541 portid_t port_id; 3542 int record_now; 3543 3544 record_now = 0; 3545 again: 3546 for (i = 0; i < nb_pt; i++) { 3547 port_id = (portid_t) portlist[i]; 3548 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3549 return; 3550 if (record_now) 3551 fwd_ports_ids[i] = port_id; 3552 } 3553 if (record_now == 0) { 3554 record_now = 1; 3555 goto again; 3556 } 3557 nb_cfg_ports = (portid_t) nb_pt; 3558 if (nb_fwd_ports != (portid_t) nb_pt) { 3559 printf("previous number of forwarding ports %u - changed to " 3560 "number of configured ports %u\n", 3561 (unsigned int) nb_fwd_ports, nb_pt); 3562 nb_fwd_ports = (portid_t) nb_pt; 3563 } 3564 } 3565 3566 /** 3567 * Parse the user input and obtain the list of forwarding ports 3568 * 3569 * @param[in] list 3570 * String containing the user input. User can specify 3571 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3572 * For example, if the user wants to use all the available 3573 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3574 * If the user wants to use only the ports 1,2 then the input 3575 * is 1,2. 3576 * valid characters are '-' and ',' 3577 * @param[out] values 3578 * This array will be filled with a list of port IDs 3579 * based on the user input 3580 * Note that duplicate entries are discarded and only the first 3581 * count entries in this array are port IDs and all the rest 3582 * will contain default values 3583 * @param[in] maxsize 3584 * This parameter denotes 2 things 3585 * 1) Number of elements in the values array 3586 * 2) Maximum value of each element in the values array 3587 * @return 3588 * On success, returns total count of parsed port IDs 3589 * On failure, returns 0 3590 */ 3591 static unsigned int 3592 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3593 { 3594 unsigned int count = 0; 3595 char *end = NULL; 3596 int min, max; 3597 int value, i; 3598 unsigned int marked[maxsize]; 3599 3600 if (list == NULL || values == NULL) 3601 return 0; 3602 3603 for (i = 0; i < (int)maxsize; i++) 3604 marked[i] = 0; 3605 3606 min = INT_MAX; 3607 3608 do { 3609 /*Remove the blank spaces if any*/ 3610 while (isblank(*list)) 3611 list++; 3612 if (*list == '\0') 3613 break; 3614 errno = 0; 3615 value = strtol(list, &end, 10); 3616 if (errno || end == NULL) 3617 return 0; 3618 if (value < 0 || value >= (int)maxsize) 3619 return 0; 3620 while (isblank(*end)) 3621 end++; 3622 if (*end == '-' && min == INT_MAX) { 3623 min = value; 3624 } else if ((*end == ',') || (*end == '\0')) { 3625 max = value; 3626 if (min == INT_MAX) 3627 min = value; 3628 for (i = min; i <= max; i++) { 3629 if (count < maxsize) { 3630 if (marked[i]) 3631 continue; 3632 values[count] = i; 3633 marked[i] = 1; 3634 count++; 3635 } 3636 } 3637 min = INT_MAX; 3638 } else 3639 return 0; 3640 list = end + 1; 3641 } while (*end != '\0'); 3642 3643 return count; 3644 } 3645 3646 void 3647 parse_fwd_portlist(const char *portlist) 3648 { 3649 unsigned int portcount; 3650 unsigned int portindex[RTE_MAX_ETHPORTS]; 3651 unsigned int i, valid_port_count = 0; 3652 3653 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3654 if (!portcount) 3655 
rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3656 3657 /* 3658 * Here we verify the validity of the ports 3659 * and thereby calculate the total number of 3660 * valid ports 3661 */ 3662 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3663 if (rte_eth_dev_is_valid_port(portindex[i])) { 3664 portindex[valid_port_count] = portindex[i]; 3665 valid_port_count++; 3666 } 3667 } 3668 3669 set_fwd_ports_list(portindex, valid_port_count); 3670 } 3671 3672 void 3673 set_fwd_ports_mask(uint64_t portmask) 3674 { 3675 unsigned int portlist[64]; 3676 unsigned int nb_pt; 3677 unsigned int i; 3678 3679 if (portmask == 0) { 3680 printf("Invalid NULL mask of ports\n"); 3681 return; 3682 } 3683 nb_pt = 0; 3684 RTE_ETH_FOREACH_DEV(i) { 3685 if (! ((uint64_t)(1ULL << i) & portmask)) 3686 continue; 3687 portlist[nb_pt++] = i; 3688 } 3689 set_fwd_ports_list(portlist, nb_pt); 3690 } 3691 3692 void 3693 set_fwd_ports_number(uint16_t nb_pt) 3694 { 3695 if (nb_pt > nb_cfg_ports) { 3696 printf("nb fwd ports %u > %u (number of configured " 3697 "ports) - ignored\n", 3698 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3699 return; 3700 } 3701 nb_fwd_ports = (portid_t) nb_pt; 3702 printf("Number of forwarding ports set to %u\n", 3703 (unsigned int) nb_fwd_ports); 3704 } 3705 3706 int 3707 port_is_forwarding(portid_t port_id) 3708 { 3709 unsigned int i; 3710 3711 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3712 return -1; 3713 3714 for (i = 0; i < nb_fwd_ports; i++) { 3715 if (fwd_ports_ids[i] == port_id) 3716 return 1; 3717 } 3718 3719 return 0; 3720 } 3721 3722 void 3723 set_nb_pkt_per_burst(uint16_t nb) 3724 { 3725 if (nb > MAX_PKT_BURST) { 3726 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 3727 " ignored\n", 3728 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3729 return; 3730 } 3731 nb_pkt_per_burst = nb; 3732 printf("Number of packets per burst set to %u\n", 3733 (unsigned int) nb_pkt_per_burst); 3734 } 3735 3736 static const char * 3737 tx_split_get_name(enum tx_pkt_split split) 3738 { 3739 uint32_t i; 3740 3741 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3742 if (tx_split_name[i].split == split) 3743 return tx_split_name[i].name; 3744 } 3745 return NULL; 3746 } 3747 3748 void 3749 set_tx_pkt_split(const char *name) 3750 { 3751 uint32_t i; 3752 3753 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3754 if (strcmp(tx_split_name[i].name, name) == 0) { 3755 tx_pkt_split = tx_split_name[i].split; 3756 return; 3757 } 3758 } 3759 printf("unknown value: \"%s\"\n", name); 3760 } 3761 3762 int 3763 parse_fec_mode(const char *name, uint32_t *mode) 3764 { 3765 uint8_t i; 3766 3767 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3768 if (strcmp(fec_mode_name[i].name, name) == 0) { 3769 *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3770 return 0; 3771 } 3772 } 3773 return -1; 3774 } 3775 3776 void 3777 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3778 { 3779 unsigned int i, j; 3780 3781 printf("FEC capabilities:\n"); 3782 3783 for (i = 0; i < num; i++) { 3784 printf("%s : ", 3785 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3786 3787 for (j = RTE_ETH_FEC_AUTO; j < RTE_DIM(fec_mode_name); j++) { 3788 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3789 speed_fec_capa[i].capa) 3790 printf("%s ", fec_mode_name[j].name); 3791 } 3792 printf("\n"); 3793 } 3794 } 3795 3796 void 3797 show_rx_pkt_offsets(void) 3798 { 3799 uint32_t i, n; 3800 3801 n = rx_pkt_nb_offs; 3802 printf("Number of offsets: %u\n", n); 3803 if (n) { 3804 printf("Segment 
offsets: "); 3805 for (i = 0; i != n - 1; i++) 3806 printf("%hu,", rx_pkt_seg_offsets[i]); 3807 printf("%hu\n", rx_pkt_seg_lengths[i]); 3808 } 3809 } 3810 3811 void 3812 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 3813 { 3814 unsigned int i; 3815 3816 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 3817 printf("nb segments per RX packets=%u >= " 3818 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 3819 return; 3820 } 3821 3822 /* 3823 * No extra check here, the segment length will be checked by PMD 3824 * in the extended queue setup. 3825 */ 3826 for (i = 0; i < nb_offs; i++) { 3827 if (seg_offsets[i] >= UINT16_MAX) { 3828 printf("offset[%u]=%u > UINT16_MAX - give up\n", 3829 i, seg_offsets[i]); 3830 return; 3831 } 3832 } 3833 3834 for (i = 0; i < nb_offs; i++) 3835 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 3836 3837 rx_pkt_nb_offs = (uint8_t) nb_offs; 3838 } 3839 3840 void 3841 show_rx_pkt_segments(void) 3842 { 3843 uint32_t i, n; 3844 3845 n = rx_pkt_nb_segs; 3846 printf("Number of segments: %u\n", n); 3847 if (n) { 3848 printf("Segment sizes: "); 3849 for (i = 0; i != n - 1; i++) 3850 printf("%hu,", rx_pkt_seg_lengths[i]); 3851 printf("%hu\n", rx_pkt_seg_lengths[i]); 3852 } 3853 } 3854 3855 void 3856 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3857 { 3858 unsigned int i; 3859 3860 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 3861 printf("nb segments per RX packets=%u >= " 3862 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 3863 return; 3864 } 3865 3866 /* 3867 * No extra check here, the segment length will be checked by PMD 3868 * in the extended queue setup. 3869 */ 3870 for (i = 0; i < nb_segs; i++) { 3871 if (seg_lengths[i] >= UINT16_MAX) { 3872 printf("length[%u]=%u > UINT16_MAX - give up\n", 3873 i, seg_lengths[i]); 3874 return; 3875 } 3876 } 3877 3878 for (i = 0; i < nb_segs; i++) 3879 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3880 3881 rx_pkt_nb_segs = (uint8_t) nb_segs; 3882 } 3883 3884 void 3885 show_tx_pkt_segments(void) 3886 { 3887 uint32_t i, n; 3888 const char *split; 3889 3890 n = tx_pkt_nb_segs; 3891 split = tx_split_get_name(tx_pkt_split); 3892 3893 printf("Number of segments: %u\n", n); 3894 printf("Segment sizes: "); 3895 for (i = 0; i != n - 1; i++) 3896 printf("%hu,", tx_pkt_seg_lengths[i]); 3897 printf("%hu\n", tx_pkt_seg_lengths[i]); 3898 printf("Split packet: %s\n", split); 3899 } 3900 3901 static bool 3902 nb_segs_is_invalid(unsigned int nb_segs) 3903 { 3904 uint16_t ring_size; 3905 uint16_t queue_id; 3906 uint16_t port_id; 3907 int ret; 3908 3909 RTE_ETH_FOREACH_DEV(port_id) { 3910 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 3911 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 3912 3913 if (ret) 3914 return true; 3915 3916 if (ring_size < nb_segs) { 3917 printf("nb segments per TX packets=%u >= " 3918 "TX queue(%u) ring_size=%u - ignored\n", 3919 nb_segs, queue_id, ring_size); 3920 return true; 3921 } 3922 } 3923 } 3924 3925 return false; 3926 } 3927 3928 void 3929 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3930 { 3931 uint16_t tx_pkt_len; 3932 unsigned int i; 3933 3934 if (nb_segs_is_invalid(nb_segs)) 3935 return; 3936 3937 /* 3938 * Check that each segment length is greater or equal than 3939 * the mbuf data sise. 3940 * Check also that the total packet length is greater or equal than the 3941 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 3942 * 20 + 8). 
3943 */ 3944 tx_pkt_len = 0; 3945 for (i = 0; i < nb_segs; i++) { 3946 if (seg_lengths[i] > mbuf_data_size[0]) { 3947 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 3948 i, seg_lengths[i], mbuf_data_size[0]); 3949 return; 3950 } 3951 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 3952 } 3953 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 3954 printf("total packet length=%u < %d - give up\n", 3955 (unsigned) tx_pkt_len, 3956 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 3957 return; 3958 } 3959 3960 for (i = 0; i < nb_segs; i++) 3961 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3962 3963 tx_pkt_length = tx_pkt_len; 3964 tx_pkt_nb_segs = (uint8_t) nb_segs; 3965 } 3966 3967 void 3968 show_tx_pkt_times(void) 3969 { 3970 printf("Interburst gap: %u\n", tx_pkt_times_inter); 3971 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 3972 } 3973 3974 void 3975 set_tx_pkt_times(unsigned int *tx_times) 3976 { 3977 tx_pkt_times_inter = tx_times[0]; 3978 tx_pkt_times_intra = tx_times[1]; 3979 } 3980 3981 void 3982 setup_gro(const char *onoff, portid_t port_id) 3983 { 3984 if (!rte_eth_dev_is_valid_port(port_id)) { 3985 printf("invalid port id %u\n", port_id); 3986 return; 3987 } 3988 if (test_done == 0) { 3989 printf("Before enable/disable GRO," 3990 " please stop forwarding first\n"); 3991 return; 3992 } 3993 if (strcmp(onoff, "on") == 0) { 3994 if (gro_ports[port_id].enable != 0) { 3995 printf("Port %u has enabled GRO. Please" 3996 " disable GRO first\n", port_id); 3997 return; 3998 } 3999 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4000 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 4001 gro_ports[port_id].param.max_flow_num = 4002 GRO_DEFAULT_FLOW_NUM; 4003 gro_ports[port_id].param.max_item_per_flow = 4004 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 4005 } 4006 gro_ports[port_id].enable = 1; 4007 } else { 4008 if (gro_ports[port_id].enable == 0) { 4009 printf("Port %u has disabled GRO\n", port_id); 4010 return; 4011 } 4012 gro_ports[port_id].enable = 0; 4013 } 4014 } 4015 4016 void 4017 setup_gro_flush_cycles(uint8_t cycles) 4018 { 4019 if (test_done == 0) { 4020 printf("Before change flush interval for GRO," 4021 " please stop forwarding first.\n"); 4022 return; 4023 } 4024 4025 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 4026 GRO_DEFAULT_FLUSH_CYCLES) { 4027 printf("The flushing cycle be in the range" 4028 " of 1 to %u. 
Revert to the default" 4029 " value %u.\n", 4030 GRO_MAX_FLUSH_CYCLES, 4031 GRO_DEFAULT_FLUSH_CYCLES); 4032 cycles = GRO_DEFAULT_FLUSH_CYCLES; 4033 } 4034 4035 gro_flush_cycles = cycles; 4036 } 4037 4038 void 4039 show_gro(portid_t port_id) 4040 { 4041 struct rte_gro_param *param; 4042 uint32_t max_pkts_num; 4043 4044 param = &gro_ports[port_id].param; 4045 4046 if (!rte_eth_dev_is_valid_port(port_id)) { 4047 printf("Invalid port id %u.\n", port_id); 4048 return; 4049 } 4050 if (gro_ports[port_id].enable) { 4051 printf("GRO type: TCP/IPv4\n"); 4052 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4053 max_pkts_num = param->max_flow_num * 4054 param->max_item_per_flow; 4055 } else 4056 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 4057 printf("Max number of packets to perform GRO: %u\n", 4058 max_pkts_num); 4059 printf("Flushing cycles: %u\n", gro_flush_cycles); 4060 } else 4061 printf("Port %u doesn't enable GRO.\n", port_id); 4062 } 4063 4064 void 4065 setup_gso(const char *mode, portid_t port_id) 4066 { 4067 if (!rte_eth_dev_is_valid_port(port_id)) { 4068 printf("invalid port id %u\n", port_id); 4069 return; 4070 } 4071 if (strcmp(mode, "on") == 0) { 4072 if (test_done == 0) { 4073 printf("before enabling GSO," 4074 " please stop forwarding first\n"); 4075 return; 4076 } 4077 gso_ports[port_id].enable = 1; 4078 } else if (strcmp(mode, "off") == 0) { 4079 if (test_done == 0) { 4080 printf("before disabling GSO," 4081 " please stop forwarding first\n"); 4082 return; 4083 } 4084 gso_ports[port_id].enable = 0; 4085 } 4086 } 4087 4088 char* 4089 list_pkt_forwarding_modes(void) 4090 { 4091 static char fwd_modes[128] = ""; 4092 const char *separator = "|"; 4093 struct fwd_engine *fwd_eng; 4094 unsigned i = 0; 4095 4096 if (strlen (fwd_modes) == 0) { 4097 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4098 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4099 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4100 strncat(fwd_modes, separator, 4101 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4102 } 4103 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4104 } 4105 4106 return fwd_modes; 4107 } 4108 4109 char* 4110 list_pkt_forwarding_retry_modes(void) 4111 { 4112 static char fwd_modes[128] = ""; 4113 const char *separator = "|"; 4114 struct fwd_engine *fwd_eng; 4115 unsigned i = 0; 4116 4117 if (strlen(fwd_modes) == 0) { 4118 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4119 if (fwd_eng == &rx_only_engine) 4120 continue; 4121 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4122 sizeof(fwd_modes) - 4123 strlen(fwd_modes) - 1); 4124 strncat(fwd_modes, separator, 4125 sizeof(fwd_modes) - 4126 strlen(fwd_modes) - 1); 4127 } 4128 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4129 } 4130 4131 return fwd_modes; 4132 } 4133 4134 void 4135 set_pkt_forwarding_mode(const char *fwd_mode_name) 4136 { 4137 struct fwd_engine *fwd_eng; 4138 unsigned i; 4139 4140 i = 0; 4141 while ((fwd_eng = fwd_engines[i]) != NULL) { 4142 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 4143 printf("Set %s packet forwarding mode%s\n", 4144 fwd_mode_name, 4145 retry_enabled == 0 ? 
"" : " with retry"); 4146 cur_fwd_eng = fwd_eng; 4147 return; 4148 } 4149 i++; 4150 } 4151 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 4152 } 4153 4154 void 4155 add_rx_dump_callbacks(portid_t portid) 4156 { 4157 struct rte_eth_dev_info dev_info; 4158 uint16_t queue; 4159 int ret; 4160 4161 if (port_id_is_invalid(portid, ENABLED_WARN)) 4162 return; 4163 4164 ret = eth_dev_info_get_print_err(portid, &dev_info); 4165 if (ret != 0) 4166 return; 4167 4168 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4169 if (!ports[portid].rx_dump_cb[queue]) 4170 ports[portid].rx_dump_cb[queue] = 4171 rte_eth_add_rx_callback(portid, queue, 4172 dump_rx_pkts, NULL); 4173 } 4174 4175 void 4176 add_tx_dump_callbacks(portid_t portid) 4177 { 4178 struct rte_eth_dev_info dev_info; 4179 uint16_t queue; 4180 int ret; 4181 4182 if (port_id_is_invalid(portid, ENABLED_WARN)) 4183 return; 4184 4185 ret = eth_dev_info_get_print_err(portid, &dev_info); 4186 if (ret != 0) 4187 return; 4188 4189 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4190 if (!ports[portid].tx_dump_cb[queue]) 4191 ports[portid].tx_dump_cb[queue] = 4192 rte_eth_add_tx_callback(portid, queue, 4193 dump_tx_pkts, NULL); 4194 } 4195 4196 void 4197 remove_rx_dump_callbacks(portid_t portid) 4198 { 4199 struct rte_eth_dev_info dev_info; 4200 uint16_t queue; 4201 int ret; 4202 4203 if (port_id_is_invalid(portid, ENABLED_WARN)) 4204 return; 4205 4206 ret = eth_dev_info_get_print_err(portid, &dev_info); 4207 if (ret != 0) 4208 return; 4209 4210 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4211 if (ports[portid].rx_dump_cb[queue]) { 4212 rte_eth_remove_rx_callback(portid, queue, 4213 ports[portid].rx_dump_cb[queue]); 4214 ports[portid].rx_dump_cb[queue] = NULL; 4215 } 4216 } 4217 4218 void 4219 remove_tx_dump_callbacks(portid_t portid) 4220 { 4221 struct rte_eth_dev_info dev_info; 4222 uint16_t queue; 4223 int ret; 4224 4225 if (port_id_is_invalid(portid, ENABLED_WARN)) 4226 return; 4227 4228 ret = eth_dev_info_get_print_err(portid, &dev_info); 4229 if (ret != 0) 4230 return; 4231 4232 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4233 if (ports[portid].tx_dump_cb[queue]) { 4234 rte_eth_remove_tx_callback(portid, queue, 4235 ports[portid].tx_dump_cb[queue]); 4236 ports[portid].tx_dump_cb[queue] = NULL; 4237 } 4238 } 4239 4240 void 4241 configure_rxtx_dump_callbacks(uint16_t verbose) 4242 { 4243 portid_t portid; 4244 4245 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4246 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4247 return; 4248 #endif 4249 4250 RTE_ETH_FOREACH_DEV(portid) 4251 { 4252 if (verbose == 1 || verbose > 2) 4253 add_rx_dump_callbacks(portid); 4254 else 4255 remove_rx_dump_callbacks(portid); 4256 if (verbose >= 2) 4257 add_tx_dump_callbacks(portid); 4258 else 4259 remove_tx_dump_callbacks(portid); 4260 } 4261 } 4262 4263 void 4264 set_verbose_level(uint16_t vb_level) 4265 { 4266 printf("Change verbose level from %u to %u\n", 4267 (unsigned int) verbose_level, (unsigned int) vb_level); 4268 verbose_level = vb_level; 4269 configure_rxtx_dump_callbacks(verbose_level); 4270 } 4271 4272 void 4273 vlan_extend_set(portid_t port_id, int on) 4274 { 4275 int diag; 4276 int vlan_offload; 4277 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4278 4279 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4280 return; 4281 4282 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4283 4284 if (on) { 4285 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 4286 port_rx_offloads |= 
void
vlan_extend_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	} else {
		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	} else {
		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_qinq_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;
	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on) {
		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
	} else {
		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
	}

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0) {
		printf("%s(port_id=%d, on=%d) failed "
		       "diag=%d\n", __func__, port_id, on, diag);
		return;
	}
	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}
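/*
 * Add (on != 0) or remove (on == 0) vlan_id from the VLAN filter table
 * of the given port.  Returns 0 on success, 1 on invalid arguments and
 * -1 when the driver call fails.
 */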
int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
	if (diag == 0)
		return;

	printf("vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_type, tp_id, diag);
}

void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;

	if (ports[port_id].dev_conf.txmode.offloads &
	    DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Error: QinQ insertion is enabled; disable it first.\n");
		return;
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
		printf("Error: VLAN insertion is not supported by port %d\n",
		       port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	if (vlan_id_is_invalid(vlan_id))
		return;
	if (vlan_id_is_invalid(vlan_id_outer))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
		printf("Error: QinQ insertion is not supported by port %d\n",
		       port_id);
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
						    DEV_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = vlan_id;
	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}

void
tx_vlan_reset(portid_t port_id)
{
	ports[port_id].dev_conf.txmode.offloads &=
		~(DEV_TX_OFFLOAD_VLAN_INSERT |
		  DEV_TX_OFFLOAD_QINQ_INSERT);
	ports[port_id].tx_vlan_id = 0;
	ports[port_id].tx_vlan_id_outer = 0;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}
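/*
 * Map an Rx or Tx queue of a port to one of the
 * RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue statistics registers.  An
 * existing mapping for the (port, queue) pair is overwritten; otherwise
 * a new entry is appended to the relevant mapping table.
 */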
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) :
		    (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* Tx */
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
				tx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	} else { /* Rx */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}

void
set_xstats_hide_zero(uint8_t on_off)
{
	xstats_hide_zero = on_off;
}

void
set_record_core_cycles(uint8_t on_off)
{
	record_core_cycles = on_off;
}

void
set_record_burst_stats(uint8_t on_off)
{
	record_burst_stats = on_off;
}

static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
		       " tunnel_id: 0x%08x",
		       mask->mac_addr_byte_mask, mask->tunnel_type_mask,
		       rte_be_to_cpu_32(mask->tunnel_id_mask));
	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
		       rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
		       rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

		printf("\n src_port: 0x%04x, dst_port: 0x%04x",
		       rte_be_to_cpu_16(mask->src_port_mask),
		       rte_be_to_cpu_16(mask->dst_port_mask));

		printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

		printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
	}

	printf("\n");
}
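/*
 * Dump the flex payload configuration: one line per configured payload
 * type, followed by the first "num" source offsets of that payload.
 */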
static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n L4_PAYLOAD: ");
		else
			printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf(" %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}

static char *
flowtype_to_str(uint16_t flow_type)
{
	struct flow_type_info {
		char str[32];
		uint16_t ftype;
	};

	uint8_t i;
	static struct flow_type_info flowtype_str_table[] = {
		{"raw", RTE_ETH_FLOW_RAW},
		{"ipv4", RTE_ETH_FLOW_IPV4},
		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
		{"ipv6", RTE_ETH_FLOW_IPV6},
		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
		{"port", RTE_ETH_FLOW_PORT},
		{"vxlan", RTE_ETH_FLOW_VXLAN},
		{"geneve", RTE_ETH_FLOW_GENEVE},
		{"nvgre", RTE_ETH_FLOW_NVGRE},
		{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
	};

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}

#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
p : "unknown"); 4708 for (j = 0; j < num; j++) 4709 printf(" %02x", mask->mask[j]); 4710 } 4711 printf("\n"); 4712 } 4713 4714 static inline void 4715 print_fdir_flow_type(uint32_t flow_types_mask) 4716 { 4717 int i; 4718 char *p; 4719 4720 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4721 if (!(flow_types_mask & (1 << i))) 4722 continue; 4723 p = flowtype_to_str(i); 4724 if (p) 4725 printf(" %s", p); 4726 else 4727 printf(" unknown"); 4728 } 4729 printf("\n"); 4730 } 4731 4732 static int 4733 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4734 struct rte_eth_fdir_stats *fdir_stat) 4735 { 4736 int ret = -ENOTSUP; 4737 4738 #ifdef RTE_NET_I40E 4739 if (ret == -ENOTSUP) { 4740 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4741 if (!ret) 4742 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4743 } 4744 #endif 4745 #ifdef RTE_NET_IXGBE 4746 if (ret == -ENOTSUP) { 4747 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4748 if (!ret) 4749 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4750 } 4751 #endif 4752 switch (ret) { 4753 case 0: 4754 break; 4755 case -ENOTSUP: 4756 printf("\n FDIR is not supported on port %-2d\n", 4757 port_id); 4758 break; 4759 default: 4760 printf("programming error: (%s)\n", strerror(-ret)); 4761 break; 4762 } 4763 return ret; 4764 } 4765 4766 void 4767 fdir_get_infos(portid_t port_id) 4768 { 4769 struct rte_eth_fdir_stats fdir_stat; 4770 struct rte_eth_fdir_info fdir_info; 4771 4772 static const char *fdir_stats_border = "########################"; 4773 4774 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4775 return; 4776 4777 memset(&fdir_info, 0, sizeof(fdir_info)); 4778 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4779 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4780 return; 4781 4782 printf("\n %s FDIR infos for port %-2d %s\n", 4783 fdir_stats_border, port_id, fdir_stats_border); 4784 printf(" MODE: "); 4785 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4786 printf(" PERFECT\n"); 4787 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4788 printf(" PERFECT-MAC-VLAN\n"); 4789 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4790 printf(" PERFECT-TUNNEL\n"); 4791 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4792 printf(" SIGNATURE\n"); 4793 else 4794 printf(" DISABLE\n"); 4795 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4796 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 4797 printf(" SUPPORTED FLOW TYPE: "); 4798 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 4799 } 4800 printf(" FLEX PAYLOAD INFO:\n"); 4801 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 4802 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 4803 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 4804 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 4805 fdir_info.flex_payload_unit, 4806 fdir_info.max_flex_payload_segment_num, 4807 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 4808 printf(" MASK: "); 4809 print_fdir_mask(&fdir_info.mask); 4810 if (fdir_info.flex_conf.nb_payloads > 0) { 4811 printf(" FLEX PAYLOAD SRC OFFSET:"); 4812 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4813 } 4814 if (fdir_info.flex_conf.nb_flexmasks > 0) { 4815 printf(" FLEX MASK CFG:"); 4816 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4817 } 4818 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 4819 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 4820 printf(" guarant_space: %-10"PRIu32" 
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	memset(&fdir_info, 0, sizeof(fdir_info));
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
		return;

	printf("\n %s FDIR infos for port %-2d %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf(" MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf(" PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf(" PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(" PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf(" SIGNATURE\n");
	else
		printf(" DISABLED\n");
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
	    && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf(" SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf(" FLEX PAYLOAD INFO:\n");
	printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
	       " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
	       " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
	       fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
	       fdir_info.flex_payload_unit,
	       fdir_info.max_flex_payload_segment_num,
	       fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf(" MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf(" FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf,
					fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf(" FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf,
				     fdir_info.max_flexpayload);
	}
	printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
	       " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
	       " add: %-10"PRIu64" remove: %"PRIu64"\n"
	       " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf(" %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}

#endif /* RTE_NET_I40E || RTE_NET_IXGBE */

void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	/* Overwrite an existing entry for this flow type, if any. */
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Cannot set flex"
			       " mask for flow_type(%u).\n", cfg->flow_type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_mask[idx],
		   cfg,
		   sizeof(struct rte_eth_fdir_flex_mask));
}

void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	/* Overwrite an existing entry for this payload type, if any. */
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Cannot set"
			       " flex payload for type(%u).\n", cfg->type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_set[idx],
		   cfg,
		   sizeof(struct rte_eth_flex_payload_cfg));
}
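/*
 * Enable or disable VF Rx/Tx traffic.  Only implemented through the
 * ixgbe driver-specific API; with other drivers the request is reported
 * as unsupported.
 */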
"Rx" : "Tx", port_id); 4916 RTE_SET_USED(vf); 4917 RTE_SET_USED(on); 4918 } 4919 4920 int 4921 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 4922 { 4923 int diag; 4924 struct rte_eth_link link; 4925 int ret; 4926 4927 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4928 return 1; 4929 ret = eth_link_get_nowait_print_err(port_id, &link); 4930 if (ret < 0) 4931 return 1; 4932 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN && 4933 rate > link.link_speed) { 4934 printf("Invalid rate value:%u bigger than link speed: %u\n", 4935 rate, link.link_speed); 4936 return 1; 4937 } 4938 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 4939 if (diag == 0) 4940 return diag; 4941 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 4942 port_id, diag); 4943 return diag; 4944 } 4945 4946 int 4947 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 4948 { 4949 int diag = -ENOTSUP; 4950 4951 RTE_SET_USED(vf); 4952 RTE_SET_USED(rate); 4953 RTE_SET_USED(q_msk); 4954 4955 #ifdef RTE_NET_IXGBE 4956 if (diag == -ENOTSUP) 4957 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 4958 q_msk); 4959 #endif 4960 #ifdef RTE_NET_BNXT 4961 if (diag == -ENOTSUP) 4962 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 4963 #endif 4964 if (diag == 0) 4965 return diag; 4966 4967 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 4968 port_id, diag); 4969 return diag; 4970 } 4971 4972 /* 4973 * Functions to manage the set of filtered Multicast MAC addresses. 4974 * 4975 * A pool of filtered multicast MAC addresses is associated with each port. 4976 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 4977 * The address of the pool and the number of valid multicast MAC addresses 4978 * recorded in the pool are stored in the fields "mc_addr_pool" and 4979 * "mc_addr_nb" of the "rte_port" data structure. 4980 * 4981 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 4982 * to be supplied a contiguous array of multicast MAC addresses. 4983 * To comply with this constraint, the set of multicast addresses recorded 4984 * into the pool are systematically compacted at the beginning of the pool. 4985 * Hence, when a multicast address is removed from the pool, all following 4986 * addresses, if any, are copied back to keep the set contiguous. 4987 */ 4988 #define MCAST_POOL_INC 32 4989 4990 static int 4991 mcast_addr_pool_extend(struct rte_port *port) 4992 { 4993 struct rte_ether_addr *mc_pool; 4994 size_t mc_pool_size; 4995 4996 /* 4997 * If a free entry is available at the end of the pool, just 4998 * increment the number of recorded multicast addresses. 4999 */ 5000 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5001 port->mc_addr_nb++; 5002 return 0; 5003 } 5004 5005 /* 5006 * [re]allocate a pool with MCAST_POOL_INC more entries. 5007 * The previous test guarantees that port->mc_addr_nb is a multiple 5008 * of MCAST_POOL_INC. 
static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct rte_ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
							MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
						    mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}

static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* Free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}

static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
		       port_id, port->mc_addr_nb, diag);

	return diag;
}

void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure: remove the address from the pool. */
		mcast_addr_pool_remove(port, i);
}
void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure: add the address back into the pool. */
		mcast_addr_pool_append(port, mc_addr);
}

void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get DCB info on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n TC : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		printf("%s: Failed to allocate memory\n", __func__);
		return buf;
	}

	/* A short read from a regular file is treated as a failure too. */
	ret = read(fd, buf, pkg_size);
	if (ret < 0 || (off_t)ret != pkg_size) {
		close(fd);
		printf("%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}
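/*
 * Release a buffer returned by open_file().  Returns 0 when the buffer
 * was freed and -1 when it was NULL.
 */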
int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}

void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_NET_I40E
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("No queue region has been set before\n");

	printf("\n %s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u\n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u\n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}

void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	struct rte_ether_addr *addr;
	uint32_t i, num_macs = 0;
	struct rte_eth_dev *dev;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Count the valid (non-zero) unicast MAC addresses first. */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* Skip the all-zero (unused) entries. */
		if (rte_is_zero_ether_addr(addr))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %u\n", num_macs);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* Skip the all-zero (unused) entries. */
		if (rte_is_zero_ether_addr(addr))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf(" %s\n", buf);
	}
}

void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of Multicast MAC addresses added: %u\n",
	       port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf(" %s\n", buf);
	}
}