1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_NET_IXGBE 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_NET_I40E 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_NET_BNXT 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <rte_hexdump.h> 53 54 #include "testpmd.h" 55 56 #define ETHDEV_FWVERS_LEN 32 57 58 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 59 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 60 #else 61 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 62 #endif 63 64 #define NS_PER_SEC 1E9 65 66 static char *flowtype_to_str(uint16_t flow_type); 67 68 static const struct { 69 enum tx_pkt_split split; 70 const char *name; 71 } tx_split_name[] = { 72 { 73 .split = TX_PKT_SPLIT_OFF, 74 .name = "off", 75 }, 76 { 77 .split = TX_PKT_SPLIT_ON, 78 .name = "on", 79 }, 80 { 81 .split = TX_PKT_SPLIT_RND, 82 .name = "rand", 83 }, 84 }; 85 86 const struct rss_type_info rss_type_table[] = { 87 { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP | 88 ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD | 89 ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP | 90 ETH_RSS_GTPU}, 91 { "none", 0 }, 92 { "eth", ETH_RSS_ETH }, 93 { "l2-src-only", ETH_RSS_L2_SRC_ONLY }, 94 { "l2-dst-only", ETH_RSS_L2_DST_ONLY }, 95 { "vlan", ETH_RSS_VLAN }, 96 { "s-vlan", ETH_RSS_S_VLAN }, 97 { "c-vlan", ETH_RSS_C_VLAN }, 98 { "ipv4", ETH_RSS_IPV4 }, 99 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 100 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 101 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 102 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 103 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 104 { "ipv6", ETH_RSS_IPV6 }, 105 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 106 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 107 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 108 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 109 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 110 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 111 { "ipv6-ex", ETH_RSS_IPV6_EX }, 112 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 113 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 114 { "port", ETH_RSS_PORT }, 115 { "vxlan", ETH_RSS_VXLAN }, 116 { "geneve", ETH_RSS_GENEVE }, 117 { "nvgre", ETH_RSS_NVGRE }, 118 { "ip", ETH_RSS_IP }, 119 { "udp", ETH_RSS_UDP }, 120 { "tcp", ETH_RSS_TCP }, 121 { "sctp", ETH_RSS_SCTP }, 122 { "tunnel", ETH_RSS_TUNNEL }, 123 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 124 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 125 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 126 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 127 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 128 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 129 { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, 130 { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, 131 { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, 132 { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, 133 { "esp", ETH_RSS_ESP }, 134 { "ah", ETH_RSS_AH }, 135 { "l2tpv3", ETH_RSS_L2TPV3 }, 136 { "pfcp", ETH_RSS_PFCP }, 137 { "pppoe", ETH_RSS_PPPOE }, 138 { "gtpu", ETH_RSS_GTPU }, 139 { NULL, 0 }, 140 }; 141 142 static const struct { 143 enum rte_eth_fec_mode mode; 144 const char *name; 145 } fec_mode_name[] = { 146 { 147 .mode = RTE_ETH_FEC_NOFEC, 148 .name = "off", 149 }, 150 { 151 .mode = RTE_ETH_FEC_AUTO, 152 .name = "auto", 153 }, 154 { 155 .mode = RTE_ETH_FEC_BASER, 156 .name = "baser", 157 }, 158 { 159 .mode = RTE_ETH_FEC_RS, 160 .name = "rs", 161 }, 162 }; 163 164 static void 165 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 166 { 167 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 168 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 169 printf("%s%s", name, buf); 170 } 171 172 void 173 nic_stats_display(portid_t port_id) 174 { 175 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 176 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 177 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 178 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 179 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 180 struct timespec cur_time; 181 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 182 diff_ns; 183 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 184 struct rte_eth_stats stats; 185 struct rte_port *port = &ports[port_id]; 186 uint8_t i; 187 188 static const char *nic_stats_border = "########################"; 189 190 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 191 print_valid_ports(); 192 return; 193 } 194 rte_eth_stats_get(port_id, &stats); 195 printf("\n %s NIC statistics for port %-2d %s\n", 196 nic_stats_border, port_id, nic_stats_border); 197 198 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 199 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 200 "%-"PRIu64"\n", 201 stats.ipackets, stats.imissed, stats.ibytes); 202 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 203 printf(" RX-nombuf: %-10"PRIu64"\n", 204 stats.rx_nombuf); 205 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 206 "%-"PRIu64"\n", 207 stats.opackets, stats.oerrors, stats.obytes); 208 } 209 else { 210 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 211 " RX-bytes: %10"PRIu64"\n", 212 stats.ipackets, stats.ierrors, stats.ibytes); 213 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 214 printf(" RX-nombuf: %10"PRIu64"\n", 215 stats.rx_nombuf); 216 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 217 " TX-bytes: %10"PRIu64"\n", 218 stats.opackets, stats.oerrors, stats.obytes); 219 } 220 221 if (port->rx_queue_stats_mapping_enabled) { 222 printf("\n"); 223 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 224 printf(" Stats reg %2d RX-packets: %10"PRIu64 225 " RX-errors: %10"PRIu64 226 " RX-bytes: %10"PRIu64"\n", 227 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 228 } 229 } 230 if (port->tx_queue_stats_mapping_enabled) { 231 printf("\n"); 232 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 233 printf(" Stats reg %2d TX-packets: %10"PRIu64 234 " TX-bytes: %10"PRIu64"\n", 235 i, stats.q_opackets[i], stats.q_obytes[i]); 236 } 237 } 238 239 diff_ns = 0; 240 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 241 uint64_t ns; 242 243 ns = cur_time.tv_sec * NS_PER_SEC; 244 ns += cur_time.tv_nsec; 245 246 if (prev_ns[port_id] != 0) 247 diff_ns = ns - prev_ns[port_id]; 248 prev_ns[port_id] = ns; 249 } 250 251 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 252 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 253 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 254 (stats.opackets - prev_pkts_tx[port_id]) : 0; 255 prev_pkts_rx[port_id] = stats.ipackets; 256 prev_pkts_tx[port_id] = stats.opackets; 257 mpps_rx = diff_ns > 0 ? 258 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 259 mpps_tx = diff_ns > 0 ? 260 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 261 262 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 263 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 264 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 265 (stats.obytes - prev_bytes_tx[port_id]) : 0; 266 prev_bytes_rx[port_id] = stats.ibytes; 267 prev_bytes_tx[port_id] = stats.obytes; 268 mbps_rx = diff_ns > 0 ? 269 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 270 mbps_tx = diff_ns > 0 ? 271 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 272 273 printf("\n Throughput (since last show)\n"); 274 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 275 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 276 mpps_tx, mbps_tx * 8); 277 278 printf(" %s############################%s\n", 279 nic_stats_border, nic_stats_border); 280 } 281 282 void 283 nic_stats_clear(portid_t port_id) 284 { 285 int ret; 286 287 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 288 print_valid_ports(); 289 return; 290 } 291 292 ret = rte_eth_stats_reset(port_id); 293 if (ret != 0) { 294 printf("%s: Error: failed to reset stats (port %u): %s", 295 __func__, port_id, strerror(-ret)); 296 return; 297 } 298 299 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 300 if (ret != 0) { 301 if (ret < 0) 302 ret = -ret; 303 printf("%s: Error: failed to get stats (port %u): %s", 304 __func__, port_id, strerror(ret)); 305 return; 306 } 307 printf("\n NIC statistics for port %d cleared\n", port_id); 308 } 309 310 void 311 nic_xstats_display(portid_t port_id) 312 { 313 struct rte_eth_xstat *xstats; 314 int cnt_xstats, idx_xstat; 315 struct rte_eth_xstat_name *xstats_names; 316 317 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 318 print_valid_ports(); 319 return; 320 } 321 printf("###### NIC extended statistics for port %-2d\n", port_id); 322 if (!rte_eth_dev_is_valid_port(port_id)) { 323 printf("Error: Invalid port number %i\n", port_id); 324 return; 325 } 326 327 /* Get count */ 328 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 329 if (cnt_xstats < 0) { 330 printf("Error: Cannot get count of xstats\n"); 331 return; 332 } 333 334 /* Get id-name lookup table */ 335 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 336 if (xstats_names == NULL) { 337 printf("Cannot allocate memory for xstats lookup\n"); 338 return; 339 } 340 if (cnt_xstats != rte_eth_xstats_get_names( 341 port_id, xstats_names, cnt_xstats)) { 342 printf("Error: Cannot get xstats lookup\n"); 343 free(xstats_names); 344 return; 345 } 346 347 /* Get stats themselves */ 348 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 349 if (xstats == NULL) { 350 printf("Cannot allocate memory for xstats\n"); 351 free(xstats_names); 352 return; 353 } 354 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 355 printf("Error: Unable to get xstats\n"); 356 free(xstats_names); 357 free(xstats); 358 return; 359 } 360 361 /* Display xstats */ 362 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 363 if (xstats_hide_zero && !xstats[idx_xstat].value) 364 continue; 365 printf("%s: %"PRIu64"\n", 366 xstats_names[idx_xstat].name, 367 xstats[idx_xstat].value); 368 } 369 free(xstats_names); 370 free(xstats); 371 } 372 373 void 374 nic_xstats_clear(portid_t port_id) 375 { 376 int ret; 377 378 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 379 print_valid_ports(); 380 return; 381 } 382 383 ret = rte_eth_xstats_reset(port_id); 384 if (ret != 0) { 385 printf("%s: Error: failed to reset xstats (port %u): %s", 386 __func__, port_id, strerror(-ret)); 387 return; 388 } 389 390 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 391 if (ret != 0) { 392 if (ret < 0) 393 ret = -ret; 394 printf("%s: Error: failed to get stats (port %u): %s", 395 __func__, port_id, strerror(ret)); 396 return; 397 } 398 } 399 400 void 401 nic_stats_mapping_display(portid_t port_id) 402 { 403 struct rte_port *port = &ports[port_id]; 404 uint16_t i; 405 406 static const char *nic_stats_mapping_border = "########################"; 407 408 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 409 print_valid_ports(); 410 return; 411 } 412 413 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 414 printf("Port id %d - either does not support queue statistic mapping or" 415 " no queue statistic mapping set\n", port_id); 416 return; 417 } 418 419 printf("\n %s NIC statistics mapping for port %-2d %s\n", 420 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 421 422 if (port->rx_queue_stats_mapping_enabled) { 423 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 424 if (rx_queue_stats_mappings[i].port_id == port_id) { 425 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 426 rx_queue_stats_mappings[i].queue_id, 427 rx_queue_stats_mappings[i].stats_counter_id); 428 } 429 } 430 printf("\n"); 431 } 432 433 434 if (port->tx_queue_stats_mapping_enabled) { 435 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 436 if (tx_queue_stats_mappings[i].port_id == port_id) { 437 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 438 tx_queue_stats_mappings[i].queue_id, 439 tx_queue_stats_mappings[i].stats_counter_id); 440 } 441 } 442 } 443 444 printf(" %s####################################%s\n", 445 nic_stats_mapping_border, nic_stats_mapping_border); 446 } 447 448 void 449 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 450 { 451 struct rte_eth_burst_mode mode; 452 struct rte_eth_rxq_info qinfo; 453 int32_t rc; 454 static const char *info_border = "*********************"; 455 456 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 457 if (rc != 0) { 458 printf("Failed to retrieve information for port: %u, " 459 "RX queue: %hu\nerror desc: %s(%d)\n", 460 port_id, queue_id, strerror(-rc), rc); 461 return; 462 } 463 464 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 465 info_border, port_id, queue_id, info_border); 466 467 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 468 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 469 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 470 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 471 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 472 printf("\nRX drop packets: %s", 473 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 474 printf("\nRX deferred start: %s", 475 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 476 printf("\nRX scattered packets: %s", 477 (qinfo.scattered_rx != 0) ? "on" : "off"); 478 if (qinfo.rx_buf_size != 0) 479 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 480 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 481 482 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 483 printf("\nBurst mode: %s%s", 484 mode.info, 485 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 486 " (per queue)" : ""); 487 488 printf("\n"); 489 } 490 491 void 492 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 493 { 494 struct rte_eth_burst_mode mode; 495 struct rte_eth_txq_info qinfo; 496 int32_t rc; 497 static const char *info_border = "*********************"; 498 499 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 500 if (rc != 0) { 501 printf("Failed to retrieve information for port: %u, " 502 "TX queue: %hu\nerror desc: %s(%d)\n", 503 port_id, queue_id, strerror(-rc), rc); 504 return; 505 } 506 507 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 508 info_border, port_id, queue_id, info_border); 509 510 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 511 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 512 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 513 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 514 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 515 printf("\nTX deferred start: %s", 516 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 517 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 518 519 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 520 printf("\nBurst mode: %s%s", 521 mode.info, 522 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 523 " (per queue)" : ""); 524 525 printf("\n"); 526 } 527 528 static int bus_match_all(const struct rte_bus *bus, const void *data) 529 { 530 RTE_SET_USED(bus); 531 RTE_SET_USED(data); 532 return 0; 533 } 534 535 static void 536 device_infos_display_speeds(uint32_t speed_capa) 537 { 538 printf("\n\tDevice speed capability:"); 539 if (speed_capa == ETH_LINK_SPEED_AUTONEG) 540 printf(" Autonegotiate (all speeds)"); 541 if (speed_capa & ETH_LINK_SPEED_FIXED) 542 printf(" Disable autonegotiate (fixed speed) "); 543 if (speed_capa & ETH_LINK_SPEED_10M_HD) 544 printf(" 10 Mbps half-duplex "); 545 if (speed_capa & ETH_LINK_SPEED_10M) 546 printf(" 10 Mbps full-duplex "); 547 if (speed_capa & ETH_LINK_SPEED_100M_HD) 548 printf(" 100 Mbps half-duplex "); 549 if (speed_capa & ETH_LINK_SPEED_100M) 550 printf(" 100 Mbps full-duplex "); 551 if (speed_capa & ETH_LINK_SPEED_1G) 552 printf(" 1 Gbps "); 553 if (speed_capa & ETH_LINK_SPEED_2_5G) 554 printf(" 2.5 Gbps "); 555 if (speed_capa & ETH_LINK_SPEED_5G) 556 printf(" 5 Gbps "); 557 if (speed_capa & ETH_LINK_SPEED_10G) 558 printf(" 10 Gbps "); 559 if (speed_capa & ETH_LINK_SPEED_20G) 560 printf(" 20 Gbps "); 561 if (speed_capa & ETH_LINK_SPEED_25G) 562 printf(" 25 Gbps "); 563 if (speed_capa & ETH_LINK_SPEED_40G) 564 printf(" 40 Gbps "); 565 if (speed_capa & ETH_LINK_SPEED_50G) 566 printf(" 50 Gbps "); 567 if (speed_capa & ETH_LINK_SPEED_56G) 568 printf(" 56 Gbps "); 569 if (speed_capa & ETH_LINK_SPEED_100G) 570 printf(" 100 Gbps "); 571 if (speed_capa & ETH_LINK_SPEED_200G) 572 printf(" 200 Gbps "); 573 } 574 575 void 576 device_infos_display(const char *identifier) 577 { 578 static const char *info_border = "*********************"; 579 struct rte_bus *start = NULL, *next; 580 struct rte_dev_iterator dev_iter; 581 char name[RTE_ETH_NAME_MAX_LEN]; 582 struct rte_ether_addr mac_addr; 583 struct rte_device *dev; 584 struct rte_devargs da; 585 portid_t port_id; 586 struct rte_eth_dev_info dev_info; 587 char devstr[128]; 588 589 memset(&da, 0, sizeof(da)); 590 if (!identifier) 591 goto skip_parse; 592 593 if (rte_devargs_parsef(&da, "%s", identifier)) { 594 printf("cannot parse identifier\n"); 595 if (da.args) 596 free(da.args); 597 return; 598 } 599 600 skip_parse: 601 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 602 603 start = next; 604 if (identifier && da.bus != next) 605 continue; 606 607 /* Skip buses that don't have iterate method */ 608 if (!next->dev_iterate) 609 continue; 610 611 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 612 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 613 614 if (!dev->driver) 615 continue; 616 /* Check for matching device if identifier is present */ 617 if (identifier && 618 strncmp(da.name, dev->name, strlen(dev->name))) 619 continue; 620 printf("\n%s Infos for device %s %s\n", 621 info_border, dev->name, info_border); 622 printf("Bus name: %s", dev->bus->name); 623 printf("\nDriver name: %s", dev->driver->name); 624 printf("\nDevargs: %s", 625 dev->devargs ? dev->devargs->args : ""); 626 printf("\nConnect to socket: %d", dev->numa_node); 627 printf("\n"); 628 629 /* List ports with matching device name */ 630 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 631 printf("\n\tPort id: %-2d", port_id); 632 if (eth_macaddr_get_print_err(port_id, 633 &mac_addr) == 0) 634 print_ethaddr("\n\tMAC address: ", 635 &mac_addr); 636 rte_eth_dev_get_name_by_port(port_id, name); 637 printf("\n\tDevice name: %s", name); 638 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 639 device_infos_display_speeds(dev_info.speed_capa); 640 printf("\n"); 641 } 642 } 643 }; 644 } 645 646 void 647 port_infos_display(portid_t port_id) 648 { 649 struct rte_port *port; 650 struct rte_ether_addr mac_addr; 651 struct rte_eth_link link; 652 struct rte_eth_dev_info dev_info; 653 int vlan_offload; 654 struct rte_mempool * mp; 655 static const char *info_border = "*********************"; 656 uint16_t mtu; 657 char name[RTE_ETH_NAME_MAX_LEN]; 658 int ret; 659 char fw_version[ETHDEV_FWVERS_LEN]; 660 661 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 662 print_valid_ports(); 663 return; 664 } 665 port = &ports[port_id]; 666 ret = eth_link_get_nowait_print_err(port_id, &link); 667 if (ret < 0) 668 return; 669 670 ret = eth_dev_info_get_print_err(port_id, &dev_info); 671 if (ret != 0) 672 return; 673 674 printf("\n%s Infos for port %-2d %s\n", 675 info_border, port_id, info_border); 676 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 677 print_ethaddr("MAC address: ", &mac_addr); 678 rte_eth_dev_get_name_by_port(port_id, name); 679 printf("\nDevice name: %s", name); 680 printf("\nDriver name: %s", dev_info.driver_name); 681 682 if (rte_eth_dev_fw_version_get(port_id, fw_version, 683 ETHDEV_FWVERS_LEN) == 0) 684 printf("\nFirmware-version: %s", fw_version); 685 else 686 printf("\nFirmware-version: %s", "not available"); 687 688 if (dev_info.device->devargs && dev_info.device->devargs->args) 689 printf("\nDevargs: %s", dev_info.device->devargs->args); 690 printf("\nConnect to socket: %u", port->socket_id); 691 692 if (port_numa[port_id] != NUMA_NO_CONFIG) { 693 mp = mbuf_pool_find(port_numa[port_id], 0); 694 if (mp) 695 printf("\nmemory allocation on the socket: %d", 696 port_numa[port_id]); 697 } else 698 printf("\nmemory allocation on the socket: %u",port->socket_id); 699 700 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 701 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 702 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 703 ("full-duplex") : ("half-duplex")); 704 705 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 706 printf("MTU: %u\n", mtu); 707 708 printf("Promiscuous mode: %s\n", 709 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 710 printf("Allmulticast mode: %s\n", 711 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 712 printf("Maximum number of MAC addresses: %u\n", 713 (unsigned int)(port->dev_info.max_mac_addrs)); 714 printf("Maximum number of MAC addresses of hash filtering: %u\n", 715 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 716 717 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 718 if (vlan_offload >= 0){ 719 printf("VLAN offload: \n"); 720 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 721 printf(" strip on, "); 722 else 723 printf(" strip off, "); 724 725 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 726 printf("filter on, "); 727 else 728 printf("filter off, "); 729 730 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 731 printf("extend on, "); 732 else 733 printf("extend off, "); 734 735 if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) 736 printf("qinq strip on\n"); 737 else 738 printf("qinq strip off\n"); 739 } 740 741 if (dev_info.hash_key_size > 0) 742 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 743 if (dev_info.reta_size > 0) 744 printf("Redirection table size: %u\n", dev_info.reta_size); 745 if (!dev_info.flow_type_rss_offloads) 746 printf("No RSS offload flow type is supported.\n"); 747 else { 748 uint16_t i; 749 char *p; 750 751 printf("Supported RSS offload flow types:\n"); 752 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 753 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 754 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 755 continue; 756 p = flowtype_to_str(i); 757 if (p) 758 printf(" %s\n", p); 759 else 760 printf(" user defined %d\n", i); 761 } 762 } 763 764 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 765 printf("Maximum configurable length of RX packet: %u\n", 766 dev_info.max_rx_pktlen); 767 printf("Maximum configurable size of LRO aggregated packet: %u\n", 768 dev_info.max_lro_pkt_size); 769 if (dev_info.max_vfs) 770 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 771 if (dev_info.max_vmdq_pools) 772 printf("Maximum number of VMDq pools: %u\n", 773 dev_info.max_vmdq_pools); 774 775 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 776 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 777 printf("Max possible number of RXDs per queue: %hu\n", 778 dev_info.rx_desc_lim.nb_max); 779 printf("Min possible number of RXDs per queue: %hu\n", 780 dev_info.rx_desc_lim.nb_min); 781 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 782 783 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 784 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 785 printf("Max possible number of TXDs per queue: %hu\n", 786 dev_info.tx_desc_lim.nb_max); 787 printf("Min possible number of TXDs per queue: %hu\n", 788 dev_info.tx_desc_lim.nb_min); 789 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 790 printf("Max segment number per packet: %hu\n", 791 dev_info.tx_desc_lim.nb_seg_max); 792 printf("Max segment number per MTU/TSO: %hu\n", 793 dev_info.tx_desc_lim.nb_mtu_seg_max); 794 795 /* Show switch info only if valid switch domain and port id is set */ 796 if (dev_info.switch_info.domain_id != 797 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 798 if (dev_info.switch_info.name) 799 printf("Switch name: %s\n", dev_info.switch_info.name); 800 801 printf("Switch domain Id: %u\n", 802 dev_info.switch_info.domain_id); 803 printf("Switch Port Id: %u\n", 804 dev_info.switch_info.port_id); 805 } 806 } 807 808 void 809 port_summary_header_display(void) 810 { 811 uint16_t port_number; 812 813 port_number = rte_eth_dev_count_avail(); 814 printf("Number of available ports: %i\n", port_number); 815 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 816 "Driver", "Status", "Link"); 817 } 818 819 void 820 port_summary_display(portid_t port_id) 821 { 822 struct rte_ether_addr mac_addr; 823 struct rte_eth_link link; 824 struct rte_eth_dev_info dev_info; 825 char name[RTE_ETH_NAME_MAX_LEN]; 826 int ret; 827 828 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 829 print_valid_ports(); 830 return; 831 } 832 833 ret = eth_link_get_nowait_print_err(port_id, &link); 834 if (ret < 0) 835 return; 836 837 ret = eth_dev_info_get_print_err(port_id, &dev_info); 838 if (ret != 0) 839 return; 840 841 rte_eth_dev_get_name_by_port(port_id, name); 842 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 843 if (ret != 0) 844 return; 845 846 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n", 847 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 848 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 849 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 850 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 851 rte_eth_link_speed_to_str(link.link_speed)); 852 } 853 854 void 855 port_eeprom_display(portid_t port_id) 856 { 857 struct rte_dev_eeprom_info einfo; 858 int ret; 859 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 860 print_valid_ports(); 861 return; 862 } 863 864 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 865 if (len_eeprom < 0) { 866 switch (len_eeprom) { 867 case -ENODEV: 868 printf("port index %d invalid\n", port_id); 869 break; 870 case -ENOTSUP: 871 printf("operation not supported by device\n"); 872 break; 873 case -EIO: 874 printf("device is removed\n"); 875 break; 876 default: 877 printf("Unable to get EEPROM: %d\n", len_eeprom); 878 break; 879 } 880 return; 881 } 882 883 char buf[len_eeprom]; 884 einfo.offset = 0; 885 einfo.length = len_eeprom; 886 einfo.data = buf; 887 888 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 889 if (ret != 0) { 890 switch (ret) { 891 case -ENODEV: 892 printf("port index %d invalid\n", port_id); 893 break; 894 case -ENOTSUP: 895 printf("operation not supported by device\n"); 896 break; 897 case -EIO: 898 printf("device is removed\n"); 899 break; 900 default: 901 printf("Unable to get EEPROM: %d\n", ret); 902 break; 903 } 904 return; 905 } 906 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 907 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 908 } 909 910 void 911 port_module_eeprom_display(portid_t port_id) 912 { 913 struct rte_eth_dev_module_info minfo; 914 struct rte_dev_eeprom_info einfo; 915 int ret; 916 917 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 918 print_valid_ports(); 919 return; 920 } 921 922 923 ret = rte_eth_dev_get_module_info(port_id, &minfo); 924 if (ret != 0) { 925 switch (ret) { 926 case -ENODEV: 927 printf("port index %d invalid\n", port_id); 928 break; 929 case -ENOTSUP: 930 printf("operation not supported by device\n"); 931 break; 932 case -EIO: 933 printf("device is removed\n"); 934 break; 935 default: 936 printf("Unable to get module EEPROM: %d\n", ret); 937 break; 938 } 939 return; 940 } 941 942 char buf[minfo.eeprom_len]; 943 einfo.offset = 0; 944 einfo.length = minfo.eeprom_len; 945 einfo.data = buf; 946 947 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 948 if (ret != 0) { 949 switch (ret) { 950 case -ENODEV: 951 printf("port index %d invalid\n", port_id); 952 break; 953 case -ENOTSUP: 954 printf("operation not supported by device\n"); 955 break; 956 case -EIO: 957 printf("device is removed\n"); 958 break; 959 default: 960 printf("Unable to get module EEPROM: %d\n", ret); 961 break; 962 } 963 return; 964 } 965 966 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 967 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 968 } 969 970 void 971 port_offload_cap_display(portid_t port_id) 972 { 973 struct rte_eth_dev_info dev_info; 974 static const char *info_border = "************"; 975 int ret; 976 977 if (port_id_is_invalid(port_id, ENABLED_WARN)) 978 return; 979 980 ret = eth_dev_info_get_print_err(port_id, &dev_info); 981 if (ret != 0) 982 return; 983 984 printf("\n%s Port %d supported offload features: %s\n", 985 info_border, port_id, info_border); 986 987 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 988 printf("VLAN stripped: "); 989 if (ports[port_id].dev_conf.rxmode.offloads & 990 DEV_RX_OFFLOAD_VLAN_STRIP) 991 printf("on\n"); 992 else 993 printf("off\n"); 994 } 995 996 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 997 printf("Double VLANs stripped: "); 998 if (ports[port_id].dev_conf.rxmode.offloads & 999 DEV_RX_OFFLOAD_QINQ_STRIP) 1000 printf("on\n"); 1001 else 1002 printf("off\n"); 1003 } 1004 1005 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 1006 printf("RX IPv4 checksum: "); 1007 if (ports[port_id].dev_conf.rxmode.offloads & 1008 DEV_RX_OFFLOAD_IPV4_CKSUM) 1009 printf("on\n"); 1010 else 1011 printf("off\n"); 1012 } 1013 1014 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 1015 printf("RX UDP checksum: "); 1016 if (ports[port_id].dev_conf.rxmode.offloads & 1017 DEV_RX_OFFLOAD_UDP_CKSUM) 1018 printf("on\n"); 1019 else 1020 printf("off\n"); 1021 } 1022 1023 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 1024 printf("RX TCP checksum: "); 1025 if (ports[port_id].dev_conf.rxmode.offloads & 1026 DEV_RX_OFFLOAD_TCP_CKSUM) 1027 printf("on\n"); 1028 else 1029 printf("off\n"); 1030 } 1031 1032 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 1033 printf("RX SCTP checksum: "); 1034 if (ports[port_id].dev_conf.rxmode.offloads & 1035 DEV_RX_OFFLOAD_SCTP_CKSUM) 1036 printf("on\n"); 1037 else 1038 printf("off\n"); 1039 } 1040 1041 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 1042 printf("RX Outer IPv4 checksum: "); 1043 if (ports[port_id].dev_conf.rxmode.offloads & 1044 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 1045 printf("on\n"); 1046 else 1047 printf("off\n"); 1048 } 1049 1050 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 1051 printf("RX Outer UDP checksum: "); 1052 if (ports[port_id].dev_conf.rxmode.offloads & 1053 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 1054 printf("on\n"); 1055 else 1056 printf("off\n"); 1057 } 1058 1059 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 1060 printf("Large receive offload: "); 1061 if (ports[port_id].dev_conf.rxmode.offloads & 1062 DEV_RX_OFFLOAD_TCP_LRO) 1063 printf("on\n"); 1064 else 1065 printf("off\n"); 1066 } 1067 1068 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 1069 printf("HW timestamp: "); 1070 if (ports[port_id].dev_conf.rxmode.offloads & 1071 DEV_RX_OFFLOAD_TIMESTAMP) 1072 printf("on\n"); 1073 else 1074 printf("off\n"); 1075 } 1076 1077 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 1078 printf("Rx Keep CRC: "); 1079 if (ports[port_id].dev_conf.rxmode.offloads & 1080 DEV_RX_OFFLOAD_KEEP_CRC) 1081 printf("on\n"); 1082 else 1083 printf("off\n"); 1084 } 1085 1086 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 1087 printf("RX offload security: "); 1088 if (ports[port_id].dev_conf.rxmode.offloads & 1089 DEV_RX_OFFLOAD_SECURITY) 1090 printf("on\n"); 1091 else 1092 printf("off\n"); 1093 } 1094 1095 if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 1096 printf("RX offload buffer split: "); 1097 if (ports[port_id].dev_conf.rxmode.offloads & 1098 RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) 1099 printf("on\n"); 1100 else 1101 printf("off\n"); 1102 } 1103 1104 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 1105 printf("VLAN insert: "); 1106 if (ports[port_id].dev_conf.txmode.offloads & 1107 DEV_TX_OFFLOAD_VLAN_INSERT) 1108 printf("on\n"); 1109 else 1110 printf("off\n"); 1111 } 1112 1113 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 1114 printf("Double VLANs insert: "); 1115 if (ports[port_id].dev_conf.txmode.offloads & 1116 DEV_TX_OFFLOAD_QINQ_INSERT) 1117 printf("on\n"); 1118 else 1119 printf("off\n"); 1120 } 1121 1122 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 1123 printf("TX IPv4 checksum: "); 1124 if (ports[port_id].dev_conf.txmode.offloads & 1125 DEV_TX_OFFLOAD_IPV4_CKSUM) 1126 printf("on\n"); 1127 else 1128 printf("off\n"); 1129 } 1130 1131 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 1132 printf("TX UDP checksum: "); 1133 if (ports[port_id].dev_conf.txmode.offloads & 1134 DEV_TX_OFFLOAD_UDP_CKSUM) 1135 printf("on\n"); 1136 else 1137 printf("off\n"); 1138 } 1139 1140 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 1141 printf("TX TCP checksum: "); 1142 if (ports[port_id].dev_conf.txmode.offloads & 1143 DEV_TX_OFFLOAD_TCP_CKSUM) 1144 printf("on\n"); 1145 else 1146 printf("off\n"); 1147 } 1148 1149 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 1150 printf("TX SCTP checksum: "); 1151 if (ports[port_id].dev_conf.txmode.offloads & 1152 DEV_TX_OFFLOAD_SCTP_CKSUM) 1153 printf("on\n"); 1154 else 1155 printf("off\n"); 1156 } 1157 1158 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 1159 printf("TX Outer IPv4 checksum: "); 1160 if (ports[port_id].dev_conf.txmode.offloads & 1161 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 1162 printf("on\n"); 1163 else 1164 printf("off\n"); 1165 } 1166 1167 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 1168 printf("TX TCP segmentation: "); 1169 if (ports[port_id].dev_conf.txmode.offloads & 1170 DEV_TX_OFFLOAD_TCP_TSO) 1171 printf("on\n"); 1172 else 1173 printf("off\n"); 1174 } 1175 1176 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 1177 printf("TX UDP segmentation: "); 1178 if (ports[port_id].dev_conf.txmode.offloads & 1179 DEV_TX_OFFLOAD_UDP_TSO) 1180 printf("on\n"); 1181 else 1182 printf("off\n"); 1183 } 1184 1185 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 1186 printf("TSO for VXLAN tunnel packet: "); 1187 if (ports[port_id].dev_conf.txmode.offloads & 1188 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 1189 printf("on\n"); 1190 else 1191 printf("off\n"); 1192 } 1193 1194 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 1195 printf("TSO for GRE tunnel packet: "); 1196 if (ports[port_id].dev_conf.txmode.offloads & 1197 DEV_TX_OFFLOAD_GRE_TNL_TSO) 1198 printf("on\n"); 1199 else 1200 printf("off\n"); 1201 } 1202 1203 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 1204 printf("TSO for IPIP tunnel packet: "); 1205 if (ports[port_id].dev_conf.txmode.offloads & 1206 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 1207 printf("on\n"); 1208 else 1209 printf("off\n"); 1210 } 1211 1212 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 1213 printf("TSO for GENEVE tunnel packet: "); 1214 if (ports[port_id].dev_conf.txmode.offloads & 1215 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 1216 printf("on\n"); 1217 else 1218 printf("off\n"); 1219 } 1220 1221 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 1222 printf("IP tunnel TSO: "); 1223 if (ports[port_id].dev_conf.txmode.offloads & 1224 DEV_TX_OFFLOAD_IP_TNL_TSO) 1225 printf("on\n"); 1226 else 1227 printf("off\n"); 1228 } 1229 1230 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 1231 printf("UDP tunnel TSO: "); 1232 if (ports[port_id].dev_conf.txmode.offloads & 1233 DEV_TX_OFFLOAD_UDP_TNL_TSO) 1234 printf("on\n"); 1235 else 1236 printf("off\n"); 1237 } 1238 1239 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 1240 printf("TX Outer UDP checksum: "); 1241 if (ports[port_id].dev_conf.txmode.offloads & 1242 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 1243 printf("on\n"); 1244 else 1245 printf("off\n"); 1246 } 1247 1248 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) { 1249 printf("Tx scheduling on timestamp: "); 1250 if (ports[port_id].dev_conf.txmode.offloads & 1251 DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) 1252 printf("on\n"); 1253 else 1254 printf("off\n"); 1255 } 1256 1257 } 1258 1259 int 1260 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1261 { 1262 uint16_t pid; 1263 1264 if (port_id == (portid_t)RTE_PORT_ALL) 1265 return 0; 1266 1267 RTE_ETH_FOREACH_DEV(pid) 1268 if (port_id == pid) 1269 return 0; 1270 1271 if (warning == ENABLED_WARN) 1272 printf("Invalid port %d\n", port_id); 1273 1274 return 1; 1275 } 1276 1277 void print_valid_ports(void) 1278 { 1279 portid_t pid; 1280 1281 printf("The valid ports array is ["); 1282 RTE_ETH_FOREACH_DEV(pid) { 1283 printf(" %d", pid); 1284 } 1285 printf(" ]\n"); 1286 } 1287 1288 static int 1289 vlan_id_is_invalid(uint16_t vlan_id) 1290 { 1291 if (vlan_id < 4096) 1292 return 0; 1293 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1294 return 1; 1295 } 1296 1297 static int 1298 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1299 { 1300 const struct rte_pci_device *pci_dev; 1301 const struct rte_bus *bus; 1302 uint64_t pci_len; 1303 1304 if (reg_off & 0x3) { 1305 printf("Port register offset 0x%X not aligned on a 4-byte " 1306 "boundary\n", 1307 (unsigned)reg_off); 1308 return 1; 1309 } 1310 1311 if (!ports[port_id].dev_info.device) { 1312 printf("Invalid device\n"); 1313 return 0; 1314 } 1315 1316 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1317 if (bus && !strcmp(bus->name, "pci")) { 1318 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1319 } else { 1320 printf("Not a PCI device\n"); 1321 return 1; 1322 } 1323 1324 pci_len = pci_dev->mem_resource[0].len; 1325 if (reg_off >= pci_len) { 1326 printf("Port %d: register offset %u (0x%X) out of port PCI " 1327 "resource (length=%"PRIu64")\n", 1328 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 1329 return 1; 1330 } 1331 return 0; 1332 } 1333 1334 static int 1335 reg_bit_pos_is_invalid(uint8_t bit_pos) 1336 { 1337 if (bit_pos <= 31) 1338 return 0; 1339 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 1340 return 1; 1341 } 1342 1343 #define display_port_and_reg_off(port_id, reg_off) \ 1344 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1345 1346 static inline void 1347 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1348 { 1349 display_port_and_reg_off(port_id, (unsigned)reg_off); 1350 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1351 } 1352 1353 void 1354 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1355 { 1356 uint32_t reg_v; 1357 1358 1359 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1360 return; 1361 if (port_reg_off_is_invalid(port_id, reg_off)) 1362 return; 1363 if (reg_bit_pos_is_invalid(bit_x)) 1364 return; 1365 reg_v = port_id_pci_reg_read(port_id, reg_off); 1366 display_port_and_reg_off(port_id, (unsigned)reg_off); 1367 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1368 } 1369 1370 void 1371 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1372 uint8_t bit1_pos, uint8_t bit2_pos) 1373 { 1374 uint32_t reg_v; 1375 uint8_t l_bit; 1376 uint8_t h_bit; 1377 1378 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1379 return; 1380 if (port_reg_off_is_invalid(port_id, reg_off)) 1381 return; 1382 if (reg_bit_pos_is_invalid(bit1_pos)) 1383 return; 1384 if (reg_bit_pos_is_invalid(bit2_pos)) 1385 return; 1386 if (bit1_pos > bit2_pos) 1387 l_bit = bit2_pos, h_bit = bit1_pos; 1388 else 1389 l_bit = bit1_pos, h_bit = bit2_pos; 1390 1391 reg_v = port_id_pci_reg_read(port_id, reg_off); 1392 reg_v >>= l_bit; 1393 if (h_bit < 31) 1394 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1395 display_port_and_reg_off(port_id, (unsigned)reg_off); 1396 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1397 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1398 } 1399 1400 void 1401 port_reg_display(portid_t port_id, uint32_t reg_off) 1402 { 1403 uint32_t reg_v; 1404 1405 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1406 return; 1407 if (port_reg_off_is_invalid(port_id, reg_off)) 1408 return; 1409 reg_v = port_id_pci_reg_read(port_id, reg_off); 1410 display_port_reg_value(port_id, reg_off, reg_v); 1411 } 1412 1413 void 1414 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1415 uint8_t bit_v) 1416 { 1417 uint32_t reg_v; 1418 1419 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1420 return; 1421 if (port_reg_off_is_invalid(port_id, reg_off)) 1422 return; 1423 if (reg_bit_pos_is_invalid(bit_pos)) 1424 return; 1425 if (bit_v > 1) { 1426 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1427 return; 1428 } 1429 reg_v = port_id_pci_reg_read(port_id, reg_off); 1430 if (bit_v == 0) 1431 reg_v &= ~(1 << bit_pos); 1432 else 1433 reg_v |= (1 << bit_pos); 1434 port_id_pci_reg_write(port_id, reg_off, reg_v); 1435 display_port_reg_value(port_id, reg_off, reg_v); 1436 } 1437 1438 void 1439 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1440 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1441 { 1442 uint32_t max_v; 1443 uint32_t reg_v; 1444 uint8_t l_bit; 1445 uint8_t h_bit; 1446 1447 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1448 return; 1449 if (port_reg_off_is_invalid(port_id, reg_off)) 1450 return; 1451 if (reg_bit_pos_is_invalid(bit1_pos)) 1452 return; 1453 if (reg_bit_pos_is_invalid(bit2_pos)) 1454 return; 1455 if (bit1_pos > bit2_pos) 1456 l_bit = bit2_pos, h_bit = bit1_pos; 1457 else 1458 l_bit = bit1_pos, h_bit = bit2_pos; 1459 1460 if ((h_bit - l_bit) < 31) 1461 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1462 else 1463 max_v = 0xFFFFFFFF; 1464 1465 if (value > max_v) { 1466 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1467 (unsigned)value, (unsigned)value, 1468 (unsigned)max_v, (unsigned)max_v); 1469 return; 1470 } 1471 reg_v = port_id_pci_reg_read(port_id, reg_off); 1472 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1473 reg_v |= (value << l_bit); /* Set changed bits */ 1474 port_id_pci_reg_write(port_id, reg_off, reg_v); 1475 display_port_reg_value(port_id, reg_off, reg_v); 1476 } 1477 1478 void 1479 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1480 { 1481 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1482 return; 1483 if (port_reg_off_is_invalid(port_id, reg_off)) 1484 return; 1485 port_id_pci_reg_write(port_id, reg_off, reg_v); 1486 display_port_reg_value(port_id, reg_off, reg_v); 1487 } 1488 1489 void 1490 port_mtu_set(portid_t port_id, uint16_t mtu) 1491 { 1492 int diag; 1493 struct rte_port *rte_port = &ports[port_id]; 1494 struct rte_eth_dev_info dev_info; 1495 uint16_t eth_overhead; 1496 int ret; 1497 1498 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1499 return; 1500 1501 ret = eth_dev_info_get_print_err(port_id, &dev_info); 1502 if (ret != 0) 1503 return; 1504 1505 if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { 1506 printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", 1507 mtu, dev_info.min_mtu, dev_info.max_mtu); 1508 return; 1509 } 1510 diag = rte_eth_dev_set_mtu(port_id, mtu); 1511 if (diag) 1512 printf("Set MTU failed. diag=%d\n", diag); 1513 else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1514 /* 1515 * Ether overhead in driver is equal to the difference of 1516 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the 1517 * device supports jumbo frame. 1518 */ 1519 eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; 1520 if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) { 1521 rte_port->dev_conf.rxmode.offloads |= 1522 DEV_RX_OFFLOAD_JUMBO_FRAME; 1523 rte_port->dev_conf.rxmode.max_rx_pkt_len = 1524 mtu + eth_overhead; 1525 } else 1526 rte_port->dev_conf.rxmode.offloads &= 1527 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1528 } 1529 } 1530 1531 /* Generic flow management functions. */ 1532 1533 static struct port_flow_tunnel * 1534 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1535 { 1536 struct port_flow_tunnel *flow_tunnel; 1537 1538 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1539 if (flow_tunnel->id == port_tunnel_id) 1540 goto out; 1541 } 1542 flow_tunnel = NULL; 1543 1544 out: 1545 return flow_tunnel; 1546 } 1547 1548 const char * 1549 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1550 { 1551 const char *type; 1552 switch (tunnel->type) { 1553 default: 1554 type = "unknown"; 1555 break; 1556 case RTE_FLOW_ITEM_TYPE_VXLAN: 1557 type = "vxlan"; 1558 break; 1559 } 1560 1561 return type; 1562 } 1563 1564 struct port_flow_tunnel * 1565 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1566 { 1567 struct rte_port *port = &ports[port_id]; 1568 struct port_flow_tunnel *flow_tunnel; 1569 1570 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1571 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1572 goto out; 1573 } 1574 flow_tunnel = NULL; 1575 1576 out: 1577 return flow_tunnel; 1578 } 1579 1580 void port_flow_tunnel_list(portid_t port_id) 1581 { 1582 struct rte_port *port = &ports[port_id]; 1583 struct port_flow_tunnel *flt; 1584 1585 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1586 printf("port %u tunnel #%u type=%s", 1587 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1588 if (flt->tunnel.tun_id) 1589 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1590 printf("\n"); 1591 } 1592 } 1593 1594 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1595 { 1596 struct rte_port *port = &ports[port_id]; 1597 struct port_flow_tunnel *flt; 1598 1599 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1600 if (flt->id == tunnel_id) 1601 break; 1602 } 1603 if (flt) { 1604 LIST_REMOVE(flt, chain); 1605 free(flt); 1606 printf("port %u: flow tunnel #%u destroyed\n", 1607 port_id, tunnel_id); 1608 } 1609 } 1610 1611 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1612 { 1613 struct rte_port *port = &ports[port_id]; 1614 enum rte_flow_item_type type; 1615 struct port_flow_tunnel *flt; 1616 1617 if (!strcmp(ops->type, "vxlan")) 1618 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1619 else { 1620 printf("cannot offload \"%s\" tunnel type\n", ops->type); 1621 return; 1622 } 1623 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1624 if (flt->tunnel.type == type) 1625 break; 1626 } 1627 if (!flt) { 1628 flt = calloc(1, sizeof(*flt)); 1629 if (!flt) { 1630 printf("failed to allocate port flt object\n"); 1631 return; 1632 } 1633 flt->tunnel.type = type; 1634 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1635 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1636 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1637 } 1638 printf("port %d: flow tunnel #%u type %s\n", 1639 port_id, flt->id, ops->type); 1640 } 1641 1642 /** Generate a port_flow entry from attributes/pattern/actions. */ 1643 static struct port_flow * 1644 port_flow_new(const struct rte_flow_attr *attr, 1645 const struct rte_flow_item *pattern, 1646 const struct rte_flow_action *actions, 1647 struct rte_flow_error *error) 1648 { 1649 const struct rte_flow_conv_rule rule = { 1650 .attr_ro = attr, 1651 .pattern_ro = pattern, 1652 .actions_ro = actions, 1653 }; 1654 struct port_flow *pf; 1655 int ret; 1656 1657 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1658 if (ret < 0) 1659 return NULL; 1660 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1661 if (!pf) { 1662 rte_flow_error_set 1663 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1664 "calloc() failed"); 1665 return NULL; 1666 } 1667 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1668 error) >= 0) 1669 return pf; 1670 free(pf); 1671 return NULL; 1672 } 1673 1674 /** Print a message out of a flow error. */ 1675 static int 1676 port_flow_complain(struct rte_flow_error *error) 1677 { 1678 static const char *const errstrlist[] = { 1679 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1680 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1681 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1682 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1683 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1684 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1685 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1686 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1687 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1688 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1689 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1690 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1691 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1692 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1693 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1694 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1695 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1696 }; 1697 const char *errstr; 1698 char buf[32]; 1699 int err = rte_errno; 1700 1701 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1702 !errstrlist[error->type]) 1703 errstr = "unknown type"; 1704 else 1705 errstr = errstrlist[error->type]; 1706 printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__, 1707 error->type, errstr, 1708 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1709 error->cause), buf) : "", 1710 error->message ? error->message : "(no stated reason)", 1711 rte_strerror(err)); 1712 return -err; 1713 } 1714 1715 static void 1716 rss_config_display(struct rte_flow_action_rss *rss_conf) 1717 { 1718 uint8_t i; 1719 1720 if (rss_conf == NULL) { 1721 printf("Invalid rule\n"); 1722 return; 1723 } 1724 1725 printf("RSS:\n" 1726 " queues:"); 1727 if (rss_conf->queue_num == 0) 1728 printf(" none"); 1729 for (i = 0; i < rss_conf->queue_num; i++) 1730 printf(" %d", rss_conf->queue[i]); 1731 printf("\n"); 1732 1733 printf(" function: "); 1734 switch (rss_conf->func) { 1735 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1736 printf("default\n"); 1737 break; 1738 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1739 printf("toeplitz\n"); 1740 break; 1741 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1742 printf("simple_xor\n"); 1743 break; 1744 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1745 printf("symmetric_toeplitz\n"); 1746 break; 1747 default: 1748 printf("Unknown function\n"); 1749 return; 1750 } 1751 1752 printf(" types:\n"); 1753 if (rss_conf->types == 0) { 1754 printf(" none\n"); 1755 return; 1756 } 1757 for (i = 0; rss_type_table[i].str; i++) { 1758 if ((rss_conf->types & 1759 rss_type_table[i].rss_type) == 1760 rss_type_table[i].rss_type && 1761 rss_type_table[i].rss_type != 0) 1762 printf(" %s\n", rss_type_table[i].str); 1763 } 1764 } 1765 1766 static struct port_shared_action * 1767 action_get_by_id(portid_t port_id, uint32_t id) 1768 { 1769 struct rte_port *port; 1770 struct port_shared_action **ppsa; 1771 struct port_shared_action *psa = NULL; 1772 1773 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1774 port_id == (portid_t)RTE_PORT_ALL) 1775 return NULL; 1776 port = &ports[port_id]; 1777 ppsa = &port->actions_list; 1778 while (*ppsa) { 1779 if ((*ppsa)->id == id) { 1780 psa = *ppsa; 1781 break; 1782 } 1783 ppsa = &(*ppsa)->next; 1784 } 1785 if (!psa) 1786 printf("Failed to find shared action #%u on port %u\n", 1787 id, port_id); 1788 return psa; 1789 } 1790 1791 static int 1792 action_alloc(portid_t port_id, uint32_t id, 1793 struct port_shared_action **action) 1794 { 1795 struct rte_port *port; 1796 struct port_shared_action **ppsa; 1797 struct port_shared_action *psa = NULL; 1798 1799 *action = NULL; 1800 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1801 port_id == (portid_t)RTE_PORT_ALL) 1802 return -EINVAL; 1803 port = &ports[port_id]; 1804 if (id == UINT32_MAX) { 1805 /* taking first available ID */ 1806 if (port->actions_list) { 1807 if (port->actions_list->id == UINT32_MAX - 1) { 1808 printf("Highest shared action ID is already" 1809 " assigned, delete it first\n"); 1810 return -ENOMEM; 1811 } 1812 id = port->actions_list->id + 1; 1813 } else { 1814 id = 0; 1815 } 1816 } 1817 psa = calloc(1, sizeof(*psa)); 1818 if (!psa) { 1819 printf("Allocation of port %u shared action failed\n", 1820 port_id); 1821 return -ENOMEM; 1822 } 1823 ppsa = &port->actions_list; 1824 while (*ppsa && (*ppsa)->id > id) 1825 ppsa = &(*ppsa)->next; 1826 if (*ppsa && (*ppsa)->id == id) { 1827 printf("Shared action #%u is already assigned," 1828 " delete it first\n", id); 1829 free(psa); 1830 return -EINVAL; 1831 } 1832 psa->next = *ppsa; 1833 psa->id = id; 1834 *ppsa = psa; 1835 *action = psa; 1836 return 0; 1837 } 1838 1839 /** Create shared action */ 1840 int 1841 port_shared_action_create(portid_t port_id, uint32_t id, 1842 const struct rte_flow_shared_action_conf *conf, 1843 const struct rte_flow_action *action) 1844 { 1845 struct port_shared_action *psa; 1846 int ret; 1847 struct rte_flow_error error; 1848 1849 ret = action_alloc(port_id, id, &psa); 1850 if (ret) 1851 return ret; 1852 /* Poisoning to make sure PMDs update it in case of error. */ 1853 memset(&error, 0x22, sizeof(error)); 1854 psa->action = rte_flow_shared_action_create(port_id, conf, action, 1855 &error); 1856 if (!psa->action) { 1857 uint32_t destroy_id = psa->id; 1858 port_shared_action_destroy(port_id, 1, &destroy_id); 1859 return port_flow_complain(&error); 1860 } 1861 psa->type = action->type; 1862 printf("Shared action #%u created\n", psa->id); 1863 return 0; 1864 } 1865 1866 /** Destroy shared action */ 1867 int 1868 port_shared_action_destroy(portid_t port_id, 1869 uint32_t n, 1870 const uint32_t *actions) 1871 { 1872 struct rte_port *port; 1873 struct port_shared_action **tmp; 1874 uint32_t c = 0; 1875 int ret = 0; 1876 1877 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1878 port_id == (portid_t)RTE_PORT_ALL) 1879 return -EINVAL; 1880 port = &ports[port_id]; 1881 tmp = &port->actions_list; 1882 while (*tmp) { 1883 uint32_t i; 1884 1885 for (i = 0; i != n; ++i) { 1886 struct rte_flow_error error; 1887 struct port_shared_action *psa = *tmp; 1888 1889 if (actions[i] != psa->id) 1890 continue; 1891 /* 1892 * Poisoning to make sure PMDs update it in case 1893 * of error. 1894 */ 1895 memset(&error, 0x33, sizeof(error)); 1896 1897 if (psa->action && rte_flow_shared_action_destroy( 1898 port_id, psa->action, &error)) { 1899 ret = port_flow_complain(&error); 1900 continue; 1901 } 1902 *tmp = psa->next; 1903 free(psa); 1904 printf("Shared action #%u destroyed\n", psa->id); 1905 break; 1906 } 1907 if (i == n) 1908 tmp = &(*tmp)->next; 1909 ++c; 1910 } 1911 return ret; 1912 } 1913 1914 1915 /** Get shared action by port + id */ 1916 struct rte_flow_shared_action * 1917 port_shared_action_get_by_id(portid_t port_id, uint32_t id) 1918 { 1919 1920 struct port_shared_action *psa = action_get_by_id(port_id, id); 1921 1922 return (psa) ? psa->action : NULL; 1923 } 1924 1925 /** Update shared action */ 1926 int 1927 port_shared_action_update(portid_t port_id, uint32_t id, 1928 const struct rte_flow_action *action) 1929 { 1930 struct rte_flow_error error; 1931 struct rte_flow_shared_action *shared_action; 1932 1933 shared_action = port_shared_action_get_by_id(port_id, id); 1934 if (!shared_action) 1935 return -EINVAL; 1936 if (rte_flow_shared_action_update(port_id, shared_action, action, 1937 &error)) { 1938 return port_flow_complain(&error); 1939 } 1940 printf("Shared action #%u updated\n", id); 1941 return 0; 1942 } 1943 1944 int 1945 port_shared_action_query(portid_t port_id, uint32_t id) 1946 { 1947 struct rte_flow_error error; 1948 struct port_shared_action *psa; 1949 uint64_t default_data; 1950 void *data = NULL; 1951 int ret = 0; 1952 1953 psa = action_get_by_id(port_id, id); 1954 if (!psa) 1955 return -EINVAL; 1956 switch (psa->type) { 1957 case RTE_FLOW_ACTION_TYPE_RSS: 1958 data = &default_data; 1959 break; 1960 default: 1961 printf("Shared action %u (type: %d) on port %u doesn't support" 1962 " query\n", id, psa->type, port_id); 1963 return -1; 1964 } 1965 if (rte_flow_shared_action_query(port_id, psa->action, data, &error)) 1966 ret = port_flow_complain(&error); 1967 switch (psa->type) { 1968 case RTE_FLOW_ACTION_TYPE_RSS: 1969 if (!ret) 1970 printf("Shared RSS action:\n\trefs:%u\n", 1971 *((uint32_t *)data)); 1972 data = NULL; 1973 break; 1974 default: 1975 printf("Shared action %u (type: %d) on port %u doesn't support" 1976 " query\n", id, psa->type, port_id); 1977 ret = -1; 1978 } 1979 return ret; 1980 } 1981 static struct port_flow_tunnel * 1982 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1983 const struct rte_flow_item *pattern, 1984 const struct rte_flow_action *actions, 1985 const struct tunnel_ops *tunnel_ops) 1986 { 1987 int ret; 1988 struct rte_port *port; 1989 struct port_flow_tunnel *pft; 1990 struct rte_flow_error error; 1991 1992 port = &ports[port_id]; 1993 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 1994 if (!pft) { 1995 printf("failed to locate port flow tunnel #%u\n", 1996 tunnel_ops->id); 1997 return NULL; 1998 } 1999 if (tunnel_ops->actions) { 2000 uint32_t num_actions; 2001 const struct rte_flow_action *aptr; 2002 2003 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2004 &pft->pmd_actions, 2005 &pft->num_pmd_actions, 2006 &error); 2007 if (ret) { 2008 port_flow_complain(&error); 2009 return NULL; 2010 } 2011 for (aptr = actions, num_actions = 1; 2012 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2013 aptr++, num_actions++); 2014 pft->actions = malloc( 2015 (num_actions + pft->num_pmd_actions) * 2016 sizeof(actions[0])); 2017 if (!pft->actions) { 2018 rte_flow_tunnel_action_decap_release( 2019 port_id, pft->actions, 2020 pft->num_pmd_actions, &error); 2021 return NULL; 2022 } 2023 rte_memcpy(pft->actions, pft->pmd_actions, 2024 pft->num_pmd_actions * sizeof(actions[0])); 2025 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2026 num_actions * sizeof(actions[0])); 2027 } 2028 if (tunnel_ops->items) { 2029 uint32_t num_items; 2030 const struct rte_flow_item *iptr; 2031 2032 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2033 &pft->pmd_items, 2034 &pft->num_pmd_items, 2035 &error); 2036 if (ret) { 2037 port_flow_complain(&error); 2038 return NULL; 2039 } 2040 for (iptr = pattern, num_items = 1; 2041 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2042 iptr++, num_items++); 2043 pft->items = malloc((num_items + pft->num_pmd_items) * 2044 sizeof(pattern[0])); 2045 if (!pft->items) { 2046 rte_flow_tunnel_item_release( 2047 port_id, pft->pmd_items, 2048 pft->num_pmd_items, &error); 2049 return NULL; 2050 } 2051 rte_memcpy(pft->items, pft->pmd_items, 2052 pft->num_pmd_items * sizeof(pattern[0])); 2053 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2054 num_items * sizeof(pattern[0])); 2055 } 2056 2057 return pft; 2058 } 2059 2060 static void 2061 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2062 const struct tunnel_ops *tunnel_ops, 2063 struct port_flow_tunnel *pft) 2064 { 2065 struct rte_flow_error error; 2066 2067 if (tunnel_ops->actions) { 2068 free(pft->actions); 2069 rte_flow_tunnel_action_decap_release( 2070 port_id, pft->pmd_actions, 2071 pft->num_pmd_actions, &error); 2072 pft->actions = NULL; 2073 pft->pmd_actions = NULL; 2074 } 2075 if (tunnel_ops->items) { 2076 free(pft->items); 2077 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2078 pft->num_pmd_items, 2079 &error); 2080 pft->items = NULL; 2081 pft->pmd_items = NULL; 2082 } 2083 } 2084 2085 /** Validate flow rule. */ 2086 int 2087 port_flow_validate(portid_t port_id, 2088 const struct rte_flow_attr *attr, 2089 const struct rte_flow_item *pattern, 2090 const struct rte_flow_action *actions, 2091 const struct tunnel_ops *tunnel_ops) 2092 { 2093 struct rte_flow_error error; 2094 struct port_flow_tunnel *pft = NULL; 2095 2096 /* Poisoning to make sure PMDs update it in case of error. */ 2097 memset(&error, 0x11, sizeof(error)); 2098 if (tunnel_ops->enabled) { 2099 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2100 actions, tunnel_ops); 2101 if (!pft) 2102 return -ENOENT; 2103 if (pft->items) 2104 pattern = pft->items; 2105 if (pft->actions) 2106 actions = pft->actions; 2107 } 2108 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 2109 return port_flow_complain(&error); 2110 if (tunnel_ops->enabled) 2111 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2112 printf("Flow rule validated\n"); 2113 return 0; 2114 } 2115 2116 /** Update age action context by port_flow pointer. */ 2117 void 2118 update_age_action_context(const struct rte_flow_action *actions, 2119 struct port_flow *pf) 2120 { 2121 struct rte_flow_action_age *age = NULL; 2122 2123 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2124 switch (actions->type) { 2125 case RTE_FLOW_ACTION_TYPE_AGE: 2126 age = (struct rte_flow_action_age *) 2127 (uintptr_t)actions->conf; 2128 age->context = pf; 2129 return; 2130 default: 2131 break; 2132 } 2133 } 2134 } 2135 2136 /** Create flow rule. */ 2137 int 2138 port_flow_create(portid_t port_id, 2139 const struct rte_flow_attr *attr, 2140 const struct rte_flow_item *pattern, 2141 const struct rte_flow_action *actions, 2142 const struct tunnel_ops *tunnel_ops) 2143 { 2144 struct rte_flow *flow; 2145 struct rte_port *port; 2146 struct port_flow *pf; 2147 uint32_t id = 0; 2148 struct rte_flow_error error; 2149 struct port_flow_tunnel *pft = NULL; 2150 2151 port = &ports[port_id]; 2152 if (port->flow_list) { 2153 if (port->flow_list->id == UINT32_MAX) { 2154 printf("Highest rule ID is already assigned, delete" 2155 " it first"); 2156 return -ENOMEM; 2157 } 2158 id = port->flow_list->id + 1; 2159 } 2160 if (tunnel_ops->enabled) { 2161 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2162 actions, tunnel_ops); 2163 if (!pft) 2164 return -ENOENT; 2165 if (pft->items) 2166 pattern = pft->items; 2167 if (pft->actions) 2168 actions = pft->actions; 2169 } 2170 pf = port_flow_new(attr, pattern, actions, &error); 2171 if (!pf) 2172 return port_flow_complain(&error); 2173 update_age_action_context(actions, pf); 2174 /* Poisoning to make sure PMDs update it in case of error. */ 2175 memset(&error, 0x22, sizeof(error)); 2176 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2177 if (!flow) { 2178 free(pf); 2179 return port_flow_complain(&error); 2180 } 2181 pf->next = port->flow_list; 2182 pf->id = id; 2183 pf->flow = flow; 2184 port->flow_list = pf; 2185 if (tunnel_ops->enabled) 2186 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2187 printf("Flow rule #%u created\n", pf->id); 2188 return 0; 2189 } 2190 2191 /** Destroy a number of flow rules. */ 2192 int 2193 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2194 { 2195 struct rte_port *port; 2196 struct port_flow **tmp; 2197 uint32_t c = 0; 2198 int ret = 0; 2199 2200 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2201 port_id == (portid_t)RTE_PORT_ALL) 2202 return -EINVAL; 2203 port = &ports[port_id]; 2204 tmp = &port->flow_list; 2205 while (*tmp) { 2206 uint32_t i; 2207 2208 for (i = 0; i != n; ++i) { 2209 struct rte_flow_error error; 2210 struct port_flow *pf = *tmp; 2211 2212 if (rule[i] != pf->id) 2213 continue; 2214 /* 2215 * Poisoning to make sure PMDs update it in case 2216 * of error. 2217 */ 2218 memset(&error, 0x33, sizeof(error)); 2219 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2220 ret = port_flow_complain(&error); 2221 continue; 2222 } 2223 printf("Flow rule #%u destroyed\n", pf->id); 2224 *tmp = pf->next; 2225 free(pf); 2226 break; 2227 } 2228 if (i == n) 2229 tmp = &(*tmp)->next; 2230 ++c; 2231 } 2232 return ret; 2233 } 2234 2235 /** Remove all flow rules. */ 2236 int 2237 port_flow_flush(portid_t port_id) 2238 { 2239 struct rte_flow_error error; 2240 struct rte_port *port; 2241 int ret = 0; 2242 2243 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2244 port_id == (portid_t)RTE_PORT_ALL) 2245 return -EINVAL; 2246 2247 port = &ports[port_id]; 2248 2249 if (port->flow_list == NULL) 2250 return ret; 2251 2252 /* Poisoning to make sure PMDs update it in case of error. */ 2253 memset(&error, 0x44, sizeof(error)); 2254 if (rte_flow_flush(port_id, &error)) { 2255 port_flow_complain(&error); 2256 } 2257 2258 while (port->flow_list) { 2259 struct port_flow *pf = port->flow_list->next; 2260 2261 free(port->flow_list); 2262 port->flow_list = pf; 2263 } 2264 return ret; 2265 } 2266 2267 /** Dump all flow rules. */ 2268 int 2269 port_flow_dump(portid_t port_id, const char *file_name) 2270 { 2271 int ret = 0; 2272 FILE *file = stdout; 2273 struct rte_flow_error error; 2274 2275 if (file_name && strlen(file_name)) { 2276 file = fopen(file_name, "w"); 2277 if (!file) { 2278 printf("Failed to create file %s: %s\n", file_name, 2279 strerror(errno)); 2280 return -errno; 2281 } 2282 } 2283 ret = rte_flow_dev_dump(port_id, file, &error); 2284 if (ret) { 2285 port_flow_complain(&error); 2286 printf("Failed to dump flow: %s\n", strerror(-ret)); 2287 } else 2288 printf("Flow dump finished\n"); 2289 if (file_name && strlen(file_name)) 2290 fclose(file); 2291 return ret; 2292 } 2293 2294 /** Query a flow rule. */ 2295 int 2296 port_flow_query(portid_t port_id, uint32_t rule, 2297 const struct rte_flow_action *action) 2298 { 2299 struct rte_flow_error error; 2300 struct rte_port *port; 2301 struct port_flow *pf; 2302 const char *name; 2303 union { 2304 struct rte_flow_query_count count; 2305 struct rte_flow_action_rss rss_conf; 2306 struct rte_flow_query_age age; 2307 } query; 2308 int ret; 2309 2310 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2311 port_id == (portid_t)RTE_PORT_ALL) 2312 return -EINVAL; 2313 port = &ports[port_id]; 2314 for (pf = port->flow_list; pf; pf = pf->next) 2315 if (pf->id == rule) 2316 break; 2317 if (!pf) { 2318 printf("Flow rule #%u not found\n", rule); 2319 return -ENOENT; 2320 } 2321 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2322 &name, sizeof(name), 2323 (void *)(uintptr_t)action->type, &error); 2324 if (ret < 0) 2325 return port_flow_complain(&error); 2326 switch (action->type) { 2327 case RTE_FLOW_ACTION_TYPE_COUNT: 2328 case RTE_FLOW_ACTION_TYPE_RSS: 2329 case RTE_FLOW_ACTION_TYPE_AGE: 2330 break; 2331 default: 2332 printf("Cannot query action type %d (%s)\n", 2333 action->type, name); 2334 return -ENOTSUP; 2335 } 2336 /* Poisoning to make sure PMDs update it in case of error. */ 2337 memset(&error, 0x55, sizeof(error)); 2338 memset(&query, 0, sizeof(query)); 2339 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 2340 return port_flow_complain(&error); 2341 switch (action->type) { 2342 case RTE_FLOW_ACTION_TYPE_COUNT: 2343 printf("%s:\n" 2344 " hits_set: %u\n" 2345 " bytes_set: %u\n" 2346 " hits: %" PRIu64 "\n" 2347 " bytes: %" PRIu64 "\n", 2348 name, 2349 query.count.hits_set, 2350 query.count.bytes_set, 2351 query.count.hits, 2352 query.count.bytes); 2353 break; 2354 case RTE_FLOW_ACTION_TYPE_RSS: 2355 rss_config_display(&query.rss_conf); 2356 break; 2357 case RTE_FLOW_ACTION_TYPE_AGE: 2358 printf("%s:\n" 2359 " aged: %u\n" 2360 " sec_since_last_hit_valid: %u\n" 2361 " sec_since_last_hit: %" PRIu32 "\n", 2362 name, 2363 query.age.aged, 2364 query.age.sec_since_last_hit_valid, 2365 query.age.sec_since_last_hit); 2366 break; 2367 default: 2368 printf("Cannot display result for action type %d (%s)\n", 2369 action->type, name); 2370 break; 2371 } 2372 return 0; 2373 } 2374 2375 /** List simply and destroy all aged flows. */ 2376 void 2377 port_flow_aged(portid_t port_id, uint8_t destroy) 2378 { 2379 void **contexts; 2380 int nb_context, total = 0, idx; 2381 struct rte_flow_error error; 2382 struct port_flow *pf; 2383 2384 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2385 port_id == (portid_t)RTE_PORT_ALL) 2386 return; 2387 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 2388 printf("Port %u total aged flows: %d\n", port_id, total); 2389 if (total < 0) { 2390 port_flow_complain(&error); 2391 return; 2392 } 2393 if (total == 0) 2394 return; 2395 contexts = malloc(sizeof(void *) * total); 2396 if (contexts == NULL) { 2397 printf("Cannot allocate contexts for aged flow\n"); 2398 return; 2399 } 2400 printf("ID\tGroup\tPrio\tAttr\n"); 2401 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 2402 if (nb_context != total) { 2403 printf("Port:%d get aged flows count(%d) != total(%d)\n", 2404 port_id, nb_context, total); 2405 free(contexts); 2406 return; 2407 } 2408 for (idx = 0; idx < nb_context; idx++) { 2409 pf = (struct port_flow *)contexts[idx]; 2410 if (!pf) { 2411 printf("Error: get Null context in port %u\n", port_id); 2412 continue; 2413 } 2414 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n", 2415 pf->id, 2416 pf->rule.attr->group, 2417 pf->rule.attr->priority, 2418 pf->rule.attr->ingress ? 'i' : '-', 2419 pf->rule.attr->egress ? 'e' : '-', 2420 pf->rule.attr->transfer ? 't' : '-'); 2421 } 2422 if (destroy) { 2423 int ret; 2424 uint32_t flow_id; 2425 2426 total = 0; 2427 printf("\n"); 2428 for (idx = 0; idx < nb_context; idx++) { 2429 pf = (struct port_flow *)contexts[idx]; 2430 if (!pf) 2431 continue; 2432 flow_id = pf->id; 2433 ret = port_flow_destroy(port_id, 1, &flow_id); 2434 if (!ret) 2435 total++; 2436 } 2437 printf("%d flows be destroyed\n", total); 2438 } 2439 free(contexts); 2440 } 2441 2442 /** List flow rules. */ 2443 void 2444 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 2445 { 2446 struct rte_port *port; 2447 struct port_flow *pf; 2448 struct port_flow *list = NULL; 2449 uint32_t i; 2450 2451 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2452 port_id == (portid_t)RTE_PORT_ALL) 2453 return; 2454 port = &ports[port_id]; 2455 if (!port->flow_list) 2456 return; 2457 /* Sort flows by group, priority and ID. */ 2458 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 2459 struct port_flow **tmp; 2460 const struct rte_flow_attr *curr = pf->rule.attr; 2461 2462 if (n) { 2463 /* Filter out unwanted groups. */ 2464 for (i = 0; i != n; ++i) 2465 if (curr->group == group[i]) 2466 break; 2467 if (i == n) 2468 continue; 2469 } 2470 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 2471 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 2472 2473 if (curr->group > comp->group || 2474 (curr->group == comp->group && 2475 curr->priority > comp->priority) || 2476 (curr->group == comp->group && 2477 curr->priority == comp->priority && 2478 pf->id > (*tmp)->id)) 2479 continue; 2480 break; 2481 } 2482 pf->tmp = *tmp; 2483 *tmp = pf; 2484 } 2485 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 2486 for (pf = list; pf != NULL; pf = pf->tmp) { 2487 const struct rte_flow_item *item = pf->rule.pattern; 2488 const struct rte_flow_action *action = pf->rule.actions; 2489 const char *name; 2490 2491 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 2492 pf->id, 2493 pf->rule.attr->group, 2494 pf->rule.attr->priority, 2495 pf->rule.attr->ingress ? 'i' : '-', 2496 pf->rule.attr->egress ? 'e' : '-', 2497 pf->rule.attr->transfer ? 't' : '-'); 2498 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 2499 if ((uint32_t)item->type > INT_MAX) 2500 name = "PMD_INTERNAL"; 2501 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 2502 &name, sizeof(name), 2503 (void *)(uintptr_t)item->type, 2504 NULL) <= 0) 2505 name = "[UNKNOWN]"; 2506 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 2507 printf("%s ", name); 2508 ++item; 2509 } 2510 printf("=>"); 2511 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 2512 if ((uint32_t)action->type > INT_MAX) 2513 name = "PMD_INTERNAL"; 2514 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 2515 &name, sizeof(name), 2516 (void *)(uintptr_t)action->type, 2517 NULL) <= 0) 2518 name = "[UNKNOWN]"; 2519 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 2520 printf(" %s", name); 2521 ++action; 2522 } 2523 printf("\n"); 2524 } 2525 } 2526 2527 /** Restrict ingress traffic to the defined flow rules. */ 2528 int 2529 port_flow_isolate(portid_t port_id, int set) 2530 { 2531 struct rte_flow_error error; 2532 2533 /* Poisoning to make sure PMDs update it in case of error. */ 2534 memset(&error, 0x66, sizeof(error)); 2535 if (rte_flow_isolate(port_id, set, &error)) 2536 return port_flow_complain(&error); 2537 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 2538 port_id, 2539 set ? "now restricted" : "not restricted anymore"); 2540 return 0; 2541 } 2542 2543 /* 2544 * RX/TX ring descriptors display functions. 2545 */ 2546 int 2547 rx_queue_id_is_invalid(queueid_t rxq_id) 2548 { 2549 if (rxq_id < nb_rxq) 2550 return 0; 2551 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 2552 return 1; 2553 } 2554 2555 int 2556 tx_queue_id_is_invalid(queueid_t txq_id) 2557 { 2558 if (txq_id < nb_txq) 2559 return 0; 2560 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 2561 return 1; 2562 } 2563 2564 static int 2565 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 2566 { 2567 struct rte_port *port = &ports[port_id]; 2568 struct rte_eth_rxq_info rx_qinfo; 2569 int ret; 2570 2571 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 2572 if (ret == 0) { 2573 *ring_size = rx_qinfo.nb_desc; 2574 return ret; 2575 } 2576 2577 if (ret != -ENOTSUP) 2578 return ret; 2579 /* 2580 * If the rte_eth_rx_queue_info_get is not support for this PMD, 2581 * ring_size stored in testpmd will be used for validity verification. 2582 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 2583 * being 0, it will use a default value provided by PMDs to setup this 2584 * rxq. If the default value is 0, it will use the 2585 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 2586 */ 2587 if (port->nb_rx_desc[rxq_id]) 2588 *ring_size = port->nb_rx_desc[rxq_id]; 2589 else if (port->dev_info.default_rxportconf.ring_size) 2590 *ring_size = port->dev_info.default_rxportconf.ring_size; 2591 else 2592 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2593 return 0; 2594 } 2595 2596 static int 2597 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 2598 { 2599 struct rte_port *port = &ports[port_id]; 2600 struct rte_eth_txq_info tx_qinfo; 2601 int ret; 2602 2603 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 2604 if (ret == 0) { 2605 *ring_size = tx_qinfo.nb_desc; 2606 return ret; 2607 } 2608 2609 if (ret != -ENOTSUP) 2610 return ret; 2611 /* 2612 * If the rte_eth_tx_queue_info_get is not support for this PMD, 2613 * ring_size stored in testpmd will be used for validity verification. 2614 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 2615 * being 0, it will use a default value provided by PMDs to setup this 2616 * txq. If the default value is 0, it will use the 2617 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 2618 */ 2619 if (port->nb_tx_desc[txq_id]) 2620 *ring_size = port->nb_tx_desc[txq_id]; 2621 else if (port->dev_info.default_txportconf.ring_size) 2622 *ring_size = port->dev_info.default_txportconf.ring_size; 2623 else 2624 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2625 return 0; 2626 } 2627 2628 static int 2629 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 2630 { 2631 uint16_t ring_size; 2632 int ret; 2633 2634 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 2635 if (ret) 2636 return 1; 2637 2638 if (rxdesc_id < ring_size) 2639 return 0; 2640 2641 printf("Invalid RX descriptor %u (must be < ring_size=%u)\n", 2642 rxdesc_id, ring_size); 2643 return 1; 2644 } 2645 2646 static int 2647 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 2648 { 2649 uint16_t ring_size; 2650 int ret; 2651 2652 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 2653 if (ret) 2654 return 1; 2655 2656 if (txdesc_id < ring_size) 2657 return 0; 2658 2659 printf("Invalid TX descriptor %u (must be < ring_size=%u)\n", 2660 txdesc_id, ring_size); 2661 return 1; 2662 } 2663 2664 static const struct rte_memzone * 2665 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 2666 { 2667 char mz_name[RTE_MEMZONE_NAMESIZE]; 2668 const struct rte_memzone *mz; 2669 2670 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 2671 port_id, q_id, ring_name); 2672 mz = rte_memzone_lookup(mz_name); 2673 if (mz == NULL) 2674 printf("%s ring memory zoneof (port %d, queue %d) not" 2675 "found (zone name = %s\n", 2676 ring_name, port_id, q_id, mz_name); 2677 return mz; 2678 } 2679 2680 union igb_ring_dword { 2681 uint64_t dword; 2682 struct { 2683 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2684 uint32_t lo; 2685 uint32_t hi; 2686 #else 2687 uint32_t hi; 2688 uint32_t lo; 2689 #endif 2690 } words; 2691 }; 2692 2693 struct igb_ring_desc_32_bytes { 2694 union igb_ring_dword lo_dword; 2695 union igb_ring_dword hi_dword; 2696 union igb_ring_dword resv1; 2697 union igb_ring_dword resv2; 2698 }; 2699 2700 struct igb_ring_desc_16_bytes { 2701 union igb_ring_dword lo_dword; 2702 union igb_ring_dword hi_dword; 2703 }; 2704 2705 static void 2706 ring_rxd_display_dword(union igb_ring_dword dword) 2707 { 2708 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 2709 (unsigned)dword.words.hi); 2710 } 2711 2712 static void 2713 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 2714 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2715 portid_t port_id, 2716 #else 2717 __rte_unused portid_t port_id, 2718 #endif 2719 uint16_t desc_id) 2720 { 2721 struct igb_ring_desc_16_bytes *ring = 2722 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2723 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 2724 int ret; 2725 struct rte_eth_dev_info dev_info; 2726 2727 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2728 if (ret != 0) 2729 return; 2730 2731 if (strstr(dev_info.driver_name, "i40e") != NULL) { 2732 /* 32 bytes RX descriptor, i40e only */ 2733 struct igb_ring_desc_32_bytes *ring = 2734 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 2735 ring[desc_id].lo_dword.dword = 2736 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2737 ring_rxd_display_dword(ring[desc_id].lo_dword); 2738 ring[desc_id].hi_dword.dword = 2739 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2740 ring_rxd_display_dword(ring[desc_id].hi_dword); 2741 ring[desc_id].resv1.dword = 2742 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 2743 ring_rxd_display_dword(ring[desc_id].resv1); 2744 ring[desc_id].resv2.dword = 2745 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 2746 ring_rxd_display_dword(ring[desc_id].resv2); 2747 2748 return; 2749 } 2750 #endif 2751 /* 16 bytes RX descriptor */ 2752 ring[desc_id].lo_dword.dword = 2753 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2754 ring_rxd_display_dword(ring[desc_id].lo_dword); 2755 ring[desc_id].hi_dword.dword = 2756 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2757 ring_rxd_display_dword(ring[desc_id].hi_dword); 2758 } 2759 2760 static void 2761 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 2762 { 2763 struct igb_ring_desc_16_bytes *ring; 2764 struct igb_ring_desc_16_bytes txd; 2765 2766 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 2767 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 2768 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 2769 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 2770 (unsigned)txd.lo_dword.words.lo, 2771 (unsigned)txd.lo_dword.words.hi, 2772 (unsigned)txd.hi_dword.words.lo, 2773 (unsigned)txd.hi_dword.words.hi); 2774 } 2775 2776 void 2777 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 2778 { 2779 const struct rte_memzone *rx_mz; 2780 2781 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 2782 return; 2783 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 2784 if (rx_mz == NULL) 2785 return; 2786 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 2787 } 2788 2789 void 2790 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 2791 { 2792 const struct rte_memzone *tx_mz; 2793 2794 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 2795 return; 2796 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 2797 if (tx_mz == NULL) 2798 return; 2799 ring_tx_descriptor_display(tx_mz, txd_id); 2800 } 2801 2802 void 2803 fwd_lcores_config_display(void) 2804 { 2805 lcoreid_t lc_id; 2806 2807 printf("List of forwarding lcores:"); 2808 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 2809 printf(" %2u", fwd_lcores_cpuids[lc_id]); 2810 printf("\n"); 2811 } 2812 void 2813 rxtx_config_display(void) 2814 { 2815 portid_t pid; 2816 queueid_t qid; 2817 2818 printf(" %s packet forwarding%s packets/burst=%d\n", 2819 cur_fwd_eng->fwd_mode_name, 2820 retry_enabled == 0 ? "" : " with retry", 2821 nb_pkt_per_burst); 2822 2823 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 2824 printf(" packet len=%u - nb packet segments=%d\n", 2825 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 2826 2827 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 2828 nb_fwd_lcores, nb_fwd_ports); 2829 2830 RTE_ETH_FOREACH_DEV(pid) { 2831 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 2832 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 2833 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 2834 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 2835 struct rte_eth_rxq_info rx_qinfo; 2836 struct rte_eth_txq_info tx_qinfo; 2837 uint16_t rx_free_thresh_tmp; 2838 uint16_t tx_free_thresh_tmp; 2839 uint16_t tx_rs_thresh_tmp; 2840 uint16_t nb_rx_desc_tmp; 2841 uint16_t nb_tx_desc_tmp; 2842 uint64_t offloads_tmp; 2843 uint8_t pthresh_tmp; 2844 uint8_t hthresh_tmp; 2845 uint8_t wthresh_tmp; 2846 int32_t rc; 2847 2848 /* per port config */ 2849 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 2850 (unsigned int)pid, nb_rxq, nb_txq); 2851 2852 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 2853 ports[pid].dev_conf.rxmode.offloads, 2854 ports[pid].dev_conf.txmode.offloads); 2855 2856 /* per rx queue config only for first queue to be less verbose */ 2857 for (qid = 0; qid < 1; qid++) { 2858 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 2859 if (rc) { 2860 nb_rx_desc_tmp = nb_rx_desc[qid]; 2861 rx_free_thresh_tmp = 2862 rx_conf[qid].rx_free_thresh; 2863 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 2864 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 2865 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 2866 offloads_tmp = rx_conf[qid].offloads; 2867 } else { 2868 nb_rx_desc_tmp = rx_qinfo.nb_desc; 2869 rx_free_thresh_tmp = 2870 rx_qinfo.conf.rx_free_thresh; 2871 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 2872 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 2873 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 2874 offloads_tmp = rx_qinfo.conf.offloads; 2875 } 2876 2877 printf(" RX queue: %d\n", qid); 2878 printf(" RX desc=%d - RX free threshold=%d\n", 2879 nb_rx_desc_tmp, rx_free_thresh_tmp); 2880 printf(" RX threshold registers: pthresh=%d hthresh=%d " 2881 " wthresh=%d\n", 2882 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2883 printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); 2884 } 2885 2886 /* per tx queue config only for first queue to be less verbose */ 2887 for (qid = 0; qid < 1; qid++) { 2888 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 2889 if (rc) { 2890 nb_tx_desc_tmp = nb_tx_desc[qid]; 2891 tx_free_thresh_tmp = 2892 tx_conf[qid].tx_free_thresh; 2893 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 2894 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 2895 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 2896 offloads_tmp = tx_conf[qid].offloads; 2897 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 2898 } else { 2899 nb_tx_desc_tmp = tx_qinfo.nb_desc; 2900 tx_free_thresh_tmp = 2901 tx_qinfo.conf.tx_free_thresh; 2902 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 2903 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 2904 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 2905 offloads_tmp = tx_qinfo.conf.offloads; 2906 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 2907 } 2908 2909 printf(" TX queue: %d\n", qid); 2910 printf(" TX desc=%d - TX free threshold=%d\n", 2911 nb_tx_desc_tmp, tx_free_thresh_tmp); 2912 printf(" TX threshold registers: pthresh=%d hthresh=%d " 2913 " wthresh=%d\n", 2914 pthresh_tmp, hthresh_tmp, wthresh_tmp); 2915 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 2916 offloads_tmp, tx_rs_thresh_tmp); 2917 } 2918 } 2919 } 2920 2921 void 2922 port_rss_reta_info(portid_t port_id, 2923 struct rte_eth_rss_reta_entry64 *reta_conf, 2924 uint16_t nb_entries) 2925 { 2926 uint16_t i, idx, shift; 2927 int ret; 2928 2929 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2930 return; 2931 2932 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 2933 if (ret != 0) { 2934 printf("Failed to get RSS RETA info, return code = %d\n", ret); 2935 return; 2936 } 2937 2938 for (i = 0; i < nb_entries; i++) { 2939 idx = i / RTE_RETA_GROUP_SIZE; 2940 shift = i % RTE_RETA_GROUP_SIZE; 2941 if (!(reta_conf[idx].mask & (1ULL << shift))) 2942 continue; 2943 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 2944 i, reta_conf[idx].reta[shift]); 2945 } 2946 } 2947 2948 /* 2949 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 2950 * key of the port. 2951 */ 2952 void 2953 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 2954 { 2955 struct rte_eth_rss_conf rss_conf = {0}; 2956 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 2957 uint64_t rss_hf; 2958 uint8_t i; 2959 int diag; 2960 struct rte_eth_dev_info dev_info; 2961 uint8_t hash_key_size; 2962 int ret; 2963 2964 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2965 return; 2966 2967 ret = eth_dev_info_get_print_err(port_id, &dev_info); 2968 if (ret != 0) 2969 return; 2970 2971 if (dev_info.hash_key_size > 0 && 2972 dev_info.hash_key_size <= sizeof(rss_key)) 2973 hash_key_size = dev_info.hash_key_size; 2974 else { 2975 printf("dev_info did not provide a valid hash key size\n"); 2976 return; 2977 } 2978 2979 /* Get RSS hash key if asked to display it */ 2980 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 2981 rss_conf.rss_key_len = hash_key_size; 2982 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2983 if (diag != 0) { 2984 switch (diag) { 2985 case -ENODEV: 2986 printf("port index %d invalid\n", port_id); 2987 break; 2988 case -ENOTSUP: 2989 printf("operation not supported by device\n"); 2990 break; 2991 default: 2992 printf("operation failed - diag=%d\n", diag); 2993 break; 2994 } 2995 return; 2996 } 2997 rss_hf = rss_conf.rss_hf; 2998 if (rss_hf == 0) { 2999 printf("RSS disabled\n"); 3000 return; 3001 } 3002 printf("RSS functions:\n "); 3003 for (i = 0; rss_type_table[i].str; i++) { 3004 if (rss_hf & rss_type_table[i].rss_type) 3005 printf("%s ", rss_type_table[i].str); 3006 } 3007 printf("\n"); 3008 if (!show_rss_key) 3009 return; 3010 printf("RSS key:\n"); 3011 for (i = 0; i < hash_key_size; i++) 3012 printf("%02X", rss_key[i]); 3013 printf("\n"); 3014 } 3015 3016 void 3017 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3018 uint hash_key_len) 3019 { 3020 struct rte_eth_rss_conf rss_conf; 3021 int diag; 3022 unsigned int i; 3023 3024 rss_conf.rss_key = NULL; 3025 rss_conf.rss_key_len = hash_key_len; 3026 rss_conf.rss_hf = 0; 3027 for (i = 0; rss_type_table[i].str; i++) { 3028 if (!strcmp(rss_type_table[i].str, rss_type)) 3029 rss_conf.rss_hf = rss_type_table[i].rss_type; 3030 } 3031 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3032 if (diag == 0) { 3033 rss_conf.rss_key = hash_key; 3034 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3035 } 3036 if (diag == 0) 3037 return; 3038 3039 switch (diag) { 3040 case -ENODEV: 3041 printf("port index %d invalid\n", port_id); 3042 break; 3043 case -ENOTSUP: 3044 printf("operation not supported by device\n"); 3045 break; 3046 default: 3047 printf("operation failed - diag=%d\n", diag); 3048 break; 3049 } 3050 } 3051 3052 /* 3053 * Setup forwarding configuration for each logical core. 3054 */ 3055 static void 3056 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 3057 { 3058 streamid_t nb_fs_per_lcore; 3059 streamid_t nb_fs; 3060 streamid_t sm_id; 3061 lcoreid_t nb_extra; 3062 lcoreid_t nb_fc; 3063 lcoreid_t nb_lc; 3064 lcoreid_t lc_id; 3065 3066 nb_fs = cfg->nb_fwd_streams; 3067 nb_fc = cfg->nb_fwd_lcores; 3068 if (nb_fs <= nb_fc) { 3069 nb_fs_per_lcore = 1; 3070 nb_extra = 0; 3071 } else { 3072 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3073 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3074 } 3075 3076 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3077 sm_id = 0; 3078 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3079 fwd_lcores[lc_id]->stream_idx = sm_id; 3080 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3081 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3082 } 3083 3084 /* 3085 * Assign extra remaining streams, if any. 3086 */ 3087 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3088 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3089 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3090 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3091 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3092 } 3093 } 3094 3095 static portid_t 3096 fwd_topology_tx_port_get(portid_t rxp) 3097 { 3098 static int warning_once = 1; 3099 3100 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 3101 3102 switch (port_topology) { 3103 default: 3104 case PORT_TOPOLOGY_PAIRED: 3105 if ((rxp & 0x1) == 0) { 3106 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 3107 return rxp + 1; 3108 if (warning_once) { 3109 printf("\nWarning! port-topology=paired" 3110 " and odd forward ports number," 3111 " the last port will pair with" 3112 " itself.\n\n"); 3113 warning_once = 0; 3114 } 3115 return rxp; 3116 } 3117 return rxp - 1; 3118 case PORT_TOPOLOGY_CHAINED: 3119 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 3120 case PORT_TOPOLOGY_LOOP: 3121 return rxp; 3122 } 3123 } 3124 3125 static void 3126 simple_fwd_config_setup(void) 3127 { 3128 portid_t i; 3129 3130 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 3131 cur_fwd_config.nb_fwd_streams = 3132 (streamid_t) cur_fwd_config.nb_fwd_ports; 3133 3134 /* reinitialize forwarding streams */ 3135 init_fwd_streams(); 3136 3137 /* 3138 * In the simple forwarding test, the number of forwarding cores 3139 * must be lower or equal to the number of forwarding ports. 3140 */ 3141 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3142 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 3143 cur_fwd_config.nb_fwd_lcores = 3144 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 3145 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3146 3147 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 3148 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 3149 fwd_streams[i]->rx_queue = 0; 3150 fwd_streams[i]->tx_port = 3151 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 3152 fwd_streams[i]->tx_queue = 0; 3153 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 3154 fwd_streams[i]->retry_enabled = retry_enabled; 3155 } 3156 } 3157 3158 /** 3159 * For the RSS forwarding test all streams distributed over lcores. Each stream 3160 * being composed of a RX queue to poll on a RX port for input messages, 3161 * associated with a TX queue of a TX port where to send forwarded packets. 3162 */ 3163 static void 3164 rss_fwd_config_setup(void) 3165 { 3166 portid_t rxp; 3167 portid_t txp; 3168 queueid_t rxq; 3169 queueid_t nb_q; 3170 streamid_t sm_id; 3171 3172 nb_q = nb_rxq; 3173 if (nb_q > nb_txq) 3174 nb_q = nb_txq; 3175 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3176 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3177 cur_fwd_config.nb_fwd_streams = 3178 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 3179 3180 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3181 cur_fwd_config.nb_fwd_lcores = 3182 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3183 3184 /* reinitialize forwarding streams */ 3185 init_fwd_streams(); 3186 3187 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3188 rxp = 0; rxq = 0; 3189 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 3190 struct fwd_stream *fs; 3191 3192 fs = fwd_streams[sm_id]; 3193 txp = fwd_topology_tx_port_get(rxp); 3194 fs->rx_port = fwd_ports_ids[rxp]; 3195 fs->rx_queue = rxq; 3196 fs->tx_port = fwd_ports_ids[txp]; 3197 fs->tx_queue = rxq; 3198 fs->peer_addr = fs->tx_port; 3199 fs->retry_enabled = retry_enabled; 3200 rxp++; 3201 if (rxp < nb_fwd_ports) 3202 continue; 3203 rxp = 0; 3204 rxq++; 3205 } 3206 } 3207 3208 /** 3209 * For the DCB forwarding test, each core is assigned on each traffic class. 3210 * 3211 * Each core is assigned a multi-stream, each stream being composed of 3212 * a RX queue to poll on a RX port for input messages, associated with 3213 * a TX queue of a TX port where to send forwarded packets. All RX and 3214 * TX queues are mapping to the same traffic class. 3215 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 3216 * the same core 3217 */ 3218 static void 3219 dcb_fwd_config_setup(void) 3220 { 3221 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 3222 portid_t txp, rxp = 0; 3223 queueid_t txq, rxq = 0; 3224 lcoreid_t lc_id; 3225 uint16_t nb_rx_queue, nb_tx_queue; 3226 uint16_t i, j, k, sm_id = 0; 3227 uint8_t tc = 0; 3228 3229 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3230 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3231 cur_fwd_config.nb_fwd_streams = 3232 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3233 3234 /* reinitialize forwarding streams */ 3235 init_fwd_streams(); 3236 sm_id = 0; 3237 txp = 1; 3238 /* get the dcb info on the first RX and TX ports */ 3239 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3240 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3241 3242 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3243 fwd_lcores[lc_id]->stream_nb = 0; 3244 fwd_lcores[lc_id]->stream_idx = sm_id; 3245 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 3246 /* if the nb_queue is zero, means this tc is 3247 * not enabled on the POOL 3248 */ 3249 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 3250 break; 3251 k = fwd_lcores[lc_id]->stream_nb + 3252 fwd_lcores[lc_id]->stream_idx; 3253 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 3254 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 3255 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3256 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 3257 for (j = 0; j < nb_rx_queue; j++) { 3258 struct fwd_stream *fs; 3259 3260 fs = fwd_streams[k + j]; 3261 fs->rx_port = fwd_ports_ids[rxp]; 3262 fs->rx_queue = rxq + j; 3263 fs->tx_port = fwd_ports_ids[txp]; 3264 fs->tx_queue = txq + j % nb_tx_queue; 3265 fs->peer_addr = fs->tx_port; 3266 fs->retry_enabled = retry_enabled; 3267 } 3268 fwd_lcores[lc_id]->stream_nb += 3269 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 3270 } 3271 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 3272 3273 tc++; 3274 if (tc < rxp_dcb_info.nb_tcs) 3275 continue; 3276 /* Restart from TC 0 on next RX port */ 3277 tc = 0; 3278 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 3279 rxp = (portid_t) 3280 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 3281 else 3282 rxp++; 3283 if (rxp >= nb_fwd_ports) 3284 return; 3285 /* get the dcb information on next RX and TX ports */ 3286 if ((rxp & 0x1) == 0) 3287 txp = (portid_t) (rxp + 1); 3288 else 3289 txp = (portid_t) (rxp - 1); 3290 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 3291 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 3292 } 3293 } 3294 3295 static void 3296 icmp_echo_config_setup(void) 3297 { 3298 portid_t rxp; 3299 queueid_t rxq; 3300 lcoreid_t lc_id; 3301 uint16_t sm_id; 3302 3303 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 3304 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 3305 (nb_txq * nb_fwd_ports); 3306 else 3307 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3308 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 3309 cur_fwd_config.nb_fwd_streams = 3310 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 3311 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 3312 cur_fwd_config.nb_fwd_lcores = 3313 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 3314 if (verbose_level > 0) { 3315 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 3316 __FUNCTION__, 3317 cur_fwd_config.nb_fwd_lcores, 3318 cur_fwd_config.nb_fwd_ports, 3319 cur_fwd_config.nb_fwd_streams); 3320 } 3321 3322 /* reinitialize forwarding streams */ 3323 init_fwd_streams(); 3324 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3325 rxp = 0; rxq = 0; 3326 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 3327 if (verbose_level > 0) 3328 printf(" core=%d: \n", lc_id); 3329 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3330 struct fwd_stream *fs; 3331 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3332 fs->rx_port = fwd_ports_ids[rxp]; 3333 fs->rx_queue = rxq; 3334 fs->tx_port = fs->rx_port; 3335 fs->tx_queue = rxq; 3336 fs->peer_addr = fs->tx_port; 3337 fs->retry_enabled = retry_enabled; 3338 if (verbose_level > 0) 3339 printf(" stream=%d port=%d rxq=%d txq=%d\n", 3340 sm_id, fs->rx_port, fs->rx_queue, 3341 fs->tx_queue); 3342 rxq = (queueid_t) (rxq + 1); 3343 if (rxq == nb_rxq) { 3344 rxq = 0; 3345 rxp = (portid_t) (rxp + 1); 3346 } 3347 } 3348 } 3349 } 3350 3351 void 3352 fwd_config_setup(void) 3353 { 3354 cur_fwd_config.fwd_eng = cur_fwd_eng; 3355 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 3356 icmp_echo_config_setup(); 3357 return; 3358 } 3359 3360 if ((nb_rxq > 1) && (nb_txq > 1)){ 3361 if (dcb_config) 3362 dcb_fwd_config_setup(); 3363 else 3364 rss_fwd_config_setup(); 3365 } 3366 else 3367 simple_fwd_config_setup(); 3368 } 3369 3370 static const char * 3371 mp_alloc_to_str(uint8_t mode) 3372 { 3373 switch (mode) { 3374 case MP_ALLOC_NATIVE: 3375 return "native"; 3376 case MP_ALLOC_ANON: 3377 return "anon"; 3378 case MP_ALLOC_XMEM: 3379 return "xmem"; 3380 case MP_ALLOC_XMEM_HUGE: 3381 return "xmemhuge"; 3382 case MP_ALLOC_XBUF: 3383 return "xbuf"; 3384 default: 3385 return "invalid"; 3386 } 3387 } 3388 3389 void 3390 pkt_fwd_config_display(struct fwd_config *cfg) 3391 { 3392 struct fwd_stream *fs; 3393 lcoreid_t lc_id; 3394 streamid_t sm_id; 3395 3396 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 3397 "NUMA support %s, MP allocation mode: %s\n", 3398 cfg->fwd_eng->fwd_mode_name, 3399 retry_enabled == 0 ? "" : " with retry", 3400 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 3401 numa_support == 1 ? "enabled" : "disabled", 3402 mp_alloc_to_str(mp_alloc_type)); 3403 3404 if (retry_enabled) 3405 printf("TX retry num: %u, delay between TX retries: %uus\n", 3406 burst_tx_retry_num, burst_tx_delay_time); 3407 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 3408 printf("Logical Core %u (socket %u) forwards packets on " 3409 "%d streams:", 3410 fwd_lcores_cpuids[lc_id], 3411 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 3412 fwd_lcores[lc_id]->stream_nb); 3413 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 3414 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 3415 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 3416 "P=%d/Q=%d (socket %u) ", 3417 fs->rx_port, fs->rx_queue, 3418 ports[fs->rx_port].socket_id, 3419 fs->tx_port, fs->tx_queue, 3420 ports[fs->tx_port].socket_id); 3421 print_ethaddr("peer=", 3422 &peer_eth_addrs[fs->peer_addr]); 3423 } 3424 printf("\n"); 3425 } 3426 printf("\n"); 3427 } 3428 3429 void 3430 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 3431 { 3432 struct rte_ether_addr new_peer_addr; 3433 if (!rte_eth_dev_is_valid_port(port_id)) { 3434 printf("Error: Invalid port number %i\n", port_id); 3435 return; 3436 } 3437 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 3438 printf("Error: Invalid ethernet address: %s\n", peer_addr); 3439 return; 3440 } 3441 peer_eth_addrs[port_id] = new_peer_addr; 3442 } 3443 3444 int 3445 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 3446 { 3447 unsigned int i; 3448 unsigned int lcore_cpuid; 3449 int record_now; 3450 3451 record_now = 0; 3452 again: 3453 for (i = 0; i < nb_lc; i++) { 3454 lcore_cpuid = lcorelist[i]; 3455 if (! rte_lcore_is_enabled(lcore_cpuid)) { 3456 printf("lcore %u not enabled\n", lcore_cpuid); 3457 return -1; 3458 } 3459 if (lcore_cpuid == rte_get_main_lcore()) { 3460 printf("lcore %u cannot be masked on for running " 3461 "packet forwarding, which is the main lcore " 3462 "and reserved for command line parsing only\n", 3463 lcore_cpuid); 3464 return -1; 3465 } 3466 if (record_now) 3467 fwd_lcores_cpuids[i] = lcore_cpuid; 3468 } 3469 if (record_now == 0) { 3470 record_now = 1; 3471 goto again; 3472 } 3473 nb_cfg_lcores = (lcoreid_t) nb_lc; 3474 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 3475 printf("previous number of forwarding cores %u - changed to " 3476 "number of configured cores %u\n", 3477 (unsigned int) nb_fwd_lcores, nb_lc); 3478 nb_fwd_lcores = (lcoreid_t) nb_lc; 3479 } 3480 3481 return 0; 3482 } 3483 3484 int 3485 set_fwd_lcores_mask(uint64_t lcoremask) 3486 { 3487 unsigned int lcorelist[64]; 3488 unsigned int nb_lc; 3489 unsigned int i; 3490 3491 if (lcoremask == 0) { 3492 printf("Invalid NULL mask of cores\n"); 3493 return -1; 3494 } 3495 nb_lc = 0; 3496 for (i = 0; i < 64; i++) { 3497 if (! ((uint64_t)(1ULL << i) & lcoremask)) 3498 continue; 3499 lcorelist[nb_lc++] = i; 3500 } 3501 return set_fwd_lcores_list(lcorelist, nb_lc); 3502 } 3503 3504 void 3505 set_fwd_lcores_number(uint16_t nb_lc) 3506 { 3507 if (nb_lc > nb_cfg_lcores) { 3508 printf("nb fwd cores %u > %u (max. number of configured " 3509 "lcores) - ignored\n", 3510 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 3511 return; 3512 } 3513 nb_fwd_lcores = (lcoreid_t) nb_lc; 3514 printf("Number of forwarding cores set to %u\n", 3515 (unsigned int) nb_fwd_lcores); 3516 } 3517 3518 void 3519 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 3520 { 3521 unsigned int i; 3522 portid_t port_id; 3523 int record_now; 3524 3525 record_now = 0; 3526 again: 3527 for (i = 0; i < nb_pt; i++) { 3528 port_id = (portid_t) portlist[i]; 3529 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3530 return; 3531 if (record_now) 3532 fwd_ports_ids[i] = port_id; 3533 } 3534 if (record_now == 0) { 3535 record_now = 1; 3536 goto again; 3537 } 3538 nb_cfg_ports = (portid_t) nb_pt; 3539 if (nb_fwd_ports != (portid_t) nb_pt) { 3540 printf("previous number of forwarding ports %u - changed to " 3541 "number of configured ports %u\n", 3542 (unsigned int) nb_fwd_ports, nb_pt); 3543 nb_fwd_ports = (portid_t) nb_pt; 3544 } 3545 } 3546 3547 /** 3548 * Parse the user input and obtain the list of forwarding ports 3549 * 3550 * @param[in] list 3551 * String containing the user input. User can specify 3552 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 3553 * For example, if the user wants to use all the available 3554 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 3555 * If the user wants to use only the ports 1,2 then the input 3556 * is 1,2. 3557 * valid characters are '-' and ',' 3558 * @param[out] values 3559 * This array will be filled with a list of port IDs 3560 * based on the user input 3561 * Note that duplicate entries are discarded and only the first 3562 * count entries in this array are port IDs and all the rest 3563 * will contain default values 3564 * @param[in] maxsize 3565 * This parameter denotes 2 things 3566 * 1) Number of elements in the values array 3567 * 2) Maximum value of each element in the values array 3568 * @return 3569 * On success, returns total count of parsed port IDs 3570 * On failure, returns 0 3571 */ 3572 static unsigned int 3573 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 3574 { 3575 unsigned int count = 0; 3576 char *end = NULL; 3577 int min, max; 3578 int value, i; 3579 unsigned int marked[maxsize]; 3580 3581 if (list == NULL || values == NULL) 3582 return 0; 3583 3584 for (i = 0; i < (int)maxsize; i++) 3585 marked[i] = 0; 3586 3587 min = INT_MAX; 3588 3589 do { 3590 /*Remove the blank spaces if any*/ 3591 while (isblank(*list)) 3592 list++; 3593 if (*list == '\0') 3594 break; 3595 errno = 0; 3596 value = strtol(list, &end, 10); 3597 if (errno || end == NULL) 3598 return 0; 3599 if (value < 0 || value >= (int)maxsize) 3600 return 0; 3601 while (isblank(*end)) 3602 end++; 3603 if (*end == '-' && min == INT_MAX) { 3604 min = value; 3605 } else if ((*end == ',') || (*end == '\0')) { 3606 max = value; 3607 if (min == INT_MAX) 3608 min = value; 3609 for (i = min; i <= max; i++) { 3610 if (count < maxsize) { 3611 if (marked[i]) 3612 continue; 3613 values[count] = i; 3614 marked[i] = 1; 3615 count++; 3616 } 3617 } 3618 min = INT_MAX; 3619 } else 3620 return 0; 3621 list = end + 1; 3622 } while (*end != '\0'); 3623 3624 return count; 3625 } 3626 3627 void 3628 parse_fwd_portlist(const char *portlist) 3629 { 3630 unsigned int portcount; 3631 unsigned int portindex[RTE_MAX_ETHPORTS]; 3632 unsigned int i, valid_port_count = 0; 3633 3634 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 3635 if (!portcount) 3636 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 3637 3638 /* 3639 * Here we verify the validity of the ports 3640 * and thereby calculate the total number of 3641 * valid ports 3642 */ 3643 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 3644 if (rte_eth_dev_is_valid_port(portindex[i])) { 3645 portindex[valid_port_count] = portindex[i]; 3646 valid_port_count++; 3647 } 3648 } 3649 3650 set_fwd_ports_list(portindex, valid_port_count); 3651 } 3652 3653 void 3654 set_fwd_ports_mask(uint64_t portmask) 3655 { 3656 unsigned int portlist[64]; 3657 unsigned int nb_pt; 3658 unsigned int i; 3659 3660 if (portmask == 0) { 3661 printf("Invalid NULL mask of ports\n"); 3662 return; 3663 } 3664 nb_pt = 0; 3665 RTE_ETH_FOREACH_DEV(i) { 3666 if (! ((uint64_t)(1ULL << i) & portmask)) 3667 continue; 3668 portlist[nb_pt++] = i; 3669 } 3670 set_fwd_ports_list(portlist, nb_pt); 3671 } 3672 3673 void 3674 set_fwd_ports_number(uint16_t nb_pt) 3675 { 3676 if (nb_pt > nb_cfg_ports) { 3677 printf("nb fwd ports %u > %u (number of configured " 3678 "ports) - ignored\n", 3679 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 3680 return; 3681 } 3682 nb_fwd_ports = (portid_t) nb_pt; 3683 printf("Number of forwarding ports set to %u\n", 3684 (unsigned int) nb_fwd_ports); 3685 } 3686 3687 int 3688 port_is_forwarding(portid_t port_id) 3689 { 3690 unsigned int i; 3691 3692 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3693 return -1; 3694 3695 for (i = 0; i < nb_fwd_ports; i++) { 3696 if (fwd_ports_ids[i] == port_id) 3697 return 1; 3698 } 3699 3700 return 0; 3701 } 3702 3703 void 3704 set_nb_pkt_per_burst(uint16_t nb) 3705 { 3706 if (nb > MAX_PKT_BURST) { 3707 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 3708 " ignored\n", 3709 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 3710 return; 3711 } 3712 nb_pkt_per_burst = nb; 3713 printf("Number of packets per burst set to %u\n", 3714 (unsigned int) nb_pkt_per_burst); 3715 } 3716 3717 static const char * 3718 tx_split_get_name(enum tx_pkt_split split) 3719 { 3720 uint32_t i; 3721 3722 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3723 if (tx_split_name[i].split == split) 3724 return tx_split_name[i].name; 3725 } 3726 return NULL; 3727 } 3728 3729 void 3730 set_tx_pkt_split(const char *name) 3731 { 3732 uint32_t i; 3733 3734 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 3735 if (strcmp(tx_split_name[i].name, name) == 0) { 3736 tx_pkt_split = tx_split_name[i].split; 3737 return; 3738 } 3739 } 3740 printf("unknown value: \"%s\"\n", name); 3741 } 3742 3743 int 3744 parse_fec_mode(const char *name, uint32_t *mode) 3745 { 3746 uint8_t i; 3747 3748 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 3749 if (strcmp(fec_mode_name[i].name, name) == 0) { 3750 *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 3751 return 0; 3752 } 3753 } 3754 return -1; 3755 } 3756 3757 void 3758 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 3759 { 3760 unsigned int i, j; 3761 3762 printf("FEC capabilities:\n"); 3763 3764 for (i = 0; i < num; i++) { 3765 printf("%s : ", 3766 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 3767 3768 for (j = RTE_ETH_FEC_AUTO; j < RTE_DIM(fec_mode_name); j++) { 3769 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 3770 speed_fec_capa[i].capa) 3771 printf("%s ", fec_mode_name[j].name); 3772 } 3773 printf("\n"); 3774 } 3775 } 3776 3777 void 3778 show_rx_pkt_offsets(void) 3779 { 3780 uint32_t i, n; 3781 3782 n = rx_pkt_nb_offs; 3783 printf("Number of offsets: %u\n", n); 3784 if (n) { 3785 printf("Segment offsets: "); 3786 for (i = 0; i != n - 1; i++) 3787 printf("%hu,", rx_pkt_seg_offsets[i]); 3788 printf("%hu\n", rx_pkt_seg_lengths[i]); 3789 } 3790 } 3791 3792 void 3793 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 3794 { 3795 unsigned int i; 3796 3797 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 3798 printf("nb segments per RX packets=%u >= " 3799 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 3800 return; 3801 } 3802 3803 /* 3804 * No extra check here, the segment length will be checked by PMD 3805 * in the extended queue setup. 3806 */ 3807 for (i = 0; i < nb_offs; i++) { 3808 if (seg_offsets[i] >= UINT16_MAX) { 3809 printf("offset[%u]=%u > UINT16_MAX - give up\n", 3810 i, seg_offsets[i]); 3811 return; 3812 } 3813 } 3814 3815 for (i = 0; i < nb_offs; i++) 3816 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 3817 3818 rx_pkt_nb_offs = (uint8_t) nb_offs; 3819 } 3820 3821 void 3822 show_rx_pkt_segments(void) 3823 { 3824 uint32_t i, n; 3825 3826 n = rx_pkt_nb_segs; 3827 printf("Number of segments: %u\n", n); 3828 if (n) { 3829 printf("Segment sizes: "); 3830 for (i = 0; i != n - 1; i++) 3831 printf("%hu,", rx_pkt_seg_lengths[i]); 3832 printf("%hu\n", rx_pkt_seg_lengths[i]); 3833 } 3834 } 3835 3836 void 3837 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3838 { 3839 unsigned int i; 3840 3841 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 3842 printf("nb segments per RX packets=%u >= " 3843 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 3844 return; 3845 } 3846 3847 /* 3848 * No extra check here, the segment length will be checked by PMD 3849 * in the extended queue setup. 3850 */ 3851 for (i = 0; i < nb_segs; i++) { 3852 if (seg_lengths[i] >= UINT16_MAX) { 3853 printf("length[%u]=%u > UINT16_MAX - give up\n", 3854 i, seg_lengths[i]); 3855 return; 3856 } 3857 } 3858 3859 for (i = 0; i < nb_segs; i++) 3860 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3861 3862 rx_pkt_nb_segs = (uint8_t) nb_segs; 3863 } 3864 3865 void 3866 show_tx_pkt_segments(void) 3867 { 3868 uint32_t i, n; 3869 const char *split; 3870 3871 n = tx_pkt_nb_segs; 3872 split = tx_split_get_name(tx_pkt_split); 3873 3874 printf("Number of segments: %u\n", n); 3875 printf("Segment sizes: "); 3876 for (i = 0; i != n - 1; i++) 3877 printf("%hu,", tx_pkt_seg_lengths[i]); 3878 printf("%hu\n", tx_pkt_seg_lengths[i]); 3879 printf("Split packet: %s\n", split); 3880 } 3881 3882 static bool 3883 nb_segs_is_invalid(unsigned int nb_segs) 3884 { 3885 uint16_t ring_size; 3886 uint16_t queue_id; 3887 uint16_t port_id; 3888 int ret; 3889 3890 RTE_ETH_FOREACH_DEV(port_id) { 3891 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 3892 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 3893 3894 if (ret) 3895 return true; 3896 3897 if (ring_size < nb_segs) { 3898 printf("nb segments per TX packets=%u >= " 3899 "TX queue(%u) ring_size=%u - ignored\n", 3900 nb_segs, queue_id, ring_size); 3901 return true; 3902 } 3903 } 3904 } 3905 3906 return false; 3907 } 3908 3909 void 3910 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 3911 { 3912 uint16_t tx_pkt_len; 3913 unsigned int i; 3914 3915 if (nb_segs_is_invalid(nb_segs)) 3916 return; 3917 3918 /* 3919 * Check that each segment length is greater or equal than 3920 * the mbuf data sise. 3921 * Check also that the total packet length is greater or equal than the 3922 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 3923 * 20 + 8). 3924 */ 3925 tx_pkt_len = 0; 3926 for (i = 0; i < nb_segs; i++) { 3927 if (seg_lengths[i] > mbuf_data_size[0]) { 3928 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 3929 i, seg_lengths[i], mbuf_data_size[0]); 3930 return; 3931 } 3932 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 3933 } 3934 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 3935 printf("total packet length=%u < %d - give up\n", 3936 (unsigned) tx_pkt_len, 3937 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 3938 return; 3939 } 3940 3941 for (i = 0; i < nb_segs; i++) 3942 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 3943 3944 tx_pkt_length = tx_pkt_len; 3945 tx_pkt_nb_segs = (uint8_t) nb_segs; 3946 } 3947 3948 void 3949 show_tx_pkt_times(void) 3950 { 3951 printf("Interburst gap: %u\n", tx_pkt_times_inter); 3952 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 3953 } 3954 3955 void 3956 set_tx_pkt_times(unsigned int *tx_times) 3957 { 3958 tx_pkt_times_inter = tx_times[0]; 3959 tx_pkt_times_intra = tx_times[1]; 3960 } 3961 3962 void 3963 setup_gro(const char *onoff, portid_t port_id) 3964 { 3965 if (!rte_eth_dev_is_valid_port(port_id)) { 3966 printf("invalid port id %u\n", port_id); 3967 return; 3968 } 3969 if (test_done == 0) { 3970 printf("Before enable/disable GRO," 3971 " please stop forwarding first\n"); 3972 return; 3973 } 3974 if (strcmp(onoff, "on") == 0) { 3975 if (gro_ports[port_id].enable != 0) { 3976 printf("Port %u has enabled GRO. Please" 3977 " disable GRO first\n", port_id); 3978 return; 3979 } 3980 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 3981 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 3982 gro_ports[port_id].param.max_flow_num = 3983 GRO_DEFAULT_FLOW_NUM; 3984 gro_ports[port_id].param.max_item_per_flow = 3985 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 3986 } 3987 gro_ports[port_id].enable = 1; 3988 } else { 3989 if (gro_ports[port_id].enable == 0) { 3990 printf("Port %u has disabled GRO\n", port_id); 3991 return; 3992 } 3993 gro_ports[port_id].enable = 0; 3994 } 3995 } 3996 3997 void 3998 setup_gro_flush_cycles(uint8_t cycles) 3999 { 4000 if (test_done == 0) { 4001 printf("Before change flush interval for GRO," 4002 " please stop forwarding first.\n"); 4003 return; 4004 } 4005 4006 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 4007 GRO_DEFAULT_FLUSH_CYCLES) { 4008 printf("The flushing cycle be in the range" 4009 " of 1 to %u. Revert to the default" 4010 " value %u.\n", 4011 GRO_MAX_FLUSH_CYCLES, 4012 GRO_DEFAULT_FLUSH_CYCLES); 4013 cycles = GRO_DEFAULT_FLUSH_CYCLES; 4014 } 4015 4016 gro_flush_cycles = cycles; 4017 } 4018 4019 void 4020 show_gro(portid_t port_id) 4021 { 4022 struct rte_gro_param *param; 4023 uint32_t max_pkts_num; 4024 4025 param = &gro_ports[port_id].param; 4026 4027 if (!rte_eth_dev_is_valid_port(port_id)) { 4028 printf("Invalid port id %u.\n", port_id); 4029 return; 4030 } 4031 if (gro_ports[port_id].enable) { 4032 printf("GRO type: TCP/IPv4\n"); 4033 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4034 max_pkts_num = param->max_flow_num * 4035 param->max_item_per_flow; 4036 } else 4037 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 4038 printf("Max number of packets to perform GRO: %u\n", 4039 max_pkts_num); 4040 printf("Flushing cycles: %u\n", gro_flush_cycles); 4041 } else 4042 printf("Port %u doesn't enable GRO.\n", port_id); 4043 } 4044 4045 void 4046 setup_gso(const char *mode, portid_t port_id) 4047 { 4048 if (!rte_eth_dev_is_valid_port(port_id)) { 4049 printf("invalid port id %u\n", port_id); 4050 return; 4051 } 4052 if (strcmp(mode, "on") == 0) { 4053 if (test_done == 0) { 4054 printf("before enabling GSO," 4055 " please stop forwarding first\n"); 4056 return; 4057 } 4058 gso_ports[port_id].enable = 1; 4059 } else if (strcmp(mode, "off") == 0) { 4060 if (test_done == 0) { 4061 printf("before disabling GSO," 4062 " please stop forwarding first\n"); 4063 return; 4064 } 4065 gso_ports[port_id].enable = 0; 4066 } 4067 } 4068 4069 char* 4070 list_pkt_forwarding_modes(void) 4071 { 4072 static char fwd_modes[128] = ""; 4073 const char *separator = "|"; 4074 struct fwd_engine *fwd_eng; 4075 unsigned i = 0; 4076 4077 if (strlen (fwd_modes) == 0) { 4078 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4079 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4080 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4081 strncat(fwd_modes, separator, 4082 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 4083 } 4084 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4085 } 4086 4087 return fwd_modes; 4088 } 4089 4090 char* 4091 list_pkt_forwarding_retry_modes(void) 4092 { 4093 static char fwd_modes[128] = ""; 4094 const char *separator = "|"; 4095 struct fwd_engine *fwd_eng; 4096 unsigned i = 0; 4097 4098 if (strlen(fwd_modes) == 0) { 4099 while ((fwd_eng = fwd_engines[i++]) != NULL) { 4100 if (fwd_eng == &rx_only_engine) 4101 continue; 4102 strncat(fwd_modes, fwd_eng->fwd_mode_name, 4103 sizeof(fwd_modes) - 4104 strlen(fwd_modes) - 1); 4105 strncat(fwd_modes, separator, 4106 sizeof(fwd_modes) - 4107 strlen(fwd_modes) - 1); 4108 } 4109 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 4110 } 4111 4112 return fwd_modes; 4113 } 4114 4115 void 4116 set_pkt_forwarding_mode(const char *fwd_mode_name) 4117 { 4118 struct fwd_engine *fwd_eng; 4119 unsigned i; 4120 4121 i = 0; 4122 while ((fwd_eng = fwd_engines[i]) != NULL) { 4123 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 4124 printf("Set %s packet forwarding mode%s\n", 4125 fwd_mode_name, 4126 retry_enabled == 0 ? "" : " with retry"); 4127 cur_fwd_eng = fwd_eng; 4128 return; 4129 } 4130 i++; 4131 } 4132 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 4133 } 4134 4135 void 4136 add_rx_dump_callbacks(portid_t portid) 4137 { 4138 struct rte_eth_dev_info dev_info; 4139 uint16_t queue; 4140 int ret; 4141 4142 if (port_id_is_invalid(portid, ENABLED_WARN)) 4143 return; 4144 4145 ret = eth_dev_info_get_print_err(portid, &dev_info); 4146 if (ret != 0) 4147 return; 4148 4149 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4150 if (!ports[portid].rx_dump_cb[queue]) 4151 ports[portid].rx_dump_cb[queue] = 4152 rte_eth_add_rx_callback(portid, queue, 4153 dump_rx_pkts, NULL); 4154 } 4155 4156 void 4157 add_tx_dump_callbacks(portid_t portid) 4158 { 4159 struct rte_eth_dev_info dev_info; 4160 uint16_t queue; 4161 int ret; 4162 4163 if (port_id_is_invalid(portid, ENABLED_WARN)) 4164 return; 4165 4166 ret = eth_dev_info_get_print_err(portid, &dev_info); 4167 if (ret != 0) 4168 return; 4169 4170 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4171 if (!ports[portid].tx_dump_cb[queue]) 4172 ports[portid].tx_dump_cb[queue] = 4173 rte_eth_add_tx_callback(portid, queue, 4174 dump_tx_pkts, NULL); 4175 } 4176 4177 void 4178 remove_rx_dump_callbacks(portid_t portid) 4179 { 4180 struct rte_eth_dev_info dev_info; 4181 uint16_t queue; 4182 int ret; 4183 4184 if (port_id_is_invalid(portid, ENABLED_WARN)) 4185 return; 4186 4187 ret = eth_dev_info_get_print_err(portid, &dev_info); 4188 if (ret != 0) 4189 return; 4190 4191 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 4192 if (ports[portid].rx_dump_cb[queue]) { 4193 rte_eth_remove_rx_callback(portid, queue, 4194 ports[portid].rx_dump_cb[queue]); 4195 ports[portid].rx_dump_cb[queue] = NULL; 4196 } 4197 } 4198 4199 void 4200 remove_tx_dump_callbacks(portid_t portid) 4201 { 4202 struct rte_eth_dev_info dev_info; 4203 uint16_t queue; 4204 int ret; 4205 4206 if (port_id_is_invalid(portid, ENABLED_WARN)) 4207 return; 4208 4209 ret = eth_dev_info_get_print_err(portid, &dev_info); 4210 if (ret != 0) 4211 return; 4212 4213 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 4214 if (ports[portid].tx_dump_cb[queue]) { 4215 rte_eth_remove_tx_callback(portid, queue, 4216 ports[portid].tx_dump_cb[queue]); 4217 ports[portid].tx_dump_cb[queue] = NULL; 4218 } 4219 } 4220 4221 void 4222 configure_rxtx_dump_callbacks(uint16_t verbose) 4223 { 4224 portid_t portid; 4225 4226 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4227 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 4228 return; 4229 #endif 4230 4231 RTE_ETH_FOREACH_DEV(portid) 4232 { 4233 if (verbose == 1 || verbose > 2) 4234 add_rx_dump_callbacks(portid); 4235 else 4236 remove_rx_dump_callbacks(portid); 4237 if (verbose >= 2) 4238 add_tx_dump_callbacks(portid); 4239 else 4240 remove_tx_dump_callbacks(portid); 4241 } 4242 } 4243 4244 void 4245 set_verbose_level(uint16_t vb_level) 4246 { 4247 printf("Change verbose level from %u to %u\n", 4248 (unsigned int) verbose_level, (unsigned int) vb_level); 4249 verbose_level = vb_level; 4250 configure_rxtx_dump_callbacks(verbose_level); 4251 } 4252 4253 void 4254 vlan_extend_set(portid_t port_id, int on) 4255 { 4256 int diag; 4257 int vlan_offload; 4258 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4259 4260 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4261 return; 4262 4263 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4264 4265 if (on) { 4266 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 4267 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 4268 } else { 4269 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 4270 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 4271 } 4272 4273 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4274 if (diag < 0) { 4275 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 4276 "diag=%d\n", port_id, on, diag); 4277 return; 4278 } 4279 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4280 } 4281 4282 void 4283 rx_vlan_strip_set(portid_t port_id, int on) 4284 { 4285 int diag; 4286 int vlan_offload; 4287 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4288 4289 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4290 return; 4291 4292 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4293 4294 if (on) { 4295 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 4296 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 4297 } else { 4298 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 4299 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 4300 } 4301 4302 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4303 if (diag < 0) { 4304 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 4305 "diag=%d\n", port_id, on, diag); 4306 return; 4307 } 4308 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4309 } 4310 4311 void 4312 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 4313 { 4314 int diag; 4315 4316 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4317 return; 4318 4319 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 4320 if (diag < 0) 4321 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 4322 "diag=%d\n", port_id, queue_id, on, diag); 4323 } 4324 4325 void 4326 rx_vlan_filter_set(portid_t port_id, int on) 4327 { 4328 int diag; 4329 int vlan_offload; 4330 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4331 4332 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4333 return; 4334 4335 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4336 4337 if (on) { 4338 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 4339 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 4340 } else { 4341 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 4342 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 4343 } 4344 4345 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4346 if (diag < 0) { 4347 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 4348 "diag=%d\n", port_id, on, diag); 4349 return; 4350 } 4351 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4352 } 4353 4354 void 4355 rx_vlan_qinq_strip_set(portid_t port_id, int on) 4356 { 4357 int diag; 4358 int vlan_offload; 4359 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 4360 4361 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4362 return; 4363 4364 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 4365 4366 if (on) { 4367 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; 4368 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 4369 } else { 4370 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; 4371 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 4372 } 4373 4374 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 4375 if (diag < 0) { 4376 printf("%s(port_pi=%d, on=%d) failed " 4377 "diag=%d\n", __func__, port_id, on, diag); 4378 return; 4379 } 4380 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 4381 } 4382 4383 int 4384 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 4385 { 4386 int diag; 4387 4388 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4389 return 1; 4390 if (vlan_id_is_invalid(vlan_id)) 4391 return 1; 4392 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 4393 if (diag == 0) 4394 return 0; 4395 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 4396 "diag=%d\n", 4397 port_id, vlan_id, on, diag); 4398 return -1; 4399 } 4400 4401 void 4402 rx_vlan_all_filter_set(portid_t port_id, int on) 4403 { 4404 uint16_t vlan_id; 4405 4406 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4407 return; 4408 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 4409 if (rx_vft_set(port_id, vlan_id, on)) 4410 break; 4411 } 4412 } 4413 4414 void 4415 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 4416 { 4417 int diag; 4418 4419 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4420 return; 4421 4422 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 4423 if (diag == 0) 4424 return; 4425 4426 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 4427 "diag=%d\n", 4428 port_id, vlan_type, tp_id, diag); 4429 } 4430 4431 void 4432 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 4433 { 4434 struct rte_eth_dev_info dev_info; 4435 int ret; 4436 4437 if (vlan_id_is_invalid(vlan_id)) 4438 return; 4439 4440 if (ports[port_id].dev_conf.txmode.offloads & 4441 DEV_TX_OFFLOAD_QINQ_INSERT) { 4442 printf("Error, as QinQ has been enabled.\n"); 4443 return; 4444 } 4445 4446 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4447 if (ret != 0) 4448 return; 4449 4450 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 4451 printf("Error: vlan insert is not supported by port %d\n", 4452 port_id); 4453 return; 4454 } 4455 4456 tx_vlan_reset(port_id); 4457 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 4458 ports[port_id].tx_vlan_id = vlan_id; 4459 } 4460 4461 void 4462 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 4463 { 4464 struct rte_eth_dev_info dev_info; 4465 int ret; 4466 4467 if (vlan_id_is_invalid(vlan_id)) 4468 return; 4469 if (vlan_id_is_invalid(vlan_id_outer)) 4470 return; 4471 4472 ret = eth_dev_info_get_print_err(port_id, &dev_info); 4473 if (ret != 0) 4474 return; 4475 4476 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 4477 printf("Error: qinq insert not supported by port %d\n", 4478 port_id); 4479 return; 4480 } 4481 4482 tx_vlan_reset(port_id); 4483 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | 4484 DEV_TX_OFFLOAD_QINQ_INSERT); 4485 ports[port_id].tx_vlan_id = vlan_id; 4486 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 4487 } 4488 4489 void 4490 tx_vlan_reset(portid_t port_id) 4491 { 4492 ports[port_id].dev_conf.txmode.offloads &= 4493 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 4494 DEV_TX_OFFLOAD_QINQ_INSERT); 4495 ports[port_id].tx_vlan_id = 0; 4496 ports[port_id].tx_vlan_id_outer = 0; 4497 } 4498 4499 void 4500 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 4501 { 4502 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4503 return; 4504 4505 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 4506 } 4507 4508 void 4509 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 4510 { 4511 uint16_t i; 4512 uint8_t existing_mapping_found = 0; 4513 4514 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4515 return; 4516 4517 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 4518 return; 4519 4520 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 4521 printf("map_value not in required range 0..%d\n", 4522 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 4523 return; 4524 } 4525 4526 if (!is_rx) { /*then tx*/ 4527 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 4528 if ((tx_queue_stats_mappings[i].port_id == port_id) && 4529 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 4530 tx_queue_stats_mappings[i].stats_counter_id = map_value; 4531 existing_mapping_found = 1; 4532 break; 4533 } 4534 } 4535 if (!existing_mapping_found) { /* A new additional mapping... */ 4536 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 4537 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 4538 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 4539 nb_tx_queue_stats_mappings++; 4540 } 4541 } 4542 else { /*rx*/ 4543 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 4544 if ((rx_queue_stats_mappings[i].port_id == port_id) && 4545 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 4546 rx_queue_stats_mappings[i].stats_counter_id = map_value; 4547 existing_mapping_found = 1; 4548 break; 4549 } 4550 } 4551 if (!existing_mapping_found) { /* A new additional mapping... */ 4552 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 4553 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 4554 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 4555 nb_rx_queue_stats_mappings++; 4556 } 4557 } 4558 } 4559 4560 void 4561 set_xstats_hide_zero(uint8_t on_off) 4562 { 4563 xstats_hide_zero = on_off; 4564 } 4565 4566 void 4567 set_record_core_cycles(uint8_t on_off) 4568 { 4569 record_core_cycles = on_off; 4570 } 4571 4572 void 4573 set_record_burst_stats(uint8_t on_off) 4574 { 4575 record_burst_stats = on_off; 4576 } 4577 4578 static inline void 4579 print_fdir_mask(struct rte_eth_fdir_masks *mask) 4580 { 4581 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 4582 4583 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4584 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 4585 " tunnel_id: 0x%08x", 4586 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 4587 rte_be_to_cpu_32(mask->tunnel_id_mask)); 4588 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 4589 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 4590 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 4591 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 4592 4593 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 4594 rte_be_to_cpu_16(mask->src_port_mask), 4595 rte_be_to_cpu_16(mask->dst_port_mask)); 4596 4597 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4598 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 4599 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 4600 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 4601 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 4602 4603 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 4604 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 4605 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 4606 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 4607 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 4608 } 4609 4610 printf("\n"); 4611 } 4612 4613 static inline void 4614 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4615 { 4616 struct rte_eth_flex_payload_cfg *cfg; 4617 uint32_t i, j; 4618 4619 for (i = 0; i < flex_conf->nb_payloads; i++) { 4620 cfg = &flex_conf->flex_set[i]; 4621 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 4622 printf("\n RAW: "); 4623 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 4624 printf("\n L2_PAYLOAD: "); 4625 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 4626 printf("\n L3_PAYLOAD: "); 4627 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 4628 printf("\n L4_PAYLOAD: "); 4629 else 4630 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 4631 for (j = 0; j < num; j++) 4632 printf(" %-5u", cfg->src_offset[j]); 4633 } 4634 printf("\n"); 4635 } 4636 4637 static char * 4638 flowtype_to_str(uint16_t flow_type) 4639 { 4640 struct flow_type_info { 4641 char str[32]; 4642 uint16_t ftype; 4643 }; 4644 4645 uint8_t i; 4646 static struct flow_type_info flowtype_str_table[] = { 4647 {"raw", RTE_ETH_FLOW_RAW}, 4648 {"ipv4", RTE_ETH_FLOW_IPV4}, 4649 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 4650 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 4651 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 4652 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 4653 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 4654 {"ipv6", RTE_ETH_FLOW_IPV6}, 4655 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 4656 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 4657 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 4658 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 4659 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 4660 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 4661 {"port", RTE_ETH_FLOW_PORT}, 4662 {"vxlan", RTE_ETH_FLOW_VXLAN}, 4663 {"geneve", RTE_ETH_FLOW_GENEVE}, 4664 {"nvgre", RTE_ETH_FLOW_NVGRE}, 4665 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 4666 }; 4667 4668 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 4669 if (flowtype_str_table[i].ftype == flow_type) 4670 return flowtype_str_table[i].str; 4671 } 4672 4673 return NULL; 4674 } 4675 4676 static inline void 4677 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 4678 { 4679 struct rte_eth_fdir_flex_mask *mask; 4680 uint32_t i, j; 4681 char *p; 4682 4683 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 4684 mask = &flex_conf->flex_mask[i]; 4685 p = flowtype_to_str(mask->flow_type); 4686 printf("\n %s:\t", p ? p : "unknown"); 4687 for (j = 0; j < num; j++) 4688 printf(" %02x", mask->mask[j]); 4689 } 4690 printf("\n"); 4691 } 4692 4693 static inline void 4694 print_fdir_flow_type(uint32_t flow_types_mask) 4695 { 4696 int i; 4697 char *p; 4698 4699 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 4700 if (!(flow_types_mask & (1 << i))) 4701 continue; 4702 p = flowtype_to_str(i); 4703 if (p) 4704 printf(" %s", p); 4705 else 4706 printf(" unknown"); 4707 } 4708 printf("\n"); 4709 } 4710 4711 static int 4712 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 4713 struct rte_eth_fdir_stats *fdir_stat) 4714 { 4715 int ret; 4716 4717 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 4718 if (!ret) { 4719 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 4720 RTE_ETH_FILTER_INFO, fdir_info); 4721 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 4722 RTE_ETH_FILTER_STATS, fdir_stat); 4723 return 0; 4724 } 4725 4726 #ifdef RTE_NET_I40E 4727 if (ret == -ENOTSUP) { 4728 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 4729 if (!ret) 4730 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 4731 } 4732 #endif 4733 #ifdef RTE_NET_IXGBE 4734 if (ret == -ENOTSUP) { 4735 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 4736 if (!ret) 4737 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 4738 } 4739 #endif 4740 switch (ret) { 4741 case 0: 4742 break; 4743 case -ENOTSUP: 4744 printf("\n FDIR is not supported on port %-2d\n", 4745 port_id); 4746 break; 4747 default: 4748 printf("programming error: (%s)\n", strerror(-ret)); 4749 break; 4750 } 4751 return ret; 4752 } 4753 4754 void 4755 fdir_get_infos(portid_t port_id) 4756 { 4757 struct rte_eth_fdir_stats fdir_stat; 4758 struct rte_eth_fdir_info fdir_info; 4759 4760 static const char *fdir_stats_border = "########################"; 4761 4762 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4763 return; 4764 4765 memset(&fdir_info, 0, sizeof(fdir_info)); 4766 memset(&fdir_stat, 0, sizeof(fdir_stat)); 4767 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 4768 return; 4769 4770 printf("\n %s FDIR infos for port %-2d %s\n", 4771 fdir_stats_border, port_id, fdir_stats_border); 4772 printf(" MODE: "); 4773 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 4774 printf(" PERFECT\n"); 4775 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 4776 printf(" PERFECT-MAC-VLAN\n"); 4777 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 4778 printf(" PERFECT-TUNNEL\n"); 4779 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 4780 printf(" SIGNATURE\n"); 4781 else 4782 printf(" DISABLE\n"); 4783 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 4784 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 4785 printf(" SUPPORTED FLOW TYPE: "); 4786 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 4787 } 4788 printf(" FLEX PAYLOAD INFO:\n"); 4789 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 4790 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 4791 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 4792 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 4793 fdir_info.flex_payload_unit, 4794 fdir_info.max_flex_payload_segment_num, 4795 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 4796 printf(" MASK: "); 4797 print_fdir_mask(&fdir_info.mask); 4798 if (fdir_info.flex_conf.nb_payloads > 0) { 4799 printf(" FLEX PAYLOAD SRC OFFSET:"); 4800 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4801 } 4802 if (fdir_info.flex_conf.nb_flexmasks > 0) { 4803 printf(" FLEX MASK CFG:"); 4804 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 4805 } 4806 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 4807 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 4808 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 4809 fdir_info.guarant_spc, fdir_info.best_spc); 4810 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 4811 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 4812 " add: %-10"PRIu64" remove: %"PRIu64"\n" 4813 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 4814 fdir_stat.collision, fdir_stat.free, 4815 fdir_stat.maxhash, fdir_stat.maxlen, 4816 fdir_stat.add, fdir_stat.remove, 4817 fdir_stat.f_add, fdir_stat.f_remove); 4818 printf(" %s############################%s\n", 4819 fdir_stats_border, fdir_stats_border); 4820 } 4821 4822 void 4823 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 4824 { 4825 struct rte_port *port; 4826 struct rte_eth_fdir_flex_conf *flex_conf; 4827 int i, idx = 0; 4828 4829 port = &ports[port_id]; 4830 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4831 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 4832 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 4833 idx = i; 4834 break; 4835 } 4836 } 4837 if (i >= RTE_ETH_FLOW_MAX) { 4838 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 4839 idx = flex_conf->nb_flexmasks; 4840 flex_conf->nb_flexmasks++; 4841 } else { 4842 printf("The flex mask table is full. Can not set flex" 4843 " mask for flow_type(%u).", cfg->flow_type); 4844 return; 4845 } 4846 } 4847 rte_memcpy(&flex_conf->flex_mask[idx], 4848 cfg, 4849 sizeof(struct rte_eth_fdir_flex_mask)); 4850 } 4851 4852 void 4853 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 4854 { 4855 struct rte_port *port; 4856 struct rte_eth_fdir_flex_conf *flex_conf; 4857 int i, idx = 0; 4858 4859 port = &ports[port_id]; 4860 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 4861 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 4862 if (cfg->type == flex_conf->flex_set[i].type) { 4863 idx = i; 4864 break; 4865 } 4866 } 4867 if (i >= RTE_ETH_PAYLOAD_MAX) { 4868 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 4869 idx = flex_conf->nb_payloads; 4870 flex_conf->nb_payloads++; 4871 } else { 4872 printf("The flex payload table is full. Can not set" 4873 " flex payload for type(%u).", cfg->type); 4874 return; 4875 } 4876 } 4877 rte_memcpy(&flex_conf->flex_set[idx], 4878 cfg, 4879 sizeof(struct rte_eth_flex_payload_cfg)); 4880 4881 } 4882 4883 void 4884 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 4885 { 4886 #ifdef RTE_NET_IXGBE 4887 int diag; 4888 4889 if (is_rx) 4890 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 4891 else 4892 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 4893 4894 if (diag == 0) 4895 return; 4896 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 4897 is_rx ? "rx" : "tx", port_id, diag); 4898 return; 4899 #endif 4900 printf("VF %s setting not supported for port %d\n", 4901 is_rx ? "Rx" : "Tx", port_id); 4902 RTE_SET_USED(vf); 4903 RTE_SET_USED(on); 4904 } 4905 4906 int 4907 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 4908 { 4909 int diag; 4910 struct rte_eth_link link; 4911 int ret; 4912 4913 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4914 return 1; 4915 ret = eth_link_get_nowait_print_err(port_id, &link); 4916 if (ret < 0) 4917 return 1; 4918 if (link.link_speed != ETH_SPEED_NUM_UNKNOWN && 4919 rate > link.link_speed) { 4920 printf("Invalid rate value:%u bigger than link speed: %u\n", 4921 rate, link.link_speed); 4922 return 1; 4923 } 4924 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 4925 if (diag == 0) 4926 return diag; 4927 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 4928 port_id, diag); 4929 return diag; 4930 } 4931 4932 int 4933 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 4934 { 4935 int diag = -ENOTSUP; 4936 4937 RTE_SET_USED(vf); 4938 RTE_SET_USED(rate); 4939 RTE_SET_USED(q_msk); 4940 4941 #ifdef RTE_NET_IXGBE 4942 if (diag == -ENOTSUP) 4943 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 4944 q_msk); 4945 #endif 4946 #ifdef RTE_NET_BNXT 4947 if (diag == -ENOTSUP) 4948 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 4949 #endif 4950 if (diag == 0) 4951 return diag; 4952 4953 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 4954 port_id, diag); 4955 return diag; 4956 } 4957 4958 /* 4959 * Functions to manage the set of filtered Multicast MAC addresses. 4960 * 4961 * A pool of filtered multicast MAC addresses is associated with each port. 4962 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 4963 * The address of the pool and the number of valid multicast MAC addresses 4964 * recorded in the pool are stored in the fields "mc_addr_pool" and 4965 * "mc_addr_nb" of the "rte_port" data structure. 4966 * 4967 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 4968 * to be supplied a contiguous array of multicast MAC addresses. 4969 * To comply with this constraint, the set of multicast addresses recorded 4970 * into the pool are systematically compacted at the beginning of the pool. 4971 * Hence, when a multicast address is removed from the pool, all following 4972 * addresses, if any, are copied back to keep the set contiguous. 4973 */ 4974 #define MCAST_POOL_INC 32 4975 4976 static int 4977 mcast_addr_pool_extend(struct rte_port *port) 4978 { 4979 struct rte_ether_addr *mc_pool; 4980 size_t mc_pool_size; 4981 4982 /* 4983 * If a free entry is available at the end of the pool, just 4984 * increment the number of recorded multicast addresses. 4985 */ 4986 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 4987 port->mc_addr_nb++; 4988 return 0; 4989 } 4990 4991 /* 4992 * [re]allocate a pool with MCAST_POOL_INC more entries. 4993 * The previous test guarantees that port->mc_addr_nb is a multiple 4994 * of MCAST_POOL_INC. 4995 */ 4996 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 4997 MCAST_POOL_INC); 4998 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 4999 mc_pool_size); 5000 if (mc_pool == NULL) { 5001 printf("allocation of pool of %u multicast addresses failed\n", 5002 port->mc_addr_nb + MCAST_POOL_INC); 5003 return -ENOMEM; 5004 } 5005 5006 port->mc_addr_pool = mc_pool; 5007 port->mc_addr_nb++; 5008 return 0; 5009 5010 } 5011 5012 static void 5013 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 5014 { 5015 if (mcast_addr_pool_extend(port) != 0) 5016 return; 5017 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 5018 } 5019 5020 static void 5021 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 5022 { 5023 port->mc_addr_nb--; 5024 if (addr_idx == port->mc_addr_nb) { 5025 /* No need to recompact the set of multicast addressses. */ 5026 if (port->mc_addr_nb == 0) { 5027 /* free the pool of multicast addresses. */ 5028 free(port->mc_addr_pool); 5029 port->mc_addr_pool = NULL; 5030 } 5031 return; 5032 } 5033 memmove(&port->mc_addr_pool[addr_idx], 5034 &port->mc_addr_pool[addr_idx + 1], 5035 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 5036 } 5037 5038 static int 5039 eth_port_multicast_addr_list_set(portid_t port_id) 5040 { 5041 struct rte_port *port; 5042 int diag; 5043 5044 port = &ports[port_id]; 5045 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 5046 port->mc_addr_nb); 5047 if (diag < 0) 5048 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 5049 port_id, port->mc_addr_nb, diag); 5050 5051 return diag; 5052 } 5053 5054 void 5055 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 5056 { 5057 struct rte_port *port; 5058 uint32_t i; 5059 5060 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5061 return; 5062 5063 port = &ports[port_id]; 5064 5065 /* 5066 * Check that the added multicast MAC address is not already recorded 5067 * in the pool of multicast addresses. 5068 */ 5069 for (i = 0; i < port->mc_addr_nb; i++) { 5070 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 5071 printf("multicast address already filtered by port\n"); 5072 return; 5073 } 5074 } 5075 5076 mcast_addr_pool_append(port, mc_addr); 5077 if (eth_port_multicast_addr_list_set(port_id) < 0) 5078 /* Rollback on failure, remove the address from the pool */ 5079 mcast_addr_pool_remove(port, i); 5080 } 5081 5082 void 5083 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 5084 { 5085 struct rte_port *port; 5086 uint32_t i; 5087 5088 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5089 return; 5090 5091 port = &ports[port_id]; 5092 5093 /* 5094 * Search the pool of multicast MAC addresses for the removed address. 5095 */ 5096 for (i = 0; i < port->mc_addr_nb; i++) { 5097 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 5098 break; 5099 } 5100 if (i == port->mc_addr_nb) { 5101 printf("multicast address not filtered by port %d\n", port_id); 5102 return; 5103 } 5104 5105 mcast_addr_pool_remove(port, i); 5106 if (eth_port_multicast_addr_list_set(port_id) < 0) 5107 /* Rollback on failure, add the address back into the pool */ 5108 mcast_addr_pool_append(port, mc_addr); 5109 } 5110 5111 void 5112 port_dcb_info_display(portid_t port_id) 5113 { 5114 struct rte_eth_dcb_info dcb_info; 5115 uint16_t i; 5116 int ret; 5117 static const char *border = "================"; 5118 5119 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5120 return; 5121 5122 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 5123 if (ret) { 5124 printf("\n Failed to get dcb infos on port %-2d\n", 5125 port_id); 5126 return; 5127 } 5128 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 5129 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 5130 printf("\n TC : "); 5131 for (i = 0; i < dcb_info.nb_tcs; i++) 5132 printf("\t%4d", i); 5133 printf("\n Priority : "); 5134 for (i = 0; i < dcb_info.nb_tcs; i++) 5135 printf("\t%4d", dcb_info.prio_tc[i]); 5136 printf("\n BW percent :"); 5137 for (i = 0; i < dcb_info.nb_tcs; i++) 5138 printf("\t%4d%%", dcb_info.tc_bws[i]); 5139 printf("\n RXQ base : "); 5140 for (i = 0; i < dcb_info.nb_tcs; i++) 5141 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 5142 printf("\n RXQ number :"); 5143 for (i = 0; i < dcb_info.nb_tcs; i++) 5144 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 5145 printf("\n TXQ base : "); 5146 for (i = 0; i < dcb_info.nb_tcs; i++) 5147 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 5148 printf("\n TXQ number :"); 5149 for (i = 0; i < dcb_info.nb_tcs; i++) 5150 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 5151 printf("\n"); 5152 } 5153 5154 uint8_t * 5155 open_file(const char *file_path, uint32_t *size) 5156 { 5157 int fd = open(file_path, O_RDONLY); 5158 off_t pkg_size; 5159 uint8_t *buf = NULL; 5160 int ret = 0; 5161 struct stat st_buf; 5162 5163 if (size) 5164 *size = 0; 5165 5166 if (fd == -1) { 5167 printf("%s: Failed to open %s\n", __func__, file_path); 5168 return buf; 5169 } 5170 5171 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 5172 close(fd); 5173 printf("%s: File operations failed\n", __func__); 5174 return buf; 5175 } 5176 5177 pkg_size = st_buf.st_size; 5178 if (pkg_size < 0) { 5179 close(fd); 5180 printf("%s: File operations failed\n", __func__); 5181 return buf; 5182 } 5183 5184 buf = (uint8_t *)malloc(pkg_size); 5185 if (!buf) { 5186 close(fd); 5187 printf("%s: Failed to malloc memory\n", __func__); 5188 return buf; 5189 } 5190 5191 ret = read(fd, buf, pkg_size); 5192 if (ret < 0) { 5193 close(fd); 5194 printf("%s: File read operation failed\n", __func__); 5195 close_file(buf); 5196 return NULL; 5197 } 5198 5199 if (size) 5200 *size = pkg_size; 5201 5202 close(fd); 5203 5204 return buf; 5205 } 5206 5207 int 5208 save_file(const char *file_path, uint8_t *buf, uint32_t size) 5209 { 5210 FILE *fh = fopen(file_path, "wb"); 5211 5212 if (fh == NULL) { 5213 printf("%s: Failed to open %s\n", __func__, file_path); 5214 return -1; 5215 } 5216 5217 if (fwrite(buf, 1, size, fh) != size) { 5218 fclose(fh); 5219 printf("%s: File write operation failed\n", __func__); 5220 return -1; 5221 } 5222 5223 fclose(fh); 5224 5225 return 0; 5226 } 5227 5228 int 5229 close_file(uint8_t *buf) 5230 { 5231 if (buf) { 5232 free((void *)buf); 5233 return 0; 5234 } 5235 5236 return -1; 5237 } 5238 5239 void 5240 port_queue_region_info_display(portid_t port_id, void *buf) 5241 { 5242 #ifdef RTE_NET_I40E 5243 uint16_t i, j; 5244 struct rte_pmd_i40e_queue_regions *info = 5245 (struct rte_pmd_i40e_queue_regions *)buf; 5246 static const char *queue_region_info_stats_border = "-------"; 5247 5248 if (!info->queue_region_number) 5249 printf("there is no region has been set before"); 5250 5251 printf("\n %s All queue region info for port=%2d %s", 5252 queue_region_info_stats_border, port_id, 5253 queue_region_info_stats_border); 5254 printf("\n queue_region_number: %-14u \n", 5255 info->queue_region_number); 5256 5257 for (i = 0; i < info->queue_region_number; i++) { 5258 printf("\n region_id: %-14u queue_number: %-14u " 5259 "queue_start_index: %-14u \n", 5260 info->region[i].region_id, 5261 info->region[i].queue_num, 5262 info->region[i].queue_start_index); 5263 5264 printf(" user_priority_num is %-14u :", 5265 info->region[i].user_priority_num); 5266 for (j = 0; j < info->region[i].user_priority_num; j++) 5267 printf(" %-14u ", info->region[i].user_priority[j]); 5268 5269 printf("\n flowtype_num is %-14u :", 5270 info->region[i].flowtype_num); 5271 for (j = 0; j < info->region[i].flowtype_num; j++) 5272 printf(" %-14u ", info->region[i].hw_flowtype[j]); 5273 } 5274 #else 5275 RTE_SET_USED(port_id); 5276 RTE_SET_USED(buf); 5277 #endif 5278 5279 printf("\n\n"); 5280 } 5281 5282 void 5283 show_macs(portid_t port_id) 5284 { 5285 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5286 struct rte_eth_dev_info dev_info; 5287 struct rte_ether_addr *addr; 5288 uint32_t i, num_macs = 0; 5289 struct rte_eth_dev *dev; 5290 5291 dev = &rte_eth_devices[port_id]; 5292 5293 rte_eth_dev_info_get(port_id, &dev_info); 5294 5295 for (i = 0; i < dev_info.max_mac_addrs; i++) { 5296 addr = &dev->data->mac_addrs[i]; 5297 5298 /* skip zero address */ 5299 if (rte_is_zero_ether_addr(addr)) 5300 continue; 5301 5302 num_macs++; 5303 } 5304 5305 printf("Number of MAC address added: %d\n", num_macs); 5306 5307 for (i = 0; i < dev_info.max_mac_addrs; i++) { 5308 addr = &dev->data->mac_addrs[i]; 5309 5310 /* skip zero address */ 5311 if (rte_is_zero_ether_addr(addr)) 5312 continue; 5313 5314 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5315 printf(" %s\n", buf); 5316 } 5317 } 5318 5319 void 5320 show_mcast_macs(portid_t port_id) 5321 { 5322 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 5323 struct rte_ether_addr *addr; 5324 struct rte_port *port; 5325 uint32_t i; 5326 5327 port = &ports[port_id]; 5328 5329 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 5330 5331 for (i = 0; i < port->mc_addr_nb; i++) { 5332 addr = &port->mc_addr_pool[i]; 5333 5334 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 5335 printf(" %s\n", buf); 5336 } 5337 } 5338