1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <cmdline_parse_etheraddr.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 struct rss_type_info { 77 char str[32]; 78 uint64_t rss_type; 79 }; 80 81 static const struct rss_type_info rss_type_table[] = { 82 { "ipv4", ETH_RSS_IPV4 }, 83 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 84 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 85 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 86 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 87 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 88 { "ipv6", ETH_RSS_IPV6 }, 89 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 90 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 91 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 92 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 93 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 94 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 95 { "ipv6-ex", ETH_RSS_IPV6_EX }, 96 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 97 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 98 { "port", ETH_RSS_PORT }, 99 { "vxlan", ETH_RSS_VXLAN }, 100 { "geneve", ETH_RSS_GENEVE }, 101 { "nvgre", ETH_RSS_NVGRE }, 102 103 }; 104 105 static void 106 print_ethaddr(const char *name, struct ether_addr *eth_addr) 107 { 108 char buf[ETHER_ADDR_FMT_SIZE]; 109 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 110 printf("%s%s", name, buf); 111 } 112 113 void 114 nic_stats_display(portid_t port_id) 115 { 116 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 117 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 118 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 119 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 120 uint64_t mpps_rx, mpps_tx; 121 struct rte_eth_stats stats; 122 struct rte_port *port = &ports[port_id]; 123 uint8_t i; 124 portid_t pid; 125 126 static const char *nic_stats_border = "########################"; 127 128 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 129 printf("Valid port range is [0"); 130 RTE_ETH_FOREACH_DEV(pid) 131 printf(", %d", pid); 132 printf("]\n"); 133 return; 134 } 135 rte_eth_stats_get(port_id, &stats); 136 printf("\n %s NIC statistics for port %-2d %s\n", 137 nic_stats_border, port_id, nic_stats_border); 138 139 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 140 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 141 "%-"PRIu64"\n", 142 stats.ipackets, stats.imissed, stats.ibytes); 143 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 144 printf(" RX-nombuf: %-10"PRIu64"\n", 145 stats.rx_nombuf); 146 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 147 "%-"PRIu64"\n", 148 stats.opackets, stats.oerrors, stats.obytes); 149 } 150 else { 151 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 152 " RX-bytes: %10"PRIu64"\n", 153 stats.ipackets, stats.ierrors, stats.ibytes); 154 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 155 printf(" RX-nombuf: %10"PRIu64"\n", 156 stats.rx_nombuf); 157 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 158 " TX-bytes: %10"PRIu64"\n", 159 stats.opackets, stats.oerrors, stats.obytes); 160 } 161 162 if (port->rx_queue_stats_mapping_enabled) { 163 printf("\n"); 164 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 165 printf(" Stats reg %2d RX-packets: %10"PRIu64 166 " RX-errors: %10"PRIu64 167 " RX-bytes: %10"PRIu64"\n", 168 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 169 } 170 } 171 if (port->tx_queue_stats_mapping_enabled) { 172 printf("\n"); 173 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 174 printf(" Stats reg %2d TX-packets: %10"PRIu64 175 " TX-bytes: %10"PRIu64"\n", 176 i, stats.q_opackets[i], stats.q_obytes[i]); 177 } 178 } 179 180 diff_cycles = prev_cycles[port_id]; 181 prev_cycles[port_id] = rte_rdtsc(); 182 if (diff_cycles > 0) 183 diff_cycles = prev_cycles[port_id] - diff_cycles; 184 185 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 186 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 187 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 188 (stats.opackets - prev_pkts_tx[port_id]) : 0; 189 prev_pkts_rx[port_id] = stats.ipackets; 190 prev_pkts_tx[port_id] = stats.opackets; 191 mpps_rx = diff_cycles > 0 ? 192 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 193 mpps_tx = diff_cycles > 0 ? 194 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 195 printf("\n Throughput (since last show)\n"); 196 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 197 mpps_rx, mpps_tx); 198 199 printf(" %s############################%s\n", 200 nic_stats_border, nic_stats_border); 201 } 202 203 void 204 nic_stats_clear(portid_t port_id) 205 { 206 portid_t pid; 207 208 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 209 printf("Valid port range is [0"); 210 RTE_ETH_FOREACH_DEV(pid) 211 printf(", %d", pid); 212 printf("]\n"); 213 return; 214 } 215 rte_eth_stats_reset(port_id); 216 printf("\n NIC statistics for port %d cleared\n", port_id); 217 } 218 219 void 220 nic_xstats_display(portid_t port_id) 221 { 222 struct rte_eth_xstat *xstats; 223 int cnt_xstats, idx_xstat; 224 struct rte_eth_xstat_name *xstats_names; 225 226 printf("###### NIC extended statistics for port %-2d\n", port_id); 227 if (!rte_eth_dev_is_valid_port(port_id)) { 228 printf("Error: Invalid port number %i\n", port_id); 229 return; 230 } 231 232 /* Get count */ 233 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 234 if (cnt_xstats < 0) { 235 printf("Error: Cannot get count of xstats\n"); 236 return; 237 } 238 239 /* Get id-name lookup table */ 240 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 241 if (xstats_names == NULL) { 242 printf("Cannot allocate memory for xstats lookup\n"); 243 return; 244 } 245 if (cnt_xstats != rte_eth_xstats_get_names( 246 port_id, xstats_names, cnt_xstats)) { 247 printf("Error: Cannot get xstats lookup\n"); 248 free(xstats_names); 249 return; 250 } 251 252 /* Get stats themselves */ 253 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 254 if (xstats == NULL) { 255 printf("Cannot allocate memory for xstats\n"); 256 free(xstats_names); 257 return; 258 } 259 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 260 printf("Error: Unable to get xstats\n"); 261 free(xstats_names); 262 free(xstats); 263 return; 264 } 265 266 /* Display xstats */ 267 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 268 if (xstats_hide_zero && !xstats[idx_xstat].value) 269 continue; 270 printf("%s: %"PRIu64"\n", 271 xstats_names[idx_xstat].name, 272 xstats[idx_xstat].value); 273 } 274 free(xstats_names); 275 free(xstats); 276 } 277 278 void 279 nic_xstats_clear(portid_t port_id) 280 { 281 rte_eth_xstats_reset(port_id); 282 } 283 284 void 285 nic_stats_mapping_display(portid_t port_id) 286 { 287 struct rte_port *port = &ports[port_id]; 288 uint16_t i; 289 portid_t pid; 290 291 static const char *nic_stats_mapping_border = "########################"; 292 293 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 294 printf("Valid port range is [0"); 295 RTE_ETH_FOREACH_DEV(pid) 296 printf(", %d", pid); 297 printf("]\n"); 298 return; 299 } 300 301 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 302 printf("Port id %d - either does not support queue statistic mapping or" 303 " no queue statistic mapping set\n", port_id); 304 return; 305 } 306 307 printf("\n %s NIC statistics mapping for port %-2d %s\n", 308 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 309 310 if (port->rx_queue_stats_mapping_enabled) { 311 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 312 if (rx_queue_stats_mappings[i].port_id == port_id) { 313 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 314 rx_queue_stats_mappings[i].queue_id, 315 rx_queue_stats_mappings[i].stats_counter_id); 316 } 317 } 318 printf("\n"); 319 } 320 321 322 if (port->tx_queue_stats_mapping_enabled) { 323 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 324 if (tx_queue_stats_mappings[i].port_id == port_id) { 325 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 326 tx_queue_stats_mappings[i].queue_id, 327 tx_queue_stats_mappings[i].stats_counter_id); 328 } 329 } 330 } 331 332 printf(" %s####################################%s\n", 333 nic_stats_mapping_border, nic_stats_mapping_border); 334 } 335 336 void 337 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 338 { 339 struct rte_eth_rxq_info qinfo; 340 int32_t rc; 341 static const char *info_border = "*********************"; 342 343 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 344 if (rc != 0) { 345 printf("Failed to retrieve information for port: %u, " 346 "RX queue: %hu\nerror desc: %s(%d)\n", 347 port_id, queue_id, strerror(-rc), rc); 348 return; 349 } 350 351 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 352 info_border, port_id, queue_id, info_border); 353 354 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 355 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 356 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 357 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 358 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 359 printf("\nRX drop packets: %s", 360 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 361 printf("\nRX deferred start: %s", 362 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 363 printf("\nRX scattered packets: %s", 364 (qinfo.scattered_rx != 0) ? "on" : "off"); 365 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 366 printf("\n"); 367 } 368 369 void 370 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 371 { 372 struct rte_eth_txq_info qinfo; 373 int32_t rc; 374 static const char *info_border = "*********************"; 375 376 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 377 if (rc != 0) { 378 printf("Failed to retrieve information for port: %u, " 379 "TX queue: %hu\nerror desc: %s(%d)\n", 380 port_id, queue_id, strerror(-rc), rc); 381 return; 382 } 383 384 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 385 info_border, port_id, queue_id, info_border); 386 387 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 388 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 389 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 390 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 391 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 392 printf("\nTX deferred start: %s", 393 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 394 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 395 printf("\n"); 396 } 397 398 void 399 port_infos_display(portid_t port_id) 400 { 401 struct rte_port *port; 402 struct ether_addr mac_addr; 403 struct rte_eth_link link; 404 struct rte_eth_dev_info dev_info; 405 int vlan_offload; 406 struct rte_mempool * mp; 407 static const char *info_border = "*********************"; 408 portid_t pid; 409 uint16_t mtu; 410 411 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 412 printf("Valid port range is [0"); 413 RTE_ETH_FOREACH_DEV(pid) 414 printf(", %d", pid); 415 printf("]\n"); 416 return; 417 } 418 port = &ports[port_id]; 419 rte_eth_link_get_nowait(port_id, &link); 420 memset(&dev_info, 0, sizeof(dev_info)); 421 rte_eth_dev_info_get(port_id, &dev_info); 422 printf("\n%s Infos for port %-2d %s\n", 423 info_border, port_id, info_border); 424 rte_eth_macaddr_get(port_id, &mac_addr); 425 print_ethaddr("MAC address: ", &mac_addr); 426 printf("\nDriver name: %s", dev_info.driver_name); 427 printf("\nConnect to socket: %u", port->socket_id); 428 429 if (port_numa[port_id] != NUMA_NO_CONFIG) { 430 mp = mbuf_pool_find(port_numa[port_id]); 431 if (mp) 432 printf("\nmemory allocation on the socket: %d", 433 port_numa[port_id]); 434 } else 435 printf("\nmemory allocation on the socket: %u",port->socket_id); 436 437 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 438 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 439 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 440 ("full-duplex") : ("half-duplex")); 441 442 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 443 printf("MTU: %u\n", mtu); 444 445 printf("Promiscuous mode: %s\n", 446 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 447 printf("Allmulticast mode: %s\n", 448 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 449 printf("Maximum number of MAC addresses: %u\n", 450 (unsigned int)(port->dev_info.max_mac_addrs)); 451 printf("Maximum number of MAC addresses of hash filtering: %u\n", 452 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 453 454 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 455 if (vlan_offload >= 0){ 456 printf("VLAN offload: \n"); 457 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 458 printf(" strip on \n"); 459 else 460 printf(" strip off \n"); 461 462 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 463 printf(" filter on \n"); 464 else 465 printf(" filter off \n"); 466 467 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 468 printf(" qinq(extend) on \n"); 469 else 470 printf(" qinq(extend) off \n"); 471 } 472 473 if (dev_info.hash_key_size > 0) 474 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 475 if (dev_info.reta_size > 0) 476 printf("Redirection table size: %u\n", dev_info.reta_size); 477 if (!dev_info.flow_type_rss_offloads) 478 printf("No flow type is supported.\n"); 479 else { 480 uint16_t i; 481 char *p; 482 483 printf("Supported flow types:\n"); 484 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 485 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 486 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 487 continue; 488 p = flowtype_to_str(i); 489 if (p) 490 printf(" %s\n", p); 491 else 492 printf(" user defined %d\n", i); 493 } 494 } 495 496 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 497 printf("Maximum configurable length of RX packet: %u\n", 498 dev_info.max_rx_pktlen); 499 if (dev_info.max_vfs) 500 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 501 if (dev_info.max_vmdq_pools) 502 printf("Maximum number of VMDq pools: %u\n", 503 dev_info.max_vmdq_pools); 504 505 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 506 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 507 printf("Max possible number of RXDs per queue: %hu\n", 508 dev_info.rx_desc_lim.nb_max); 509 printf("Min possible number of RXDs per queue: %hu\n", 510 dev_info.rx_desc_lim.nb_min); 511 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 512 513 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 514 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 515 printf("Max possible number of TXDs per queue: %hu\n", 516 dev_info.tx_desc_lim.nb_max); 517 printf("Min possible number of TXDs per queue: %hu\n", 518 dev_info.tx_desc_lim.nb_min); 519 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 520 } 521 522 void 523 port_offload_cap_display(portid_t port_id) 524 { 525 struct rte_eth_dev_info dev_info; 526 static const char *info_border = "************"; 527 528 if (port_id_is_invalid(port_id, ENABLED_WARN)) 529 return; 530 531 rte_eth_dev_info_get(port_id, &dev_info); 532 533 printf("\n%s Port %d supported offload features: %s\n", 534 info_border, port_id, info_border); 535 536 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 537 printf("VLAN stripped: "); 538 if (ports[port_id].dev_conf.rxmode.offloads & 539 DEV_RX_OFFLOAD_VLAN_STRIP) 540 printf("on\n"); 541 else 542 printf("off\n"); 543 } 544 545 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 546 printf("Double VLANs stripped: "); 547 if (ports[port_id].dev_conf.rxmode.offloads & 548 DEV_RX_OFFLOAD_VLAN_EXTEND) 549 printf("on\n"); 550 else 551 printf("off\n"); 552 } 553 554 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 555 printf("RX IPv4 checksum: "); 556 if (ports[port_id].dev_conf.rxmode.offloads & 557 DEV_RX_OFFLOAD_IPV4_CKSUM) 558 printf("on\n"); 559 else 560 printf("off\n"); 561 } 562 563 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 564 printf("RX UDP checksum: "); 565 if (ports[port_id].dev_conf.rxmode.offloads & 566 DEV_RX_OFFLOAD_UDP_CKSUM) 567 printf("on\n"); 568 else 569 printf("off\n"); 570 } 571 572 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 573 printf("RX TCP checksum: "); 574 if (ports[port_id].dev_conf.rxmode.offloads & 575 DEV_RX_OFFLOAD_TCP_CKSUM) 576 printf("on\n"); 577 else 578 printf("off\n"); 579 } 580 581 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 582 printf("RX Outer IPv4 checksum: "); 583 if (ports[port_id].dev_conf.rxmode.offloads & 584 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 585 printf("on\n"); 586 else 587 printf("off\n"); 588 } 589 590 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 591 printf("Large receive offload: "); 592 if (ports[port_id].dev_conf.rxmode.offloads & 593 DEV_RX_OFFLOAD_TCP_LRO) 594 printf("on\n"); 595 else 596 printf("off\n"); 597 } 598 599 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 600 printf("VLAN insert: "); 601 if (ports[port_id].dev_conf.txmode.offloads & 602 DEV_TX_OFFLOAD_VLAN_INSERT) 603 printf("on\n"); 604 else 605 printf("off\n"); 606 } 607 608 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 609 printf("HW timestamp: "); 610 if (ports[port_id].dev_conf.rxmode.offloads & 611 DEV_RX_OFFLOAD_TIMESTAMP) 612 printf("on\n"); 613 else 614 printf("off\n"); 615 } 616 617 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 618 printf("Double VLANs insert: "); 619 if (ports[port_id].dev_conf.txmode.offloads & 620 DEV_TX_OFFLOAD_QINQ_INSERT) 621 printf("on\n"); 622 else 623 printf("off\n"); 624 } 625 626 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 627 printf("TX IPv4 checksum: "); 628 if (ports[port_id].dev_conf.txmode.offloads & 629 DEV_TX_OFFLOAD_IPV4_CKSUM) 630 printf("on\n"); 631 else 632 printf("off\n"); 633 } 634 635 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 636 printf("TX UDP checksum: "); 637 if (ports[port_id].dev_conf.txmode.offloads & 638 DEV_TX_OFFLOAD_UDP_CKSUM) 639 printf("on\n"); 640 else 641 printf("off\n"); 642 } 643 644 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 645 printf("TX TCP checksum: "); 646 if (ports[port_id].dev_conf.txmode.offloads & 647 DEV_TX_OFFLOAD_TCP_CKSUM) 648 printf("on\n"); 649 else 650 printf("off\n"); 651 } 652 653 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 654 printf("TX SCTP checksum: "); 655 if (ports[port_id].dev_conf.txmode.offloads & 656 DEV_TX_OFFLOAD_SCTP_CKSUM) 657 printf("on\n"); 658 else 659 printf("off\n"); 660 } 661 662 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 663 printf("TX Outer IPv4 checksum: "); 664 if (ports[port_id].dev_conf.txmode.offloads & 665 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 666 printf("on\n"); 667 else 668 printf("off\n"); 669 } 670 671 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 672 printf("TX TCP segmentation: "); 673 if (ports[port_id].dev_conf.txmode.offloads & 674 DEV_TX_OFFLOAD_TCP_TSO) 675 printf("on\n"); 676 else 677 printf("off\n"); 678 } 679 680 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 681 printf("TX UDP segmentation: "); 682 if (ports[port_id].dev_conf.txmode.offloads & 683 DEV_TX_OFFLOAD_UDP_TSO) 684 printf("on\n"); 685 else 686 printf("off\n"); 687 } 688 689 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 690 printf("TSO for VXLAN tunnel packet: "); 691 if (ports[port_id].dev_conf.txmode.offloads & 692 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 693 printf("on\n"); 694 else 695 printf("off\n"); 696 } 697 698 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 699 printf("TSO for GRE tunnel packet: "); 700 if (ports[port_id].dev_conf.txmode.offloads & 701 DEV_TX_OFFLOAD_GRE_TNL_TSO) 702 printf("on\n"); 703 else 704 printf("off\n"); 705 } 706 707 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 708 printf("TSO for IPIP tunnel packet: "); 709 if (ports[port_id].dev_conf.txmode.offloads & 710 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 711 printf("on\n"); 712 else 713 printf("off\n"); 714 } 715 716 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 717 printf("TSO for GENEVE tunnel packet: "); 718 if (ports[port_id].dev_conf.txmode.offloads & 719 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 720 printf("on\n"); 721 else 722 printf("off\n"); 723 } 724 725 } 726 727 int 728 port_id_is_invalid(portid_t port_id, enum print_warning warning) 729 { 730 uint16_t pid; 731 732 if (port_id == (portid_t)RTE_PORT_ALL) 733 return 0; 734 735 RTE_ETH_FOREACH_DEV(pid) 736 if (port_id == pid) 737 return 0; 738 739 if (warning == ENABLED_WARN) 740 printf("Invalid port %d\n", port_id); 741 742 return 1; 743 } 744 745 static int 746 vlan_id_is_invalid(uint16_t vlan_id) 747 { 748 if (vlan_id < 4096) 749 return 0; 750 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 751 return 1; 752 } 753 754 static int 755 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 756 { 757 const struct rte_pci_device *pci_dev; 758 const struct rte_bus *bus; 759 uint64_t pci_len; 760 761 if (reg_off & 0x3) { 762 printf("Port register offset 0x%X not aligned on a 4-byte " 763 "boundary\n", 764 (unsigned)reg_off); 765 return 1; 766 } 767 768 if (!ports[port_id].dev_info.device) { 769 printf("Invalid device\n"); 770 return 0; 771 } 772 773 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 774 if (bus && !strcmp(bus->name, "pci")) { 775 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 776 } else { 777 printf("Not a PCI device\n"); 778 return 1; 779 } 780 781 pci_len = pci_dev->mem_resource[0].len; 782 if (reg_off >= pci_len) { 783 printf("Port %d: register offset %u (0x%X) out of port PCI " 784 "resource (length=%"PRIu64")\n", 785 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 786 return 1; 787 } 788 return 0; 789 } 790 791 static int 792 reg_bit_pos_is_invalid(uint8_t bit_pos) 793 { 794 if (bit_pos <= 31) 795 return 0; 796 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 797 return 1; 798 } 799 800 #define display_port_and_reg_off(port_id, reg_off) \ 801 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 802 803 static inline void 804 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 805 { 806 display_port_and_reg_off(port_id, (unsigned)reg_off); 807 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 808 } 809 810 void 811 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 812 { 813 uint32_t reg_v; 814 815 816 if (port_id_is_invalid(port_id, ENABLED_WARN)) 817 return; 818 if (port_reg_off_is_invalid(port_id, reg_off)) 819 return; 820 if (reg_bit_pos_is_invalid(bit_x)) 821 return; 822 reg_v = port_id_pci_reg_read(port_id, reg_off); 823 display_port_and_reg_off(port_id, (unsigned)reg_off); 824 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 825 } 826 827 void 828 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 829 uint8_t bit1_pos, uint8_t bit2_pos) 830 { 831 uint32_t reg_v; 832 uint8_t l_bit; 833 uint8_t h_bit; 834 835 if (port_id_is_invalid(port_id, ENABLED_WARN)) 836 return; 837 if (port_reg_off_is_invalid(port_id, reg_off)) 838 return; 839 if (reg_bit_pos_is_invalid(bit1_pos)) 840 return; 841 if (reg_bit_pos_is_invalid(bit2_pos)) 842 return; 843 if (bit1_pos > bit2_pos) 844 l_bit = bit2_pos, h_bit = bit1_pos; 845 else 846 l_bit = bit1_pos, h_bit = bit2_pos; 847 848 reg_v = port_id_pci_reg_read(port_id, reg_off); 849 reg_v >>= l_bit; 850 if (h_bit < 31) 851 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 852 display_port_and_reg_off(port_id, (unsigned)reg_off); 853 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 854 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 855 } 856 857 void 858 port_reg_display(portid_t port_id, uint32_t reg_off) 859 { 860 uint32_t reg_v; 861 862 if (port_id_is_invalid(port_id, ENABLED_WARN)) 863 return; 864 if (port_reg_off_is_invalid(port_id, reg_off)) 865 return; 866 reg_v = port_id_pci_reg_read(port_id, reg_off); 867 display_port_reg_value(port_id, reg_off, reg_v); 868 } 869 870 void 871 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 872 uint8_t bit_v) 873 { 874 uint32_t reg_v; 875 876 if (port_id_is_invalid(port_id, ENABLED_WARN)) 877 return; 878 if (port_reg_off_is_invalid(port_id, reg_off)) 879 return; 880 if (reg_bit_pos_is_invalid(bit_pos)) 881 return; 882 if (bit_v > 1) { 883 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 884 return; 885 } 886 reg_v = port_id_pci_reg_read(port_id, reg_off); 887 if (bit_v == 0) 888 reg_v &= ~(1 << bit_pos); 889 else 890 reg_v |= (1 << bit_pos); 891 port_id_pci_reg_write(port_id, reg_off, reg_v); 892 display_port_reg_value(port_id, reg_off, reg_v); 893 } 894 895 void 896 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 897 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 898 { 899 uint32_t max_v; 900 uint32_t reg_v; 901 uint8_t l_bit; 902 uint8_t h_bit; 903 904 if (port_id_is_invalid(port_id, ENABLED_WARN)) 905 return; 906 if (port_reg_off_is_invalid(port_id, reg_off)) 907 return; 908 if (reg_bit_pos_is_invalid(bit1_pos)) 909 return; 910 if (reg_bit_pos_is_invalid(bit2_pos)) 911 return; 912 if (bit1_pos > bit2_pos) 913 l_bit = bit2_pos, h_bit = bit1_pos; 914 else 915 l_bit = bit1_pos, h_bit = bit2_pos; 916 917 if ((h_bit - l_bit) < 31) 918 max_v = (1 << (h_bit - l_bit + 1)) - 1; 919 else 920 max_v = 0xFFFFFFFF; 921 922 if (value > max_v) { 923 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 924 (unsigned)value, (unsigned)value, 925 (unsigned)max_v, (unsigned)max_v); 926 return; 927 } 928 reg_v = port_id_pci_reg_read(port_id, reg_off); 929 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 930 reg_v |= (value << l_bit); /* Set changed bits */ 931 port_id_pci_reg_write(port_id, reg_off, reg_v); 932 display_port_reg_value(port_id, reg_off, reg_v); 933 } 934 935 void 936 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 937 { 938 if (port_id_is_invalid(port_id, ENABLED_WARN)) 939 return; 940 if (port_reg_off_is_invalid(port_id, reg_off)) 941 return; 942 port_id_pci_reg_write(port_id, reg_off, reg_v); 943 display_port_reg_value(port_id, reg_off, reg_v); 944 } 945 946 void 947 port_mtu_set(portid_t port_id, uint16_t mtu) 948 { 949 int diag; 950 951 if (port_id_is_invalid(port_id, ENABLED_WARN)) 952 return; 953 diag = rte_eth_dev_set_mtu(port_id, mtu); 954 if (diag == 0) 955 return; 956 printf("Set MTU failed. diag=%d\n", diag); 957 } 958 959 /* Generic flow management functions. */ 960 961 /** Generate flow_item[] entry. */ 962 #define MK_FLOW_ITEM(t, s) \ 963 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 964 .name = # t, \ 965 .size = s, \ 966 } 967 968 /** Information about known flow pattern items. */ 969 static const struct { 970 const char *name; 971 size_t size; 972 } flow_item[] = { 973 MK_FLOW_ITEM(END, 0), 974 MK_FLOW_ITEM(VOID, 0), 975 MK_FLOW_ITEM(INVERT, 0), 976 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 977 MK_FLOW_ITEM(PF, 0), 978 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 979 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)), 980 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */ 981 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 982 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 983 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 984 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 985 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 986 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 987 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 988 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 989 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 990 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), 991 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), 992 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 993 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 994 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)), 995 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), 996 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), 997 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), 998 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), 999 }; 1000 1001 /** Compute storage space needed by item specification. */ 1002 static void 1003 flow_item_spec_size(const struct rte_flow_item *item, 1004 size_t *size, size_t *pad) 1005 { 1006 if (!item->spec) { 1007 *size = 0; 1008 goto empty; 1009 } 1010 switch (item->type) { 1011 union { 1012 const struct rte_flow_item_raw *raw; 1013 } spec; 1014 1015 case RTE_FLOW_ITEM_TYPE_RAW: 1016 spec.raw = item->spec; 1017 *size = offsetof(struct rte_flow_item_raw, pattern) + 1018 spec.raw->length * sizeof(*spec.raw->pattern); 1019 break; 1020 default: 1021 *size = flow_item[item->type].size; 1022 break; 1023 } 1024 empty: 1025 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1026 } 1027 1028 /** Generate flow_action[] entry. */ 1029 #define MK_FLOW_ACTION(t, s) \ 1030 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 1031 .name = # t, \ 1032 .size = s, \ 1033 } 1034 1035 /** Information about known flow actions. */ 1036 static const struct { 1037 const char *name; 1038 size_t size; 1039 } flow_action[] = { 1040 MK_FLOW_ACTION(END, 0), 1041 MK_FLOW_ACTION(VOID, 0), 1042 MK_FLOW_ACTION(PASSTHRU, 0), 1043 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 1044 MK_FLOW_ACTION(FLAG, 0), 1045 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 1046 MK_FLOW_ACTION(DROP, 0), 1047 MK_FLOW_ACTION(COUNT, 0), 1048 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)), 1049 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ 1050 MK_FLOW_ACTION(PF, 0), 1051 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 1052 MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)), 1053 }; 1054 1055 /** Compute storage space needed by action configuration. */ 1056 static void 1057 flow_action_conf_size(const struct rte_flow_action *action, 1058 size_t *size, size_t *pad) 1059 { 1060 if (!action->conf) { 1061 *size = 0; 1062 goto empty; 1063 } 1064 switch (action->type) { 1065 union { 1066 const struct rte_flow_action_rss *rss; 1067 } conf; 1068 1069 case RTE_FLOW_ACTION_TYPE_RSS: 1070 conf.rss = action->conf; 1071 *size = offsetof(struct rte_flow_action_rss, queue) + 1072 conf.rss->num * sizeof(*conf.rss->queue); 1073 break; 1074 default: 1075 *size = flow_action[action->type].size; 1076 break; 1077 } 1078 empty: 1079 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1080 } 1081 1082 /** Generate a port_flow entry from attributes/pattern/actions. */ 1083 static struct port_flow * 1084 port_flow_new(const struct rte_flow_attr *attr, 1085 const struct rte_flow_item *pattern, 1086 const struct rte_flow_action *actions) 1087 { 1088 const struct rte_flow_item *item; 1089 const struct rte_flow_action *action; 1090 struct port_flow *pf = NULL; 1091 size_t tmp; 1092 size_t pad; 1093 size_t off1 = 0; 1094 size_t off2 = 0; 1095 int err = ENOTSUP; 1096 1097 store: 1098 item = pattern; 1099 if (pf) 1100 pf->pattern = (void *)&pf->data[off1]; 1101 do { 1102 struct rte_flow_item *dst = NULL; 1103 1104 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 1105 !flow_item[item->type].name) 1106 goto notsup; 1107 if (pf) 1108 dst = memcpy(pf->data + off1, item, sizeof(*item)); 1109 off1 += sizeof(*item); 1110 flow_item_spec_size(item, &tmp, &pad); 1111 if (item->spec) { 1112 if (pf) 1113 dst->spec = memcpy(pf->data + off2, 1114 item->spec, tmp); 1115 off2 += tmp + pad; 1116 } 1117 if (item->last) { 1118 if (pf) 1119 dst->last = memcpy(pf->data + off2, 1120 item->last, tmp); 1121 off2 += tmp + pad; 1122 } 1123 if (item->mask) { 1124 if (pf) 1125 dst->mask = memcpy(pf->data + off2, 1126 item->mask, tmp); 1127 off2 += tmp + pad; 1128 } 1129 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1130 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 1131 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1132 action = actions; 1133 if (pf) 1134 pf->actions = (void *)&pf->data[off1]; 1135 do { 1136 struct rte_flow_action *dst = NULL; 1137 1138 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1139 !flow_action[action->type].name) 1140 goto notsup; 1141 if (pf) 1142 dst = memcpy(pf->data + off1, action, sizeof(*action)); 1143 off1 += sizeof(*action); 1144 flow_action_conf_size(action, &tmp, &pad); 1145 if (action->conf) { 1146 if (pf) 1147 dst->conf = memcpy(pf->data + off2, 1148 action->conf, tmp); 1149 off2 += tmp + pad; 1150 } 1151 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1152 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 1153 if (pf != NULL) 1154 return pf; 1155 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1156 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 1157 pf = calloc(1, tmp + off1 + off2); 1158 if (pf == NULL) 1159 err = errno; 1160 else { 1161 *pf = (const struct port_flow){ 1162 .size = tmp + off1 + off2, 1163 .attr = *attr, 1164 }; 1165 tmp -= offsetof(struct port_flow, data); 1166 off2 = tmp + off1; 1167 off1 = tmp; 1168 goto store; 1169 } 1170 notsup: 1171 rte_errno = err; 1172 return NULL; 1173 } 1174 1175 /** Print a message out of a flow error. */ 1176 static int 1177 port_flow_complain(struct rte_flow_error *error) 1178 { 1179 static const char *const errstrlist[] = { 1180 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1181 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1182 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1183 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1184 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1185 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1186 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1187 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1188 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1189 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1190 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1191 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1192 }; 1193 const char *errstr; 1194 char buf[32]; 1195 int err = rte_errno; 1196 1197 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1198 !errstrlist[error->type]) 1199 errstr = "unknown type"; 1200 else 1201 errstr = errstrlist[error->type]; 1202 printf("Caught error type %d (%s): %s%s\n", 1203 error->type, errstr, 1204 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1205 error->cause), buf) : "", 1206 error->message ? error->message : "(no stated reason)"); 1207 return -err; 1208 } 1209 1210 /** Validate flow rule. */ 1211 int 1212 port_flow_validate(portid_t port_id, 1213 const struct rte_flow_attr *attr, 1214 const struct rte_flow_item *pattern, 1215 const struct rte_flow_action *actions) 1216 { 1217 struct rte_flow_error error; 1218 1219 /* Poisoning to make sure PMDs update it in case of error. */ 1220 memset(&error, 0x11, sizeof(error)); 1221 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1222 return port_flow_complain(&error); 1223 printf("Flow rule validated\n"); 1224 return 0; 1225 } 1226 1227 /** Create flow rule. */ 1228 int 1229 port_flow_create(portid_t port_id, 1230 const struct rte_flow_attr *attr, 1231 const struct rte_flow_item *pattern, 1232 const struct rte_flow_action *actions) 1233 { 1234 struct rte_flow *flow; 1235 struct rte_port *port; 1236 struct port_flow *pf; 1237 uint32_t id; 1238 struct rte_flow_error error; 1239 1240 /* Poisoning to make sure PMDs update it in case of error. */ 1241 memset(&error, 0x22, sizeof(error)); 1242 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1243 if (!flow) 1244 return port_flow_complain(&error); 1245 port = &ports[port_id]; 1246 if (port->flow_list) { 1247 if (port->flow_list->id == UINT32_MAX) { 1248 printf("Highest rule ID is already assigned, delete" 1249 " it first"); 1250 rte_flow_destroy(port_id, flow, NULL); 1251 return -ENOMEM; 1252 } 1253 id = port->flow_list->id + 1; 1254 } else 1255 id = 0; 1256 pf = port_flow_new(attr, pattern, actions); 1257 if (!pf) { 1258 int err = rte_errno; 1259 1260 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1261 rte_flow_destroy(port_id, flow, NULL); 1262 return -err; 1263 } 1264 pf->next = port->flow_list; 1265 pf->id = id; 1266 pf->flow = flow; 1267 port->flow_list = pf; 1268 printf("Flow rule #%u created\n", pf->id); 1269 return 0; 1270 } 1271 1272 /** Destroy a number of flow rules. */ 1273 int 1274 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1275 { 1276 struct rte_port *port; 1277 struct port_flow **tmp; 1278 uint32_t c = 0; 1279 int ret = 0; 1280 1281 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1282 port_id == (portid_t)RTE_PORT_ALL) 1283 return -EINVAL; 1284 port = &ports[port_id]; 1285 tmp = &port->flow_list; 1286 while (*tmp) { 1287 uint32_t i; 1288 1289 for (i = 0; i != n; ++i) { 1290 struct rte_flow_error error; 1291 struct port_flow *pf = *tmp; 1292 1293 if (rule[i] != pf->id) 1294 continue; 1295 /* 1296 * Poisoning to make sure PMDs update it in case 1297 * of error. 1298 */ 1299 memset(&error, 0x33, sizeof(error)); 1300 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1301 ret = port_flow_complain(&error); 1302 continue; 1303 } 1304 printf("Flow rule #%u destroyed\n", pf->id); 1305 *tmp = pf->next; 1306 free(pf); 1307 break; 1308 } 1309 if (i == n) 1310 tmp = &(*tmp)->next; 1311 ++c; 1312 } 1313 return ret; 1314 } 1315 1316 /** Remove all flow rules. */ 1317 int 1318 port_flow_flush(portid_t port_id) 1319 { 1320 struct rte_flow_error error; 1321 struct rte_port *port; 1322 int ret = 0; 1323 1324 /* Poisoning to make sure PMDs update it in case of error. */ 1325 memset(&error, 0x44, sizeof(error)); 1326 if (rte_flow_flush(port_id, &error)) { 1327 ret = port_flow_complain(&error); 1328 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1329 port_id == (portid_t)RTE_PORT_ALL) 1330 return ret; 1331 } 1332 port = &ports[port_id]; 1333 while (port->flow_list) { 1334 struct port_flow *pf = port->flow_list->next; 1335 1336 free(port->flow_list); 1337 port->flow_list = pf; 1338 } 1339 return ret; 1340 } 1341 1342 /** Query a flow rule. */ 1343 int 1344 port_flow_query(portid_t port_id, uint32_t rule, 1345 enum rte_flow_action_type action) 1346 { 1347 struct rte_flow_error error; 1348 struct rte_port *port; 1349 struct port_flow *pf; 1350 const char *name; 1351 union { 1352 struct rte_flow_query_count count; 1353 } query; 1354 1355 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1356 port_id == (portid_t)RTE_PORT_ALL) 1357 return -EINVAL; 1358 port = &ports[port_id]; 1359 for (pf = port->flow_list; pf; pf = pf->next) 1360 if (pf->id == rule) 1361 break; 1362 if (!pf) { 1363 printf("Flow rule #%u not found\n", rule); 1364 return -ENOENT; 1365 } 1366 if ((unsigned int)action >= RTE_DIM(flow_action) || 1367 !flow_action[action].name) 1368 name = "unknown"; 1369 else 1370 name = flow_action[action].name; 1371 switch (action) { 1372 case RTE_FLOW_ACTION_TYPE_COUNT: 1373 break; 1374 default: 1375 printf("Cannot query action type %d (%s)\n", action, name); 1376 return -ENOTSUP; 1377 } 1378 /* Poisoning to make sure PMDs update it in case of error. */ 1379 memset(&error, 0x55, sizeof(error)); 1380 memset(&query, 0, sizeof(query)); 1381 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1382 return port_flow_complain(&error); 1383 switch (action) { 1384 case RTE_FLOW_ACTION_TYPE_COUNT: 1385 printf("%s:\n" 1386 " hits_set: %u\n" 1387 " bytes_set: %u\n" 1388 " hits: %" PRIu64 "\n" 1389 " bytes: %" PRIu64 "\n", 1390 name, 1391 query.count.hits_set, 1392 query.count.bytes_set, 1393 query.count.hits, 1394 query.count.bytes); 1395 break; 1396 default: 1397 printf("Cannot display result for action type %d (%s)\n", 1398 action, name); 1399 break; 1400 } 1401 return 0; 1402 } 1403 1404 /** List flow rules. */ 1405 void 1406 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1407 { 1408 struct rte_port *port; 1409 struct port_flow *pf; 1410 struct port_flow *list = NULL; 1411 uint32_t i; 1412 1413 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1414 port_id == (portid_t)RTE_PORT_ALL) 1415 return; 1416 port = &ports[port_id]; 1417 if (!port->flow_list) 1418 return; 1419 /* Sort flows by group, priority and ID. */ 1420 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1421 struct port_flow **tmp; 1422 1423 if (n) { 1424 /* Filter out unwanted groups. */ 1425 for (i = 0; i != n; ++i) 1426 if (pf->attr.group == group[i]) 1427 break; 1428 if (i == n) 1429 continue; 1430 } 1431 tmp = &list; 1432 while (*tmp && 1433 (pf->attr.group > (*tmp)->attr.group || 1434 (pf->attr.group == (*tmp)->attr.group && 1435 pf->attr.priority > (*tmp)->attr.priority) || 1436 (pf->attr.group == (*tmp)->attr.group && 1437 pf->attr.priority == (*tmp)->attr.priority && 1438 pf->id > (*tmp)->id))) 1439 tmp = &(*tmp)->tmp; 1440 pf->tmp = *tmp; 1441 *tmp = pf; 1442 } 1443 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1444 for (pf = list; pf != NULL; pf = pf->tmp) { 1445 const struct rte_flow_item *item = pf->pattern; 1446 const struct rte_flow_action *action = pf->actions; 1447 1448 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t", 1449 pf->id, 1450 pf->attr.group, 1451 pf->attr.priority, 1452 pf->attr.ingress ? 'i' : '-', 1453 pf->attr.egress ? 'e' : '-'); 1454 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1455 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1456 printf("%s ", flow_item[item->type].name); 1457 ++item; 1458 } 1459 printf("=>"); 1460 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1461 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1462 printf(" %s", flow_action[action->type].name); 1463 ++action; 1464 } 1465 printf("\n"); 1466 } 1467 } 1468 1469 /** Restrict ingress traffic to the defined flow rules. */ 1470 int 1471 port_flow_isolate(portid_t port_id, int set) 1472 { 1473 struct rte_flow_error error; 1474 1475 /* Poisoning to make sure PMDs update it in case of error. */ 1476 memset(&error, 0x66, sizeof(error)); 1477 if (rte_flow_isolate(port_id, set, &error)) 1478 return port_flow_complain(&error); 1479 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1480 port_id, 1481 set ? "now restricted" : "not restricted anymore"); 1482 return 0; 1483 } 1484 1485 /* 1486 * RX/TX ring descriptors display functions. 1487 */ 1488 int 1489 rx_queue_id_is_invalid(queueid_t rxq_id) 1490 { 1491 if (rxq_id < nb_rxq) 1492 return 0; 1493 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1494 return 1; 1495 } 1496 1497 int 1498 tx_queue_id_is_invalid(queueid_t txq_id) 1499 { 1500 if (txq_id < nb_txq) 1501 return 0; 1502 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1503 return 1; 1504 } 1505 1506 static int 1507 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1508 { 1509 if (rxdesc_id < nb_rxd) 1510 return 0; 1511 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1512 rxdesc_id, nb_rxd); 1513 return 1; 1514 } 1515 1516 static int 1517 tx_desc_id_is_invalid(uint16_t txdesc_id) 1518 { 1519 if (txdesc_id < nb_txd) 1520 return 0; 1521 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1522 txdesc_id, nb_txd); 1523 return 1; 1524 } 1525 1526 static const struct rte_memzone * 1527 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1528 { 1529 char mz_name[RTE_MEMZONE_NAMESIZE]; 1530 const struct rte_memzone *mz; 1531 1532 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1533 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1534 mz = rte_memzone_lookup(mz_name); 1535 if (mz == NULL) 1536 printf("%s ring memory zoneof (port %d, queue %d) not" 1537 "found (zone name = %s\n", 1538 ring_name, port_id, q_id, mz_name); 1539 return mz; 1540 } 1541 1542 union igb_ring_dword { 1543 uint64_t dword; 1544 struct { 1545 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1546 uint32_t lo; 1547 uint32_t hi; 1548 #else 1549 uint32_t hi; 1550 uint32_t lo; 1551 #endif 1552 } words; 1553 }; 1554 1555 struct igb_ring_desc_32_bytes { 1556 union igb_ring_dword lo_dword; 1557 union igb_ring_dword hi_dword; 1558 union igb_ring_dword resv1; 1559 union igb_ring_dword resv2; 1560 }; 1561 1562 struct igb_ring_desc_16_bytes { 1563 union igb_ring_dword lo_dword; 1564 union igb_ring_dword hi_dword; 1565 }; 1566 1567 static void 1568 ring_rxd_display_dword(union igb_ring_dword dword) 1569 { 1570 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1571 (unsigned)dword.words.hi); 1572 } 1573 1574 static void 1575 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1576 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1577 portid_t port_id, 1578 #else 1579 __rte_unused portid_t port_id, 1580 #endif 1581 uint16_t desc_id) 1582 { 1583 struct igb_ring_desc_16_bytes *ring = 1584 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1585 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1586 struct rte_eth_dev_info dev_info; 1587 1588 memset(&dev_info, 0, sizeof(dev_info)); 1589 rte_eth_dev_info_get(port_id, &dev_info); 1590 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1591 /* 32 bytes RX descriptor, i40e only */ 1592 struct igb_ring_desc_32_bytes *ring = 1593 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1594 ring[desc_id].lo_dword.dword = 1595 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1596 ring_rxd_display_dword(ring[desc_id].lo_dword); 1597 ring[desc_id].hi_dword.dword = 1598 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1599 ring_rxd_display_dword(ring[desc_id].hi_dword); 1600 ring[desc_id].resv1.dword = 1601 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1602 ring_rxd_display_dword(ring[desc_id].resv1); 1603 ring[desc_id].resv2.dword = 1604 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1605 ring_rxd_display_dword(ring[desc_id].resv2); 1606 1607 return; 1608 } 1609 #endif 1610 /* 16 bytes RX descriptor */ 1611 ring[desc_id].lo_dword.dword = 1612 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1613 ring_rxd_display_dword(ring[desc_id].lo_dword); 1614 ring[desc_id].hi_dword.dword = 1615 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1616 ring_rxd_display_dword(ring[desc_id].hi_dword); 1617 } 1618 1619 static void 1620 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1621 { 1622 struct igb_ring_desc_16_bytes *ring; 1623 struct igb_ring_desc_16_bytes txd; 1624 1625 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1626 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1627 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1628 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1629 (unsigned)txd.lo_dword.words.lo, 1630 (unsigned)txd.lo_dword.words.hi, 1631 (unsigned)txd.hi_dword.words.lo, 1632 (unsigned)txd.hi_dword.words.hi); 1633 } 1634 1635 void 1636 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1637 { 1638 const struct rte_memzone *rx_mz; 1639 1640 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1641 return; 1642 if (rx_queue_id_is_invalid(rxq_id)) 1643 return; 1644 if (rx_desc_id_is_invalid(rxd_id)) 1645 return; 1646 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1647 if (rx_mz == NULL) 1648 return; 1649 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1650 } 1651 1652 void 1653 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1654 { 1655 const struct rte_memzone *tx_mz; 1656 1657 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1658 return; 1659 if (tx_queue_id_is_invalid(txq_id)) 1660 return; 1661 if (tx_desc_id_is_invalid(txd_id)) 1662 return; 1663 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1664 if (tx_mz == NULL) 1665 return; 1666 ring_tx_descriptor_display(tx_mz, txd_id); 1667 } 1668 1669 void 1670 fwd_lcores_config_display(void) 1671 { 1672 lcoreid_t lc_id; 1673 1674 printf("List of forwarding lcores:"); 1675 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1676 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1677 printf("\n"); 1678 } 1679 void 1680 rxtx_config_display(void) 1681 { 1682 portid_t pid; 1683 1684 printf(" %s packet forwarding%s packets/burst=%d\n", 1685 cur_fwd_eng->fwd_mode_name, 1686 retry_enabled == 0 ? "" : " with retry", 1687 nb_pkt_per_burst); 1688 1689 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1690 printf(" packet len=%u - nb packet segments=%d\n", 1691 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1692 1693 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1694 nb_fwd_lcores, nb_fwd_ports); 1695 1696 RTE_ETH_FOREACH_DEV(pid) { 1697 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf; 1698 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf; 1699 1700 printf(" port %d:\n", (unsigned int)pid); 1701 printf(" CRC stripping %s\n", 1702 (ports[pid].dev_conf.rxmode.offloads & 1703 DEV_RX_OFFLOAD_CRC_STRIP) ? 1704 "enabled" : "disabled"); 1705 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 1706 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 1707 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1708 " wthresh=%d\n", 1709 rx_conf->rx_thresh.pthresh, 1710 rx_conf->rx_thresh.hthresh, 1711 rx_conf->rx_thresh.wthresh); 1712 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 1713 nb_txq, nb_txd, tx_conf->tx_free_thresh); 1714 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1715 " wthresh=%d\n", 1716 tx_conf->tx_thresh.pthresh, 1717 tx_conf->tx_thresh.hthresh, 1718 tx_conf->tx_thresh.wthresh); 1719 printf(" TX RS bit threshold=%d - TXQ offloads=0x%"PRIx64"\n", 1720 tx_conf->tx_rs_thresh, tx_conf->offloads); 1721 } 1722 } 1723 1724 void 1725 port_rss_reta_info(portid_t port_id, 1726 struct rte_eth_rss_reta_entry64 *reta_conf, 1727 uint16_t nb_entries) 1728 { 1729 uint16_t i, idx, shift; 1730 int ret; 1731 1732 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1733 return; 1734 1735 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1736 if (ret != 0) { 1737 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1738 return; 1739 } 1740 1741 for (i = 0; i < nb_entries; i++) { 1742 idx = i / RTE_RETA_GROUP_SIZE; 1743 shift = i % RTE_RETA_GROUP_SIZE; 1744 if (!(reta_conf[idx].mask & (1ULL << shift))) 1745 continue; 1746 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1747 i, reta_conf[idx].reta[shift]); 1748 } 1749 } 1750 1751 /* 1752 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1753 * key of the port. 1754 */ 1755 void 1756 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1757 { 1758 struct rte_eth_rss_conf rss_conf; 1759 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1760 uint64_t rss_hf; 1761 uint8_t i; 1762 int diag; 1763 struct rte_eth_dev_info dev_info; 1764 uint8_t hash_key_size; 1765 1766 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1767 return; 1768 1769 memset(&dev_info, 0, sizeof(dev_info)); 1770 rte_eth_dev_info_get(port_id, &dev_info); 1771 if (dev_info.hash_key_size > 0 && 1772 dev_info.hash_key_size <= sizeof(rss_key)) 1773 hash_key_size = dev_info.hash_key_size; 1774 else { 1775 printf("dev_info did not provide a valid hash key size\n"); 1776 return; 1777 } 1778 1779 rss_conf.rss_hf = 0; 1780 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1781 if (!strcmp(rss_info, rss_type_table[i].str)) 1782 rss_conf.rss_hf = rss_type_table[i].rss_type; 1783 } 1784 1785 /* Get RSS hash key if asked to display it */ 1786 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1787 rss_conf.rss_key_len = hash_key_size; 1788 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1789 if (diag != 0) { 1790 switch (diag) { 1791 case -ENODEV: 1792 printf("port index %d invalid\n", port_id); 1793 break; 1794 case -ENOTSUP: 1795 printf("operation not supported by device\n"); 1796 break; 1797 default: 1798 printf("operation failed - diag=%d\n", diag); 1799 break; 1800 } 1801 return; 1802 } 1803 rss_hf = rss_conf.rss_hf; 1804 if (rss_hf == 0) { 1805 printf("RSS disabled\n"); 1806 return; 1807 } 1808 printf("RSS functions:\n "); 1809 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1810 if (rss_hf & rss_type_table[i].rss_type) 1811 printf("%s ", rss_type_table[i].str); 1812 } 1813 printf("\n"); 1814 if (!show_rss_key) 1815 return; 1816 printf("RSS key:\n"); 1817 for (i = 0; i < hash_key_size; i++) 1818 printf("%02X", rss_key[i]); 1819 printf("\n"); 1820 } 1821 1822 void 1823 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1824 uint hash_key_len) 1825 { 1826 struct rte_eth_rss_conf rss_conf; 1827 int diag; 1828 unsigned int i; 1829 1830 rss_conf.rss_key = NULL; 1831 rss_conf.rss_key_len = hash_key_len; 1832 rss_conf.rss_hf = 0; 1833 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1834 if (!strcmp(rss_type_table[i].str, rss_type)) 1835 rss_conf.rss_hf = rss_type_table[i].rss_type; 1836 } 1837 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1838 if (diag == 0) { 1839 rss_conf.rss_key = hash_key; 1840 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1841 } 1842 if (diag == 0) 1843 return; 1844 1845 switch (diag) { 1846 case -ENODEV: 1847 printf("port index %d invalid\n", port_id); 1848 break; 1849 case -ENOTSUP: 1850 printf("operation not supported by device\n"); 1851 break; 1852 default: 1853 printf("operation failed - diag=%d\n", diag); 1854 break; 1855 } 1856 } 1857 1858 /* 1859 * Setup forwarding configuration for each logical core. 1860 */ 1861 static void 1862 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1863 { 1864 streamid_t nb_fs_per_lcore; 1865 streamid_t nb_fs; 1866 streamid_t sm_id; 1867 lcoreid_t nb_extra; 1868 lcoreid_t nb_fc; 1869 lcoreid_t nb_lc; 1870 lcoreid_t lc_id; 1871 1872 nb_fs = cfg->nb_fwd_streams; 1873 nb_fc = cfg->nb_fwd_lcores; 1874 if (nb_fs <= nb_fc) { 1875 nb_fs_per_lcore = 1; 1876 nb_extra = 0; 1877 } else { 1878 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1879 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1880 } 1881 1882 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1883 sm_id = 0; 1884 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1885 fwd_lcores[lc_id]->stream_idx = sm_id; 1886 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1887 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1888 } 1889 1890 /* 1891 * Assign extra remaining streams, if any. 1892 */ 1893 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1894 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1895 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1896 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1897 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1898 } 1899 } 1900 1901 static portid_t 1902 fwd_topology_tx_port_get(portid_t rxp) 1903 { 1904 static int warning_once = 1; 1905 1906 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 1907 1908 switch (port_topology) { 1909 default: 1910 case PORT_TOPOLOGY_PAIRED: 1911 if ((rxp & 0x1) == 0) { 1912 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 1913 return rxp + 1; 1914 if (warning_once) { 1915 printf("\nWarning! port-topology=paired" 1916 " and odd forward ports number," 1917 " the last port will pair with" 1918 " itself.\n\n"); 1919 warning_once = 0; 1920 } 1921 return rxp; 1922 } 1923 return rxp - 1; 1924 case PORT_TOPOLOGY_CHAINED: 1925 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 1926 case PORT_TOPOLOGY_LOOP: 1927 return rxp; 1928 } 1929 } 1930 1931 static void 1932 simple_fwd_config_setup(void) 1933 { 1934 portid_t i; 1935 1936 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1937 cur_fwd_config.nb_fwd_streams = 1938 (streamid_t) cur_fwd_config.nb_fwd_ports; 1939 1940 /* reinitialize forwarding streams */ 1941 init_fwd_streams(); 1942 1943 /* 1944 * In the simple forwarding test, the number of forwarding cores 1945 * must be lower or equal to the number of forwarding ports. 1946 */ 1947 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1948 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1949 cur_fwd_config.nb_fwd_lcores = 1950 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1951 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1952 1953 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1954 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1955 fwd_streams[i]->rx_queue = 0; 1956 fwd_streams[i]->tx_port = 1957 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 1958 fwd_streams[i]->tx_queue = 0; 1959 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 1960 fwd_streams[i]->retry_enabled = retry_enabled; 1961 } 1962 } 1963 1964 /** 1965 * For the RSS forwarding test all streams distributed over lcores. Each stream 1966 * being composed of a RX queue to poll on a RX port for input messages, 1967 * associated with a TX queue of a TX port where to send forwarded packets. 1968 */ 1969 static void 1970 rss_fwd_config_setup(void) 1971 { 1972 portid_t rxp; 1973 portid_t txp; 1974 queueid_t rxq; 1975 queueid_t nb_q; 1976 streamid_t sm_id; 1977 1978 nb_q = nb_rxq; 1979 if (nb_q > nb_txq) 1980 nb_q = nb_txq; 1981 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1982 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1983 cur_fwd_config.nb_fwd_streams = 1984 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1985 1986 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1987 cur_fwd_config.nb_fwd_lcores = 1988 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1989 1990 /* reinitialize forwarding streams */ 1991 init_fwd_streams(); 1992 1993 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1994 rxp = 0; rxq = 0; 1995 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1996 struct fwd_stream *fs; 1997 1998 fs = fwd_streams[sm_id]; 1999 txp = fwd_topology_tx_port_get(rxp); 2000 fs->rx_port = fwd_ports_ids[rxp]; 2001 fs->rx_queue = rxq; 2002 fs->tx_port = fwd_ports_ids[txp]; 2003 fs->tx_queue = rxq; 2004 fs->peer_addr = fs->tx_port; 2005 fs->retry_enabled = retry_enabled; 2006 rxq = (queueid_t) (rxq + 1); 2007 if (rxq < nb_q) 2008 continue; 2009 /* 2010 * rxq == nb_q 2011 * Restart from RX queue 0 on next RX port 2012 */ 2013 rxq = 0; 2014 rxp++; 2015 } 2016 } 2017 2018 /** 2019 * For the DCB forwarding test, each core is assigned on each traffic class. 2020 * 2021 * Each core is assigned a multi-stream, each stream being composed of 2022 * a RX queue to poll on a RX port for input messages, associated with 2023 * a TX queue of a TX port where to send forwarded packets. All RX and 2024 * TX queues are mapping to the same traffic class. 2025 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2026 * the same core 2027 */ 2028 static void 2029 dcb_fwd_config_setup(void) 2030 { 2031 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2032 portid_t txp, rxp = 0; 2033 queueid_t txq, rxq = 0; 2034 lcoreid_t lc_id; 2035 uint16_t nb_rx_queue, nb_tx_queue; 2036 uint16_t i, j, k, sm_id = 0; 2037 uint8_t tc = 0; 2038 2039 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2040 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2041 cur_fwd_config.nb_fwd_streams = 2042 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2043 2044 /* reinitialize forwarding streams */ 2045 init_fwd_streams(); 2046 sm_id = 0; 2047 txp = 1; 2048 /* get the dcb info on the first RX and TX ports */ 2049 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2050 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2051 2052 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2053 fwd_lcores[lc_id]->stream_nb = 0; 2054 fwd_lcores[lc_id]->stream_idx = sm_id; 2055 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2056 /* if the nb_queue is zero, means this tc is 2057 * not enabled on the POOL 2058 */ 2059 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2060 break; 2061 k = fwd_lcores[lc_id]->stream_nb + 2062 fwd_lcores[lc_id]->stream_idx; 2063 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2064 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2065 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2066 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2067 for (j = 0; j < nb_rx_queue; j++) { 2068 struct fwd_stream *fs; 2069 2070 fs = fwd_streams[k + j]; 2071 fs->rx_port = fwd_ports_ids[rxp]; 2072 fs->rx_queue = rxq + j; 2073 fs->tx_port = fwd_ports_ids[txp]; 2074 fs->tx_queue = txq + j % nb_tx_queue; 2075 fs->peer_addr = fs->tx_port; 2076 fs->retry_enabled = retry_enabled; 2077 } 2078 fwd_lcores[lc_id]->stream_nb += 2079 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2080 } 2081 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2082 2083 tc++; 2084 if (tc < rxp_dcb_info.nb_tcs) 2085 continue; 2086 /* Restart from TC 0 on next RX port */ 2087 tc = 0; 2088 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2089 rxp = (portid_t) 2090 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2091 else 2092 rxp++; 2093 if (rxp >= nb_fwd_ports) 2094 return; 2095 /* get the dcb information on next RX and TX ports */ 2096 if ((rxp & 0x1) == 0) 2097 txp = (portid_t) (rxp + 1); 2098 else 2099 txp = (portid_t) (rxp - 1); 2100 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2101 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2102 } 2103 } 2104 2105 static void 2106 icmp_echo_config_setup(void) 2107 { 2108 portid_t rxp; 2109 queueid_t rxq; 2110 lcoreid_t lc_id; 2111 uint16_t sm_id; 2112 2113 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2114 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2115 (nb_txq * nb_fwd_ports); 2116 else 2117 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2118 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2119 cur_fwd_config.nb_fwd_streams = 2120 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2121 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2122 cur_fwd_config.nb_fwd_lcores = 2123 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2124 if (verbose_level > 0) { 2125 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2126 __FUNCTION__, 2127 cur_fwd_config.nb_fwd_lcores, 2128 cur_fwd_config.nb_fwd_ports, 2129 cur_fwd_config.nb_fwd_streams); 2130 } 2131 2132 /* reinitialize forwarding streams */ 2133 init_fwd_streams(); 2134 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2135 rxp = 0; rxq = 0; 2136 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2137 if (verbose_level > 0) 2138 printf(" core=%d: \n", lc_id); 2139 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2140 struct fwd_stream *fs; 2141 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2142 fs->rx_port = fwd_ports_ids[rxp]; 2143 fs->rx_queue = rxq; 2144 fs->tx_port = fs->rx_port; 2145 fs->tx_queue = rxq; 2146 fs->peer_addr = fs->tx_port; 2147 fs->retry_enabled = retry_enabled; 2148 if (verbose_level > 0) 2149 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2150 sm_id, fs->rx_port, fs->rx_queue, 2151 fs->tx_queue); 2152 rxq = (queueid_t) (rxq + 1); 2153 if (rxq == nb_rxq) { 2154 rxq = 0; 2155 rxp = (portid_t) (rxp + 1); 2156 } 2157 } 2158 } 2159 } 2160 2161 void 2162 fwd_config_setup(void) 2163 { 2164 cur_fwd_config.fwd_eng = cur_fwd_eng; 2165 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2166 icmp_echo_config_setup(); 2167 return; 2168 } 2169 if ((nb_rxq > 1) && (nb_txq > 1)){ 2170 if (dcb_config) 2171 dcb_fwd_config_setup(); 2172 else 2173 rss_fwd_config_setup(); 2174 } 2175 else 2176 simple_fwd_config_setup(); 2177 } 2178 2179 void 2180 pkt_fwd_config_display(struct fwd_config *cfg) 2181 { 2182 struct fwd_stream *fs; 2183 lcoreid_t lc_id; 2184 streamid_t sm_id; 2185 2186 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2187 "NUMA support %s, MP over anonymous pages %s\n", 2188 cfg->fwd_eng->fwd_mode_name, 2189 retry_enabled == 0 ? "" : " with retry", 2190 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2191 numa_support == 1 ? "enabled" : "disabled", 2192 mp_anon != 0 ? "enabled" : "disabled"); 2193 2194 if (retry_enabled) 2195 printf("TX retry num: %u, delay between TX retries: %uus\n", 2196 burst_tx_retry_num, burst_tx_delay_time); 2197 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2198 printf("Logical Core %u (socket %u) forwards packets on " 2199 "%d streams:", 2200 fwd_lcores_cpuids[lc_id], 2201 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2202 fwd_lcores[lc_id]->stream_nb); 2203 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2204 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2205 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2206 "P=%d/Q=%d (socket %u) ", 2207 fs->rx_port, fs->rx_queue, 2208 ports[fs->rx_port].socket_id, 2209 fs->tx_port, fs->tx_queue, 2210 ports[fs->tx_port].socket_id); 2211 print_ethaddr("peer=", 2212 &peer_eth_addrs[fs->peer_addr]); 2213 } 2214 printf("\n"); 2215 } 2216 printf("\n"); 2217 } 2218 2219 void 2220 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2221 { 2222 uint8_t c, new_peer_addr[6]; 2223 if (!rte_eth_dev_is_valid_port(port_id)) { 2224 printf("Error: Invalid port number %i\n", port_id); 2225 return; 2226 } 2227 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2228 sizeof(new_peer_addr)) < 0) { 2229 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2230 return; 2231 } 2232 for (c = 0; c < 6; c++) 2233 peer_eth_addrs[port_id].addr_bytes[c] = 2234 new_peer_addr[c]; 2235 } 2236 2237 int 2238 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2239 { 2240 unsigned int i; 2241 unsigned int lcore_cpuid; 2242 int record_now; 2243 2244 record_now = 0; 2245 again: 2246 for (i = 0; i < nb_lc; i++) { 2247 lcore_cpuid = lcorelist[i]; 2248 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2249 printf("lcore %u not enabled\n", lcore_cpuid); 2250 return -1; 2251 } 2252 if (lcore_cpuid == rte_get_master_lcore()) { 2253 printf("lcore %u cannot be masked on for running " 2254 "packet forwarding, which is the master lcore " 2255 "and reserved for command line parsing only\n", 2256 lcore_cpuid); 2257 return -1; 2258 } 2259 if (record_now) 2260 fwd_lcores_cpuids[i] = lcore_cpuid; 2261 } 2262 if (record_now == 0) { 2263 record_now = 1; 2264 goto again; 2265 } 2266 nb_cfg_lcores = (lcoreid_t) nb_lc; 2267 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2268 printf("previous number of forwarding cores %u - changed to " 2269 "number of configured cores %u\n", 2270 (unsigned int) nb_fwd_lcores, nb_lc); 2271 nb_fwd_lcores = (lcoreid_t) nb_lc; 2272 } 2273 2274 return 0; 2275 } 2276 2277 int 2278 set_fwd_lcores_mask(uint64_t lcoremask) 2279 { 2280 unsigned int lcorelist[64]; 2281 unsigned int nb_lc; 2282 unsigned int i; 2283 2284 if (lcoremask == 0) { 2285 printf("Invalid NULL mask of cores\n"); 2286 return -1; 2287 } 2288 nb_lc = 0; 2289 for (i = 0; i < 64; i++) { 2290 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2291 continue; 2292 lcorelist[nb_lc++] = i; 2293 } 2294 return set_fwd_lcores_list(lcorelist, nb_lc); 2295 } 2296 2297 void 2298 set_fwd_lcores_number(uint16_t nb_lc) 2299 { 2300 if (nb_lc > nb_cfg_lcores) { 2301 printf("nb fwd cores %u > %u (max. number of configured " 2302 "lcores) - ignored\n", 2303 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2304 return; 2305 } 2306 nb_fwd_lcores = (lcoreid_t) nb_lc; 2307 printf("Number of forwarding cores set to %u\n", 2308 (unsigned int) nb_fwd_lcores); 2309 } 2310 2311 void 2312 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2313 { 2314 unsigned int i; 2315 portid_t port_id; 2316 int record_now; 2317 2318 record_now = 0; 2319 again: 2320 for (i = 0; i < nb_pt; i++) { 2321 port_id = (portid_t) portlist[i]; 2322 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2323 return; 2324 if (record_now) 2325 fwd_ports_ids[i] = port_id; 2326 } 2327 if (record_now == 0) { 2328 record_now = 1; 2329 goto again; 2330 } 2331 nb_cfg_ports = (portid_t) nb_pt; 2332 if (nb_fwd_ports != (portid_t) nb_pt) { 2333 printf("previous number of forwarding ports %u - changed to " 2334 "number of configured ports %u\n", 2335 (unsigned int) nb_fwd_ports, nb_pt); 2336 nb_fwd_ports = (portid_t) nb_pt; 2337 } 2338 } 2339 2340 void 2341 set_fwd_ports_mask(uint64_t portmask) 2342 { 2343 unsigned int portlist[64]; 2344 unsigned int nb_pt; 2345 unsigned int i; 2346 2347 if (portmask == 0) { 2348 printf("Invalid NULL mask of ports\n"); 2349 return; 2350 } 2351 nb_pt = 0; 2352 RTE_ETH_FOREACH_DEV(i) { 2353 if (! ((uint64_t)(1ULL << i) & portmask)) 2354 continue; 2355 portlist[nb_pt++] = i; 2356 } 2357 set_fwd_ports_list(portlist, nb_pt); 2358 } 2359 2360 void 2361 set_fwd_ports_number(uint16_t nb_pt) 2362 { 2363 if (nb_pt > nb_cfg_ports) { 2364 printf("nb fwd ports %u > %u (number of configured " 2365 "ports) - ignored\n", 2366 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2367 return; 2368 } 2369 nb_fwd_ports = (portid_t) nb_pt; 2370 printf("Number of forwarding ports set to %u\n", 2371 (unsigned int) nb_fwd_ports); 2372 } 2373 2374 int 2375 port_is_forwarding(portid_t port_id) 2376 { 2377 unsigned int i; 2378 2379 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2380 return -1; 2381 2382 for (i = 0; i < nb_fwd_ports; i++) { 2383 if (fwd_ports_ids[i] == port_id) 2384 return 1; 2385 } 2386 2387 return 0; 2388 } 2389 2390 void 2391 set_nb_pkt_per_burst(uint16_t nb) 2392 { 2393 if (nb > MAX_PKT_BURST) { 2394 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2395 " ignored\n", 2396 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2397 return; 2398 } 2399 nb_pkt_per_burst = nb; 2400 printf("Number of packets per burst set to %u\n", 2401 (unsigned int) nb_pkt_per_burst); 2402 } 2403 2404 static const char * 2405 tx_split_get_name(enum tx_pkt_split split) 2406 { 2407 uint32_t i; 2408 2409 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2410 if (tx_split_name[i].split == split) 2411 return tx_split_name[i].name; 2412 } 2413 return NULL; 2414 } 2415 2416 void 2417 set_tx_pkt_split(const char *name) 2418 { 2419 uint32_t i; 2420 2421 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2422 if (strcmp(tx_split_name[i].name, name) == 0) { 2423 tx_pkt_split = tx_split_name[i].split; 2424 return; 2425 } 2426 } 2427 printf("unknown value: \"%s\"\n", name); 2428 } 2429 2430 void 2431 show_tx_pkt_segments(void) 2432 { 2433 uint32_t i, n; 2434 const char *split; 2435 2436 n = tx_pkt_nb_segs; 2437 split = tx_split_get_name(tx_pkt_split); 2438 2439 printf("Number of segments: %u\n", n); 2440 printf("Segment sizes: "); 2441 for (i = 0; i != n - 1; i++) 2442 printf("%hu,", tx_pkt_seg_lengths[i]); 2443 printf("%hu\n", tx_pkt_seg_lengths[i]); 2444 printf("Split packet: %s\n", split); 2445 } 2446 2447 void 2448 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2449 { 2450 uint16_t tx_pkt_len; 2451 unsigned i; 2452 2453 if (nb_segs >= (unsigned) nb_txd) { 2454 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2455 nb_segs, (unsigned int) nb_txd); 2456 return; 2457 } 2458 2459 /* 2460 * Check that each segment length is greater or equal than 2461 * the mbuf data sise. 2462 * Check also that the total packet length is greater or equal than the 2463 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2464 */ 2465 tx_pkt_len = 0; 2466 for (i = 0; i < nb_segs; i++) { 2467 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2468 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2469 i, seg_lengths[i], (unsigned) mbuf_data_size); 2470 return; 2471 } 2472 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2473 } 2474 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2475 printf("total packet length=%u < %d - give up\n", 2476 (unsigned) tx_pkt_len, 2477 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2478 return; 2479 } 2480 2481 for (i = 0; i < nb_segs; i++) 2482 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2483 2484 tx_pkt_length = tx_pkt_len; 2485 tx_pkt_nb_segs = (uint8_t) nb_segs; 2486 } 2487 2488 void 2489 setup_gro(const char *onoff, portid_t port_id) 2490 { 2491 if (!rte_eth_dev_is_valid_port(port_id)) { 2492 printf("invalid port id %u\n", port_id); 2493 return; 2494 } 2495 if (test_done == 0) { 2496 printf("Before enable/disable GRO," 2497 " please stop forwarding first\n"); 2498 return; 2499 } 2500 if (strcmp(onoff, "on") == 0) { 2501 if (gro_ports[port_id].enable != 0) { 2502 printf("Port %u has enabled GRO. Please" 2503 " disable GRO first\n", port_id); 2504 return; 2505 } 2506 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2507 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2508 gro_ports[port_id].param.max_flow_num = 2509 GRO_DEFAULT_FLOW_NUM; 2510 gro_ports[port_id].param.max_item_per_flow = 2511 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2512 } 2513 gro_ports[port_id].enable = 1; 2514 } else { 2515 if (gro_ports[port_id].enable == 0) { 2516 printf("Port %u has disabled GRO\n", port_id); 2517 return; 2518 } 2519 gro_ports[port_id].enable = 0; 2520 } 2521 } 2522 2523 void 2524 setup_gro_flush_cycles(uint8_t cycles) 2525 { 2526 if (test_done == 0) { 2527 printf("Before change flush interval for GRO," 2528 " please stop forwarding first.\n"); 2529 return; 2530 } 2531 2532 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2533 GRO_DEFAULT_FLUSH_CYCLES) { 2534 printf("The flushing cycle be in the range" 2535 " of 1 to %u. Revert to the default" 2536 " value %u.\n", 2537 GRO_MAX_FLUSH_CYCLES, 2538 GRO_DEFAULT_FLUSH_CYCLES); 2539 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2540 } 2541 2542 gro_flush_cycles = cycles; 2543 } 2544 2545 void 2546 show_gro(portid_t port_id) 2547 { 2548 struct rte_gro_param *param; 2549 uint32_t max_pkts_num; 2550 2551 param = &gro_ports[port_id].param; 2552 2553 if (!rte_eth_dev_is_valid_port(port_id)) { 2554 printf("Invalid port id %u.\n", port_id); 2555 return; 2556 } 2557 if (gro_ports[port_id].enable) { 2558 printf("GRO type: TCP/IPv4\n"); 2559 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2560 max_pkts_num = param->max_flow_num * 2561 param->max_item_per_flow; 2562 } else 2563 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2564 printf("Max number of packets to perform GRO: %u\n", 2565 max_pkts_num); 2566 printf("Flushing cycles: %u\n", gro_flush_cycles); 2567 } else 2568 printf("Port %u doesn't enable GRO.\n", port_id); 2569 } 2570 2571 void 2572 setup_gso(const char *mode, portid_t port_id) 2573 { 2574 if (!rte_eth_dev_is_valid_port(port_id)) { 2575 printf("invalid port id %u\n", port_id); 2576 return; 2577 } 2578 if (strcmp(mode, "on") == 0) { 2579 if (test_done == 0) { 2580 printf("before enabling GSO," 2581 " please stop forwarding first\n"); 2582 return; 2583 } 2584 gso_ports[port_id].enable = 1; 2585 } else if (strcmp(mode, "off") == 0) { 2586 if (test_done == 0) { 2587 printf("before disabling GSO," 2588 " please stop forwarding first\n"); 2589 return; 2590 } 2591 gso_ports[port_id].enable = 0; 2592 } 2593 } 2594 2595 char* 2596 list_pkt_forwarding_modes(void) 2597 { 2598 static char fwd_modes[128] = ""; 2599 const char *separator = "|"; 2600 struct fwd_engine *fwd_eng; 2601 unsigned i = 0; 2602 2603 if (strlen (fwd_modes) == 0) { 2604 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2605 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2606 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2607 strncat(fwd_modes, separator, 2608 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2609 } 2610 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2611 } 2612 2613 return fwd_modes; 2614 } 2615 2616 char* 2617 list_pkt_forwarding_retry_modes(void) 2618 { 2619 static char fwd_modes[128] = ""; 2620 const char *separator = "|"; 2621 struct fwd_engine *fwd_eng; 2622 unsigned i = 0; 2623 2624 if (strlen(fwd_modes) == 0) { 2625 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2626 if (fwd_eng == &rx_only_engine) 2627 continue; 2628 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2629 sizeof(fwd_modes) - 2630 strlen(fwd_modes) - 1); 2631 strncat(fwd_modes, separator, 2632 sizeof(fwd_modes) - 2633 strlen(fwd_modes) - 1); 2634 } 2635 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2636 } 2637 2638 return fwd_modes; 2639 } 2640 2641 void 2642 set_pkt_forwarding_mode(const char *fwd_mode_name) 2643 { 2644 struct fwd_engine *fwd_eng; 2645 unsigned i; 2646 2647 i = 0; 2648 while ((fwd_eng = fwd_engines[i]) != NULL) { 2649 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2650 printf("Set %s packet forwarding mode%s\n", 2651 fwd_mode_name, 2652 retry_enabled == 0 ? "" : " with retry"); 2653 cur_fwd_eng = fwd_eng; 2654 return; 2655 } 2656 i++; 2657 } 2658 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2659 } 2660 2661 void 2662 set_verbose_level(uint16_t vb_level) 2663 { 2664 printf("Change verbose level from %u to %u\n", 2665 (unsigned int) verbose_level, (unsigned int) vb_level); 2666 verbose_level = vb_level; 2667 } 2668 2669 void 2670 vlan_extend_set(portid_t port_id, int on) 2671 { 2672 int diag; 2673 int vlan_offload; 2674 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2675 2676 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2677 return; 2678 2679 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2680 2681 if (on) { 2682 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2683 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2684 } else { 2685 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2686 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2687 } 2688 2689 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2690 if (diag < 0) 2691 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2692 "diag=%d\n", port_id, on, diag); 2693 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2694 } 2695 2696 void 2697 rx_vlan_strip_set(portid_t port_id, int on) 2698 { 2699 int diag; 2700 int vlan_offload; 2701 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2702 2703 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2704 return; 2705 2706 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2707 2708 if (on) { 2709 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2710 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2711 } else { 2712 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2713 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2714 } 2715 2716 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2717 if (diag < 0) 2718 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2719 "diag=%d\n", port_id, on, diag); 2720 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2721 } 2722 2723 void 2724 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2725 { 2726 int diag; 2727 2728 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2729 return; 2730 2731 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2732 if (diag < 0) 2733 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2734 "diag=%d\n", port_id, queue_id, on, diag); 2735 } 2736 2737 void 2738 rx_vlan_filter_set(portid_t port_id, int on) 2739 { 2740 int diag; 2741 int vlan_offload; 2742 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2743 2744 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2745 return; 2746 2747 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2748 2749 if (on) { 2750 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2751 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2752 } else { 2753 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2754 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2755 } 2756 2757 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2758 if (diag < 0) 2759 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2760 "diag=%d\n", port_id, on, diag); 2761 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2762 } 2763 2764 int 2765 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2766 { 2767 int diag; 2768 2769 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2770 return 1; 2771 if (vlan_id_is_invalid(vlan_id)) 2772 return 1; 2773 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2774 if (diag == 0) 2775 return 0; 2776 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2777 "diag=%d\n", 2778 port_id, vlan_id, on, diag); 2779 return -1; 2780 } 2781 2782 void 2783 rx_vlan_all_filter_set(portid_t port_id, int on) 2784 { 2785 uint16_t vlan_id; 2786 2787 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2788 return; 2789 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2790 if (rx_vft_set(port_id, vlan_id, on)) 2791 break; 2792 } 2793 } 2794 2795 void 2796 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2797 { 2798 int diag; 2799 2800 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2801 return; 2802 2803 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2804 if (diag == 0) 2805 return; 2806 2807 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2808 "diag=%d\n", 2809 port_id, vlan_type, tp_id, diag); 2810 } 2811 2812 void 2813 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2814 { 2815 int vlan_offload; 2816 struct rte_eth_dev_info dev_info; 2817 2818 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2819 return; 2820 if (vlan_id_is_invalid(vlan_id)) 2821 return; 2822 2823 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2824 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2825 printf("Error, as QinQ has been enabled.\n"); 2826 return; 2827 } 2828 rte_eth_dev_info_get(port_id, &dev_info); 2829 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 2830 printf("Error: vlan insert is not supported by port %d\n", 2831 port_id); 2832 return; 2833 } 2834 2835 tx_vlan_reset(port_id); 2836 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 2837 ports[port_id].tx_vlan_id = vlan_id; 2838 } 2839 2840 void 2841 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2842 { 2843 int vlan_offload; 2844 struct rte_eth_dev_info dev_info; 2845 2846 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2847 return; 2848 if (vlan_id_is_invalid(vlan_id)) 2849 return; 2850 if (vlan_id_is_invalid(vlan_id_outer)) 2851 return; 2852 2853 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2854 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2855 printf("Error, as QinQ hasn't been enabled.\n"); 2856 return; 2857 } 2858 rte_eth_dev_info_get(port_id, &dev_info); 2859 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 2860 printf("Error: qinq insert not supported by port %d\n", 2861 port_id); 2862 return; 2863 } 2864 2865 tx_vlan_reset(port_id); 2866 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; 2867 ports[port_id].tx_vlan_id = vlan_id; 2868 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2869 } 2870 2871 void 2872 tx_vlan_reset(portid_t port_id) 2873 { 2874 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2875 return; 2876 ports[port_id].dev_conf.txmode.offloads &= 2877 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 2878 DEV_TX_OFFLOAD_QINQ_INSERT); 2879 ports[port_id].tx_vlan_id = 0; 2880 ports[port_id].tx_vlan_id_outer = 0; 2881 } 2882 2883 void 2884 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2885 { 2886 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2887 return; 2888 2889 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2890 } 2891 2892 void 2893 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2894 { 2895 uint16_t i; 2896 uint8_t existing_mapping_found = 0; 2897 2898 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2899 return; 2900 2901 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2902 return; 2903 2904 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2905 printf("map_value not in required range 0..%d\n", 2906 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2907 return; 2908 } 2909 2910 if (!is_rx) { /*then tx*/ 2911 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2912 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2913 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2914 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2915 existing_mapping_found = 1; 2916 break; 2917 } 2918 } 2919 if (!existing_mapping_found) { /* A new additional mapping... */ 2920 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2921 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2922 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2923 nb_tx_queue_stats_mappings++; 2924 } 2925 } 2926 else { /*rx*/ 2927 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2928 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2929 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2930 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2931 existing_mapping_found = 1; 2932 break; 2933 } 2934 } 2935 if (!existing_mapping_found) { /* A new additional mapping... */ 2936 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2937 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2938 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2939 nb_rx_queue_stats_mappings++; 2940 } 2941 } 2942 } 2943 2944 void 2945 set_xstats_hide_zero(uint8_t on_off) 2946 { 2947 xstats_hide_zero = on_off; 2948 } 2949 2950 static inline void 2951 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2952 { 2953 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 2954 2955 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2956 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 2957 " tunnel_id: 0x%08x", 2958 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2959 rte_be_to_cpu_32(mask->tunnel_id_mask)); 2960 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2961 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 2962 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 2963 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 2964 2965 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 2966 rte_be_to_cpu_16(mask->src_port_mask), 2967 rte_be_to_cpu_16(mask->dst_port_mask)); 2968 2969 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2970 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 2971 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 2972 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 2973 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 2974 2975 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2976 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 2977 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 2978 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 2979 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 2980 } 2981 2982 printf("\n"); 2983 } 2984 2985 static inline void 2986 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2987 { 2988 struct rte_eth_flex_payload_cfg *cfg; 2989 uint32_t i, j; 2990 2991 for (i = 0; i < flex_conf->nb_payloads; i++) { 2992 cfg = &flex_conf->flex_set[i]; 2993 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 2994 printf("\n RAW: "); 2995 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 2996 printf("\n L2_PAYLOAD: "); 2997 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 2998 printf("\n L3_PAYLOAD: "); 2999 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3000 printf("\n L4_PAYLOAD: "); 3001 else 3002 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3003 for (j = 0; j < num; j++) 3004 printf(" %-5u", cfg->src_offset[j]); 3005 } 3006 printf("\n"); 3007 } 3008 3009 static char * 3010 flowtype_to_str(uint16_t flow_type) 3011 { 3012 struct flow_type_info { 3013 char str[32]; 3014 uint16_t ftype; 3015 }; 3016 3017 uint8_t i; 3018 static struct flow_type_info flowtype_str_table[] = { 3019 {"raw", RTE_ETH_FLOW_RAW}, 3020 {"ipv4", RTE_ETH_FLOW_IPV4}, 3021 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3022 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3023 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3024 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3025 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3026 {"ipv6", RTE_ETH_FLOW_IPV6}, 3027 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3028 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3029 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3030 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3031 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3032 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3033 {"port", RTE_ETH_FLOW_PORT}, 3034 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3035 {"geneve", RTE_ETH_FLOW_GENEVE}, 3036 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3037 }; 3038 3039 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3040 if (flowtype_str_table[i].ftype == flow_type) 3041 return flowtype_str_table[i].str; 3042 } 3043 3044 return NULL; 3045 } 3046 3047 static inline void 3048 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3049 { 3050 struct rte_eth_fdir_flex_mask *mask; 3051 uint32_t i, j; 3052 char *p; 3053 3054 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3055 mask = &flex_conf->flex_mask[i]; 3056 p = flowtype_to_str(mask->flow_type); 3057 printf("\n %s:\t", p ? p : "unknown"); 3058 for (j = 0; j < num; j++) 3059 printf(" %02x", mask->mask[j]); 3060 } 3061 printf("\n"); 3062 } 3063 3064 static inline void 3065 print_fdir_flow_type(uint32_t flow_types_mask) 3066 { 3067 int i; 3068 char *p; 3069 3070 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3071 if (!(flow_types_mask & (1 << i))) 3072 continue; 3073 p = flowtype_to_str(i); 3074 if (p) 3075 printf(" %s", p); 3076 else 3077 printf(" unknown"); 3078 } 3079 printf("\n"); 3080 } 3081 3082 void 3083 fdir_get_infos(portid_t port_id) 3084 { 3085 struct rte_eth_fdir_stats fdir_stat; 3086 struct rte_eth_fdir_info fdir_info; 3087 int ret; 3088 3089 static const char *fdir_stats_border = "########################"; 3090 3091 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3092 return; 3093 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3094 if (ret < 0) { 3095 printf("\n FDIR is not supported on port %-2d\n", 3096 port_id); 3097 return; 3098 } 3099 3100 memset(&fdir_info, 0, sizeof(fdir_info)); 3101 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3102 RTE_ETH_FILTER_INFO, &fdir_info); 3103 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3104 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3105 RTE_ETH_FILTER_STATS, &fdir_stat); 3106 printf("\n %s FDIR infos for port %-2d %s\n", 3107 fdir_stats_border, port_id, fdir_stats_border); 3108 printf(" MODE: "); 3109 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3110 printf(" PERFECT\n"); 3111 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3112 printf(" PERFECT-MAC-VLAN\n"); 3113 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3114 printf(" PERFECT-TUNNEL\n"); 3115 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3116 printf(" SIGNATURE\n"); 3117 else 3118 printf(" DISABLE\n"); 3119 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3120 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3121 printf(" SUPPORTED FLOW TYPE: "); 3122 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3123 } 3124 printf(" FLEX PAYLOAD INFO:\n"); 3125 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3126 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3127 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3128 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3129 fdir_info.flex_payload_unit, 3130 fdir_info.max_flex_payload_segment_num, 3131 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3132 printf(" MASK: "); 3133 print_fdir_mask(&fdir_info.mask); 3134 if (fdir_info.flex_conf.nb_payloads > 0) { 3135 printf(" FLEX PAYLOAD SRC OFFSET:"); 3136 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3137 } 3138 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3139 printf(" FLEX MASK CFG:"); 3140 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3141 } 3142 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3143 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3144 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3145 fdir_info.guarant_spc, fdir_info.best_spc); 3146 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3147 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3148 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3149 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3150 fdir_stat.collision, fdir_stat.free, 3151 fdir_stat.maxhash, fdir_stat.maxlen, 3152 fdir_stat.add, fdir_stat.remove, 3153 fdir_stat.f_add, fdir_stat.f_remove); 3154 printf(" %s############################%s\n", 3155 fdir_stats_border, fdir_stats_border); 3156 } 3157 3158 void 3159 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3160 { 3161 struct rte_port *port; 3162 struct rte_eth_fdir_flex_conf *flex_conf; 3163 int i, idx = 0; 3164 3165 port = &ports[port_id]; 3166 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3167 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3168 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3169 idx = i; 3170 break; 3171 } 3172 } 3173 if (i >= RTE_ETH_FLOW_MAX) { 3174 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3175 idx = flex_conf->nb_flexmasks; 3176 flex_conf->nb_flexmasks++; 3177 } else { 3178 printf("The flex mask table is full. Can not set flex" 3179 " mask for flow_type(%u).", cfg->flow_type); 3180 return; 3181 } 3182 } 3183 rte_memcpy(&flex_conf->flex_mask[idx], 3184 cfg, 3185 sizeof(struct rte_eth_fdir_flex_mask)); 3186 } 3187 3188 void 3189 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3190 { 3191 struct rte_port *port; 3192 struct rte_eth_fdir_flex_conf *flex_conf; 3193 int i, idx = 0; 3194 3195 port = &ports[port_id]; 3196 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3197 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3198 if (cfg->type == flex_conf->flex_set[i].type) { 3199 idx = i; 3200 break; 3201 } 3202 } 3203 if (i >= RTE_ETH_PAYLOAD_MAX) { 3204 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3205 idx = flex_conf->nb_payloads; 3206 flex_conf->nb_payloads++; 3207 } else { 3208 printf("The flex payload table is full. Can not set" 3209 " flex payload for type(%u).", cfg->type); 3210 return; 3211 } 3212 } 3213 rte_memcpy(&flex_conf->flex_set[idx], 3214 cfg, 3215 sizeof(struct rte_eth_flex_payload_cfg)); 3216 3217 } 3218 3219 void 3220 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3221 { 3222 #ifdef RTE_LIBRTE_IXGBE_PMD 3223 int diag; 3224 3225 if (is_rx) 3226 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3227 else 3228 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3229 3230 if (diag == 0) 3231 return; 3232 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3233 is_rx ? "rx" : "tx", port_id, diag); 3234 return; 3235 #endif 3236 printf("VF %s setting not supported for port %d\n", 3237 is_rx ? "Rx" : "Tx", port_id); 3238 RTE_SET_USED(vf); 3239 RTE_SET_USED(on); 3240 } 3241 3242 int 3243 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3244 { 3245 int diag; 3246 struct rte_eth_link link; 3247 3248 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3249 return 1; 3250 rte_eth_link_get_nowait(port_id, &link); 3251 if (rate > link.link_speed) { 3252 printf("Invalid rate value:%u bigger than link speed: %u\n", 3253 rate, link.link_speed); 3254 return 1; 3255 } 3256 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3257 if (diag == 0) 3258 return diag; 3259 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3260 port_id, diag); 3261 return diag; 3262 } 3263 3264 int 3265 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3266 { 3267 int diag = -ENOTSUP; 3268 3269 RTE_SET_USED(vf); 3270 RTE_SET_USED(rate); 3271 RTE_SET_USED(q_msk); 3272 3273 #ifdef RTE_LIBRTE_IXGBE_PMD 3274 if (diag == -ENOTSUP) 3275 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3276 q_msk); 3277 #endif 3278 #ifdef RTE_LIBRTE_BNXT_PMD 3279 if (diag == -ENOTSUP) 3280 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3281 #endif 3282 if (diag == 0) 3283 return diag; 3284 3285 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3286 port_id, diag); 3287 return diag; 3288 } 3289 3290 /* 3291 * Functions to manage the set of filtered Multicast MAC addresses. 3292 * 3293 * A pool of filtered multicast MAC addresses is associated with each port. 3294 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3295 * The address of the pool and the number of valid multicast MAC addresses 3296 * recorded in the pool are stored in the fields "mc_addr_pool" and 3297 * "mc_addr_nb" of the "rte_port" data structure. 3298 * 3299 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3300 * to be supplied a contiguous array of multicast MAC addresses. 3301 * To comply with this constraint, the set of multicast addresses recorded 3302 * into the pool are systematically compacted at the beginning of the pool. 3303 * Hence, when a multicast address is removed from the pool, all following 3304 * addresses, if any, are copied back to keep the set contiguous. 3305 */ 3306 #define MCAST_POOL_INC 32 3307 3308 static int 3309 mcast_addr_pool_extend(struct rte_port *port) 3310 { 3311 struct ether_addr *mc_pool; 3312 size_t mc_pool_size; 3313 3314 /* 3315 * If a free entry is available at the end of the pool, just 3316 * increment the number of recorded multicast addresses. 3317 */ 3318 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3319 port->mc_addr_nb++; 3320 return 0; 3321 } 3322 3323 /* 3324 * [re]allocate a pool with MCAST_POOL_INC more entries. 3325 * The previous test guarantees that port->mc_addr_nb is a multiple 3326 * of MCAST_POOL_INC. 3327 */ 3328 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3329 MCAST_POOL_INC); 3330 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3331 mc_pool_size); 3332 if (mc_pool == NULL) { 3333 printf("allocation of pool of %u multicast addresses failed\n", 3334 port->mc_addr_nb + MCAST_POOL_INC); 3335 return -ENOMEM; 3336 } 3337 3338 port->mc_addr_pool = mc_pool; 3339 port->mc_addr_nb++; 3340 return 0; 3341 3342 } 3343 3344 static void 3345 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3346 { 3347 port->mc_addr_nb--; 3348 if (addr_idx == port->mc_addr_nb) { 3349 /* No need to recompact the set of multicast addressses. */ 3350 if (port->mc_addr_nb == 0) { 3351 /* free the pool of multicast addresses. */ 3352 free(port->mc_addr_pool); 3353 port->mc_addr_pool = NULL; 3354 } 3355 return; 3356 } 3357 memmove(&port->mc_addr_pool[addr_idx], 3358 &port->mc_addr_pool[addr_idx + 1], 3359 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3360 } 3361 3362 static void 3363 eth_port_multicast_addr_list_set(portid_t port_id) 3364 { 3365 struct rte_port *port; 3366 int diag; 3367 3368 port = &ports[port_id]; 3369 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3370 port->mc_addr_nb); 3371 if (diag == 0) 3372 return; 3373 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3374 port->mc_addr_nb, port_id, -diag); 3375 } 3376 3377 void 3378 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr) 3379 { 3380 struct rte_port *port; 3381 uint32_t i; 3382 3383 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3384 return; 3385 3386 port = &ports[port_id]; 3387 3388 /* 3389 * Check that the added multicast MAC address is not already recorded 3390 * in the pool of multicast addresses. 3391 */ 3392 for (i = 0; i < port->mc_addr_nb; i++) { 3393 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3394 printf("multicast address already filtered by port\n"); 3395 return; 3396 } 3397 } 3398 3399 if (mcast_addr_pool_extend(port) != 0) 3400 return; 3401 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3402 eth_port_multicast_addr_list_set(port_id); 3403 } 3404 3405 void 3406 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr) 3407 { 3408 struct rte_port *port; 3409 uint32_t i; 3410 3411 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3412 return; 3413 3414 port = &ports[port_id]; 3415 3416 /* 3417 * Search the pool of multicast MAC addresses for the removed address. 3418 */ 3419 for (i = 0; i < port->mc_addr_nb; i++) { 3420 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3421 break; 3422 } 3423 if (i == port->mc_addr_nb) { 3424 printf("multicast address not filtered by port %d\n", port_id); 3425 return; 3426 } 3427 3428 mcast_addr_pool_remove(port, i); 3429 eth_port_multicast_addr_list_set(port_id); 3430 } 3431 3432 void 3433 port_dcb_info_display(portid_t port_id) 3434 { 3435 struct rte_eth_dcb_info dcb_info; 3436 uint16_t i; 3437 int ret; 3438 static const char *border = "================"; 3439 3440 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3441 return; 3442 3443 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3444 if (ret) { 3445 printf("\n Failed to get dcb infos on port %-2d\n", 3446 port_id); 3447 return; 3448 } 3449 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3450 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3451 printf("\n TC : "); 3452 for (i = 0; i < dcb_info.nb_tcs; i++) 3453 printf("\t%4d", i); 3454 printf("\n Priority : "); 3455 for (i = 0; i < dcb_info.nb_tcs; i++) 3456 printf("\t%4d", dcb_info.prio_tc[i]); 3457 printf("\n BW percent :"); 3458 for (i = 0; i < dcb_info.nb_tcs; i++) 3459 printf("\t%4d%%", dcb_info.tc_bws[i]); 3460 printf("\n RXQ base : "); 3461 for (i = 0; i < dcb_info.nb_tcs; i++) 3462 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3463 printf("\n RXQ number :"); 3464 for (i = 0; i < dcb_info.nb_tcs; i++) 3465 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3466 printf("\n TXQ base : "); 3467 for (i = 0; i < dcb_info.nb_tcs; i++) 3468 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3469 printf("\n TXQ number :"); 3470 for (i = 0; i < dcb_info.nb_tcs; i++) 3471 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3472 printf("\n"); 3473 } 3474 3475 uint8_t * 3476 open_file(const char *file_path, uint32_t *size) 3477 { 3478 int fd = open(file_path, O_RDONLY); 3479 off_t pkg_size; 3480 uint8_t *buf = NULL; 3481 int ret = 0; 3482 struct stat st_buf; 3483 3484 if (size) 3485 *size = 0; 3486 3487 if (fd == -1) { 3488 printf("%s: Failed to open %s\n", __func__, file_path); 3489 return buf; 3490 } 3491 3492 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3493 close(fd); 3494 printf("%s: File operations failed\n", __func__); 3495 return buf; 3496 } 3497 3498 pkg_size = st_buf.st_size; 3499 if (pkg_size < 0) { 3500 close(fd); 3501 printf("%s: File operations failed\n", __func__); 3502 return buf; 3503 } 3504 3505 buf = (uint8_t *)malloc(pkg_size); 3506 if (!buf) { 3507 close(fd); 3508 printf("%s: Failed to malloc memory\n", __func__); 3509 return buf; 3510 } 3511 3512 ret = read(fd, buf, pkg_size); 3513 if (ret < 0) { 3514 close(fd); 3515 printf("%s: File read operation failed\n", __func__); 3516 close_file(buf); 3517 return NULL; 3518 } 3519 3520 if (size) 3521 *size = pkg_size; 3522 3523 close(fd); 3524 3525 return buf; 3526 } 3527 3528 int 3529 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3530 { 3531 FILE *fh = fopen(file_path, "wb"); 3532 3533 if (fh == NULL) { 3534 printf("%s: Failed to open %s\n", __func__, file_path); 3535 return -1; 3536 } 3537 3538 if (fwrite(buf, 1, size, fh) != size) { 3539 fclose(fh); 3540 printf("%s: File write operation failed\n", __func__); 3541 return -1; 3542 } 3543 3544 fclose(fh); 3545 3546 return 0; 3547 } 3548 3549 int 3550 close_file(uint8_t *buf) 3551 { 3552 if (buf) { 3553 free((void *)buf); 3554 return 0; 3555 } 3556 3557 return -1; 3558 } 3559 3560 void 3561 port_queue_region_info_display(portid_t port_id, void *buf) 3562 { 3563 #ifdef RTE_LIBRTE_I40E_PMD 3564 uint16_t i, j; 3565 struct rte_pmd_i40e_queue_regions *info = 3566 (struct rte_pmd_i40e_queue_regions *)buf; 3567 static const char *queue_region_info_stats_border = "-------"; 3568 3569 if (!info->queue_region_number) 3570 printf("there is no region has been set before"); 3571 3572 printf("\n %s All queue region info for port=%2d %s", 3573 queue_region_info_stats_border, port_id, 3574 queue_region_info_stats_border); 3575 printf("\n queue_region_number: %-14u \n", 3576 info->queue_region_number); 3577 3578 for (i = 0; i < info->queue_region_number; i++) { 3579 printf("\n region_id: %-14u queue_number: %-14u " 3580 "queue_start_index: %-14u \n", 3581 info->region[i].region_id, 3582 info->region[i].queue_num, 3583 info->region[i].queue_start_index); 3584 3585 printf(" user_priority_num is %-14u :", 3586 info->region[i].user_priority_num); 3587 for (j = 0; j < info->region[i].user_priority_num; j++) 3588 printf(" %-14u ", info->region[i].user_priority[j]); 3589 3590 printf("\n flowtype_num is %-14u :", 3591 info->region[i].flowtype_num); 3592 for (j = 0; j < info->region[i].flowtype_num; j++) 3593 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3594 } 3595 #else 3596 RTE_SET_USED(port_id); 3597 RTE_SET_USED(buf); 3598 #endif 3599 3600 printf("\n\n"); 3601 } 3602