1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <cmdline_parse_etheraddr.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 struct rss_type_info { 77 char str[32]; 78 uint64_t rss_type; 79 }; 80 81 static const struct rss_type_info rss_type_table[] = { 82 { "ipv4", ETH_RSS_IPV4 }, 83 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 84 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 85 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 86 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 87 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 88 { "ipv6", ETH_RSS_IPV6 }, 89 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 90 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 91 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 92 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 93 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 94 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 95 { "ipv6-ex", ETH_RSS_IPV6_EX }, 96 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 97 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 98 { "port", ETH_RSS_PORT }, 99 { "vxlan", ETH_RSS_VXLAN }, 100 { "geneve", ETH_RSS_GENEVE }, 101 { "nvgre", ETH_RSS_NVGRE }, 102 103 }; 104 105 static void 106 print_ethaddr(const char *name, struct ether_addr *eth_addr) 107 { 108 char buf[ETHER_ADDR_FMT_SIZE]; 109 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 110 printf("%s%s", name, buf); 111 } 112 113 void 114 nic_stats_display(portid_t port_id) 115 { 116 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 117 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 118 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 119 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 120 uint64_t mpps_rx, mpps_tx; 121 struct rte_eth_stats stats; 122 struct rte_port *port = &ports[port_id]; 123 uint8_t i; 124 portid_t pid; 125 126 static const char *nic_stats_border = "########################"; 127 128 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 129 printf("Valid port range is [0"); 130 RTE_ETH_FOREACH_DEV(pid) 131 printf(", %d", pid); 132 printf("]\n"); 133 return; 134 } 135 rte_eth_stats_get(port_id, &stats); 136 printf("\n %s 
NIC statistics for port %-2d %s\n", 137 nic_stats_border, port_id, nic_stats_border); 138 139 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 140 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 141 "%-"PRIu64"\n", 142 stats.ipackets, stats.imissed, stats.ibytes); 143 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 144 printf(" RX-nombuf: %-10"PRIu64"\n", 145 stats.rx_nombuf); 146 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 147 "%-"PRIu64"\n", 148 stats.opackets, stats.oerrors, stats.obytes); 149 } 150 else { 151 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 152 " RX-bytes: %10"PRIu64"\n", 153 stats.ipackets, stats.ierrors, stats.ibytes); 154 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 155 printf(" RX-nombuf: %10"PRIu64"\n", 156 stats.rx_nombuf); 157 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 158 " TX-bytes: %10"PRIu64"\n", 159 stats.opackets, stats.oerrors, stats.obytes); 160 } 161 162 if (port->rx_queue_stats_mapping_enabled) { 163 printf("\n"); 164 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 165 printf(" Stats reg %2d RX-packets: %10"PRIu64 166 " RX-errors: %10"PRIu64 167 " RX-bytes: %10"PRIu64"\n", 168 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 169 } 170 } 171 if (port->tx_queue_stats_mapping_enabled) { 172 printf("\n"); 173 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 174 printf(" Stats reg %2d TX-packets: %10"PRIu64 175 " TX-bytes: %10"PRIu64"\n", 176 i, stats.q_opackets[i], stats.q_obytes[i]); 177 } 178 } 179 180 diff_cycles = prev_cycles[port_id]; 181 prev_cycles[port_id] = rte_rdtsc(); 182 if (diff_cycles > 0) 183 diff_cycles = prev_cycles[port_id] - diff_cycles; 184 185 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 186 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 187 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 188 (stats.opackets - prev_pkts_tx[port_id]) : 0; 189 prev_pkts_rx[port_id] = stats.ipackets; 190 prev_pkts_tx[port_id] = stats.opackets; 191 mpps_rx = diff_cycles > 0 ? 192 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 193 mpps_tx = diff_cycles > 0 ? 
194 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 195 printf("\n Throughput (since last show)\n"); 196 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 197 mpps_rx, mpps_tx); 198 199 printf(" %s############################%s\n", 200 nic_stats_border, nic_stats_border); 201 } 202 203 void 204 nic_stats_clear(portid_t port_id) 205 { 206 portid_t pid; 207 208 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 209 printf("Valid port range is [0"); 210 RTE_ETH_FOREACH_DEV(pid) 211 printf(", %d", pid); 212 printf("]\n"); 213 return; 214 } 215 rte_eth_stats_reset(port_id); 216 printf("\n NIC statistics for port %d cleared\n", port_id); 217 } 218 219 void 220 nic_xstats_display(portid_t port_id) 221 { 222 struct rte_eth_xstat *xstats; 223 int cnt_xstats, idx_xstat; 224 struct rte_eth_xstat_name *xstats_names; 225 226 printf("###### NIC extended statistics for port %-2d\n", port_id); 227 if (!rte_eth_dev_is_valid_port(port_id)) { 228 printf("Error: Invalid port number %i\n", port_id); 229 return; 230 } 231 232 /* Get count */ 233 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 234 if (cnt_xstats < 0) { 235 printf("Error: Cannot get count of xstats\n"); 236 return; 237 } 238 239 /* Get id-name lookup table */ 240 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 241 if (xstats_names == NULL) { 242 printf("Cannot allocate memory for xstats lookup\n"); 243 return; 244 } 245 if (cnt_xstats != rte_eth_xstats_get_names( 246 port_id, xstats_names, cnt_xstats)) { 247 printf("Error: Cannot get xstats lookup\n"); 248 free(xstats_names); 249 return; 250 } 251 252 /* Get stats themselves */ 253 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 254 if (xstats == NULL) { 255 printf("Cannot allocate memory for xstats\n"); 256 free(xstats_names); 257 return; 258 } 259 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 260 printf("Error: Unable to get xstats\n"); 261 free(xstats_names); 262 free(xstats); 263 return; 264 } 265 266 /* Display xstats */ 267 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 268 if (xstats_hide_zero && !xstats[idx_xstat].value) 269 continue; 270 printf("%s: %"PRIu64"\n", 271 xstats_names[idx_xstat].name, 272 xstats[idx_xstat].value); 273 } 274 free(xstats_names); 275 free(xstats); 276 } 277 278 void 279 nic_xstats_clear(portid_t port_id) 280 { 281 rte_eth_xstats_reset(port_id); 282 } 283 284 void 285 nic_stats_mapping_display(portid_t port_id) 286 { 287 struct rte_port *port = &ports[port_id]; 288 uint16_t i; 289 portid_t pid; 290 291 static const char *nic_stats_mapping_border = "########################"; 292 293 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 294 printf("Valid port range is [0"); 295 RTE_ETH_FOREACH_DEV(pid) 296 printf(", %d", pid); 297 printf("]\n"); 298 return; 299 } 300 301 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 302 printf("Port id %d - either does not support queue statistic mapping or" 303 " no queue statistic mapping set\n", port_id); 304 return; 305 } 306 307 printf("\n %s NIC statistics mapping for port %-2d %s\n", 308 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 309 310 if (port->rx_queue_stats_mapping_enabled) { 311 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 312 if (rx_queue_stats_mappings[i].port_id == port_id) { 313 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 314 rx_queue_stats_mappings[i].queue_id, 315 rx_queue_stats_mappings[i].stats_counter_id); 316 } 317 } 318 printf("\n"); 319 } 320 321 322 if 
(port->tx_queue_stats_mapping_enabled) { 323 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 324 if (tx_queue_stats_mappings[i].port_id == port_id) { 325 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 326 tx_queue_stats_mappings[i].queue_id, 327 tx_queue_stats_mappings[i].stats_counter_id); 328 } 329 } 330 } 331 332 printf(" %s####################################%s\n", 333 nic_stats_mapping_border, nic_stats_mapping_border); 334 } 335 336 void 337 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 338 { 339 struct rte_eth_rxq_info qinfo; 340 int32_t rc; 341 static const char *info_border = "*********************"; 342 343 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 344 if (rc != 0) { 345 printf("Failed to retrieve information for port: %u, " 346 "RX queue: %hu\nerror desc: %s(%d)\n", 347 port_id, queue_id, strerror(-rc), rc); 348 return; 349 } 350 351 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 352 info_border, port_id, queue_id, info_border); 353 354 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 355 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 356 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 357 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 358 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 359 printf("\nRX drop packets: %s", 360 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 361 printf("\nRX deferred start: %s", 362 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 363 printf("\nRX scattered packets: %s", 364 (qinfo.scattered_rx != 0) ? "on" : "off"); 365 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 366 printf("\n"); 367 } 368 369 void 370 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 371 { 372 struct rte_eth_txq_info qinfo; 373 int32_t rc; 374 static const char *info_border = "*********************"; 375 376 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 377 if (rc != 0) { 378 printf("Failed to retrieve information for port: %u, " 379 "TX queue: %hu\nerror desc: %s(%d)\n", 380 port_id, queue_id, strerror(-rc), rc); 381 return; 382 } 383 384 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 385 info_border, port_id, queue_id, info_border); 386 387 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 388 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 389 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 390 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 391 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 392 printf("\nTX deferred start: %s", 393 (qinfo.conf.tx_deferred_start != 0) ? 
"on" : "off"); 394 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 395 printf("\n"); 396 } 397 398 void 399 port_infos_display(portid_t port_id) 400 { 401 struct rte_port *port; 402 struct ether_addr mac_addr; 403 struct rte_eth_link link; 404 struct rte_eth_dev_info dev_info; 405 int vlan_offload; 406 struct rte_mempool * mp; 407 static const char *info_border = "*********************"; 408 portid_t pid; 409 uint16_t mtu; 410 411 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 412 printf("Valid port range is [0"); 413 RTE_ETH_FOREACH_DEV(pid) 414 printf(", %d", pid); 415 printf("]\n"); 416 return; 417 } 418 port = &ports[port_id]; 419 rte_eth_link_get_nowait(port_id, &link); 420 memset(&dev_info, 0, sizeof(dev_info)); 421 rte_eth_dev_info_get(port_id, &dev_info); 422 printf("\n%s Infos for port %-2d %s\n", 423 info_border, port_id, info_border); 424 rte_eth_macaddr_get(port_id, &mac_addr); 425 print_ethaddr("MAC address: ", &mac_addr); 426 printf("\nDriver name: %s", dev_info.driver_name); 427 printf("\nConnect to socket: %u", port->socket_id); 428 429 if (port_numa[port_id] != NUMA_NO_CONFIG) { 430 mp = mbuf_pool_find(port_numa[port_id]); 431 if (mp) 432 printf("\nmemory allocation on the socket: %d", 433 port_numa[port_id]); 434 } else 435 printf("\nmemory allocation on the socket: %u",port->socket_id); 436 437 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 438 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 439 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 440 ("full-duplex") : ("half-duplex")); 441 442 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 443 printf("MTU: %u\n", mtu); 444 445 printf("Promiscuous mode: %s\n", 446 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 447 printf("Allmulticast mode: %s\n", 448 rte_eth_allmulticast_get(port_id) ? 
"enabled" : "disabled"); 449 printf("Maximum number of MAC addresses: %u\n", 450 (unsigned int)(port->dev_info.max_mac_addrs)); 451 printf("Maximum number of MAC addresses of hash filtering: %u\n", 452 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 453 454 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 455 if (vlan_offload >= 0){ 456 printf("VLAN offload: \n"); 457 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 458 printf(" strip on \n"); 459 else 460 printf(" strip off \n"); 461 462 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 463 printf(" filter on \n"); 464 else 465 printf(" filter off \n"); 466 467 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 468 printf(" qinq(extend) on \n"); 469 else 470 printf(" qinq(extend) off \n"); 471 } 472 473 if (dev_info.hash_key_size > 0) 474 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 475 if (dev_info.reta_size > 0) 476 printf("Redirection table size: %u\n", dev_info.reta_size); 477 if (!dev_info.flow_type_rss_offloads) 478 printf("No flow type is supported.\n"); 479 else { 480 uint16_t i; 481 char *p; 482 483 printf("Supported flow types:\n"); 484 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 485 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 486 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 487 continue; 488 p = flowtype_to_str(i); 489 if (p) 490 printf(" %s\n", p); 491 else 492 printf(" user defined %d\n", i); 493 } 494 } 495 496 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 497 printf("Maximum configurable length of RX packet: %u\n", 498 dev_info.max_rx_pktlen); 499 if (dev_info.max_vfs) 500 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 501 if (dev_info.max_vmdq_pools) 502 printf("Maximum number of VMDq pools: %u\n", 503 dev_info.max_vmdq_pools); 504 505 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 506 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 507 printf("Max possible number of RXDs per queue: %hu\n", 508 dev_info.rx_desc_lim.nb_max); 509 printf("Min possible number of RXDs per queue: %hu\n", 510 dev_info.rx_desc_lim.nb_min); 511 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 512 513 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 514 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 515 printf("Max possible number of TXDs per queue: %hu\n", 516 dev_info.tx_desc_lim.nb_max); 517 printf("Min possible number of TXDs per queue: %hu\n", 518 dev_info.tx_desc_lim.nb_min); 519 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 520 } 521 522 void 523 port_offload_cap_display(portid_t port_id) 524 { 525 struct rte_eth_dev_info dev_info; 526 static const char *info_border = "************"; 527 528 if (port_id_is_invalid(port_id, ENABLED_WARN)) 529 return; 530 531 rte_eth_dev_info_get(port_id, &dev_info); 532 533 printf("\n%s Port %d supported offload features: %s\n", 534 info_border, port_id, info_border); 535 536 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 537 printf("VLAN stripped: "); 538 if (ports[port_id].dev_conf.rxmode.offloads & 539 DEV_RX_OFFLOAD_VLAN_STRIP) 540 printf("on\n"); 541 else 542 printf("off\n"); 543 } 544 545 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 546 printf("Double VLANs stripped: "); 547 if (ports[port_id].dev_conf.rxmode.offloads & 548 DEV_RX_OFFLOAD_VLAN_EXTEND) 549 printf("on\n"); 550 else 551 printf("off\n"); 552 } 553 554 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 555 printf("RX IPv4 
checksum: "); 556 if (ports[port_id].dev_conf.rxmode.offloads & 557 DEV_RX_OFFLOAD_IPV4_CKSUM) 558 printf("on\n"); 559 else 560 printf("off\n"); 561 } 562 563 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 564 printf("RX UDP checksum: "); 565 if (ports[port_id].dev_conf.rxmode.offloads & 566 DEV_RX_OFFLOAD_UDP_CKSUM) 567 printf("on\n"); 568 else 569 printf("off\n"); 570 } 571 572 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 573 printf("RX TCP checksum: "); 574 if (ports[port_id].dev_conf.rxmode.offloads & 575 DEV_RX_OFFLOAD_TCP_CKSUM) 576 printf("on\n"); 577 else 578 printf("off\n"); 579 } 580 581 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 582 printf("RX Outer IPv4 checksum: "); 583 if (ports[port_id].dev_conf.rxmode.offloads & 584 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 585 printf("on\n"); 586 else 587 printf("off\n"); 588 } 589 590 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 591 printf("Large receive offload: "); 592 if (ports[port_id].dev_conf.rxmode.offloads & 593 DEV_RX_OFFLOAD_TCP_LRO) 594 printf("on\n"); 595 else 596 printf("off\n"); 597 } 598 599 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 600 printf("VLAN insert: "); 601 if (ports[port_id].dev_conf.txmode.offloads & 602 DEV_TX_OFFLOAD_VLAN_INSERT) 603 printf("on\n"); 604 else 605 printf("off\n"); 606 } 607 608 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 609 printf("HW timestamp: "); 610 if (ports[port_id].dev_conf.rxmode.offloads & 611 DEV_RX_OFFLOAD_TIMESTAMP) 612 printf("on\n"); 613 else 614 printf("off\n"); 615 } 616 617 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 618 printf("Double VLANs insert: "); 619 if (ports[port_id].dev_conf.txmode.offloads & 620 DEV_TX_OFFLOAD_QINQ_INSERT) 621 printf("on\n"); 622 else 623 printf("off\n"); 624 } 625 626 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 627 printf("TX IPv4 checksum: "); 628 if (ports[port_id].dev_conf.txmode.offloads & 629 DEV_TX_OFFLOAD_IPV4_CKSUM) 630 printf("on\n"); 631 else 632 printf("off\n"); 633 } 634 635 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 636 printf("TX UDP checksum: "); 637 if (ports[port_id].dev_conf.txmode.offloads & 638 DEV_TX_OFFLOAD_UDP_CKSUM) 639 printf("on\n"); 640 else 641 printf("off\n"); 642 } 643 644 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 645 printf("TX TCP checksum: "); 646 if (ports[port_id].dev_conf.txmode.offloads & 647 DEV_TX_OFFLOAD_TCP_CKSUM) 648 printf("on\n"); 649 else 650 printf("off\n"); 651 } 652 653 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 654 printf("TX SCTP checksum: "); 655 if (ports[port_id].dev_conf.txmode.offloads & 656 DEV_TX_OFFLOAD_SCTP_CKSUM) 657 printf("on\n"); 658 else 659 printf("off\n"); 660 } 661 662 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 663 printf("TX Outer IPv4 checksum: "); 664 if (ports[port_id].dev_conf.txmode.offloads & 665 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 666 printf("on\n"); 667 else 668 printf("off\n"); 669 } 670 671 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 672 printf("TX TCP segmentation: "); 673 if (ports[port_id].dev_conf.txmode.offloads & 674 DEV_TX_OFFLOAD_TCP_TSO) 675 printf("on\n"); 676 else 677 printf("off\n"); 678 } 679 680 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 681 printf("TX UDP segmentation: "); 682 if (ports[port_id].dev_conf.txmode.offloads & 683 DEV_TX_OFFLOAD_UDP_TSO) 684 printf("on\n"); 685 else 686 printf("off\n"); 687 } 688 689 if (dev_info.tx_offload_capa & 
DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 690 printf("TSO for VXLAN tunnel packet: "); 691 if (ports[port_id].dev_conf.txmode.offloads & 692 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 693 printf("on\n"); 694 else 695 printf("off\n"); 696 } 697 698 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 699 printf("TSO for GRE tunnel packet: "); 700 if (ports[port_id].dev_conf.txmode.offloads & 701 DEV_TX_OFFLOAD_GRE_TNL_TSO) 702 printf("on\n"); 703 else 704 printf("off\n"); 705 } 706 707 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 708 printf("TSO for IPIP tunnel packet: "); 709 if (ports[port_id].dev_conf.txmode.offloads & 710 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 711 printf("on\n"); 712 else 713 printf("off\n"); 714 } 715 716 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 717 printf("TSO for GENEVE tunnel packet: "); 718 if (ports[port_id].dev_conf.txmode.offloads & 719 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 720 printf("on\n"); 721 else 722 printf("off\n"); 723 } 724 725 } 726 727 int 728 port_id_is_invalid(portid_t port_id, enum print_warning warning) 729 { 730 uint16_t pid; 731 732 if (port_id == (portid_t)RTE_PORT_ALL) 733 return 0; 734 735 RTE_ETH_FOREACH_DEV(pid) 736 if (port_id == pid) 737 return 0; 738 739 if (warning == ENABLED_WARN) 740 printf("Invalid port %d\n", port_id); 741 742 return 1; 743 } 744 745 static int 746 vlan_id_is_invalid(uint16_t vlan_id) 747 { 748 if (vlan_id < 4096) 749 return 0; 750 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 751 return 1; 752 } 753 754 static int 755 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 756 { 757 uint64_t pci_len; 758 759 if (reg_off & 0x3) { 760 printf("Port register offset 0x%X not aligned on a 4-byte " 761 "boundary\n", 762 (unsigned)reg_off); 763 return 1; 764 } 765 pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; 766 if (reg_off >= pci_len) { 767 printf("Port %d: register offset %u (0x%X) out of port PCI " 768 "resource (length=%"PRIu64")\n", 769 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 770 return 1; 771 } 772 return 0; 773 } 774 775 static int 776 reg_bit_pos_is_invalid(uint8_t bit_pos) 777 { 778 if (bit_pos <= 31) 779 return 0; 780 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 781 return 1; 782 } 783 784 #define display_port_and_reg_off(port_id, reg_off) \ 785 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 786 787 static inline void 788 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 789 { 790 display_port_and_reg_off(port_id, (unsigned)reg_off); 791 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 792 } 793 794 void 795 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 796 { 797 uint32_t reg_v; 798 799 800 if (port_id_is_invalid(port_id, ENABLED_WARN)) 801 return; 802 if (port_reg_off_is_invalid(port_id, reg_off)) 803 return; 804 if (reg_bit_pos_is_invalid(bit_x)) 805 return; 806 reg_v = port_id_pci_reg_read(port_id, reg_off); 807 display_port_and_reg_off(port_id, (unsigned)reg_off); 808 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 809 } 810 811 void 812 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 813 uint8_t bit1_pos, uint8_t bit2_pos) 814 { 815 uint32_t reg_v; 816 uint8_t l_bit; 817 uint8_t h_bit; 818 819 if (port_id_is_invalid(port_id, ENABLED_WARN)) 820 return; 821 if (port_reg_off_is_invalid(port_id, reg_off)) 822 return; 823 if (reg_bit_pos_is_invalid(bit1_pos)) 824 return; 825 if (reg_bit_pos_is_invalid(bit2_pos)) 826 
return; 827 if (bit1_pos > bit2_pos) 828 l_bit = bit2_pos, h_bit = bit1_pos; 829 else 830 l_bit = bit1_pos, h_bit = bit2_pos; 831 832 reg_v = port_id_pci_reg_read(port_id, reg_off); 833 reg_v >>= l_bit; 834 if (h_bit < 31) 835 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 836 display_port_and_reg_off(port_id, (unsigned)reg_off); 837 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 838 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 839 } 840 841 void 842 port_reg_display(portid_t port_id, uint32_t reg_off) 843 { 844 uint32_t reg_v; 845 846 if (port_id_is_invalid(port_id, ENABLED_WARN)) 847 return; 848 if (port_reg_off_is_invalid(port_id, reg_off)) 849 return; 850 reg_v = port_id_pci_reg_read(port_id, reg_off); 851 display_port_reg_value(port_id, reg_off, reg_v); 852 } 853 854 void 855 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 856 uint8_t bit_v) 857 { 858 uint32_t reg_v; 859 860 if (port_id_is_invalid(port_id, ENABLED_WARN)) 861 return; 862 if (port_reg_off_is_invalid(port_id, reg_off)) 863 return; 864 if (reg_bit_pos_is_invalid(bit_pos)) 865 return; 866 if (bit_v > 1) { 867 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 868 return; 869 } 870 reg_v = port_id_pci_reg_read(port_id, reg_off); 871 if (bit_v == 0) 872 reg_v &= ~(1 << bit_pos); 873 else 874 reg_v |= (1 << bit_pos); 875 port_id_pci_reg_write(port_id, reg_off, reg_v); 876 display_port_reg_value(port_id, reg_off, reg_v); 877 } 878 879 void 880 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 881 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 882 { 883 uint32_t max_v; 884 uint32_t reg_v; 885 uint8_t l_bit; 886 uint8_t h_bit; 887 888 if (port_id_is_invalid(port_id, ENABLED_WARN)) 889 return; 890 if (port_reg_off_is_invalid(port_id, reg_off)) 891 return; 892 if (reg_bit_pos_is_invalid(bit1_pos)) 893 return; 894 if (reg_bit_pos_is_invalid(bit2_pos)) 895 return; 896 if (bit1_pos > bit2_pos) 897 l_bit = bit2_pos, h_bit = bit1_pos; 898 else 899 l_bit = bit1_pos, h_bit = bit2_pos; 900 901 if ((h_bit - l_bit) < 31) 902 max_v = (1 << (h_bit - l_bit + 1)) - 1; 903 else 904 max_v = 0xFFFFFFFF; 905 906 if (value > max_v) { 907 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 908 (unsigned)value, (unsigned)value, 909 (unsigned)max_v, (unsigned)max_v); 910 return; 911 } 912 reg_v = port_id_pci_reg_read(port_id, reg_off); 913 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 914 reg_v |= (value << l_bit); /* Set changed bits */ 915 port_id_pci_reg_write(port_id, reg_off, reg_v); 916 display_port_reg_value(port_id, reg_off, reg_v); 917 } 918 919 void 920 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 921 { 922 if (port_id_is_invalid(port_id, ENABLED_WARN)) 923 return; 924 if (port_reg_off_is_invalid(port_id, reg_off)) 925 return; 926 port_id_pci_reg_write(port_id, reg_off, reg_v); 927 display_port_reg_value(port_id, reg_off, reg_v); 928 } 929 930 void 931 port_mtu_set(portid_t port_id, uint16_t mtu) 932 { 933 int diag; 934 935 if (port_id_is_invalid(port_id, ENABLED_WARN)) 936 return; 937 diag = rte_eth_dev_set_mtu(port_id, mtu); 938 if (diag == 0) 939 return; 940 printf("Set MTU failed. diag=%d\n", diag); 941 } 942 943 /* Generic flow management functions. */ 944 945 /** Generate flow_item[] entry. */ 946 #define MK_FLOW_ITEM(t, s) \ 947 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 948 .name = # t, \ 949 .size = s, \ 950 } 951 952 /** Information about known flow pattern items. 
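 *
 * As a sketch of how the table below is built, the MK_FLOW_ITEM() macro
 * above expands to a designated initializer per item type; the ETH entry,
 * for example, becomes:
 *
 *   [RTE_FLOW_ITEM_TYPE_ETH] = {
 *       .name = "ETH",
 *       .size = sizeof(struct rte_flow_item_eth),
 *   },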
*/ 953 static const struct { 954 const char *name; 955 size_t size; 956 } flow_item[] = { 957 MK_FLOW_ITEM(END, 0), 958 MK_FLOW_ITEM(VOID, 0), 959 MK_FLOW_ITEM(INVERT, 0), 960 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 961 MK_FLOW_ITEM(PF, 0), 962 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 963 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)), 964 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */ 965 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 966 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 967 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 968 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 969 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 970 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 971 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 972 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 973 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 974 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), 975 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), 976 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 977 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 978 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)), 979 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), 980 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), 981 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), 982 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), 983 }; 984 985 /** Compute storage space needed by item specification. */ 986 static void 987 flow_item_spec_size(const struct rte_flow_item *item, 988 size_t *size, size_t *pad) 989 { 990 if (!item->spec) { 991 *size = 0; 992 goto empty; 993 } 994 switch (item->type) { 995 union { 996 const struct rte_flow_item_raw *raw; 997 } spec; 998 999 case RTE_FLOW_ITEM_TYPE_RAW: 1000 spec.raw = item->spec; 1001 *size = offsetof(struct rte_flow_item_raw, pattern) + 1002 spec.raw->length * sizeof(*spec.raw->pattern); 1003 break; 1004 default: 1005 *size = flow_item[item->type].size; 1006 break; 1007 } 1008 empty: 1009 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1010 } 1011 1012 /** Generate flow_action[] entry. */ 1013 #define MK_FLOW_ACTION(t, s) \ 1014 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 1015 .name = # t, \ 1016 .size = s, \ 1017 } 1018 1019 /** Information about known flow actions. */ 1020 static const struct { 1021 const char *name; 1022 size_t size; 1023 } flow_action[] = { 1024 MK_FLOW_ACTION(END, 0), 1025 MK_FLOW_ACTION(VOID, 0), 1026 MK_FLOW_ACTION(PASSTHRU, 0), 1027 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 1028 MK_FLOW_ACTION(FLAG, 0), 1029 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 1030 MK_FLOW_ACTION(DROP, 0), 1031 MK_FLOW_ACTION(COUNT, 0), 1032 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)), 1033 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ 1034 MK_FLOW_ACTION(PF, 0), 1035 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 1036 MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)), 1037 }; 1038 1039 /** Compute storage space needed by action configuration. 
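 *
 * Fixed-size actions simply report flow_action[type].size, while RSS adds
 * the trailing queue[] array. The *pad output rounds *size up to the next
 * multiple of sizeof(double); e.g. (illustrative, assuming sizeof(double)
 * is 8) a 12-byte configuration yields *pad == 4 so that the next object
 * copied into a port_flow buffer stays 8-byte aligned.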
*/ 1040 static void 1041 flow_action_conf_size(const struct rte_flow_action *action, 1042 size_t *size, size_t *pad) 1043 { 1044 if (!action->conf) { 1045 *size = 0; 1046 goto empty; 1047 } 1048 switch (action->type) { 1049 union { 1050 const struct rte_flow_action_rss *rss; 1051 } conf; 1052 1053 case RTE_FLOW_ACTION_TYPE_RSS: 1054 conf.rss = action->conf; 1055 *size = offsetof(struct rte_flow_action_rss, queue) + 1056 conf.rss->num * sizeof(*conf.rss->queue); 1057 break; 1058 default: 1059 *size = flow_action[action->type].size; 1060 break; 1061 } 1062 empty: 1063 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; 1064 } 1065 1066 /** Generate a port_flow entry from attributes/pattern/actions. */ 1067 static struct port_flow * 1068 port_flow_new(const struct rte_flow_attr *attr, 1069 const struct rte_flow_item *pattern, 1070 const struct rte_flow_action *actions) 1071 { 1072 const struct rte_flow_item *item; 1073 const struct rte_flow_action *action; 1074 struct port_flow *pf = NULL; 1075 size_t tmp; 1076 size_t pad; 1077 size_t off1 = 0; 1078 size_t off2 = 0; 1079 int err = ENOTSUP; 1080 1081 store: 1082 item = pattern; 1083 if (pf) 1084 pf->pattern = (void *)&pf->data[off1]; 1085 do { 1086 struct rte_flow_item *dst = NULL; 1087 1088 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 1089 !flow_item[item->type].name) 1090 goto notsup; 1091 if (pf) 1092 dst = memcpy(pf->data + off1, item, sizeof(*item)); 1093 off1 += sizeof(*item); 1094 flow_item_spec_size(item, &tmp, &pad); 1095 if (item->spec) { 1096 if (pf) 1097 dst->spec = memcpy(pf->data + off2, 1098 item->spec, tmp); 1099 off2 += tmp + pad; 1100 } 1101 if (item->last) { 1102 if (pf) 1103 dst->last = memcpy(pf->data + off2, 1104 item->last, tmp); 1105 off2 += tmp + pad; 1106 } 1107 if (item->mask) { 1108 if (pf) 1109 dst->mask = memcpy(pf->data + off2, 1110 item->mask, tmp); 1111 off2 += tmp + pad; 1112 } 1113 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1114 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 1115 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1116 action = actions; 1117 if (pf) 1118 pf->actions = (void *)&pf->data[off1]; 1119 do { 1120 struct rte_flow_action *dst = NULL; 1121 1122 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1123 !flow_action[action->type].name) 1124 goto notsup; 1125 if (pf) 1126 dst = memcpy(pf->data + off1, action, sizeof(*action)); 1127 off1 += sizeof(*action); 1128 flow_action_conf_size(action, &tmp, &pad); 1129 if (action->conf) { 1130 if (pf) 1131 dst->conf = memcpy(pf->data + off2, 1132 action->conf, tmp); 1133 off2 += tmp + pad; 1134 } 1135 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1136 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 1137 if (pf != NULL) 1138 return pf; 1139 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1140 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 1141 pf = calloc(1, tmp + off1 + off2); 1142 if (pf == NULL) 1143 err = errno; 1144 else { 1145 *pf = (const struct port_flow){ 1146 .size = tmp + off1 + off2, 1147 .attr = *attr, 1148 }; 1149 tmp -= offsetof(struct port_flow, data); 1150 off2 = tmp + off1; 1151 off1 = tmp; 1152 goto store; 1153 } 1154 notsup: 1155 rte_errno = err; 1156 return NULL; 1157 } 1158 1159 /** Print a message out of a flow error. 
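 *
 * The printed line combines the error type, its description from
 * errstrlist[], the cause pointer (when provided) and the PMD-supplied
 * message, roughly (illustrative template, not actual output):
 *
 *   Caught error type <n> (<description>): cause: <pointer>, <message>
 *
 * The negated rte_errno value saved on entry is returned to the caller.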
*/ 1160 static int 1161 port_flow_complain(struct rte_flow_error *error) 1162 { 1163 static const char *const errstrlist[] = { 1164 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1165 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1166 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1167 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1168 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1169 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1170 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1171 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1172 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1173 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1174 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1175 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1176 }; 1177 const char *errstr; 1178 char buf[32]; 1179 int err = rte_errno; 1180 1181 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1182 !errstrlist[error->type]) 1183 errstr = "unknown type"; 1184 else 1185 errstr = errstrlist[error->type]; 1186 printf("Caught error type %d (%s): %s%s\n", 1187 error->type, errstr, 1188 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1189 error->cause), buf) : "", 1190 error->message ? error->message : "(no stated reason)"); 1191 return -err; 1192 } 1193 1194 /** Validate flow rule. */ 1195 int 1196 port_flow_validate(portid_t port_id, 1197 const struct rte_flow_attr *attr, 1198 const struct rte_flow_item *pattern, 1199 const struct rte_flow_action *actions) 1200 { 1201 struct rte_flow_error error; 1202 1203 /* Poisoning to make sure PMDs update it in case of error. */ 1204 memset(&error, 0x11, sizeof(error)); 1205 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1206 return port_flow_complain(&error); 1207 printf("Flow rule validated\n"); 1208 return 0; 1209 } 1210 1211 /** Create flow rule. */ 1212 int 1213 port_flow_create(portid_t port_id, 1214 const struct rte_flow_attr *attr, 1215 const struct rte_flow_item *pattern, 1216 const struct rte_flow_action *actions) 1217 { 1218 struct rte_flow *flow; 1219 struct rte_port *port; 1220 struct port_flow *pf; 1221 uint32_t id; 1222 struct rte_flow_error error; 1223 1224 /* Poisoning to make sure PMDs update it in case of error. */ 1225 memset(&error, 0x22, sizeof(error)); 1226 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1227 if (!flow) 1228 return port_flow_complain(&error); 1229 port = &ports[port_id]; 1230 if (port->flow_list) { 1231 if (port->flow_list->id == UINT32_MAX) { 1232 printf("Highest rule ID is already assigned, delete" 1233 " it first"); 1234 rte_flow_destroy(port_id, flow, NULL); 1235 return -ENOMEM; 1236 } 1237 id = port->flow_list->id + 1; 1238 } else 1239 id = 0; 1240 pf = port_flow_new(attr, pattern, actions); 1241 if (!pf) { 1242 int err = rte_errno; 1243 1244 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1245 rte_flow_destroy(port_id, flow, NULL); 1246 return -err; 1247 } 1248 pf->next = port->flow_list; 1249 pf->id = id; 1250 pf->flow = flow; 1251 port->flow_list = pf; 1252 printf("Flow rule #%u created\n", pf->id); 1253 return 0; 1254 } 1255 1256 /** Destroy a number of flow rules. 
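 *
 * This backs the testpmd "flow destroy" command; e.g. (illustrative)
 * "flow destroy 0 rule 1 2" requests rules #1 and #2 of port 0, and each
 * matching entry of the port flow_list is destroyed and unlinked.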
*/ 1257 int 1258 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1259 { 1260 struct rte_port *port; 1261 struct port_flow **tmp; 1262 uint32_t c = 0; 1263 int ret = 0; 1264 1265 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1266 port_id == (portid_t)RTE_PORT_ALL) 1267 return -EINVAL; 1268 port = &ports[port_id]; 1269 tmp = &port->flow_list; 1270 while (*tmp) { 1271 uint32_t i; 1272 1273 for (i = 0; i != n; ++i) { 1274 struct rte_flow_error error; 1275 struct port_flow *pf = *tmp; 1276 1277 if (rule[i] != pf->id) 1278 continue; 1279 /* 1280 * Poisoning to make sure PMDs update it in case 1281 * of error. 1282 */ 1283 memset(&error, 0x33, sizeof(error)); 1284 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1285 ret = port_flow_complain(&error); 1286 continue; 1287 } 1288 printf("Flow rule #%u destroyed\n", pf->id); 1289 *tmp = pf->next; 1290 free(pf); 1291 break; 1292 } 1293 if (i == n) 1294 tmp = &(*tmp)->next; 1295 ++c; 1296 } 1297 return ret; 1298 } 1299 1300 /** Remove all flow rules. */ 1301 int 1302 port_flow_flush(portid_t port_id) 1303 { 1304 struct rte_flow_error error; 1305 struct rte_port *port; 1306 int ret = 0; 1307 1308 /* Poisoning to make sure PMDs update it in case of error. */ 1309 memset(&error, 0x44, sizeof(error)); 1310 if (rte_flow_flush(port_id, &error)) { 1311 ret = port_flow_complain(&error); 1312 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1313 port_id == (portid_t)RTE_PORT_ALL) 1314 return ret; 1315 } 1316 port = &ports[port_id]; 1317 while (port->flow_list) { 1318 struct port_flow *pf = port->flow_list->next; 1319 1320 free(port->flow_list); 1321 port->flow_list = pf; 1322 } 1323 return ret; 1324 } 1325 1326 /** Query a flow rule. */ 1327 int 1328 port_flow_query(portid_t port_id, uint32_t rule, 1329 enum rte_flow_action_type action) 1330 { 1331 struct rte_flow_error error; 1332 struct rte_port *port; 1333 struct port_flow *pf; 1334 const char *name; 1335 union { 1336 struct rte_flow_query_count count; 1337 } query; 1338 1339 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1340 port_id == (portid_t)RTE_PORT_ALL) 1341 return -EINVAL; 1342 port = &ports[port_id]; 1343 for (pf = port->flow_list; pf; pf = pf->next) 1344 if (pf->id == rule) 1345 break; 1346 if (!pf) { 1347 printf("Flow rule #%u not found\n", rule); 1348 return -ENOENT; 1349 } 1350 if ((unsigned int)action >= RTE_DIM(flow_action) || 1351 !flow_action[action].name) 1352 name = "unknown"; 1353 else 1354 name = flow_action[action].name; 1355 switch (action) { 1356 case RTE_FLOW_ACTION_TYPE_COUNT: 1357 break; 1358 default: 1359 printf("Cannot query action type %d (%s)\n", action, name); 1360 return -ENOTSUP; 1361 } 1362 /* Poisoning to make sure PMDs update it in case of error. */ 1363 memset(&error, 0x55, sizeof(error)); 1364 memset(&query, 0, sizeof(query)); 1365 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1366 return port_flow_complain(&error); 1367 switch (action) { 1368 case RTE_FLOW_ACTION_TYPE_COUNT: 1369 printf("%s:\n" 1370 " hits_set: %u\n" 1371 " bytes_set: %u\n" 1372 " hits: %" PRIu64 "\n" 1373 " bytes: %" PRIu64 "\n", 1374 name, 1375 query.count.hits_set, 1376 query.count.bytes_set, 1377 query.count.hits, 1378 query.count.bytes); 1379 break; 1380 default: 1381 printf("Cannot display result for action type %d (%s)\n", 1382 action, name); 1383 break; 1384 } 1385 return 0; 1386 } 1387 1388 /** List flow rules. 
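 *
 * Rules are sorted by group, priority and ID before being printed one per
 * line, e.g. (illustrative output):
 *
 *   ID      Group   Prio    Attr    Rule
 *   0       0       0       i-      ETH IPV4 UDP => QUEUE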
*/ 1389 void 1390 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1391 { 1392 struct rte_port *port; 1393 struct port_flow *pf; 1394 struct port_flow *list = NULL; 1395 uint32_t i; 1396 1397 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1398 port_id == (portid_t)RTE_PORT_ALL) 1399 return; 1400 port = &ports[port_id]; 1401 if (!port->flow_list) 1402 return; 1403 /* Sort flows by group, priority and ID. */ 1404 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1405 struct port_flow **tmp; 1406 1407 if (n) { 1408 /* Filter out unwanted groups. */ 1409 for (i = 0; i != n; ++i) 1410 if (pf->attr.group == group[i]) 1411 break; 1412 if (i == n) 1413 continue; 1414 } 1415 tmp = &list; 1416 while (*tmp && 1417 (pf->attr.group > (*tmp)->attr.group || 1418 (pf->attr.group == (*tmp)->attr.group && 1419 pf->attr.priority > (*tmp)->attr.priority) || 1420 (pf->attr.group == (*tmp)->attr.group && 1421 pf->attr.priority == (*tmp)->attr.priority && 1422 pf->id > (*tmp)->id))) 1423 tmp = &(*tmp)->tmp; 1424 pf->tmp = *tmp; 1425 *tmp = pf; 1426 } 1427 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1428 for (pf = list; pf != NULL; pf = pf->tmp) { 1429 const struct rte_flow_item *item = pf->pattern; 1430 const struct rte_flow_action *action = pf->actions; 1431 1432 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t", 1433 pf->id, 1434 pf->attr.group, 1435 pf->attr.priority, 1436 pf->attr.ingress ? 'i' : '-', 1437 pf->attr.egress ? 'e' : '-'); 1438 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1439 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1440 printf("%s ", flow_item[item->type].name); 1441 ++item; 1442 } 1443 printf("=>"); 1444 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1445 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1446 printf(" %s", flow_action[action->type].name); 1447 ++action; 1448 } 1449 printf("\n"); 1450 } 1451 } 1452 1453 /** Restrict ingress traffic to the defined flow rules. */ 1454 int 1455 port_flow_isolate(portid_t port_id, int set) 1456 { 1457 struct rte_flow_error error; 1458 1459 /* Poisoning to make sure PMDs update it in case of error. */ 1460 memset(&error, 0x66, sizeof(error)); 1461 if (rte_flow_isolate(port_id, set, &error)) 1462 return port_flow_complain(&error); 1463 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1464 port_id, 1465 set ? "now restricted" : "not restricted anymore"); 1466 return 0; 1467 } 1468 1469 /* 1470 * RX/TX ring descriptors display functions. 
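 *
 * Descriptors are read directly from the ring memzone, looked up by the
 * "<driver>_<ring>_<port>_<queue>" name built in ring_dma_zone_lookup();
 * e.g. (illustrative) "net_ixgbe_rx_ring_0_0" for RX queue 0 of port 0
 * bound to the ixgbe PMD.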
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1597 ring_rxd_display_dword(ring[desc_id].lo_dword); 1598 ring[desc_id].hi_dword.dword = 1599 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1600 ring_rxd_display_dword(ring[desc_id].hi_dword); 1601 } 1602 1603 static void 1604 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1605 { 1606 struct igb_ring_desc_16_bytes *ring; 1607 struct igb_ring_desc_16_bytes txd; 1608 1609 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1610 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1611 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1612 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1613 (unsigned)txd.lo_dword.words.lo, 1614 (unsigned)txd.lo_dword.words.hi, 1615 (unsigned)txd.hi_dword.words.lo, 1616 (unsigned)txd.hi_dword.words.hi); 1617 } 1618 1619 void 1620 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1621 { 1622 const struct rte_memzone *rx_mz; 1623 1624 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1625 return; 1626 if (rx_queue_id_is_invalid(rxq_id)) 1627 return; 1628 if (rx_desc_id_is_invalid(rxd_id)) 1629 return; 1630 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1631 if (rx_mz == NULL) 1632 return; 1633 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1634 } 1635 1636 void 1637 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1638 { 1639 const struct rte_memzone *tx_mz; 1640 1641 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1642 return; 1643 if (tx_queue_id_is_invalid(txq_id)) 1644 return; 1645 if (tx_desc_id_is_invalid(txd_id)) 1646 return; 1647 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1648 if (tx_mz == NULL) 1649 return; 1650 ring_tx_descriptor_display(tx_mz, txd_id); 1651 } 1652 1653 void 1654 fwd_lcores_config_display(void) 1655 { 1656 lcoreid_t lc_id; 1657 1658 printf("List of forwarding lcores:"); 1659 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1660 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1661 printf("\n"); 1662 } 1663 void 1664 rxtx_config_display(void) 1665 { 1666 portid_t pid; 1667 1668 printf(" %s packet forwarding%s packets/burst=%d\n", 1669 cur_fwd_eng->fwd_mode_name, 1670 retry_enabled == 0 ? "" : " with retry", 1671 nb_pkt_per_burst); 1672 1673 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1674 printf(" packet len=%u - nb packet segments=%d\n", 1675 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1676 1677 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1678 nb_fwd_lcores, nb_fwd_ports); 1679 1680 RTE_ETH_FOREACH_DEV(pid) { 1681 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf; 1682 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf; 1683 1684 printf(" port %d:\n", (unsigned int)pid); 1685 printf(" CRC stripping %s\n", 1686 (ports[pid].dev_conf.rxmode.offloads & 1687 DEV_RX_OFFLOAD_CRC_STRIP) ? 
1688 "enabled" : "disabled"); 1689 printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", 1690 nb_rxq, nb_rxd, rx_conf->rx_free_thresh); 1691 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1692 " wthresh=%d\n", 1693 rx_conf->rx_thresh.pthresh, 1694 rx_conf->rx_thresh.hthresh, 1695 rx_conf->rx_thresh.wthresh); 1696 printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", 1697 nb_txq, nb_txd, tx_conf->tx_free_thresh); 1698 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1699 " wthresh=%d\n", 1700 tx_conf->tx_thresh.pthresh, 1701 tx_conf->tx_thresh.hthresh, 1702 tx_conf->tx_thresh.wthresh); 1703 printf(" TX RS bit threshold=%d - TXQ offloads=0x%"PRIx64"\n", 1704 tx_conf->tx_rs_thresh, tx_conf->offloads); 1705 } 1706 } 1707 1708 void 1709 port_rss_reta_info(portid_t port_id, 1710 struct rte_eth_rss_reta_entry64 *reta_conf, 1711 uint16_t nb_entries) 1712 { 1713 uint16_t i, idx, shift; 1714 int ret; 1715 1716 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1717 return; 1718 1719 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1720 if (ret != 0) { 1721 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1722 return; 1723 } 1724 1725 for (i = 0; i < nb_entries; i++) { 1726 idx = i / RTE_RETA_GROUP_SIZE; 1727 shift = i % RTE_RETA_GROUP_SIZE; 1728 if (!(reta_conf[idx].mask & (1ULL << shift))) 1729 continue; 1730 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1731 i, reta_conf[idx].reta[shift]); 1732 } 1733 } 1734 1735 /* 1736 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1737 * key of the port. 1738 */ 1739 void 1740 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1741 { 1742 struct rte_eth_rss_conf rss_conf; 1743 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1744 uint64_t rss_hf; 1745 uint8_t i; 1746 int diag; 1747 struct rte_eth_dev_info dev_info; 1748 uint8_t hash_key_size; 1749 1750 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1751 return; 1752 1753 memset(&dev_info, 0, sizeof(dev_info)); 1754 rte_eth_dev_info_get(port_id, &dev_info); 1755 if (dev_info.hash_key_size > 0 && 1756 dev_info.hash_key_size <= sizeof(rss_key)) 1757 hash_key_size = dev_info.hash_key_size; 1758 else { 1759 printf("dev_info did not provide a valid hash key size\n"); 1760 return; 1761 } 1762 1763 rss_conf.rss_hf = 0; 1764 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1765 if (!strcmp(rss_info, rss_type_table[i].str)) 1766 rss_conf.rss_hf = rss_type_table[i].rss_type; 1767 } 1768 1769 /* Get RSS hash key if asked to display it */ 1770 rss_conf.rss_key = (show_rss_key) ? 
rss_key : NULL; 1771 rss_conf.rss_key_len = hash_key_size; 1772 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1773 if (diag != 0) { 1774 switch (diag) { 1775 case -ENODEV: 1776 printf("port index %d invalid\n", port_id); 1777 break; 1778 case -ENOTSUP: 1779 printf("operation not supported by device\n"); 1780 break; 1781 default: 1782 printf("operation failed - diag=%d\n", diag); 1783 break; 1784 } 1785 return; 1786 } 1787 rss_hf = rss_conf.rss_hf; 1788 if (rss_hf == 0) { 1789 printf("RSS disabled\n"); 1790 return; 1791 } 1792 printf("RSS functions:\n "); 1793 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1794 if (rss_hf & rss_type_table[i].rss_type) 1795 printf("%s ", rss_type_table[i].str); 1796 } 1797 printf("\n"); 1798 if (!show_rss_key) 1799 return; 1800 printf("RSS key:\n"); 1801 for (i = 0; i < hash_key_size; i++) 1802 printf("%02X", rss_key[i]); 1803 printf("\n"); 1804 } 1805 1806 void 1807 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1808 uint hash_key_len) 1809 { 1810 struct rte_eth_rss_conf rss_conf; 1811 int diag; 1812 unsigned int i; 1813 1814 rss_conf.rss_key = NULL; 1815 rss_conf.rss_key_len = hash_key_len; 1816 rss_conf.rss_hf = 0; 1817 for (i = 0; i < RTE_DIM(rss_type_table); i++) { 1818 if (!strcmp(rss_type_table[i].str, rss_type)) 1819 rss_conf.rss_hf = rss_type_table[i].rss_type; 1820 } 1821 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1822 if (diag == 0) { 1823 rss_conf.rss_key = hash_key; 1824 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1825 } 1826 if (diag == 0) 1827 return; 1828 1829 switch (diag) { 1830 case -ENODEV: 1831 printf("port index %d invalid\n", port_id); 1832 break; 1833 case -ENOTSUP: 1834 printf("operation not supported by device\n"); 1835 break; 1836 default: 1837 printf("operation failed - diag=%d\n", diag); 1838 break; 1839 } 1840 } 1841 1842 /* 1843 * Setup forwarding configuration for each logical core. 1844 */ 1845 static void 1846 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1847 { 1848 streamid_t nb_fs_per_lcore; 1849 streamid_t nb_fs; 1850 streamid_t sm_id; 1851 lcoreid_t nb_extra; 1852 lcoreid_t nb_fc; 1853 lcoreid_t nb_lc; 1854 lcoreid_t lc_id; 1855 1856 nb_fs = cfg->nb_fwd_streams; 1857 nb_fc = cfg->nb_fwd_lcores; 1858 if (nb_fs <= nb_fc) { 1859 nb_fs_per_lcore = 1; 1860 nb_extra = 0; 1861 } else { 1862 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1863 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1864 } 1865 1866 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1867 sm_id = 0; 1868 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1869 fwd_lcores[lc_id]->stream_idx = sm_id; 1870 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1871 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1872 } 1873 1874 /* 1875 * Assign extra remaining streams, if any. 1876 */ 1877 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1878 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1879 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1880 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1881 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1882 } 1883 } 1884 1885 static void 1886 simple_fwd_config_setup(void) 1887 { 1888 portid_t i; 1889 portid_t j; 1890 portid_t inc = 2; 1891 1892 if (port_topology == PORT_TOPOLOGY_CHAINED || 1893 port_topology == PORT_TOPOLOGY_LOOP) { 1894 inc = 1; 1895 } else if (nb_fwd_ports % 2) { 1896 printf("\nWarning! Cannot handle an odd number of ports " 1897 "with the current port topology. 
Configuration "
		       "must be changed to have an even number of ports, "
		       "or relaunch application with "
		       "--port-topology=chained\n\n");
	}

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower than or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
		if (port_topology != PORT_TOPOLOGY_LOOP)
			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
		else
			j = i;
		fwd_streams[i]->rx_port = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue = 0;
		fwd_streams[i]->tx_port = fwd_ports_ids[j];
		fwd_streams[i]->tx_queue = 0;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;

		if (port_topology == PORT_TOPOLOGY_PAIRED) {
			fwd_streams[j]->rx_port = fwd_ports_ids[j];
			fwd_streams[j]->rx_queue = 0;
			fwd_streams[j]->tx_port = fwd_ports_ids[i];
			fwd_streams[j]->tx_queue = 0;
			fwd_streams[j]->peer_addr = fwd_streams[j]->tx_port;
			fwd_streams[j]->retry_enabled = retry_enabled;
		}
	}
}

/**
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of an RX queue to poll on an RX port for input
 * packets, associated with a TX queue of a TX port on which the forwarded
 * packets are sent.
1947 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" 1948 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two 1949 * following rules: 1950 * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd 1951 * - TxQl = RxQj 1952 */ 1953 static void 1954 rss_fwd_config_setup(void) 1955 { 1956 portid_t rxp; 1957 portid_t txp; 1958 queueid_t rxq; 1959 queueid_t nb_q; 1960 streamid_t sm_id; 1961 1962 nb_q = nb_rxq; 1963 if (nb_q > nb_txq) 1964 nb_q = nb_txq; 1965 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1966 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1967 cur_fwd_config.nb_fwd_streams = 1968 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1969 1970 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1971 cur_fwd_config.nb_fwd_lcores = 1972 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1973 1974 /* reinitialize forwarding streams */ 1975 init_fwd_streams(); 1976 1977 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1978 rxp = 0; rxq = 0; 1979 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1980 struct fwd_stream *fs; 1981 1982 fs = fwd_streams[sm_id]; 1983 1984 if ((rxp & 0x1) == 0) 1985 txp = (portid_t) (rxp + 1); 1986 else 1987 txp = (portid_t) (rxp - 1); 1988 /* 1989 * if we are in loopback mode, simply send the packets back out 1990 * through the ingress port 1991 */ 1992 if (port_topology == PORT_TOPOLOGY_LOOP || 1993 txp >= cur_fwd_config.nb_fwd_ports) 1994 txp = rxp; 1995 1996 fs->rx_port = fwd_ports_ids[rxp]; 1997 fs->rx_queue = rxq; 1998 fs->tx_port = fwd_ports_ids[txp]; 1999 fs->tx_queue = rxq; 2000 fs->peer_addr = fs->tx_port; 2001 fs->retry_enabled = retry_enabled; 2002 rxq = (queueid_t) (rxq + 1); 2003 if (rxq < nb_q) 2004 continue; 2005 /* 2006 * rxq == nb_q 2007 * Restart from RX queue 0 on next RX port 2008 */ 2009 rxq = 0; 2010 rxp++; 2011 } 2012 } 2013 2014 /** 2015 * For the DCB forwarding test, each core is assigned to one traffic class. 2016 * 2017 * Each core is assigned a group of streams, each stream being composed of 2018 * an RX queue to poll on an RX port for input packets, associated with 2019 * a TX queue of a TX port on which forwarded packets are sent. All RX and 2020 * TX queues of a stream are mapped to the same traffic class.
2021 * If VMDQ and DCB co-exist, each traffic class of the different pools shares 2022 * the same core. 2023 */ 2024 static void 2025 dcb_fwd_config_setup(void) 2026 { 2027 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2028 portid_t txp, rxp = 0; 2029 queueid_t txq, rxq = 0; 2030 lcoreid_t lc_id; 2031 uint16_t nb_rx_queue, nb_tx_queue; 2032 uint16_t i, j, k, sm_id = 0; 2033 uint8_t tc = 0; 2034 2035 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2036 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2037 cur_fwd_config.nb_fwd_streams = 2038 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2039 2040 /* reinitialize forwarding streams */ 2041 init_fwd_streams(); 2042 sm_id = 0; 2043 txp = 1; 2044 /* get the dcb info on the first RX and TX ports */ 2045 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2046 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2047 2048 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2049 fwd_lcores[lc_id]->stream_nb = 0; 2050 fwd_lcores[lc_id]->stream_idx = sm_id; 2051 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2052 /* if nb_queue is zero, this TC is 2053 * not enabled on the pool 2054 */ 2055 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2056 break; 2057 k = fwd_lcores[lc_id]->stream_nb + 2058 fwd_lcores[lc_id]->stream_idx; 2059 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2060 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2061 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2062 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2063 for (j = 0; j < nb_rx_queue; j++) { 2064 struct fwd_stream *fs; 2065 2066 fs = fwd_streams[k + j]; 2067 fs->rx_port = fwd_ports_ids[rxp]; 2068 fs->rx_queue = rxq + j; 2069 fs->tx_port = fwd_ports_ids[txp]; 2070 fs->tx_queue = txq + j % nb_tx_queue; 2071 fs->peer_addr = fs->tx_port; 2072 fs->retry_enabled = retry_enabled; 2073 } 2074 fwd_lcores[lc_id]->stream_nb += 2075 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2076 } 2077 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2078 2079 tc++; 2080 if (tc < rxp_dcb_info.nb_tcs) 2081 continue; 2082 /* Restart from TC 0 on next RX port */ 2083 tc = 0; 2084 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2085 rxp = (portid_t) 2086 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2087 else 2088 rxp++; 2089 if (rxp >= nb_fwd_ports) 2090 return; 2091 /* get the dcb information on next RX and TX ports */ 2092 if ((rxp & 0x1) == 0) 2093 txp = (portid_t) (rxp + 1); 2094 else 2095 txp = (portid_t) (rxp - 1); 2096 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2097 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2098 } 2099 } 2100 2101 static void 2102 icmp_echo_config_setup(void) 2103 { 2104 portid_t rxp; 2105 queueid_t rxq; 2106 lcoreid_t lc_id; 2107 uint16_t sm_id; 2108 2109 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2110 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2111 (nb_txq * nb_fwd_ports); 2112 else 2113 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2114 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2115 cur_fwd_config.nb_fwd_streams = 2116 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2117 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2118 cur_fwd_config.nb_fwd_lcores = 2119 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2120 if (verbose_level > 0) { 2121 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2122 __FUNCTION__, 2123 cur_fwd_config.nb_fwd_lcores, 2124 cur_fwd_config.nb_fwd_ports, 2125
cur_fwd_config.nb_fwd_streams); 2126 } 2127 2128 /* reinitialize forwarding streams */ 2129 init_fwd_streams(); 2130 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2131 rxp = 0; rxq = 0; 2132 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2133 if (verbose_level > 0) 2134 printf(" core=%d: \n", lc_id); 2135 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2136 struct fwd_stream *fs; 2137 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2138 fs->rx_port = fwd_ports_ids[rxp]; 2139 fs->rx_queue = rxq; 2140 fs->tx_port = fs->rx_port; 2141 fs->tx_queue = rxq; 2142 fs->peer_addr = fs->tx_port; 2143 fs->retry_enabled = retry_enabled; 2144 if (verbose_level > 0) 2145 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2146 sm_id, fs->rx_port, fs->rx_queue, 2147 fs->tx_queue); 2148 rxq = (queueid_t) (rxq + 1); 2149 if (rxq == nb_rxq) { 2150 rxq = 0; 2151 rxp = (portid_t) (rxp + 1); 2152 } 2153 } 2154 } 2155 } 2156 2157 void 2158 fwd_config_setup(void) 2159 { 2160 cur_fwd_config.fwd_eng = cur_fwd_eng; 2161 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2162 icmp_echo_config_setup(); 2163 return; 2164 } 2165 if ((nb_rxq > 1) && (nb_txq > 1)){ 2166 if (dcb_config) 2167 dcb_fwd_config_setup(); 2168 else 2169 rss_fwd_config_setup(); 2170 } 2171 else 2172 simple_fwd_config_setup(); 2173 } 2174 2175 void 2176 pkt_fwd_config_display(struct fwd_config *cfg) 2177 { 2178 struct fwd_stream *fs; 2179 lcoreid_t lc_id; 2180 streamid_t sm_id; 2181 2182 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2183 "NUMA support %s, MP over anonymous pages %s\n", 2184 cfg->fwd_eng->fwd_mode_name, 2185 retry_enabled == 0 ? "" : " with retry", 2186 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2187 numa_support == 1 ? "enabled" : "disabled", 2188 mp_anon != 0 ? "enabled" : "disabled"); 2189 2190 if (retry_enabled) 2191 printf("TX retry num: %u, delay between TX retries: %uus\n", 2192 burst_tx_retry_num, burst_tx_delay_time); 2193 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2194 printf("Logical Core %u (socket %u) forwards packets on " 2195 "%d streams:", 2196 fwd_lcores_cpuids[lc_id], 2197 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2198 fwd_lcores[lc_id]->stream_nb); 2199 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2200 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2201 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2202 "P=%d/Q=%d (socket %u) ", 2203 fs->rx_port, fs->rx_queue, 2204 ports[fs->rx_port].socket_id, 2205 fs->tx_port, fs->tx_queue, 2206 ports[fs->tx_port].socket_id); 2207 print_ethaddr("peer=", 2208 &peer_eth_addrs[fs->peer_addr]); 2209 } 2210 printf("\n"); 2211 } 2212 printf("\n"); 2213 } 2214 2215 void 2216 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2217 { 2218 uint8_t c, new_peer_addr[6]; 2219 if (!rte_eth_dev_is_valid_port(port_id)) { 2220 printf("Error: Invalid port number %i\n", port_id); 2221 return; 2222 } 2223 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2224 sizeof(new_peer_addr)) < 0) { 2225 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2226 return; 2227 } 2228 for (c = 0; c < 6; c++) 2229 peer_eth_addrs[port_id].addr_bytes[c] = 2230 new_peer_addr[c]; 2231 } 2232 2233 int 2234 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2235 { 2236 unsigned int i; 2237 unsigned int lcore_cpuid; 2238 int record_now; 2239 2240 record_now = 0; 2241 again: 2242 for (i = 0; i < nb_lc; i++) { 2243 lcore_cpuid = lcorelist[i]; 2244 if (! 
rte_lcore_is_enabled(lcore_cpuid)) { 2245 printf("lcore %u not enabled\n", lcore_cpuid); 2246 return -1; 2247 } 2248 if (lcore_cpuid == rte_get_master_lcore()) { 2249 printf("lcore %u cannot be masked on for running " 2250 "packet forwarding, which is the master lcore " 2251 "and reserved for command line parsing only\n", 2252 lcore_cpuid); 2253 return -1; 2254 } 2255 if (record_now) 2256 fwd_lcores_cpuids[i] = lcore_cpuid; 2257 } 2258 if (record_now == 0) { 2259 record_now = 1; 2260 goto again; 2261 } 2262 nb_cfg_lcores = (lcoreid_t) nb_lc; 2263 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2264 printf("previous number of forwarding cores %u - changed to " 2265 "number of configured cores %u\n", 2266 (unsigned int) nb_fwd_lcores, nb_lc); 2267 nb_fwd_lcores = (lcoreid_t) nb_lc; 2268 } 2269 2270 return 0; 2271 } 2272 2273 int 2274 set_fwd_lcores_mask(uint64_t lcoremask) 2275 { 2276 unsigned int lcorelist[64]; 2277 unsigned int nb_lc; 2278 unsigned int i; 2279 2280 if (lcoremask == 0) { 2281 printf("Invalid NULL mask of cores\n"); 2282 return -1; 2283 } 2284 nb_lc = 0; 2285 for (i = 0; i < 64; i++) { 2286 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2287 continue; 2288 lcorelist[nb_lc++] = i; 2289 } 2290 return set_fwd_lcores_list(lcorelist, nb_lc); 2291 } 2292 2293 void 2294 set_fwd_lcores_number(uint16_t nb_lc) 2295 { 2296 if (nb_lc > nb_cfg_lcores) { 2297 printf("nb fwd cores %u > %u (max. number of configured " 2298 "lcores) - ignored\n", 2299 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2300 return; 2301 } 2302 nb_fwd_lcores = (lcoreid_t) nb_lc; 2303 printf("Number of forwarding cores set to %u\n", 2304 (unsigned int) nb_fwd_lcores); 2305 } 2306 2307 void 2308 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2309 { 2310 unsigned int i; 2311 portid_t port_id; 2312 int record_now; 2313 2314 record_now = 0; 2315 again: 2316 for (i = 0; i < nb_pt; i++) { 2317 port_id = (portid_t) portlist[i]; 2318 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2319 return; 2320 if (record_now) 2321 fwd_ports_ids[i] = port_id; 2322 } 2323 if (record_now == 0) { 2324 record_now = 1; 2325 goto again; 2326 } 2327 nb_cfg_ports = (portid_t) nb_pt; 2328 if (nb_fwd_ports != (portid_t) nb_pt) { 2329 printf("previous number of forwarding ports %u - changed to " 2330 "number of configured ports %u\n", 2331 (unsigned int) nb_fwd_ports, nb_pt); 2332 nb_fwd_ports = (portid_t) nb_pt; 2333 } 2334 } 2335 2336 void 2337 set_fwd_ports_mask(uint64_t portmask) 2338 { 2339 unsigned int portlist[64]; 2340 unsigned int nb_pt; 2341 unsigned int i; 2342 2343 if (portmask == 0) { 2344 printf("Invalid NULL mask of ports\n"); 2345 return; 2346 } 2347 nb_pt = 0; 2348 RTE_ETH_FOREACH_DEV(i) { 2349 if (! 
((uint64_t)(1ULL << i) & portmask)) 2350 continue; 2351 portlist[nb_pt++] = i; 2352 } 2353 set_fwd_ports_list(portlist, nb_pt); 2354 } 2355 2356 void 2357 set_fwd_ports_number(uint16_t nb_pt) 2358 { 2359 if (nb_pt > nb_cfg_ports) { 2360 printf("nb fwd ports %u > %u (number of configured " 2361 "ports) - ignored\n", 2362 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2363 return; 2364 } 2365 nb_fwd_ports = (portid_t) nb_pt; 2366 printf("Number of forwarding ports set to %u\n", 2367 (unsigned int) nb_fwd_ports); 2368 } 2369 2370 int 2371 port_is_forwarding(portid_t port_id) 2372 { 2373 unsigned int i; 2374 2375 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2376 return -1; 2377 2378 for (i = 0; i < nb_fwd_ports; i++) { 2379 if (fwd_ports_ids[i] == port_id) 2380 return 1; 2381 } 2382 2383 return 0; 2384 } 2385 2386 void 2387 set_nb_pkt_per_burst(uint16_t nb) 2388 { 2389 if (nb > MAX_PKT_BURST) { 2390 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2391 " ignored\n", 2392 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2393 return; 2394 } 2395 nb_pkt_per_burst = nb; 2396 printf("Number of packets per burst set to %u\n", 2397 (unsigned int) nb_pkt_per_burst); 2398 } 2399 2400 static const char * 2401 tx_split_get_name(enum tx_pkt_split split) 2402 { 2403 uint32_t i; 2404 2405 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2406 if (tx_split_name[i].split == split) 2407 return tx_split_name[i].name; 2408 } 2409 return NULL; 2410 } 2411 2412 void 2413 set_tx_pkt_split(const char *name) 2414 { 2415 uint32_t i; 2416 2417 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2418 if (strcmp(tx_split_name[i].name, name) == 0) { 2419 tx_pkt_split = tx_split_name[i].split; 2420 return; 2421 } 2422 } 2423 printf("unknown value: \"%s\"\n", name); 2424 } 2425 2426 void 2427 show_tx_pkt_segments(void) 2428 { 2429 uint32_t i, n; 2430 const char *split; 2431 2432 n = tx_pkt_nb_segs; 2433 split = tx_split_get_name(tx_pkt_split); 2434 2435 printf("Number of segments: %u\n", n); 2436 printf("Segment sizes: "); 2437 for (i = 0; i != n - 1; i++) 2438 printf("%hu,", tx_pkt_seg_lengths[i]); 2439 printf("%hu\n", tx_pkt_seg_lengths[i]); 2440 printf("Split packet: %s\n", split); 2441 } 2442 2443 void 2444 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2445 { 2446 uint16_t tx_pkt_len; 2447 unsigned i; 2448 2449 if (nb_segs >= (unsigned) nb_txd) { 2450 printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n", 2451 nb_segs, (unsigned int) nb_txd); 2452 return; 2453 } 2454 2455 /* 2456 * Check that each segment length does not exceed 2457 * the mbuf data size. 2458 * Check also that the total packet length is greater than or equal to the 2459 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
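 * With a 14-byte Ethernet header, this minimum works out to 14 + 20 + 8 = 42 bytes.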
2460 */ 2461 tx_pkt_len = 0; 2462 for (i = 0; i < nb_segs; i++) { 2463 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2464 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2465 i, seg_lengths[i], (unsigned) mbuf_data_size); 2466 return; 2467 } 2468 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2469 } 2470 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2471 printf("total packet length=%u < %d - give up\n", 2472 (unsigned) tx_pkt_len, 2473 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2474 return; 2475 } 2476 2477 for (i = 0; i < nb_segs; i++) 2478 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2479 2480 tx_pkt_length = tx_pkt_len; 2481 tx_pkt_nb_segs = (uint8_t) nb_segs; 2482 } 2483 2484 void 2485 setup_gro(const char *onoff, portid_t port_id) 2486 { 2487 if (!rte_eth_dev_is_valid_port(port_id)) { 2488 printf("invalid port id %u\n", port_id); 2489 return; 2490 } 2491 if (test_done == 0) { 2492 printf("Before enabling/disabling GRO," 2493 " please stop forwarding first\n"); 2494 return; 2495 } 2496 if (strcmp(onoff, "on") == 0) { 2497 if (gro_ports[port_id].enable != 0) { 2498 printf("Port %u has already enabled GRO. Please" 2499 " disable GRO first\n", port_id); 2500 return; 2501 } 2502 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2503 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2504 gro_ports[port_id].param.max_flow_num = 2505 GRO_DEFAULT_FLOW_NUM; 2506 gro_ports[port_id].param.max_item_per_flow = 2507 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2508 } 2509 gro_ports[port_id].enable = 1; 2510 } else { 2511 if (gro_ports[port_id].enable == 0) { 2512 printf("GRO is already disabled on port %u\n", port_id); 2513 return; 2514 } 2515 gro_ports[port_id].enable = 0; 2516 } 2517 } 2518 2519 void 2520 setup_gro_flush_cycles(uint8_t cycles) 2521 { 2522 if (test_done == 0) { 2523 printf("Before changing the flush interval for GRO," 2524 " please stop forwarding first.\n"); 2525 return; 2526 } 2527 2528 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2529 GRO_DEFAULT_FLUSH_CYCLES) { 2530 printf("The flushing cycle must be in the range" 2531 " of 1 to %u. 
Revert to the default" 2532 " value %u.\n", 2533 GRO_MAX_FLUSH_CYCLES, 2534 GRO_DEFAULT_FLUSH_CYCLES); 2535 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2536 } 2537 2538 gro_flush_cycles = cycles; 2539 } 2540 2541 void 2542 show_gro(portid_t port_id) 2543 { 2544 struct rte_gro_param *param; 2545 uint32_t max_pkts_num; 2546 2547 param = &gro_ports[port_id].param; 2548 2549 if (!rte_eth_dev_is_valid_port(port_id)) { 2550 printf("Invalid port id %u.\n", port_id); 2551 return; 2552 } 2553 if (gro_ports[port_id].enable) { 2554 printf("GRO type: TCP/IPv4\n"); 2555 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2556 max_pkts_num = param->max_flow_num * 2557 param->max_item_per_flow; 2558 } else 2559 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2560 printf("Max number of packets to perform GRO: %u\n", 2561 max_pkts_num); 2562 printf("Flushing cycles: %u\n", gro_flush_cycles); 2563 } else 2564 printf("Port %u doesn't enable GRO.\n", port_id); 2565 } 2566 2567 void 2568 setup_gso(const char *mode, portid_t port_id) 2569 { 2570 if (!rte_eth_dev_is_valid_port(port_id)) { 2571 printf("invalid port id %u\n", port_id); 2572 return; 2573 } 2574 if (strcmp(mode, "on") == 0) { 2575 if (test_done == 0) { 2576 printf("before enabling GSO," 2577 " please stop forwarding first\n"); 2578 return; 2579 } 2580 gso_ports[port_id].enable = 1; 2581 } else if (strcmp(mode, "off") == 0) { 2582 if (test_done == 0) { 2583 printf("before disabling GSO," 2584 " please stop forwarding first\n"); 2585 return; 2586 } 2587 gso_ports[port_id].enable = 0; 2588 } 2589 } 2590 2591 char* 2592 list_pkt_forwarding_modes(void) 2593 { 2594 static char fwd_modes[128] = ""; 2595 const char *separator = "|"; 2596 struct fwd_engine *fwd_eng; 2597 unsigned i = 0; 2598 2599 if (strlen (fwd_modes) == 0) { 2600 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2601 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2602 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2603 strncat(fwd_modes, separator, 2604 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2605 } 2606 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2607 } 2608 2609 return fwd_modes; 2610 } 2611 2612 char* 2613 list_pkt_forwarding_retry_modes(void) 2614 { 2615 static char fwd_modes[128] = ""; 2616 const char *separator = "|"; 2617 struct fwd_engine *fwd_eng; 2618 unsigned i = 0; 2619 2620 if (strlen(fwd_modes) == 0) { 2621 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2622 if (fwd_eng == &rx_only_engine) 2623 continue; 2624 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2625 sizeof(fwd_modes) - 2626 strlen(fwd_modes) - 1); 2627 strncat(fwd_modes, separator, 2628 sizeof(fwd_modes) - 2629 strlen(fwd_modes) - 1); 2630 } 2631 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2632 } 2633 2634 return fwd_modes; 2635 } 2636 2637 void 2638 set_pkt_forwarding_mode(const char *fwd_mode_name) 2639 { 2640 struct fwd_engine *fwd_eng; 2641 unsigned i; 2642 2643 i = 0; 2644 while ((fwd_eng = fwd_engines[i]) != NULL) { 2645 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2646 printf("Set %s packet forwarding mode%s\n", 2647 fwd_mode_name, 2648 retry_enabled == 0 ? 
"" : " with retry"); 2649 cur_fwd_eng = fwd_eng; 2650 return; 2651 } 2652 i++; 2653 } 2654 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2655 } 2656 2657 void 2658 set_verbose_level(uint16_t vb_level) 2659 { 2660 printf("Change verbose level from %u to %u\n", 2661 (unsigned int) verbose_level, (unsigned int) vb_level); 2662 verbose_level = vb_level; 2663 } 2664 2665 void 2666 vlan_extend_set(portid_t port_id, int on) 2667 { 2668 int diag; 2669 int vlan_offload; 2670 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2671 2672 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2673 return; 2674 2675 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2676 2677 if (on) { 2678 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2679 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2680 } else { 2681 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2682 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2683 } 2684 2685 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2686 if (diag < 0) 2687 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2688 "diag=%d\n", port_id, on, diag); 2689 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2690 } 2691 2692 void 2693 rx_vlan_strip_set(portid_t port_id, int on) 2694 { 2695 int diag; 2696 int vlan_offload; 2697 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2698 2699 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2700 return; 2701 2702 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2703 2704 if (on) { 2705 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2706 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2707 } else { 2708 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2709 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2710 } 2711 2712 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2713 if (diag < 0) 2714 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2715 "diag=%d\n", port_id, on, diag); 2716 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2717 } 2718 2719 void 2720 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2721 { 2722 int diag; 2723 2724 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2725 return; 2726 2727 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2728 if (diag < 0) 2729 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2730 "diag=%d\n", port_id, queue_id, on, diag); 2731 } 2732 2733 void 2734 rx_vlan_filter_set(portid_t port_id, int on) 2735 { 2736 int diag; 2737 int vlan_offload; 2738 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2739 2740 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2741 return; 2742 2743 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2744 2745 if (on) { 2746 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2747 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2748 } else { 2749 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2750 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2751 } 2752 2753 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2754 if (diag < 0) 2755 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2756 "diag=%d\n", port_id, on, diag); 2757 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2758 } 2759 2760 int 2761 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2762 { 2763 int diag; 2764 2765 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2766 return 1; 2767 if (vlan_id_is_invalid(vlan_id)) 2768 return 1; 2769 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2770 if (diag == 0) 2771 return 0; 
2772 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2773 "diag=%d\n", 2774 port_id, vlan_id, on, diag); 2775 return -1; 2776 } 2777 2778 void 2779 rx_vlan_all_filter_set(portid_t port_id, int on) 2780 { 2781 uint16_t vlan_id; 2782 2783 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2784 return; 2785 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2786 if (rx_vft_set(port_id, vlan_id, on)) 2787 break; 2788 } 2789 } 2790 2791 void 2792 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2793 { 2794 int diag; 2795 2796 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2797 return; 2798 2799 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2800 if (diag == 0) 2801 return; 2802 2803 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2804 "diag=%d\n", 2805 port_id, vlan_type, tp_id, diag); 2806 } 2807 2808 void 2809 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2810 { 2811 int vlan_offload; 2812 struct rte_eth_dev_info dev_info; 2813 2814 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2815 return; 2816 if (vlan_id_is_invalid(vlan_id)) 2817 return; 2818 2819 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2820 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2821 printf("Error, as QinQ has been enabled.\n"); 2822 return; 2823 } 2824 rte_eth_dev_info_get(port_id, &dev_info); 2825 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 2826 printf("Error: vlan insert is not supported by port %d\n", 2827 port_id); 2828 return; 2829 } 2830 2831 tx_vlan_reset(port_id); 2832 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 2833 ports[port_id].tx_vlan_id = vlan_id; 2834 } 2835 2836 void 2837 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2838 { 2839 int vlan_offload; 2840 struct rte_eth_dev_info dev_info; 2841 2842 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2843 return; 2844 if (vlan_id_is_invalid(vlan_id)) 2845 return; 2846 if (vlan_id_is_invalid(vlan_id_outer)) 2847 return; 2848 2849 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2850 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2851 printf("Error, as QinQ hasn't been enabled.\n"); 2852 return; 2853 } 2854 rte_eth_dev_info_get(port_id, &dev_info); 2855 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 2856 printf("Error: qinq insert not supported by port %d\n", 2857 port_id); 2858 return; 2859 } 2860 2861 tx_vlan_reset(port_id); 2862 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; 2863 ports[port_id].tx_vlan_id = vlan_id; 2864 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2865 } 2866 2867 void 2868 tx_vlan_reset(portid_t port_id) 2869 { 2870 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2871 return; 2872 ports[port_id].dev_conf.txmode.offloads &= 2873 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 2874 DEV_TX_OFFLOAD_QINQ_INSERT); 2875 ports[port_id].tx_vlan_id = 0; 2876 ports[port_id].tx_vlan_id_outer = 0; 2877 } 2878 2879 void 2880 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2881 { 2882 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2883 return; 2884 2885 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2886 } 2887 2888 void 2889 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2890 { 2891 uint16_t i; 2892 uint8_t existing_mapping_found = 0; 2893 2894 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2895 return; 2896 2897 if (is_rx ? 
(rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 2898 return; 2899 2900 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 2901 printf("map_value not in required range 0..%d\n", 2902 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 2903 return; 2904 } 2905 2906 if (!is_rx) { /*then tx*/ 2907 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2908 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2909 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 2910 tx_queue_stats_mappings[i].stats_counter_id = map_value; 2911 existing_mapping_found = 1; 2912 break; 2913 } 2914 } 2915 if (!existing_mapping_found) { /* A new additional mapping... */ 2916 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 2917 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 2918 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 2919 nb_tx_queue_stats_mappings++; 2920 } 2921 } 2922 else { /*rx*/ 2923 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2924 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2925 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 2926 rx_queue_stats_mappings[i].stats_counter_id = map_value; 2927 existing_mapping_found = 1; 2928 break; 2929 } 2930 } 2931 if (!existing_mapping_found) { /* A new additional mapping... */ 2932 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 2933 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 2934 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 2935 nb_rx_queue_stats_mappings++; 2936 } 2937 } 2938 } 2939 2940 void 2941 set_xstats_hide_zero(uint8_t on_off) 2942 { 2943 xstats_hide_zero = on_off; 2944 } 2945 2946 static inline void 2947 print_fdir_mask(struct rte_eth_fdir_masks *mask) 2948 { 2949 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 2950 2951 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2952 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 2953 " tunnel_id: 0x%08x", 2954 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 2955 rte_be_to_cpu_32(mask->tunnel_id_mask)); 2956 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2957 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 2958 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 2959 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 2960 2961 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 2962 rte_be_to_cpu_16(mask->src_port_mask), 2963 rte_be_to_cpu_16(mask->dst_port_mask)); 2964 2965 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2966 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 2967 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 2968 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 2969 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 2970 2971 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 2972 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 2973 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 2974 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 2975 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 2976 } 2977 2978 printf("\n"); 2979 } 2980 2981 static inline void 2982 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 2983 { 2984 struct rte_eth_flex_payload_cfg *cfg; 2985 uint32_t i, j; 2986 2987 for (i = 0; i < flex_conf->nb_payloads; i++) { 2988 cfg = &flex_conf->flex_set[i]; 2989 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 2990 printf("\n RAW: "); 2991 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 2992 printf("\n L2_PAYLOAD: "); 2993 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 2994 printf("\n 
L3_PAYLOAD: "); 2995 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 2996 printf("\n L4_PAYLOAD: "); 2997 else 2998 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 2999 for (j = 0; j < num; j++) 3000 printf(" %-5u", cfg->src_offset[j]); 3001 } 3002 printf("\n"); 3003 } 3004 3005 static char * 3006 flowtype_to_str(uint16_t flow_type) 3007 { 3008 struct flow_type_info { 3009 char str[32]; 3010 uint16_t ftype; 3011 }; 3012 3013 uint8_t i; 3014 static struct flow_type_info flowtype_str_table[] = { 3015 {"raw", RTE_ETH_FLOW_RAW}, 3016 {"ipv4", RTE_ETH_FLOW_IPV4}, 3017 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3018 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3019 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3020 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3021 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3022 {"ipv6", RTE_ETH_FLOW_IPV6}, 3023 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3024 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3025 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3026 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3027 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3028 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3029 {"port", RTE_ETH_FLOW_PORT}, 3030 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3031 {"geneve", RTE_ETH_FLOW_GENEVE}, 3032 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3033 }; 3034 3035 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3036 if (flowtype_str_table[i].ftype == flow_type) 3037 return flowtype_str_table[i].str; 3038 } 3039 3040 return NULL; 3041 } 3042 3043 static inline void 3044 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3045 { 3046 struct rte_eth_fdir_flex_mask *mask; 3047 uint32_t i, j; 3048 char *p; 3049 3050 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3051 mask = &flex_conf->flex_mask[i]; 3052 p = flowtype_to_str(mask->flow_type); 3053 printf("\n %s:\t", p ? 
p : "unknown"); 3054 for (j = 0; j < num; j++) 3055 printf(" %02x", mask->mask[j]); 3056 } 3057 printf("\n"); 3058 } 3059 3060 static inline void 3061 print_fdir_flow_type(uint32_t flow_types_mask) 3062 { 3063 int i; 3064 char *p; 3065 3066 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3067 if (!(flow_types_mask & (1 << i))) 3068 continue; 3069 p = flowtype_to_str(i); 3070 if (p) 3071 printf(" %s", p); 3072 else 3073 printf(" unknown"); 3074 } 3075 printf("\n"); 3076 } 3077 3078 void 3079 fdir_get_infos(portid_t port_id) 3080 { 3081 struct rte_eth_fdir_stats fdir_stat; 3082 struct rte_eth_fdir_info fdir_info; 3083 int ret; 3084 3085 static const char *fdir_stats_border = "########################"; 3086 3087 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3088 return; 3089 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3090 if (ret < 0) { 3091 printf("\n FDIR is not supported on port %-2d\n", 3092 port_id); 3093 return; 3094 } 3095 3096 memset(&fdir_info, 0, sizeof(fdir_info)); 3097 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3098 RTE_ETH_FILTER_INFO, &fdir_info); 3099 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3100 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3101 RTE_ETH_FILTER_STATS, &fdir_stat); 3102 printf("\n %s FDIR infos for port %-2d %s\n", 3103 fdir_stats_border, port_id, fdir_stats_border); 3104 printf(" MODE: "); 3105 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3106 printf(" PERFECT\n"); 3107 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3108 printf(" PERFECT-MAC-VLAN\n"); 3109 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3110 printf(" PERFECT-TUNNEL\n"); 3111 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3112 printf(" SIGNATURE\n"); 3113 else 3114 printf(" DISABLE\n"); 3115 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3116 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3117 printf(" SUPPORTED FLOW TYPE: "); 3118 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3119 } 3120 printf(" FLEX PAYLOAD INFO:\n"); 3121 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3122 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3123 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3124 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3125 fdir_info.flex_payload_unit, 3126 fdir_info.max_flex_payload_segment_num, 3127 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3128 printf(" MASK: "); 3129 print_fdir_mask(&fdir_info.mask); 3130 if (fdir_info.flex_conf.nb_payloads > 0) { 3131 printf(" FLEX PAYLOAD SRC OFFSET:"); 3132 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3133 } 3134 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3135 printf(" FLEX MASK CFG:"); 3136 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3137 } 3138 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3139 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3140 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3141 fdir_info.guarant_spc, fdir_info.best_spc); 3142 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3143 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3144 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3145 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3146 fdir_stat.collision, fdir_stat.free, 3147 fdir_stat.maxhash, fdir_stat.maxlen, 3148 fdir_stat.add, fdir_stat.remove, 3149 fdir_stat.f_add, fdir_stat.f_remove); 3150 printf(" %s############################%s\n", 3151 fdir_stats_border, fdir_stats_border); 
3152 } 3153 3154 void 3155 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3156 { 3157 struct rte_port *port; 3158 struct rte_eth_fdir_flex_conf *flex_conf; 3159 int i, idx = 0; 3160 3161 port = &ports[port_id]; 3162 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3163 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3164 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3165 idx = i; 3166 break; 3167 } 3168 } 3169 if (i >= RTE_ETH_FLOW_MAX) { 3170 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3171 idx = flex_conf->nb_flexmasks; 3172 flex_conf->nb_flexmasks++; 3173 } else { 3174 printf("The flex mask table is full. Can not set flex" 3175 " mask for flow_type(%u).", cfg->flow_type); 3176 return; 3177 } 3178 } 3179 rte_memcpy(&flex_conf->flex_mask[idx], 3180 cfg, 3181 sizeof(struct rte_eth_fdir_flex_mask)); 3182 } 3183 3184 void 3185 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3186 { 3187 struct rte_port *port; 3188 struct rte_eth_fdir_flex_conf *flex_conf; 3189 int i, idx = 0; 3190 3191 port = &ports[port_id]; 3192 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3193 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3194 if (cfg->type == flex_conf->flex_set[i].type) { 3195 idx = i; 3196 break; 3197 } 3198 } 3199 if (i >= RTE_ETH_PAYLOAD_MAX) { 3200 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3201 idx = flex_conf->nb_payloads; 3202 flex_conf->nb_payloads++; 3203 } else { 3204 printf("The flex payload table is full. Can not set" 3205 " flex payload for type(%u).", cfg->type); 3206 return; 3207 } 3208 } 3209 rte_memcpy(&flex_conf->flex_set[idx], 3210 cfg, 3211 sizeof(struct rte_eth_flex_payload_cfg)); 3212 3213 } 3214 3215 void 3216 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3217 { 3218 #ifdef RTE_LIBRTE_IXGBE_PMD 3219 int diag; 3220 3221 if (is_rx) 3222 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3223 else 3224 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3225 3226 if (diag == 0) 3227 return; 3228 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3229 is_rx ? "rx" : "tx", port_id, diag); 3230 return; 3231 #endif 3232 printf("VF %s setting not supported for port %d\n", 3233 is_rx ? 
"Rx" : "Tx", port_id); 3234 RTE_SET_USED(vf); 3235 RTE_SET_USED(on); 3236 } 3237 3238 int 3239 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3240 { 3241 int diag; 3242 struct rte_eth_link link; 3243 3244 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3245 return 1; 3246 rte_eth_link_get_nowait(port_id, &link); 3247 if (rate > link.link_speed) { 3248 printf("Invalid rate value:%u bigger than link speed: %u\n", 3249 rate, link.link_speed); 3250 return 1; 3251 } 3252 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3253 if (diag == 0) 3254 return diag; 3255 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3256 port_id, diag); 3257 return diag; 3258 } 3259 3260 int 3261 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3262 { 3263 int diag = -ENOTSUP; 3264 3265 RTE_SET_USED(vf); 3266 RTE_SET_USED(rate); 3267 RTE_SET_USED(q_msk); 3268 3269 #ifdef RTE_LIBRTE_IXGBE_PMD 3270 if (diag == -ENOTSUP) 3271 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3272 q_msk); 3273 #endif 3274 #ifdef RTE_LIBRTE_BNXT_PMD 3275 if (diag == -ENOTSUP) 3276 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3277 #endif 3278 if (diag == 0) 3279 return diag; 3280 3281 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3282 port_id, diag); 3283 return diag; 3284 } 3285 3286 /* 3287 * Functions to manage the set of filtered Multicast MAC addresses. 3288 * 3289 * A pool of filtered multicast MAC addresses is associated with each port. 3290 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3291 * The address of the pool and the number of valid multicast MAC addresses 3292 * recorded in the pool are stored in the fields "mc_addr_pool" and 3293 * "mc_addr_nb" of the "rte_port" data structure. 3294 * 3295 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3296 * to be supplied a contiguous array of multicast MAC addresses. 3297 * To comply with this constraint, the set of multicast addresses recorded 3298 * into the pool are systematically compacted at the beginning of the pool. 3299 * Hence, when a multicast address is removed from the pool, all following 3300 * addresses, if any, are copied back to keep the set contiguous. 3301 */ 3302 #define MCAST_POOL_INC 32 3303 3304 static int 3305 mcast_addr_pool_extend(struct rte_port *port) 3306 { 3307 struct ether_addr *mc_pool; 3308 size_t mc_pool_size; 3309 3310 /* 3311 * If a free entry is available at the end of the pool, just 3312 * increment the number of recorded multicast addresses. 3313 */ 3314 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3315 port->mc_addr_nb++; 3316 return 0; 3317 } 3318 3319 /* 3320 * [re]allocate a pool with MCAST_POOL_INC more entries. 3321 * The previous test guarantees that port->mc_addr_nb is a multiple 3322 * of MCAST_POOL_INC. 
3323 */ 3324 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3325 MCAST_POOL_INC); 3326 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3327 mc_pool_size); 3328 if (mc_pool == NULL) { 3329 printf("allocation of pool of %u multicast addresses failed\n", 3330 port->mc_addr_nb + MCAST_POOL_INC); 3331 return -ENOMEM; 3332 } 3333 3334 port->mc_addr_pool = mc_pool; 3335 port->mc_addr_nb++; 3336 return 0; 3337 3338 } 3339 3340 static void 3341 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3342 { 3343 port->mc_addr_nb--; 3344 if (addr_idx == port->mc_addr_nb) { 3345 /* No need to recompact the set of multicast addresses. */ 3346 if (port->mc_addr_nb == 0) { 3347 /* free the pool of multicast addresses. */ 3348 free(port->mc_addr_pool); 3349 port->mc_addr_pool = NULL; 3350 } 3351 return; 3352 } 3353 memmove(&port->mc_addr_pool[addr_idx], 3354 &port->mc_addr_pool[addr_idx + 1], 3355 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3356 } 3357 3358 static void 3359 eth_port_multicast_addr_list_set(portid_t port_id) 3360 { 3361 struct rte_port *port; 3362 int diag; 3363 3364 port = &ports[port_id]; 3365 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3366 port->mc_addr_nb); 3367 if (diag == 0) 3368 return; 3369 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3370 port_id, port->mc_addr_nb, -diag); 3371 } 3372 3373 void 3374 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr) 3375 { 3376 struct rte_port *port; 3377 uint32_t i; 3378 3379 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3380 return; 3381 3382 port = &ports[port_id]; 3383 3384 /* 3385 * Check that the added multicast MAC address is not already recorded 3386 * in the pool of multicast addresses. 3387 */ 3388 for (i = 0; i < port->mc_addr_nb; i++) { 3389 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3390 printf("multicast address already filtered by port\n"); 3391 return; 3392 } 3393 } 3394 3395 if (mcast_addr_pool_extend(port) != 0) 3396 return; 3397 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3398 eth_port_multicast_addr_list_set(port_id); 3399 } 3400 3401 void 3402 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr) 3403 { 3404 struct rte_port *port; 3405 uint32_t i; 3406 3407 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3408 return; 3409 3410 port = &ports[port_id]; 3411 3412 /* 3413 * Search the pool of multicast MAC addresses for the removed address.
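 * If the address is not found, report it and leave the pool unchanged.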
3414 */ 3415 for (i = 0; i < port->mc_addr_nb; i++) { 3416 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3417 break; 3418 } 3419 if (i == port->mc_addr_nb) { 3420 printf("multicast address not filtered by port %d\n", port_id); 3421 return; 3422 } 3423 3424 mcast_addr_pool_remove(port, i); 3425 eth_port_multicast_addr_list_set(port_id); 3426 } 3427 3428 void 3429 port_dcb_info_display(portid_t port_id) 3430 { 3431 struct rte_eth_dcb_info dcb_info; 3432 uint16_t i; 3433 int ret; 3434 static const char *border = "================"; 3435 3436 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3437 return; 3438 3439 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3440 if (ret) { 3441 printf("\n Failed to get dcb infos on port %-2d\n", 3442 port_id); 3443 return; 3444 } 3445 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3446 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3447 printf("\n TC : "); 3448 for (i = 0; i < dcb_info.nb_tcs; i++) 3449 printf("\t%4d", i); 3450 printf("\n Priority : "); 3451 for (i = 0; i < dcb_info.nb_tcs; i++) 3452 printf("\t%4d", dcb_info.prio_tc[i]); 3453 printf("\n BW percent :"); 3454 for (i = 0; i < dcb_info.nb_tcs; i++) 3455 printf("\t%4d%%", dcb_info.tc_bws[i]); 3456 printf("\n RXQ base : "); 3457 for (i = 0; i < dcb_info.nb_tcs; i++) 3458 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3459 printf("\n RXQ number :"); 3460 for (i = 0; i < dcb_info.nb_tcs; i++) 3461 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3462 printf("\n TXQ base : "); 3463 for (i = 0; i < dcb_info.nb_tcs; i++) 3464 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3465 printf("\n TXQ number :"); 3466 for (i = 0; i < dcb_info.nb_tcs; i++) 3467 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3468 printf("\n"); 3469 } 3470 3471 uint8_t * 3472 open_file(const char *file_path, uint32_t *size) 3473 { 3474 int fd = open(file_path, O_RDONLY); 3475 off_t pkg_size; 3476 uint8_t *buf = NULL; 3477 int ret = 0; 3478 struct stat st_buf; 3479 3480 if (size) 3481 *size = 0; 3482 3483 if (fd == -1) { 3484 printf("%s: Failed to open %s\n", __func__, file_path); 3485 return buf; 3486 } 3487 3488 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3489 close(fd); 3490 printf("%s: File operations failed\n", __func__); 3491 return buf; 3492 } 3493 3494 pkg_size = st_buf.st_size; 3495 if (pkg_size < 0) { 3496 close(fd); 3497 printf("%s: File operations failed\n", __func__); 3498 return buf; 3499 } 3500 3501 buf = (uint8_t *)malloc(pkg_size); 3502 if (!buf) { 3503 close(fd); 3504 printf("%s: Failed to malloc memory\n", __func__); 3505 return buf; 3506 } 3507 3508 ret = read(fd, buf, pkg_size); 3509 if (ret < 0) { 3510 close(fd); 3511 printf("%s: File read operation failed\n", __func__); 3512 close_file(buf); 3513 return NULL; 3514 } 3515 3516 if (size) 3517 *size = pkg_size; 3518 3519 close(fd); 3520 3521 return buf; 3522 } 3523 3524 int 3525 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3526 { 3527 FILE *fh = fopen(file_path, "wb"); 3528 3529 if (fh == NULL) { 3530 printf("%s: Failed to open %s\n", __func__, file_path); 3531 return -1; 3532 } 3533 3534 if (fwrite(buf, 1, size, fh) != size) { 3535 fclose(fh); 3536 printf("%s: File write operation failed\n", __func__); 3537 return -1; 3538 } 3539 3540 fclose(fh); 3541 3542 return 0; 3543 } 3544 3545 int 3546 close_file(uint8_t *buf) 3547 { 3548 if (buf) { 3549 free((void *)buf); 3550 return 0; 3551 } 3552 3553 return -1; 3554 } 3555 3556 void 3557 
port_queue_region_info_display(portid_t port_id, void *buf) 3558 { 3559 #ifdef RTE_LIBRTE_I40E_PMD 3560 uint16_t i, j; 3561 struct rte_pmd_i40e_queue_regions *info = 3562 (struct rte_pmd_i40e_queue_regions *)buf; 3563 static const char *queue_region_info_stats_border = "-------"; 3564 3565 if (!info->queue_region_number) 3566 printf("no queue region has been set on this port"); 3567 3568 printf("\n %s All queue region info for port=%2d %s", 3569 queue_region_info_stats_border, port_id, 3570 queue_region_info_stats_border); 3571 printf("\n queue_region_number: %-14u \n", 3572 info->queue_region_number); 3573 3574 for (i = 0; i < info->queue_region_number; i++) { 3575 printf("\n region_id: %-14u queue_number: %-14u " 3576 "queue_start_index: %-14u \n", 3577 info->region[i].region_id, 3578 info->region[i].queue_num, 3579 info->region[i].queue_start_index); 3580 3581 printf(" user_priority_num is %-14u :", 3582 info->region[i].user_priority_num); 3583 for (j = 0; j < info->region[i].user_priority_num; j++) 3584 printf(" %-14u ", info->region[i].user_priority[j]); 3585 3586 printf("\n flowtype_num is %-14u :", 3587 info->region[i].flowtype_num); 3588 for (j = 0; j < info->region[i].flowtype_num; j++) 3589 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3590 } 3591 #else 3592 RTE_SET_USED(port_id); 3593 RTE_SET_USED(buf); 3594 #endif 3595 3596 printf("\n\n"); 3597 } 3598