1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <cmdline_parse_etheraddr.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 const struct rss_type_info rss_type_table[] = { 77 { "ipv4", ETH_RSS_IPV4 }, 78 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 79 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 80 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 81 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 82 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 83 { "ipv6", ETH_RSS_IPV6 }, 84 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 85 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 86 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 87 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 88 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 89 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 90 { "ipv6-ex", ETH_RSS_IPV6_EX }, 91 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 92 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 93 { "port", ETH_RSS_PORT }, 94 { "vxlan", ETH_RSS_VXLAN }, 95 { "geneve", ETH_RSS_GENEVE }, 96 { "nvgre", ETH_RSS_NVGRE }, 97 { "ip", ETH_RSS_IP }, 98 { "udp", ETH_RSS_UDP }, 99 { "tcp", ETH_RSS_TCP }, 100 { "sctp", ETH_RSS_SCTP }, 101 { "tunnel", ETH_RSS_TUNNEL }, 102 { NULL, 0 }, 103 }; 104 105 static void 106 print_ethaddr(const char *name, struct ether_addr *eth_addr) 107 { 108 char buf[ETHER_ADDR_FMT_SIZE]; 109 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 110 printf("%s%s", name, buf); 111 } 112 113 void 114 nic_stats_display(portid_t port_id) 115 { 116 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 117 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 118 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 119 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 120 uint64_t mpps_rx, mpps_tx; 121 struct rte_eth_stats stats; 122 struct rte_port *port = &ports[port_id]; 123 uint8_t i; 124 portid_t pid; 125 126 static const char *nic_stats_border = "########################"; 127 128 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 129 printf("Valid port range is [0"); 130 RTE_ETH_FOREACH_DEV(pid) 131 printf(", %d", pid); 132 printf("]\n"); 133 return; 134 } 135 rte_eth_stats_get(port_id, &stats); 136 printf("\n %s NIC statistics for port %-2d %s\n", 137 nic_stats_border, port_id, nic_stats_border); 138 139 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 140 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 141 "%-"PRIu64"\n", 142 stats.ipackets, stats.imissed, stats.ibytes); 143 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 144 printf(" RX-nombuf: %-10"PRIu64"\n", 145 stats.rx_nombuf); 146 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 147 "%-"PRIu64"\n", 148 stats.opackets, stats.oerrors, stats.obytes); 149 } 150 else { 151 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 152 " RX-bytes: %10"PRIu64"\n", 153 stats.ipackets, stats.ierrors, stats.ibytes); 154 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 155 printf(" RX-nombuf: %10"PRIu64"\n", 156 stats.rx_nombuf); 157 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 158 " TX-bytes: %10"PRIu64"\n", 159 stats.opackets, stats.oerrors, stats.obytes); 160 } 161 162 if (port->rx_queue_stats_mapping_enabled) { 163 printf("\n"); 164 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 165 printf(" Stats reg %2d RX-packets: %10"PRIu64 166 " RX-errors: %10"PRIu64 167 " RX-bytes: %10"PRIu64"\n", 168 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 169 } 170 } 171 if (port->tx_queue_stats_mapping_enabled) { 172 printf("\n"); 173 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 174 printf(" Stats reg %2d TX-packets: %10"PRIu64 175 " TX-bytes: %10"PRIu64"\n", 176 i, stats.q_opackets[i], stats.q_obytes[i]); 177 } 178 } 179 180 diff_cycles = prev_cycles[port_id]; 181 prev_cycles[port_id] = rte_rdtsc(); 182 if (diff_cycles > 0) 183 diff_cycles = prev_cycles[port_id] - diff_cycles; 184 185 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 186 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 187 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 188 (stats.opackets - prev_pkts_tx[port_id]) : 0; 189 prev_pkts_rx[port_id] = stats.ipackets; 190 prev_pkts_tx[port_id] = stats.opackets; 191 mpps_rx = diff_cycles > 0 ? 192 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 193 mpps_tx = diff_cycles > 0 ? 194 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 195 printf("\n Throughput (since last show)\n"); 196 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 197 mpps_rx, mpps_tx); 198 199 printf(" %s############################%s\n", 200 nic_stats_border, nic_stats_border); 201 } 202 203 void 204 nic_stats_clear(portid_t port_id) 205 { 206 portid_t pid; 207 208 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 209 printf("Valid port range is [0"); 210 RTE_ETH_FOREACH_DEV(pid) 211 printf(", %d", pid); 212 printf("]\n"); 213 return; 214 } 215 rte_eth_stats_reset(port_id); 216 printf("\n NIC statistics for port %d cleared\n", port_id); 217 } 218 219 void 220 nic_xstats_display(portid_t port_id) 221 { 222 struct rte_eth_xstat *xstats; 223 int cnt_xstats, idx_xstat; 224 struct rte_eth_xstat_name *xstats_names; 225 226 printf("###### NIC extended statistics for port %-2d\n", port_id); 227 if (!rte_eth_dev_is_valid_port(port_id)) { 228 printf("Error: Invalid port number %i\n", port_id); 229 return; 230 } 231 232 /* Get count */ 233 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 234 if (cnt_xstats < 0) { 235 printf("Error: Cannot get count of xstats\n"); 236 return; 237 } 238 239 /* Get id-name lookup table */ 240 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 241 if (xstats_names == NULL) { 242 printf("Cannot allocate memory for xstats lookup\n"); 243 return; 244 } 245 if (cnt_xstats != rte_eth_xstats_get_names( 246 port_id, xstats_names, cnt_xstats)) { 247 printf("Error: Cannot get xstats lookup\n"); 248 free(xstats_names); 249 return; 250 } 251 252 /* Get stats themselves */ 253 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 254 if (xstats == NULL) { 255 printf("Cannot allocate memory for xstats\n"); 256 free(xstats_names); 257 return; 258 } 259 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 260 printf("Error: Unable to get xstats\n"); 261 free(xstats_names); 262 free(xstats); 263 return; 264 } 265 266 /* Display xstats */ 267 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 268 if (xstats_hide_zero && !xstats[idx_xstat].value) 269 continue; 270 printf("%s: %"PRIu64"\n", 271 xstats_names[idx_xstat].name, 272 xstats[idx_xstat].value); 273 } 274 free(xstats_names); 275 free(xstats); 276 } 277 278 void 279 nic_xstats_clear(portid_t port_id) 280 { 281 rte_eth_xstats_reset(port_id); 282 } 283 284 void 285 nic_stats_mapping_display(portid_t port_id) 286 { 287 struct rte_port *port = &ports[port_id]; 288 uint16_t i; 289 portid_t pid; 290 291 static const char *nic_stats_mapping_border = "########################"; 292 293 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 294 printf("Valid port range is [0"); 295 RTE_ETH_FOREACH_DEV(pid) 296 printf(", %d", pid); 297 printf("]\n"); 298 return; 299 } 300 301 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 302 printf("Port id %d - either does not support queue statistic mapping or" 303 " no queue statistic mapping set\n", port_id); 304 return; 305 } 306 307 printf("\n %s NIC statistics mapping for port %-2d %s\n", 308 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 309 310 if (port->rx_queue_stats_mapping_enabled) { 311 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 312 if (rx_queue_stats_mappings[i].port_id == port_id) { 313 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 314 rx_queue_stats_mappings[i].queue_id, 315 rx_queue_stats_mappings[i].stats_counter_id); 316 } 317 } 318 printf("\n"); 319 } 320 321 322 if (port->tx_queue_stats_mapping_enabled) { 323 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 324 if (tx_queue_stats_mappings[i].port_id == port_id) { 325 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 326 tx_queue_stats_mappings[i].queue_id, 327 tx_queue_stats_mappings[i].stats_counter_id); 328 } 329 } 330 } 331 332 printf(" %s####################################%s\n", 333 nic_stats_mapping_border, nic_stats_mapping_border); 334 } 335 336 void 337 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 338 { 339 struct rte_eth_rxq_info qinfo; 340 int32_t rc; 341 static const char *info_border = "*********************"; 342 343 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 344 if (rc != 0) { 345 printf("Failed to retrieve information for port: %u, " 346 "RX queue: %hu\nerror desc: %s(%d)\n", 347 port_id, queue_id, strerror(-rc), rc); 348 return; 349 } 350 351 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 352 info_border, port_id, queue_id, info_border); 353 354 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 355 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 356 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 357 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 358 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 359 printf("\nRX drop packets: %s", 360 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 361 printf("\nRX deferred start: %s", 362 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 363 printf("\nRX scattered packets: %s", 364 (qinfo.scattered_rx != 0) ? "on" : "off"); 365 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 366 printf("\n"); 367 } 368 369 void 370 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 371 { 372 struct rte_eth_txq_info qinfo; 373 int32_t rc; 374 static const char *info_border = "*********************"; 375 376 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 377 if (rc != 0) { 378 printf("Failed to retrieve information for port: %u, " 379 "TX queue: %hu\nerror desc: %s(%d)\n", 380 port_id, queue_id, strerror(-rc), rc); 381 return; 382 } 383 384 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 385 info_border, port_id, queue_id, info_border); 386 387 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 388 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 389 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 390 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 391 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 392 printf("\nTX deferred start: %s", 393 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 394 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 395 printf("\n"); 396 } 397 398 void 399 port_infos_display(portid_t port_id) 400 { 401 struct rte_port *port; 402 struct ether_addr mac_addr; 403 struct rte_eth_link link; 404 struct rte_eth_dev_info dev_info; 405 int vlan_offload; 406 struct rte_mempool * mp; 407 static const char *info_border = "*********************"; 408 portid_t pid; 409 uint16_t mtu; 410 411 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 412 printf("Valid port range is [0"); 413 RTE_ETH_FOREACH_DEV(pid) 414 printf(", %d", pid); 415 printf("]\n"); 416 return; 417 } 418 port = &ports[port_id]; 419 rte_eth_link_get_nowait(port_id, &link); 420 memset(&dev_info, 0, sizeof(dev_info)); 421 rte_eth_dev_info_get(port_id, &dev_info); 422 printf("\n%s Infos for port %-2d %s\n", 423 info_border, port_id, info_border); 424 rte_eth_macaddr_get(port_id, &mac_addr); 425 print_ethaddr("MAC address: ", &mac_addr); 426 printf("\nDriver name: %s", dev_info.driver_name); 427 printf("\nConnect to socket: %u", port->socket_id); 428 429 if (port_numa[port_id] != NUMA_NO_CONFIG) { 430 mp = mbuf_pool_find(port_numa[port_id]); 431 if (mp) 432 printf("\nmemory allocation on the socket: %d", 433 port_numa[port_id]); 434 } else 435 printf("\nmemory allocation on the socket: %u",port->socket_id); 436 437 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 438 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 439 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 440 ("full-duplex") : ("half-duplex")); 441 442 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 443 printf("MTU: %u\n", mtu); 444 445 printf("Promiscuous mode: %s\n", 446 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 447 printf("Allmulticast mode: %s\n", 448 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 449 printf("Maximum number of MAC addresses: %u\n", 450 (unsigned int)(port->dev_info.max_mac_addrs)); 451 printf("Maximum number of MAC addresses of hash filtering: %u\n", 452 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 453 454 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 455 if (vlan_offload >= 0){ 456 printf("VLAN offload: \n"); 457 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 458 printf(" strip on \n"); 459 else 460 printf(" strip off \n"); 461 462 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 463 printf(" filter on \n"); 464 else 465 printf(" filter off \n"); 466 467 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 468 printf(" qinq(extend) on \n"); 469 else 470 printf(" qinq(extend) off \n"); 471 } 472 473 if (dev_info.hash_key_size > 0) 474 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 475 if (dev_info.reta_size > 0) 476 printf("Redirection table size: %u\n", dev_info.reta_size); 477 if (!dev_info.flow_type_rss_offloads) 478 printf("No flow type is supported.\n"); 479 else { 480 uint16_t i; 481 char *p; 482 483 printf("Supported flow types:\n"); 484 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 485 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 486 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 487 continue; 488 p = flowtype_to_str(i); 489 if (p) 490 printf(" %s\n", p); 491 else 492 printf(" user defined %d\n", i); 493 } 494 } 495 496 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 497 printf("Maximum configurable length of RX packet: %u\n", 498 dev_info.max_rx_pktlen); 499 if (dev_info.max_vfs) 500 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 501 if (dev_info.max_vmdq_pools) 502 printf("Maximum number of VMDq pools: %u\n", 503 dev_info.max_vmdq_pools); 504 505 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 506 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 507 printf("Max possible number of RXDs per queue: %hu\n", 508 dev_info.rx_desc_lim.nb_max); 509 printf("Min possible number of RXDs per queue: %hu\n", 510 dev_info.rx_desc_lim.nb_min); 511 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 512 513 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 514 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 515 printf("Max possible number of TXDs per queue: %hu\n", 516 dev_info.tx_desc_lim.nb_max); 517 printf("Min possible number of TXDs per queue: %hu\n", 518 dev_info.tx_desc_lim.nb_min); 519 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 520 } 521 522 void 523 port_offload_cap_display(portid_t port_id) 524 { 525 struct rte_eth_dev_info dev_info; 526 static const char *info_border = "************"; 527 528 if (port_id_is_invalid(port_id, ENABLED_WARN)) 529 return; 530 531 rte_eth_dev_info_get(port_id, &dev_info); 532 533 printf("\n%s Port %d supported offload features: %s\n", 534 info_border, port_id, info_border); 535 536 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 537 printf("VLAN stripped: "); 538 if (ports[port_id].dev_conf.rxmode.offloads & 539 DEV_RX_OFFLOAD_VLAN_STRIP) 540 printf("on\n"); 541 else 542 printf("off\n"); 543 } 544 545 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 546 printf("Double VLANs stripped: "); 547 if (ports[port_id].dev_conf.rxmode.offloads & 548 DEV_RX_OFFLOAD_VLAN_EXTEND) 549 printf("on\n"); 550 else 551 printf("off\n"); 552 } 553 554 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 555 printf("RX IPv4 checksum: "); 556 if (ports[port_id].dev_conf.rxmode.offloads & 557 DEV_RX_OFFLOAD_IPV4_CKSUM) 558 printf("on\n"); 559 else 560 printf("off\n"); 561 } 562 563 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 564 printf("RX UDP checksum: "); 565 if (ports[port_id].dev_conf.rxmode.offloads & 566 DEV_RX_OFFLOAD_UDP_CKSUM) 567 printf("on\n"); 568 else 569 printf("off\n"); 570 } 571 572 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 573 printf("RX TCP checksum: "); 574 if (ports[port_id].dev_conf.rxmode.offloads & 575 DEV_RX_OFFLOAD_TCP_CKSUM) 576 printf("on\n"); 577 else 578 printf("off\n"); 579 } 580 581 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 582 printf("RX Outer IPv4 checksum: "); 583 if (ports[port_id].dev_conf.rxmode.offloads & 584 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 585 printf("on\n"); 586 else 587 printf("off\n"); 588 } 589 590 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 591 printf("Large receive offload: "); 592 if (ports[port_id].dev_conf.rxmode.offloads & 593 DEV_RX_OFFLOAD_TCP_LRO) 594 printf("on\n"); 595 else 596 printf("off\n"); 597 } 598 599 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 600 printf("VLAN insert: "); 601 if (ports[port_id].dev_conf.txmode.offloads & 602 DEV_TX_OFFLOAD_VLAN_INSERT) 603 printf("on\n"); 604 else 605 printf("off\n"); 606 } 607 608 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 609 printf("HW timestamp: "); 610 if (ports[port_id].dev_conf.rxmode.offloads & 611 DEV_RX_OFFLOAD_TIMESTAMP) 612 printf("on\n"); 613 else 614 printf("off\n"); 615 } 616 617 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 618 printf("Double VLANs insert: "); 619 if (ports[port_id].dev_conf.txmode.offloads & 620 DEV_TX_OFFLOAD_QINQ_INSERT) 621 printf("on\n"); 622 else 623 printf("off\n"); 624 } 625 626 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 627 printf("TX IPv4 checksum: "); 628 if (ports[port_id].dev_conf.txmode.offloads & 629 DEV_TX_OFFLOAD_IPV4_CKSUM) 630 printf("on\n"); 631 else 632 printf("off\n"); 633 } 634 635 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 636 printf("TX UDP checksum: "); 637 if (ports[port_id].dev_conf.txmode.offloads & 638 DEV_TX_OFFLOAD_UDP_CKSUM) 639 printf("on\n"); 640 else 641 printf("off\n"); 642 } 643 644 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 645 printf("TX TCP checksum: "); 646 if (ports[port_id].dev_conf.txmode.offloads & 647 DEV_TX_OFFLOAD_TCP_CKSUM) 648 printf("on\n"); 649 else 650 printf("off\n"); 651 } 652 653 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 654 printf("TX SCTP checksum: "); 655 if (ports[port_id].dev_conf.txmode.offloads & 656 DEV_TX_OFFLOAD_SCTP_CKSUM) 657 printf("on\n"); 658 else 659 printf("off\n"); 660 } 661 662 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 663 printf("TX Outer IPv4 checksum: "); 664 if (ports[port_id].dev_conf.txmode.offloads & 665 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 666 printf("on\n"); 667 else 668 printf("off\n"); 669 } 670 671 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 672 printf("TX TCP segmentation: "); 673 if (ports[port_id].dev_conf.txmode.offloads & 674 DEV_TX_OFFLOAD_TCP_TSO) 675 printf("on\n"); 676 else 677 printf("off\n"); 678 } 679 680 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 681 printf("TX UDP segmentation: "); 682 if (ports[port_id].dev_conf.txmode.offloads & 683 DEV_TX_OFFLOAD_UDP_TSO) 684 printf("on\n"); 685 else 686 printf("off\n"); 687 } 688 689 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 690 printf("TSO for VXLAN tunnel packet: "); 691 if (ports[port_id].dev_conf.txmode.offloads & 692 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 693 printf("on\n"); 694 else 695 printf("off\n"); 696 } 697 698 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 699 printf("TSO for GRE tunnel packet: "); 700 if (ports[port_id].dev_conf.txmode.offloads & 701 DEV_TX_OFFLOAD_GRE_TNL_TSO) 702 printf("on\n"); 703 else 704 printf("off\n"); 705 } 706 707 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 708 printf("TSO for IPIP tunnel packet: "); 709 if (ports[port_id].dev_conf.txmode.offloads & 710 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 711 printf("on\n"); 712 else 713 printf("off\n"); 714 } 715 716 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 717 printf("TSO for GENEVE tunnel packet: "); 718 if (ports[port_id].dev_conf.txmode.offloads & 719 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 720 printf("on\n"); 721 else 722 printf("off\n"); 723 } 724 725 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 726 printf("IP tunnel TSO: "); 727 if (ports[port_id].dev_conf.txmode.offloads & 728 DEV_TX_OFFLOAD_IP_TNL_TSO) 729 printf("on\n"); 730 else 731 printf("off\n"); 732 } 733 734 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 735 printf("UDP tunnel TSO: "); 736 if (ports[port_id].dev_conf.txmode.offloads & 737 DEV_TX_OFFLOAD_UDP_TNL_TSO) 738 printf("on\n"); 739 else 740 printf("off\n"); 741 } 742 } 743 744 int 745 port_id_is_invalid(portid_t port_id, enum print_warning warning) 746 { 747 uint16_t pid; 748 749 if (port_id == (portid_t)RTE_PORT_ALL) 750 return 0; 751 752 RTE_ETH_FOREACH_DEV(pid) 753 if (port_id == pid) 754 return 0; 755 756 if (warning == ENABLED_WARN) 757 printf("Invalid port %d\n", port_id); 758 759 return 1; 760 } 761 762 static int 763 vlan_id_is_invalid(uint16_t vlan_id) 764 { 765 if (vlan_id < 4096) 766 return 0; 767 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 768 return 1; 769 } 770 771 static int 772 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 773 { 774 const struct rte_pci_device *pci_dev; 775 const struct rte_bus *bus; 776 uint64_t pci_len; 777 778 if (reg_off & 0x3) { 779 printf("Port register offset 0x%X not aligned on a 4-byte " 780 "boundary\n", 781 (unsigned)reg_off); 782 return 1; 783 } 784 785 if (!ports[port_id].dev_info.device) { 786 printf("Invalid device\n"); 787 return 0; 788 } 789 790 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 791 if (bus && !strcmp(bus->name, "pci")) { 792 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 793 } else { 794 printf("Not a PCI device\n"); 795 return 1; 796 } 797 798 pci_len = pci_dev->mem_resource[0].len; 799 if (reg_off >= pci_len) { 800 printf("Port %d: register offset %u (0x%X) out of port PCI " 801 "resource (length=%"PRIu64")\n", 802 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 803 return 1; 804 } 805 return 0; 806 } 807 808 static int 809 reg_bit_pos_is_invalid(uint8_t bit_pos) 810 { 811 if (bit_pos <= 31) 812 return 0; 813 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 814 return 1; 815 } 816 817 #define display_port_and_reg_off(port_id, reg_off) \ 818 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 819 820 static inline void 821 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 822 { 823 display_port_and_reg_off(port_id, (unsigned)reg_off); 824 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 825 } 826 827 void 828 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 829 { 830 uint32_t reg_v; 831 832 833 if (port_id_is_invalid(port_id, ENABLED_WARN)) 834 return; 835 if (port_reg_off_is_invalid(port_id, reg_off)) 836 return; 837 if (reg_bit_pos_is_invalid(bit_x)) 838 return; 839 reg_v = port_id_pci_reg_read(port_id, reg_off); 840 display_port_and_reg_off(port_id, (unsigned)reg_off); 841 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 842 } 843 844 void 845 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 846 uint8_t bit1_pos, uint8_t bit2_pos) 847 { 848 uint32_t reg_v; 849 uint8_t l_bit; 850 uint8_t h_bit; 851 852 if (port_id_is_invalid(port_id, ENABLED_WARN)) 853 return; 854 if (port_reg_off_is_invalid(port_id, reg_off)) 855 return; 856 if (reg_bit_pos_is_invalid(bit1_pos)) 857 return; 858 if (reg_bit_pos_is_invalid(bit2_pos)) 859 return; 860 if (bit1_pos > bit2_pos) 861 l_bit = bit2_pos, h_bit = bit1_pos; 862 else 863 l_bit = bit1_pos, h_bit = bit2_pos; 864 865 reg_v = port_id_pci_reg_read(port_id, reg_off); 866 reg_v >>= l_bit; 867 if (h_bit < 31) 868 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 869 display_port_and_reg_off(port_id, (unsigned)reg_off); 870 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 871 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 872 } 873 874 void 875 port_reg_display(portid_t port_id, uint32_t reg_off) 876 { 877 uint32_t reg_v; 878 879 if (port_id_is_invalid(port_id, ENABLED_WARN)) 880 return; 881 if (port_reg_off_is_invalid(port_id, reg_off)) 882 return; 883 reg_v = port_id_pci_reg_read(port_id, reg_off); 884 display_port_reg_value(port_id, reg_off, reg_v); 885 } 886 887 void 888 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 889 uint8_t bit_v) 890 { 891 uint32_t reg_v; 892 893 if (port_id_is_invalid(port_id, ENABLED_WARN)) 894 return; 895 if (port_reg_off_is_invalid(port_id, reg_off)) 896 return; 897 if (reg_bit_pos_is_invalid(bit_pos)) 898 return; 899 if (bit_v > 1) { 900 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 901 return; 902 } 903 reg_v = port_id_pci_reg_read(port_id, reg_off); 904 if (bit_v == 0) 905 reg_v &= ~(1 << bit_pos); 906 else 907 reg_v |= (1 << bit_pos); 908 port_id_pci_reg_write(port_id, reg_off, reg_v); 909 display_port_reg_value(port_id, reg_off, reg_v); 910 } 911 912 void 913 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 914 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 915 { 916 uint32_t max_v; 917 uint32_t reg_v; 918 uint8_t l_bit; 919 uint8_t h_bit; 920 921 if (port_id_is_invalid(port_id, ENABLED_WARN)) 922 return; 923 if (port_reg_off_is_invalid(port_id, reg_off)) 924 return; 925 if (reg_bit_pos_is_invalid(bit1_pos)) 926 return; 927 if (reg_bit_pos_is_invalid(bit2_pos)) 928 return; 929 if (bit1_pos > bit2_pos) 930 l_bit = bit2_pos, h_bit = bit1_pos; 931 else 932 l_bit = bit1_pos, h_bit = bit2_pos; 933 934 if ((h_bit - l_bit) < 31) 935 max_v = (1 << (h_bit - l_bit + 1)) - 1; 936 else 937 max_v = 0xFFFFFFFF; 938 939 if (value > max_v) { 940 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 941 (unsigned)value, (unsigned)value, 942 (unsigned)max_v, (unsigned)max_v); 943 return; 944 } 945 reg_v = port_id_pci_reg_read(port_id, reg_off); 946 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 947 reg_v |= (value << l_bit); /* Set changed bits */ 948 port_id_pci_reg_write(port_id, reg_off, reg_v); 949 display_port_reg_value(port_id, reg_off, reg_v); 950 } 951 952 void 953 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 954 { 955 if (port_id_is_invalid(port_id, ENABLED_WARN)) 956 return; 957 if (port_reg_off_is_invalid(port_id, reg_off)) 958 return; 959 port_id_pci_reg_write(port_id, reg_off, reg_v); 960 display_port_reg_value(port_id, reg_off, reg_v); 961 } 962 963 void 964 port_mtu_set(portid_t port_id, uint16_t mtu) 965 { 966 int diag; 967 968 if (port_id_is_invalid(port_id, ENABLED_WARN)) 969 return; 970 diag = rte_eth_dev_set_mtu(port_id, mtu); 971 if (diag == 0) 972 return; 973 printf("Set MTU failed. diag=%d\n", diag); 974 } 975 976 /* Generic flow management functions. */ 977 978 /** Generate flow_item[] entry. */ 979 #define MK_FLOW_ITEM(t, s) \ 980 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 981 .name = # t, \ 982 .size = s, \ 983 } 984 985 /** Information about known flow pattern items. */ 986 static const struct { 987 const char *name; 988 size_t size; 989 } flow_item[] = { 990 MK_FLOW_ITEM(END, 0), 991 MK_FLOW_ITEM(VOID, 0), 992 MK_FLOW_ITEM(INVERT, 0), 993 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 994 MK_FLOW_ITEM(PF, 0), 995 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 996 MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)), 997 MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)), 998 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), 999 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 1000 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 1001 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 1002 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 1003 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 1004 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 1005 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 1006 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 1007 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 1008 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), 1009 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), 1010 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 1011 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 1012 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)), 1013 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), 1014 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), 1015 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), 1016 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), 1017 MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)), 1018 }; 1019 1020 /** Pattern item specification types. */ 1021 enum item_spec_type { 1022 ITEM_SPEC, 1023 ITEM_LAST, 1024 ITEM_MASK, 1025 }; 1026 1027 /** Compute storage space needed by item specification and copy it. */ 1028 static size_t 1029 flow_item_spec_copy(void *buf, const struct rte_flow_item *item, 1030 enum item_spec_type type) 1031 { 1032 size_t size = 0; 1033 const void *item_spec = 1034 type == ITEM_SPEC ? item->spec : 1035 type == ITEM_LAST ? item->last : 1036 type == ITEM_MASK ? item->mask : 1037 NULL; 1038 1039 if (!item_spec) 1040 goto empty; 1041 switch (item->type) { 1042 union { 1043 const struct rte_flow_item_raw *raw; 1044 } src; 1045 union { 1046 struct rte_flow_item_raw *raw; 1047 } dst; 1048 size_t off; 1049 1050 case RTE_FLOW_ITEM_TYPE_RAW: 1051 src.raw = item_spec; 1052 dst.raw = buf; 1053 off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw), 1054 sizeof(*src.raw->pattern)); 1055 size = off + src.raw->length * sizeof(*src.raw->pattern); 1056 if (dst.raw) { 1057 memcpy(dst.raw, src.raw, sizeof(*src.raw)); 1058 dst.raw->pattern = memcpy((uint8_t *)dst.raw + off, 1059 src.raw->pattern, 1060 size - off); 1061 } 1062 break; 1063 default: 1064 size = flow_item[item->type].size; 1065 if (buf) 1066 memcpy(buf, item_spec, size); 1067 break; 1068 } 1069 empty: 1070 return RTE_ALIGN_CEIL(size, sizeof(double)); 1071 } 1072 1073 /** Generate flow_action[] entry. */ 1074 #define MK_FLOW_ACTION(t, s) \ 1075 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 1076 .name = # t, \ 1077 .size = s, \ 1078 } 1079 1080 /** Information about known flow actions. */ 1081 static const struct { 1082 const char *name; 1083 size_t size; 1084 } flow_action[] = { 1085 MK_FLOW_ACTION(END, 0), 1086 MK_FLOW_ACTION(VOID, 0), 1087 MK_FLOW_ACTION(PASSTHRU, 0), 1088 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 1089 MK_FLOW_ACTION(FLAG, 0), 1090 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 1091 MK_FLOW_ACTION(DROP, 0), 1092 MK_FLOW_ACTION(COUNT, 0), 1093 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), 1094 MK_FLOW_ACTION(PF, 0), 1095 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 1096 MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)), 1097 MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)), 1098 MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)), 1099 }; 1100 1101 /** Compute storage space needed by action configuration and copy it. */ 1102 static size_t 1103 flow_action_conf_copy(void *buf, const struct rte_flow_action *action) 1104 { 1105 size_t size = 0; 1106 1107 if (!action->conf) 1108 goto empty; 1109 switch (action->type) { 1110 union { 1111 const struct rte_flow_action_rss *rss; 1112 } src; 1113 union { 1114 struct rte_flow_action_rss *rss; 1115 } dst; 1116 size_t off; 1117 1118 case RTE_FLOW_ACTION_TYPE_RSS: 1119 src.rss = action->conf; 1120 dst.rss = buf; 1121 off = 0; 1122 if (dst.rss) 1123 *dst.rss = (struct rte_flow_action_rss){ 1124 .func = src.rss->func, 1125 .level = src.rss->level, 1126 .types = src.rss->types, 1127 .key_len = src.rss->key_len, 1128 .queue_num = src.rss->queue_num, 1129 }; 1130 off += sizeof(*src.rss); 1131 if (src.rss->key_len) { 1132 off = RTE_ALIGN_CEIL(off, sizeof(double)); 1133 size = sizeof(*src.rss->key) * src.rss->key_len; 1134 if (dst.rss) 1135 dst.rss->key = memcpy 1136 ((void *)((uintptr_t)dst.rss + off), 1137 src.rss->key, size); 1138 off += size; 1139 } 1140 if (src.rss->queue_num) { 1141 off = RTE_ALIGN_CEIL(off, sizeof(double)); 1142 size = sizeof(*src.rss->queue) * src.rss->queue_num; 1143 if (dst.rss) 1144 dst.rss->queue = memcpy 1145 ((void *)((uintptr_t)dst.rss + off), 1146 src.rss->queue, size); 1147 off += size; 1148 } 1149 size = off; 1150 break; 1151 default: 1152 size = flow_action[action->type].size; 1153 if (buf) 1154 memcpy(buf, action->conf, size); 1155 break; 1156 } 1157 empty: 1158 return RTE_ALIGN_CEIL(size, sizeof(double)); 1159 } 1160 1161 /** Generate a port_flow entry from attributes/pattern/actions. */ 1162 static struct port_flow * 1163 port_flow_new(const struct rte_flow_attr *attr, 1164 const struct rte_flow_item *pattern, 1165 const struct rte_flow_action *actions) 1166 { 1167 const struct rte_flow_item *item; 1168 const struct rte_flow_action *action; 1169 struct port_flow *pf = NULL; 1170 size_t tmp; 1171 size_t off1 = 0; 1172 size_t off2 = 0; 1173 int err = ENOTSUP; 1174 1175 store: 1176 item = pattern; 1177 if (pf) 1178 pf->pattern = (void *)&pf->data[off1]; 1179 do { 1180 struct rte_flow_item *dst = NULL; 1181 1182 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 1183 !flow_item[item->type].name) 1184 goto notsup; 1185 if (pf) 1186 dst = memcpy(pf->data + off1, item, sizeof(*item)); 1187 off1 += sizeof(*item); 1188 if (item->spec) { 1189 if (pf) 1190 dst->spec = pf->data + off2; 1191 off2 += flow_item_spec_copy 1192 (pf ? pf->data + off2 : NULL, item, ITEM_SPEC); 1193 } 1194 if (item->last) { 1195 if (pf) 1196 dst->last = pf->data + off2; 1197 off2 += flow_item_spec_copy 1198 (pf ? pf->data + off2 : NULL, item, ITEM_LAST); 1199 } 1200 if (item->mask) { 1201 if (pf) 1202 dst->mask = pf->data + off2; 1203 off2 += flow_item_spec_copy 1204 (pf ? pf->data + off2 : NULL, item, ITEM_MASK); 1205 } 1206 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1207 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 1208 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1209 action = actions; 1210 if (pf) 1211 pf->actions = (void *)&pf->data[off1]; 1212 do { 1213 struct rte_flow_action *dst = NULL; 1214 1215 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1216 !flow_action[action->type].name) 1217 goto notsup; 1218 if (pf) 1219 dst = memcpy(pf->data + off1, action, sizeof(*action)); 1220 off1 += sizeof(*action); 1221 if (action->conf) { 1222 if (pf) 1223 dst->conf = pf->data + off2; 1224 off2 += flow_action_conf_copy 1225 (pf ? pf->data + off2 : NULL, action); 1226 } 1227 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1228 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 1229 if (pf != NULL) 1230 return pf; 1231 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1232 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 1233 pf = calloc(1, tmp + off1 + off2); 1234 if (pf == NULL) 1235 err = errno; 1236 else { 1237 *pf = (const struct port_flow){ 1238 .size = tmp + off1 + off2, 1239 .attr = *attr, 1240 }; 1241 tmp -= offsetof(struct port_flow, data); 1242 off2 = tmp + off1; 1243 off1 = tmp; 1244 goto store; 1245 } 1246 notsup: 1247 rte_errno = err; 1248 return NULL; 1249 } 1250 1251 /** Print a message out of a flow error. */ 1252 static int 1253 port_flow_complain(struct rte_flow_error *error) 1254 { 1255 static const char *const errstrlist[] = { 1256 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1257 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1258 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1259 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1260 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1261 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1262 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1263 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1264 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1265 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1266 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1267 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1268 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1269 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1270 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1271 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1272 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1273 }; 1274 const char *errstr; 1275 char buf[32]; 1276 int err = rte_errno; 1277 1278 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1279 !errstrlist[error->type]) 1280 errstr = "unknown type"; 1281 else 1282 errstr = errstrlist[error->type]; 1283 printf("Caught error type %d (%s): %s%s\n", 1284 error->type, errstr, 1285 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1286 error->cause), buf) : "", 1287 error->message ? error->message : "(no stated reason)"); 1288 return -err; 1289 } 1290 1291 /** Validate flow rule. */ 1292 int 1293 port_flow_validate(portid_t port_id, 1294 const struct rte_flow_attr *attr, 1295 const struct rte_flow_item *pattern, 1296 const struct rte_flow_action *actions) 1297 { 1298 struct rte_flow_error error; 1299 1300 /* Poisoning to make sure PMDs update it in case of error. */ 1301 memset(&error, 0x11, sizeof(error)); 1302 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1303 return port_flow_complain(&error); 1304 printf("Flow rule validated\n"); 1305 return 0; 1306 } 1307 1308 /** Create flow rule. */ 1309 int 1310 port_flow_create(portid_t port_id, 1311 const struct rte_flow_attr *attr, 1312 const struct rte_flow_item *pattern, 1313 const struct rte_flow_action *actions) 1314 { 1315 struct rte_flow *flow; 1316 struct rte_port *port; 1317 struct port_flow *pf; 1318 uint32_t id; 1319 struct rte_flow_error error; 1320 1321 /* Poisoning to make sure PMDs update it in case of error. */ 1322 memset(&error, 0x22, sizeof(error)); 1323 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1324 if (!flow) 1325 return port_flow_complain(&error); 1326 port = &ports[port_id]; 1327 if (port->flow_list) { 1328 if (port->flow_list->id == UINT32_MAX) { 1329 printf("Highest rule ID is already assigned, delete" 1330 " it first"); 1331 rte_flow_destroy(port_id, flow, NULL); 1332 return -ENOMEM; 1333 } 1334 id = port->flow_list->id + 1; 1335 } else 1336 id = 0; 1337 pf = port_flow_new(attr, pattern, actions); 1338 if (!pf) { 1339 int err = rte_errno; 1340 1341 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1342 rte_flow_destroy(port_id, flow, NULL); 1343 return -err; 1344 } 1345 pf->next = port->flow_list; 1346 pf->id = id; 1347 pf->flow = flow; 1348 port->flow_list = pf; 1349 printf("Flow rule #%u created\n", pf->id); 1350 return 0; 1351 } 1352 1353 /** Destroy a number of flow rules. */ 1354 int 1355 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1356 { 1357 struct rte_port *port; 1358 struct port_flow **tmp; 1359 uint32_t c = 0; 1360 int ret = 0; 1361 1362 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1363 port_id == (portid_t)RTE_PORT_ALL) 1364 return -EINVAL; 1365 port = &ports[port_id]; 1366 tmp = &port->flow_list; 1367 while (*tmp) { 1368 uint32_t i; 1369 1370 for (i = 0; i != n; ++i) { 1371 struct rte_flow_error error; 1372 struct port_flow *pf = *tmp; 1373 1374 if (rule[i] != pf->id) 1375 continue; 1376 /* 1377 * Poisoning to make sure PMDs update it in case 1378 * of error. 1379 */ 1380 memset(&error, 0x33, sizeof(error)); 1381 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1382 ret = port_flow_complain(&error); 1383 continue; 1384 } 1385 printf("Flow rule #%u destroyed\n", pf->id); 1386 *tmp = pf->next; 1387 free(pf); 1388 break; 1389 } 1390 if (i == n) 1391 tmp = &(*tmp)->next; 1392 ++c; 1393 } 1394 return ret; 1395 } 1396 1397 /** Remove all flow rules. */ 1398 int 1399 port_flow_flush(portid_t port_id) 1400 { 1401 struct rte_flow_error error; 1402 struct rte_port *port; 1403 int ret = 0; 1404 1405 /* Poisoning to make sure PMDs update it in case of error. */ 1406 memset(&error, 0x44, sizeof(error)); 1407 if (rte_flow_flush(port_id, &error)) { 1408 ret = port_flow_complain(&error); 1409 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1410 port_id == (portid_t)RTE_PORT_ALL) 1411 return ret; 1412 } 1413 port = &ports[port_id]; 1414 while (port->flow_list) { 1415 struct port_flow *pf = port->flow_list->next; 1416 1417 free(port->flow_list); 1418 port->flow_list = pf; 1419 } 1420 return ret; 1421 } 1422 1423 /** Query a flow rule. */ 1424 int 1425 port_flow_query(portid_t port_id, uint32_t rule, 1426 enum rte_flow_action_type action) 1427 { 1428 struct rte_flow_error error; 1429 struct rte_port *port; 1430 struct port_flow *pf; 1431 const char *name; 1432 union { 1433 struct rte_flow_query_count count; 1434 } query; 1435 1436 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1437 port_id == (portid_t)RTE_PORT_ALL) 1438 return -EINVAL; 1439 port = &ports[port_id]; 1440 for (pf = port->flow_list; pf; pf = pf->next) 1441 if (pf->id == rule) 1442 break; 1443 if (!pf) { 1444 printf("Flow rule #%u not found\n", rule); 1445 return -ENOENT; 1446 } 1447 if ((unsigned int)action >= RTE_DIM(flow_action) || 1448 !flow_action[action].name) 1449 name = "unknown"; 1450 else 1451 name = flow_action[action].name; 1452 switch (action) { 1453 case RTE_FLOW_ACTION_TYPE_COUNT: 1454 break; 1455 default: 1456 printf("Cannot query action type %d (%s)\n", action, name); 1457 return -ENOTSUP; 1458 } 1459 /* Poisoning to make sure PMDs update it in case of error. */ 1460 memset(&error, 0x55, sizeof(error)); 1461 memset(&query, 0, sizeof(query)); 1462 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1463 return port_flow_complain(&error); 1464 switch (action) { 1465 case RTE_FLOW_ACTION_TYPE_COUNT: 1466 printf("%s:\n" 1467 " hits_set: %u\n" 1468 " bytes_set: %u\n" 1469 " hits: %" PRIu64 "\n" 1470 " bytes: %" PRIu64 "\n", 1471 name, 1472 query.count.hits_set, 1473 query.count.bytes_set, 1474 query.count.hits, 1475 query.count.bytes); 1476 break; 1477 default: 1478 printf("Cannot display result for action type %d (%s)\n", 1479 action, name); 1480 break; 1481 } 1482 return 0; 1483 } 1484 1485 /** List flow rules. */ 1486 void 1487 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1488 { 1489 struct rte_port *port; 1490 struct port_flow *pf; 1491 struct port_flow *list = NULL; 1492 uint32_t i; 1493 1494 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1495 port_id == (portid_t)RTE_PORT_ALL) 1496 return; 1497 port = &ports[port_id]; 1498 if (!port->flow_list) 1499 return; 1500 /* Sort flows by group, priority and ID. */ 1501 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1502 struct port_flow **tmp; 1503 1504 if (n) { 1505 /* Filter out unwanted groups. */ 1506 for (i = 0; i != n; ++i) 1507 if (pf->attr.group == group[i]) 1508 break; 1509 if (i == n) 1510 continue; 1511 } 1512 tmp = &list; 1513 while (*tmp && 1514 (pf->attr.group > (*tmp)->attr.group || 1515 (pf->attr.group == (*tmp)->attr.group && 1516 pf->attr.priority > (*tmp)->attr.priority) || 1517 (pf->attr.group == (*tmp)->attr.group && 1518 pf->attr.priority == (*tmp)->attr.priority && 1519 pf->id > (*tmp)->id))) 1520 tmp = &(*tmp)->tmp; 1521 pf->tmp = *tmp; 1522 *tmp = pf; 1523 } 1524 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1525 for (pf = list; pf != NULL; pf = pf->tmp) { 1526 const struct rte_flow_item *item = pf->pattern; 1527 const struct rte_flow_action *action = pf->actions; 1528 1529 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1530 pf->id, 1531 pf->attr.group, 1532 pf->attr.priority, 1533 pf->attr.ingress ? 'i' : '-', 1534 pf->attr.egress ? 'e' : '-', 1535 pf->attr.transfer ? 't' : '-'); 1536 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1537 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1538 printf("%s ", flow_item[item->type].name); 1539 ++item; 1540 } 1541 printf("=>"); 1542 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1543 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1544 printf(" %s", flow_action[action->type].name); 1545 ++action; 1546 } 1547 printf("\n"); 1548 } 1549 } 1550 1551 /** Restrict ingress traffic to the defined flow rules. */ 1552 int 1553 port_flow_isolate(portid_t port_id, int set) 1554 { 1555 struct rte_flow_error error; 1556 1557 /* Poisoning to make sure PMDs update it in case of error. */ 1558 memset(&error, 0x66, sizeof(error)); 1559 if (rte_flow_isolate(port_id, set, &error)) 1560 return port_flow_complain(&error); 1561 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1562 port_id, 1563 set ? "now restricted" : "not restricted anymore"); 1564 return 0; 1565 } 1566 1567 /* 1568 * RX/TX ring descriptors display functions. 1569 */ 1570 int 1571 rx_queue_id_is_invalid(queueid_t rxq_id) 1572 { 1573 if (rxq_id < nb_rxq) 1574 return 0; 1575 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1576 return 1; 1577 } 1578 1579 int 1580 tx_queue_id_is_invalid(queueid_t txq_id) 1581 { 1582 if (txq_id < nb_txq) 1583 return 0; 1584 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1585 return 1; 1586 } 1587 1588 static int 1589 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1590 { 1591 if (rxdesc_id < nb_rxd) 1592 return 0; 1593 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1594 rxdesc_id, nb_rxd); 1595 return 1; 1596 } 1597 1598 static int 1599 tx_desc_id_is_invalid(uint16_t txdesc_id) 1600 { 1601 if (txdesc_id < nb_txd) 1602 return 0; 1603 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1604 txdesc_id, nb_txd); 1605 return 1; 1606 } 1607 1608 static const struct rte_memzone * 1609 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1610 { 1611 char mz_name[RTE_MEMZONE_NAMESIZE]; 1612 const struct rte_memzone *mz; 1613 1614 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1615 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1616 mz = rte_memzone_lookup(mz_name); 1617 if (mz == NULL) 1618 printf("%s ring memory zoneof (port %d, queue %d) not" 1619 "found (zone name = %s\n", 1620 ring_name, port_id, q_id, mz_name); 1621 return mz; 1622 } 1623 1624 union igb_ring_dword { 1625 uint64_t dword; 1626 struct { 1627 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1628 uint32_t lo; 1629 uint32_t hi; 1630 #else 1631 uint32_t hi; 1632 uint32_t lo; 1633 #endif 1634 } words; 1635 }; 1636 1637 struct igb_ring_desc_32_bytes { 1638 union igb_ring_dword lo_dword; 1639 union igb_ring_dword hi_dword; 1640 union igb_ring_dword resv1; 1641 union igb_ring_dword resv2; 1642 }; 1643 1644 struct igb_ring_desc_16_bytes { 1645 union igb_ring_dword lo_dword; 1646 union igb_ring_dword hi_dword; 1647 }; 1648 1649 static void 1650 ring_rxd_display_dword(union igb_ring_dword dword) 1651 { 1652 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1653 (unsigned)dword.words.hi); 1654 } 1655 1656 static void 1657 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1658 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1659 portid_t port_id, 1660 #else 1661 __rte_unused portid_t port_id, 1662 #endif 1663 uint16_t desc_id) 1664 { 1665 struct igb_ring_desc_16_bytes *ring = 1666 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1667 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1668 struct rte_eth_dev_info dev_info; 1669 1670 memset(&dev_info, 0, sizeof(dev_info)); 1671 rte_eth_dev_info_get(port_id, &dev_info); 1672 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1673 /* 32 bytes RX descriptor, i40e only */ 1674 struct igb_ring_desc_32_bytes *ring = 1675 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1676 ring[desc_id].lo_dword.dword = 1677 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1678 ring_rxd_display_dword(ring[desc_id].lo_dword); 1679 ring[desc_id].hi_dword.dword = 1680 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1681 ring_rxd_display_dword(ring[desc_id].hi_dword); 1682 ring[desc_id].resv1.dword = 1683 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1684 ring_rxd_display_dword(ring[desc_id].resv1); 1685 ring[desc_id].resv2.dword = 1686 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1687 ring_rxd_display_dword(ring[desc_id].resv2); 1688 1689 return; 1690 } 1691 #endif 1692 /* 16 bytes RX descriptor */ 1693 ring[desc_id].lo_dword.dword = 1694 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1695 ring_rxd_display_dword(ring[desc_id].lo_dword); 1696 ring[desc_id].hi_dword.dword = 1697 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1698 ring_rxd_display_dword(ring[desc_id].hi_dword); 1699 } 1700 1701 static void 1702 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1703 { 1704 struct igb_ring_desc_16_bytes *ring; 1705 struct igb_ring_desc_16_bytes txd; 1706 1707 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1708 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1709 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1710 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1711 (unsigned)txd.lo_dword.words.lo, 1712 (unsigned)txd.lo_dword.words.hi, 1713 (unsigned)txd.hi_dword.words.lo, 1714 (unsigned)txd.hi_dword.words.hi); 1715 } 1716 1717 void 1718 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1719 { 1720 const struct rte_memzone *rx_mz; 1721 1722 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1723 return; 1724 if (rx_queue_id_is_invalid(rxq_id)) 1725 return; 1726 if (rx_desc_id_is_invalid(rxd_id)) 1727 return; 1728 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1729 if (rx_mz == NULL) 1730 return; 1731 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1732 } 1733 1734 void 1735 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1736 { 1737 const struct rte_memzone *tx_mz; 1738 1739 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1740 return; 1741 if (tx_queue_id_is_invalid(txq_id)) 1742 return; 1743 if (tx_desc_id_is_invalid(txd_id)) 1744 return; 1745 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1746 if (tx_mz == NULL) 1747 return; 1748 ring_tx_descriptor_display(tx_mz, txd_id); 1749 } 1750 1751 void 1752 fwd_lcores_config_display(void) 1753 { 1754 lcoreid_t lc_id; 1755 1756 printf("List of forwarding lcores:"); 1757 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1758 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1759 printf("\n"); 1760 } 1761 void 1762 rxtx_config_display(void) 1763 { 1764 portid_t pid; 1765 queueid_t qid; 1766 1767 printf(" %s packet forwarding%s packets/burst=%d\n", 1768 cur_fwd_eng->fwd_mode_name, 1769 retry_enabled == 0 ? "" : " with retry", 1770 nb_pkt_per_burst); 1771 1772 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1773 printf(" packet len=%u - nb packet segments=%d\n", 1774 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1775 1776 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1777 nb_fwd_lcores, nb_fwd_ports); 1778 1779 RTE_ETH_FOREACH_DEV(pid) { 1780 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1781 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1782 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1783 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1784 1785 /* per port config */ 1786 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1787 (unsigned int)pid, nb_rxq, nb_txq); 1788 1789 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1790 ports[pid].dev_conf.rxmode.offloads, 1791 ports[pid].dev_conf.txmode.offloads); 1792 1793 /* per rx queue config only for first queue to be less verbose */ 1794 for (qid = 0; qid < 1; qid++) { 1795 printf(" RX queue: %d\n", qid); 1796 printf(" RX desc=%d - RX free threshold=%d\n", 1797 nb_rx_desc[qid], rx_conf[qid].rx_free_thresh); 1798 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1799 " wthresh=%d\n", 1800 rx_conf[qid].rx_thresh.pthresh, 1801 rx_conf[qid].rx_thresh.hthresh, 1802 rx_conf[qid].rx_thresh.wthresh); 1803 printf(" RX Offloads=0x%"PRIx64"\n", 1804 rx_conf[qid].offloads); 1805 } 1806 1807 /* per tx queue config only for first queue to be less verbose */ 1808 for (qid = 0; qid < 1; qid++) { 1809 printf(" TX queue: %d\n", qid); 1810 printf(" TX desc=%d - TX free threshold=%d\n", 1811 nb_tx_desc[qid], tx_conf[qid].tx_free_thresh); 1812 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1813 " wthresh=%d\n", 1814 tx_conf[qid].tx_thresh.pthresh, 1815 tx_conf[qid].tx_thresh.hthresh, 1816 tx_conf[qid].tx_thresh.wthresh); 1817 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1818 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1819 } 1820 } 1821 } 1822 1823 void 1824 port_rss_reta_info(portid_t port_id, 1825 struct rte_eth_rss_reta_entry64 *reta_conf, 1826 uint16_t nb_entries) 1827 { 1828 uint16_t i, idx, shift; 1829 int ret; 1830 1831 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1832 return; 1833 1834 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1835 if (ret != 0) { 1836 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1837 return; 1838 } 1839 1840 for (i = 0; i < nb_entries; i++) { 1841 idx = i / RTE_RETA_GROUP_SIZE; 1842 shift = i % RTE_RETA_GROUP_SIZE; 1843 if (!(reta_conf[idx].mask & (1ULL << shift))) 1844 continue; 1845 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1846 i, reta_conf[idx].reta[shift]); 1847 } 1848 } 1849 1850 /* 1851 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1852 * key of the port. 1853 */ 1854 void 1855 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1856 { 1857 struct rte_eth_rss_conf rss_conf; 1858 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1859 uint64_t rss_hf; 1860 uint8_t i; 1861 int diag; 1862 struct rte_eth_dev_info dev_info; 1863 uint8_t hash_key_size; 1864 1865 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1866 return; 1867 1868 memset(&dev_info, 0, sizeof(dev_info)); 1869 rte_eth_dev_info_get(port_id, &dev_info); 1870 if (dev_info.hash_key_size > 0 && 1871 dev_info.hash_key_size <= sizeof(rss_key)) 1872 hash_key_size = dev_info.hash_key_size; 1873 else { 1874 printf("dev_info did not provide a valid hash key size\n"); 1875 return; 1876 } 1877 1878 rss_conf.rss_hf = 0; 1879 for (i = 0; rss_type_table[i].str; i++) { 1880 if (!strcmp(rss_info, rss_type_table[i].str)) 1881 rss_conf.rss_hf = rss_type_table[i].rss_type; 1882 } 1883 1884 /* Get RSS hash key if asked to display it */ 1885 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1886 rss_conf.rss_key_len = hash_key_size; 1887 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1888 if (diag != 0) { 1889 switch (diag) { 1890 case -ENODEV: 1891 printf("port index %d invalid\n", port_id); 1892 break; 1893 case -ENOTSUP: 1894 printf("operation not supported by device\n"); 1895 break; 1896 default: 1897 printf("operation failed - diag=%d\n", diag); 1898 break; 1899 } 1900 return; 1901 } 1902 rss_hf = rss_conf.rss_hf; 1903 if (rss_hf == 0) { 1904 printf("RSS disabled\n"); 1905 return; 1906 } 1907 printf("RSS functions:\n "); 1908 for (i = 0; rss_type_table[i].str; i++) { 1909 if (rss_hf & rss_type_table[i].rss_type) 1910 printf("%s ", rss_type_table[i].str); 1911 } 1912 printf("\n"); 1913 if (!show_rss_key) 1914 return; 1915 printf("RSS key:\n"); 1916 for (i = 0; i < hash_key_size; i++) 1917 printf("%02X", rss_key[i]); 1918 printf("\n"); 1919 } 1920 1921 void 1922 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1923 uint hash_key_len) 1924 { 1925 struct rte_eth_rss_conf rss_conf; 1926 int diag; 1927 unsigned int i; 1928 1929 rss_conf.rss_key = NULL; 1930 rss_conf.rss_key_len = hash_key_len; 1931 rss_conf.rss_hf = 0; 1932 for (i = 0; rss_type_table[i].str; i++) { 1933 if (!strcmp(rss_type_table[i].str, rss_type)) 1934 rss_conf.rss_hf = rss_type_table[i].rss_type; 1935 } 1936 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1937 if (diag == 0) { 1938 rss_conf.rss_key = hash_key; 1939 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1940 } 1941 if (diag == 0) 1942 return; 1943 1944 switch (diag) { 1945 case -ENODEV: 1946 printf("port index %d invalid\n", port_id); 1947 break; 1948 case -ENOTSUP: 1949 printf("operation not supported by device\n"); 1950 break; 1951 default: 1952 printf("operation failed - diag=%d\n", diag); 1953 break; 1954 } 1955 } 1956 1957 /* 1958 * Setup forwarding configuration for each logical core. 1959 */ 1960 static void 1961 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1962 { 1963 streamid_t nb_fs_per_lcore; 1964 streamid_t nb_fs; 1965 streamid_t sm_id; 1966 lcoreid_t nb_extra; 1967 lcoreid_t nb_fc; 1968 lcoreid_t nb_lc; 1969 lcoreid_t lc_id; 1970 1971 nb_fs = cfg->nb_fwd_streams; 1972 nb_fc = cfg->nb_fwd_lcores; 1973 if (nb_fs <= nb_fc) { 1974 nb_fs_per_lcore = 1; 1975 nb_extra = 0; 1976 } else { 1977 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1978 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1979 } 1980 1981 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1982 sm_id = 0; 1983 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1984 fwd_lcores[lc_id]->stream_idx = sm_id; 1985 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1986 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1987 } 1988 1989 /* 1990 * Assign extra remaining streams, if any. 1991 */ 1992 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1993 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1994 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1995 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1996 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1997 } 1998 } 1999 2000 static portid_t 2001 fwd_topology_tx_port_get(portid_t rxp) 2002 { 2003 static int warning_once = 1; 2004 2005 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2006 2007 switch (port_topology) { 2008 default: 2009 case PORT_TOPOLOGY_PAIRED: 2010 if ((rxp & 0x1) == 0) { 2011 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2012 return rxp + 1; 2013 if (warning_once) { 2014 printf("\nWarning! port-topology=paired" 2015 " and odd forward ports number," 2016 " the last port will pair with" 2017 " itself.\n\n"); 2018 warning_once = 0; 2019 } 2020 return rxp; 2021 } 2022 return rxp - 1; 2023 case PORT_TOPOLOGY_CHAINED: 2024 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2025 case PORT_TOPOLOGY_LOOP: 2026 return rxp; 2027 } 2028 } 2029 2030 static void 2031 simple_fwd_config_setup(void) 2032 { 2033 portid_t i; 2034 2035 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2036 cur_fwd_config.nb_fwd_streams = 2037 (streamid_t) cur_fwd_config.nb_fwd_ports; 2038 2039 /* reinitialize forwarding streams */ 2040 init_fwd_streams(); 2041 2042 /* 2043 * In the simple forwarding test, the number of forwarding cores 2044 * must be lower or equal to the number of forwarding ports. 2045 */ 2046 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2047 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2048 cur_fwd_config.nb_fwd_lcores = 2049 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2050 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2051 2052 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2053 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2054 fwd_streams[i]->rx_queue = 0; 2055 fwd_streams[i]->tx_port = 2056 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2057 fwd_streams[i]->tx_queue = 0; 2058 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2059 fwd_streams[i]->retry_enabled = retry_enabled; 2060 } 2061 } 2062 2063 /** 2064 * For the RSS forwarding test all streams distributed over lcores. Each stream 2065 * being composed of a RX queue to poll on a RX port for input messages, 2066 * associated with a TX queue of a TX port where to send forwarded packets. 2067 */ 2068 static void 2069 rss_fwd_config_setup(void) 2070 { 2071 portid_t rxp; 2072 portid_t txp; 2073 queueid_t rxq; 2074 queueid_t nb_q; 2075 streamid_t sm_id; 2076 2077 nb_q = nb_rxq; 2078 if (nb_q > nb_txq) 2079 nb_q = nb_txq; 2080 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2081 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2082 cur_fwd_config.nb_fwd_streams = 2083 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2084 2085 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2086 cur_fwd_config.nb_fwd_lcores = 2087 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2088 2089 /* reinitialize forwarding streams */ 2090 init_fwd_streams(); 2091 2092 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2093 rxp = 0; rxq = 0; 2094 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2095 struct fwd_stream *fs; 2096 2097 fs = fwd_streams[sm_id]; 2098 txp = fwd_topology_tx_port_get(rxp); 2099 fs->rx_port = fwd_ports_ids[rxp]; 2100 fs->rx_queue = rxq; 2101 fs->tx_port = fwd_ports_ids[txp]; 2102 fs->tx_queue = rxq; 2103 fs->peer_addr = fs->tx_port; 2104 fs->retry_enabled = retry_enabled; 2105 rxq = (queueid_t) (rxq + 1); 2106 if (rxq < nb_q) 2107 continue; 2108 /* 2109 * rxq == nb_q 2110 * Restart from RX queue 0 on next RX port 2111 */ 2112 rxq = 0; 2113 rxp++; 2114 } 2115 } 2116 2117 /** 2118 * For the DCB forwarding test, each core is assigned on each traffic class. 2119 * 2120 * Each core is assigned a multi-stream, each stream being composed of 2121 * a RX queue to poll on a RX port for input messages, associated with 2122 * a TX queue of a TX port where to send forwarded packets. All RX and 2123 * TX queues are mapping to the same traffic class. 2124 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2125 * the same core 2126 */ 2127 static void 2128 dcb_fwd_config_setup(void) 2129 { 2130 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2131 portid_t txp, rxp = 0; 2132 queueid_t txq, rxq = 0; 2133 lcoreid_t lc_id; 2134 uint16_t nb_rx_queue, nb_tx_queue; 2135 uint16_t i, j, k, sm_id = 0; 2136 uint8_t tc = 0; 2137 2138 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2139 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2140 cur_fwd_config.nb_fwd_streams = 2141 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2142 2143 /* reinitialize forwarding streams */ 2144 init_fwd_streams(); 2145 sm_id = 0; 2146 txp = 1; 2147 /* get the dcb info on the first RX and TX ports */ 2148 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2149 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2150 2151 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2152 fwd_lcores[lc_id]->stream_nb = 0; 2153 fwd_lcores[lc_id]->stream_idx = sm_id; 2154 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2155 /* if the nb_queue is zero, means this tc is 2156 * not enabled on the POOL 2157 */ 2158 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2159 break; 2160 k = fwd_lcores[lc_id]->stream_nb + 2161 fwd_lcores[lc_id]->stream_idx; 2162 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2163 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2164 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2165 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2166 for (j = 0; j < nb_rx_queue; j++) { 2167 struct fwd_stream *fs; 2168 2169 fs = fwd_streams[k + j]; 2170 fs->rx_port = fwd_ports_ids[rxp]; 2171 fs->rx_queue = rxq + j; 2172 fs->tx_port = fwd_ports_ids[txp]; 2173 fs->tx_queue = txq + j % nb_tx_queue; 2174 fs->peer_addr = fs->tx_port; 2175 fs->retry_enabled = retry_enabled; 2176 } 2177 fwd_lcores[lc_id]->stream_nb += 2178 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2179 } 2180 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2181 2182 tc++; 2183 if (tc < rxp_dcb_info.nb_tcs) 2184 continue; 2185 /* Restart from TC 0 on next RX port */ 2186 tc = 0; 2187 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2188 rxp = (portid_t) 2189 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2190 else 2191 rxp++; 2192 if (rxp >= nb_fwd_ports) 2193 return; 2194 /* get the dcb information on next RX and TX ports */ 2195 if ((rxp & 0x1) == 0) 2196 txp = (portid_t) (rxp + 1); 2197 else 2198 txp = (portid_t) (rxp - 1); 2199 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2200 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2201 } 2202 } 2203 2204 static void 2205 icmp_echo_config_setup(void) 2206 { 2207 portid_t rxp; 2208 queueid_t rxq; 2209 lcoreid_t lc_id; 2210 uint16_t sm_id; 2211 2212 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2213 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2214 (nb_txq * nb_fwd_ports); 2215 else 2216 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2217 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2218 cur_fwd_config.nb_fwd_streams = 2219 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2220 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2221 cur_fwd_config.nb_fwd_lcores = 2222 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2223 if (verbose_level > 0) { 2224 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2225 __FUNCTION__, 2226 cur_fwd_config.nb_fwd_lcores, 2227 cur_fwd_config.nb_fwd_ports, 2228 cur_fwd_config.nb_fwd_streams); 2229 } 2230 2231 /* reinitialize forwarding streams */ 2232 init_fwd_streams(); 2233 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2234 rxp = 0; rxq = 0; 2235 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2236 if (verbose_level > 0) 2237 printf(" core=%d: \n", lc_id); 2238 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2239 struct fwd_stream *fs; 2240 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2241 fs->rx_port = fwd_ports_ids[rxp]; 2242 fs->rx_queue = rxq; 2243 fs->tx_port = fs->rx_port; 2244 fs->tx_queue = rxq; 2245 fs->peer_addr = fs->tx_port; 2246 fs->retry_enabled = retry_enabled; 2247 if (verbose_level > 0) 2248 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2249 sm_id, fs->rx_port, fs->rx_queue, 2250 fs->tx_queue); 2251 rxq = (queueid_t) (rxq + 1); 2252 if (rxq == nb_rxq) { 2253 rxq = 0; 2254 rxp = (portid_t) (rxp + 1); 2255 } 2256 } 2257 } 2258 } 2259 2260 void 2261 fwd_config_setup(void) 2262 { 2263 cur_fwd_config.fwd_eng = cur_fwd_eng; 2264 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2265 icmp_echo_config_setup(); 2266 return; 2267 } 2268 if ((nb_rxq > 1) && (nb_txq > 1)){ 2269 if (dcb_config) 2270 dcb_fwd_config_setup(); 2271 else 2272 rss_fwd_config_setup(); 2273 } 2274 else 2275 simple_fwd_config_setup(); 2276 } 2277 2278 void 2279 pkt_fwd_config_display(struct fwd_config *cfg) 2280 { 2281 struct fwd_stream *fs; 2282 lcoreid_t lc_id; 2283 streamid_t sm_id; 2284 2285 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2286 "NUMA support %s, MP over anonymous pages %s\n", 2287 cfg->fwd_eng->fwd_mode_name, 2288 retry_enabled == 0 ? "" : " with retry", 2289 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2290 numa_support == 1 ? "enabled" : "disabled", 2291 mp_anon != 0 ? "enabled" : "disabled"); 2292 2293 if (retry_enabled) 2294 printf("TX retry num: %u, delay between TX retries: %uus\n", 2295 burst_tx_retry_num, burst_tx_delay_time); 2296 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2297 printf("Logical Core %u (socket %u) forwards packets on " 2298 "%d streams:", 2299 fwd_lcores_cpuids[lc_id], 2300 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2301 fwd_lcores[lc_id]->stream_nb); 2302 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2303 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2304 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2305 "P=%d/Q=%d (socket %u) ", 2306 fs->rx_port, fs->rx_queue, 2307 ports[fs->rx_port].socket_id, 2308 fs->tx_port, fs->tx_queue, 2309 ports[fs->tx_port].socket_id); 2310 print_ethaddr("peer=", 2311 &peer_eth_addrs[fs->peer_addr]); 2312 } 2313 printf("\n"); 2314 } 2315 printf("\n"); 2316 } 2317 2318 void 2319 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2320 { 2321 uint8_t c, new_peer_addr[6]; 2322 if (!rte_eth_dev_is_valid_port(port_id)) { 2323 printf("Error: Invalid port number %i\n", port_id); 2324 return; 2325 } 2326 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2327 sizeof(new_peer_addr)) < 0) { 2328 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2329 return; 2330 } 2331 for (c = 0; c < 6; c++) 2332 peer_eth_addrs[port_id].addr_bytes[c] = 2333 new_peer_addr[c]; 2334 } 2335 2336 int 2337 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2338 { 2339 unsigned int i; 2340 unsigned int lcore_cpuid; 2341 int record_now; 2342 2343 record_now = 0; 2344 again: 2345 for (i = 0; i < nb_lc; i++) { 2346 lcore_cpuid = lcorelist[i]; 2347 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2348 printf("lcore %u not enabled\n", lcore_cpuid); 2349 return -1; 2350 } 2351 if (lcore_cpuid == rte_get_master_lcore()) { 2352 printf("lcore %u cannot be masked on for running " 2353 "packet forwarding, which is the master lcore " 2354 "and reserved for command line parsing only\n", 2355 lcore_cpuid); 2356 return -1; 2357 } 2358 if (record_now) 2359 fwd_lcores_cpuids[i] = lcore_cpuid; 2360 } 2361 if (record_now == 0) { 2362 record_now = 1; 2363 goto again; 2364 } 2365 nb_cfg_lcores = (lcoreid_t) nb_lc; 2366 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2367 printf("previous number of forwarding cores %u - changed to " 2368 "number of configured cores %u\n", 2369 (unsigned int) nb_fwd_lcores, nb_lc); 2370 nb_fwd_lcores = (lcoreid_t) nb_lc; 2371 } 2372 2373 return 0; 2374 } 2375 2376 int 2377 set_fwd_lcores_mask(uint64_t lcoremask) 2378 { 2379 unsigned int lcorelist[64]; 2380 unsigned int nb_lc; 2381 unsigned int i; 2382 2383 if (lcoremask == 0) { 2384 printf("Invalid NULL mask of cores\n"); 2385 return -1; 2386 } 2387 nb_lc = 0; 2388 for (i = 0; i < 64; i++) { 2389 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2390 continue; 2391 lcorelist[nb_lc++] = i; 2392 } 2393 return set_fwd_lcores_list(lcorelist, nb_lc); 2394 } 2395 2396 void 2397 set_fwd_lcores_number(uint16_t nb_lc) 2398 { 2399 if (nb_lc > nb_cfg_lcores) { 2400 printf("nb fwd cores %u > %u (max. number of configured " 2401 "lcores) - ignored\n", 2402 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2403 return; 2404 } 2405 nb_fwd_lcores = (lcoreid_t) nb_lc; 2406 printf("Number of forwarding cores set to %u\n", 2407 (unsigned int) nb_fwd_lcores); 2408 } 2409 2410 void 2411 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2412 { 2413 unsigned int i; 2414 portid_t port_id; 2415 int record_now; 2416 2417 record_now = 0; 2418 again: 2419 for (i = 0; i < nb_pt; i++) { 2420 port_id = (portid_t) portlist[i]; 2421 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2422 return; 2423 if (record_now) 2424 fwd_ports_ids[i] = port_id; 2425 } 2426 if (record_now == 0) { 2427 record_now = 1; 2428 goto again; 2429 } 2430 nb_cfg_ports = (portid_t) nb_pt; 2431 if (nb_fwd_ports != (portid_t) nb_pt) { 2432 printf("previous number of forwarding ports %u - changed to " 2433 "number of configured ports %u\n", 2434 (unsigned int) nb_fwd_ports, nb_pt); 2435 nb_fwd_ports = (portid_t) nb_pt; 2436 } 2437 } 2438 2439 void 2440 set_fwd_ports_mask(uint64_t portmask) 2441 { 2442 unsigned int portlist[64]; 2443 unsigned int nb_pt; 2444 unsigned int i; 2445 2446 if (portmask == 0) { 2447 printf("Invalid NULL mask of ports\n"); 2448 return; 2449 } 2450 nb_pt = 0; 2451 RTE_ETH_FOREACH_DEV(i) { 2452 if (! ((uint64_t)(1ULL << i) & portmask)) 2453 continue; 2454 portlist[nb_pt++] = i; 2455 } 2456 set_fwd_ports_list(portlist, nb_pt); 2457 } 2458 2459 void 2460 set_fwd_ports_number(uint16_t nb_pt) 2461 { 2462 if (nb_pt > nb_cfg_ports) { 2463 printf("nb fwd ports %u > %u (number of configured " 2464 "ports) - ignored\n", 2465 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2466 return; 2467 } 2468 nb_fwd_ports = (portid_t) nb_pt; 2469 printf("Number of forwarding ports set to %u\n", 2470 (unsigned int) nb_fwd_ports); 2471 } 2472 2473 int 2474 port_is_forwarding(portid_t port_id) 2475 { 2476 unsigned int i; 2477 2478 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2479 return -1; 2480 2481 for (i = 0; i < nb_fwd_ports; i++) { 2482 if (fwd_ports_ids[i] == port_id) 2483 return 1; 2484 } 2485 2486 return 0; 2487 } 2488 2489 void 2490 set_nb_pkt_per_burst(uint16_t nb) 2491 { 2492 if (nb > MAX_PKT_BURST) { 2493 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2494 " ignored\n", 2495 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2496 return; 2497 } 2498 nb_pkt_per_burst = nb; 2499 printf("Number of packets per burst set to %u\n", 2500 (unsigned int) nb_pkt_per_burst); 2501 } 2502 2503 static const char * 2504 tx_split_get_name(enum tx_pkt_split split) 2505 { 2506 uint32_t i; 2507 2508 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2509 if (tx_split_name[i].split == split) 2510 return tx_split_name[i].name; 2511 } 2512 return NULL; 2513 } 2514 2515 void 2516 set_tx_pkt_split(const char *name) 2517 { 2518 uint32_t i; 2519 2520 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2521 if (strcmp(tx_split_name[i].name, name) == 0) { 2522 tx_pkt_split = tx_split_name[i].split; 2523 return; 2524 } 2525 } 2526 printf("unknown value: \"%s\"\n", name); 2527 } 2528 2529 void 2530 show_tx_pkt_segments(void) 2531 { 2532 uint32_t i, n; 2533 const char *split; 2534 2535 n = tx_pkt_nb_segs; 2536 split = tx_split_get_name(tx_pkt_split); 2537 2538 printf("Number of segments: %u\n", n); 2539 printf("Segment sizes: "); 2540 for (i = 0; i != n - 1; i++) 2541 printf("%hu,", tx_pkt_seg_lengths[i]); 2542 printf("%hu\n", tx_pkt_seg_lengths[i]); 2543 printf("Split packet: %s\n", split); 2544 } 2545 2546 void 2547 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2548 { 2549 uint16_t tx_pkt_len; 2550 unsigned i; 2551 2552 if (nb_segs >= (unsigned) nb_txd) { 2553 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2554 nb_segs, (unsigned int) nb_txd); 2555 return; 2556 } 2557 2558 /* 2559 * Check that each segment length is greater or equal than 2560 * the mbuf data sise. 2561 * Check also that the total packet length is greater or equal than the 2562 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2563 */ 2564 tx_pkt_len = 0; 2565 for (i = 0; i < nb_segs; i++) { 2566 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2567 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2568 i, seg_lengths[i], (unsigned) mbuf_data_size); 2569 return; 2570 } 2571 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2572 } 2573 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2574 printf("total packet length=%u < %d - give up\n", 2575 (unsigned) tx_pkt_len, 2576 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2577 return; 2578 } 2579 2580 for (i = 0; i < nb_segs; i++) 2581 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2582 2583 tx_pkt_length = tx_pkt_len; 2584 tx_pkt_nb_segs = (uint8_t) nb_segs; 2585 } 2586 2587 void 2588 setup_gro(const char *onoff, portid_t port_id) 2589 { 2590 if (!rte_eth_dev_is_valid_port(port_id)) { 2591 printf("invalid port id %u\n", port_id); 2592 return; 2593 } 2594 if (test_done == 0) { 2595 printf("Before enable/disable GRO," 2596 " please stop forwarding first\n"); 2597 return; 2598 } 2599 if (strcmp(onoff, "on") == 0) { 2600 if (gro_ports[port_id].enable != 0) { 2601 printf("Port %u has enabled GRO. Please" 2602 " disable GRO first\n", port_id); 2603 return; 2604 } 2605 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2606 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2607 gro_ports[port_id].param.max_flow_num = 2608 GRO_DEFAULT_FLOW_NUM; 2609 gro_ports[port_id].param.max_item_per_flow = 2610 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2611 } 2612 gro_ports[port_id].enable = 1; 2613 } else { 2614 if (gro_ports[port_id].enable == 0) { 2615 printf("Port %u has disabled GRO\n", port_id); 2616 return; 2617 } 2618 gro_ports[port_id].enable = 0; 2619 } 2620 } 2621 2622 void 2623 setup_gro_flush_cycles(uint8_t cycles) 2624 { 2625 if (test_done == 0) { 2626 printf("Before change flush interval for GRO," 2627 " please stop forwarding first.\n"); 2628 return; 2629 } 2630 2631 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2632 GRO_DEFAULT_FLUSH_CYCLES) { 2633 printf("The flushing cycle be in the range" 2634 " of 1 to %u. Revert to the default" 2635 " value %u.\n", 2636 GRO_MAX_FLUSH_CYCLES, 2637 GRO_DEFAULT_FLUSH_CYCLES); 2638 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2639 } 2640 2641 gro_flush_cycles = cycles; 2642 } 2643 2644 void 2645 show_gro(portid_t port_id) 2646 { 2647 struct rte_gro_param *param; 2648 uint32_t max_pkts_num; 2649 2650 param = &gro_ports[port_id].param; 2651 2652 if (!rte_eth_dev_is_valid_port(port_id)) { 2653 printf("Invalid port id %u.\n", port_id); 2654 return; 2655 } 2656 if (gro_ports[port_id].enable) { 2657 printf("GRO type: TCP/IPv4\n"); 2658 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2659 max_pkts_num = param->max_flow_num * 2660 param->max_item_per_flow; 2661 } else 2662 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2663 printf("Max number of packets to perform GRO: %u\n", 2664 max_pkts_num); 2665 printf("Flushing cycles: %u\n", gro_flush_cycles); 2666 } else 2667 printf("Port %u doesn't enable GRO.\n", port_id); 2668 } 2669 2670 void 2671 setup_gso(const char *mode, portid_t port_id) 2672 { 2673 if (!rte_eth_dev_is_valid_port(port_id)) { 2674 printf("invalid port id %u\n", port_id); 2675 return; 2676 } 2677 if (strcmp(mode, "on") == 0) { 2678 if (test_done == 0) { 2679 printf("before enabling GSO," 2680 " please stop forwarding first\n"); 2681 return; 2682 } 2683 gso_ports[port_id].enable = 1; 2684 } else if (strcmp(mode, "off") == 0) { 2685 if (test_done == 0) { 2686 printf("before disabling GSO," 2687 " please stop forwarding first\n"); 2688 return; 2689 } 2690 gso_ports[port_id].enable = 0; 2691 } 2692 } 2693 2694 char* 2695 list_pkt_forwarding_modes(void) 2696 { 2697 static char fwd_modes[128] = ""; 2698 const char *separator = "|"; 2699 struct fwd_engine *fwd_eng; 2700 unsigned i = 0; 2701 2702 if (strlen (fwd_modes) == 0) { 2703 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2704 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2705 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2706 strncat(fwd_modes, separator, 2707 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2708 } 2709 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2710 } 2711 2712 return fwd_modes; 2713 } 2714 2715 char* 2716 list_pkt_forwarding_retry_modes(void) 2717 { 2718 static char fwd_modes[128] = ""; 2719 const char *separator = "|"; 2720 struct fwd_engine *fwd_eng; 2721 unsigned i = 0; 2722 2723 if (strlen(fwd_modes) == 0) { 2724 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2725 if (fwd_eng == &rx_only_engine) 2726 continue; 2727 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2728 sizeof(fwd_modes) - 2729 strlen(fwd_modes) - 1); 2730 strncat(fwd_modes, separator, 2731 sizeof(fwd_modes) - 2732 strlen(fwd_modes) - 1); 2733 } 2734 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2735 } 2736 2737 return fwd_modes; 2738 } 2739 2740 void 2741 set_pkt_forwarding_mode(const char *fwd_mode_name) 2742 { 2743 struct fwd_engine *fwd_eng; 2744 unsigned i; 2745 2746 i = 0; 2747 while ((fwd_eng = fwd_engines[i]) != NULL) { 2748 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2749 printf("Set %s packet forwarding mode%s\n", 2750 fwd_mode_name, 2751 retry_enabled == 0 ? "" : " with retry"); 2752 cur_fwd_eng = fwd_eng; 2753 return; 2754 } 2755 i++; 2756 } 2757 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2758 } 2759 2760 void 2761 set_verbose_level(uint16_t vb_level) 2762 { 2763 printf("Change verbose level from %u to %u\n", 2764 (unsigned int) verbose_level, (unsigned int) vb_level); 2765 verbose_level = vb_level; 2766 } 2767 2768 void 2769 vlan_extend_set(portid_t port_id, int on) 2770 { 2771 int diag; 2772 int vlan_offload; 2773 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2774 2775 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2776 return; 2777 2778 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2779 2780 if (on) { 2781 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2782 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2783 } else { 2784 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2785 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2786 } 2787 2788 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2789 if (diag < 0) 2790 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2791 "diag=%d\n", port_id, on, diag); 2792 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2793 } 2794 2795 void 2796 rx_vlan_strip_set(portid_t port_id, int on) 2797 { 2798 int diag; 2799 int vlan_offload; 2800 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2801 2802 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2803 return; 2804 2805 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2806 2807 if (on) { 2808 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2809 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2810 } else { 2811 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2812 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2813 } 2814 2815 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2816 if (diag < 0) 2817 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2818 "diag=%d\n", port_id, on, diag); 2819 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2820 } 2821 2822 void 2823 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2824 { 2825 int diag; 2826 2827 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2828 return; 2829 2830 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2831 if (diag < 0) 2832 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2833 "diag=%d\n", port_id, queue_id, on, diag); 2834 } 2835 2836 void 2837 rx_vlan_filter_set(portid_t port_id, int on) 2838 { 2839 int diag; 2840 int vlan_offload; 2841 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2842 2843 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2844 return; 2845 2846 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2847 2848 if (on) { 2849 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2850 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2851 } else { 2852 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2853 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2854 } 2855 2856 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2857 if (diag < 0) 2858 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2859 "diag=%d\n", port_id, on, diag); 2860 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2861 } 2862 2863 int 2864 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2865 { 2866 int diag; 2867 2868 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2869 return 1; 2870 if (vlan_id_is_invalid(vlan_id)) 2871 return 1; 2872 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2873 if (diag == 0) 2874 return 0; 2875 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2876 "diag=%d\n", 2877 port_id, vlan_id, on, diag); 2878 return -1; 2879 } 2880 2881 void 2882 rx_vlan_all_filter_set(portid_t port_id, int on) 2883 { 2884 uint16_t vlan_id; 2885 2886 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2887 return; 2888 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2889 if (rx_vft_set(port_id, vlan_id, on)) 2890 break; 2891 } 2892 } 2893 2894 void 2895 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2896 { 2897 int diag; 2898 2899 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2900 return; 2901 2902 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2903 if (diag == 0) 2904 return; 2905 2906 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2907 "diag=%d\n", 2908 port_id, vlan_type, tp_id, diag); 2909 } 2910 2911 void 2912 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2913 { 2914 int vlan_offload; 2915 struct rte_eth_dev_info dev_info; 2916 2917 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2918 return; 2919 if (vlan_id_is_invalid(vlan_id)) 2920 return; 2921 2922 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2923 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2924 printf("Error, as QinQ has been enabled.\n"); 2925 return; 2926 } 2927 rte_eth_dev_info_get(port_id, &dev_info); 2928 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 2929 printf("Error: vlan insert is not supported by port %d\n", 2930 port_id); 2931 return; 2932 } 2933 2934 tx_vlan_reset(port_id); 2935 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 2936 ports[port_id].tx_vlan_id = vlan_id; 2937 } 2938 2939 void 2940 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2941 { 2942 int vlan_offload; 2943 struct rte_eth_dev_info dev_info; 2944 2945 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2946 return; 2947 if (vlan_id_is_invalid(vlan_id)) 2948 return; 2949 if (vlan_id_is_invalid(vlan_id_outer)) 2950 return; 2951 2952 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2953 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2954 printf("Error, as QinQ hasn't been enabled.\n"); 2955 return; 2956 } 2957 rte_eth_dev_info_get(port_id, &dev_info); 2958 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 2959 printf("Error: qinq insert not supported by port %d\n", 2960 port_id); 2961 return; 2962 } 2963 2964 tx_vlan_reset(port_id); 2965 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; 2966 ports[port_id].tx_vlan_id = vlan_id; 2967 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2968 } 2969 2970 void 2971 tx_vlan_reset(portid_t port_id) 2972 { 2973 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2974 return; 2975 ports[port_id].dev_conf.txmode.offloads &= 2976 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 2977 DEV_TX_OFFLOAD_QINQ_INSERT); 2978 ports[port_id].tx_vlan_id = 0; 2979 ports[port_id].tx_vlan_id_outer = 0; 2980 } 2981 2982 void 2983 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2984 { 2985 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2986 return; 2987 2988 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2989 } 2990 2991 void 2992 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2993 { 2994 uint16_t i; 2995 uint8_t existing_mapping_found = 0; 2996 2997 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2998 return; 2999 3000 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3001 return; 3002 3003 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3004 printf("map_value not in required range 0..%d\n", 3005 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3006 return; 3007 } 3008 3009 if (!is_rx) { /*then tx*/ 3010 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3011 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3012 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3013 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3014 existing_mapping_found = 1; 3015 break; 3016 } 3017 } 3018 if (!existing_mapping_found) { /* A new additional mapping... */ 3019 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3020 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3021 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3022 nb_tx_queue_stats_mappings++; 3023 } 3024 } 3025 else { /*rx*/ 3026 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3027 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3028 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3029 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3030 existing_mapping_found = 1; 3031 break; 3032 } 3033 } 3034 if (!existing_mapping_found) { /* A new additional mapping... */ 3035 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3036 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3037 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3038 nb_rx_queue_stats_mappings++; 3039 } 3040 } 3041 } 3042 3043 void 3044 set_xstats_hide_zero(uint8_t on_off) 3045 { 3046 xstats_hide_zero = on_off; 3047 } 3048 3049 static inline void 3050 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3051 { 3052 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3053 3054 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3055 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3056 " tunnel_id: 0x%08x", 3057 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3058 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3059 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3060 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3061 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3062 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3063 3064 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3065 rte_be_to_cpu_16(mask->src_port_mask), 3066 rte_be_to_cpu_16(mask->dst_port_mask)); 3067 3068 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3069 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3070 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3071 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3072 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3073 3074 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3075 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3076 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3077 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3078 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3079 } 3080 3081 printf("\n"); 3082 } 3083 3084 static inline void 3085 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3086 { 3087 struct rte_eth_flex_payload_cfg *cfg; 3088 uint32_t i, j; 3089 3090 for (i = 0; i < flex_conf->nb_payloads; i++) { 3091 cfg = &flex_conf->flex_set[i]; 3092 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3093 printf("\n RAW: "); 3094 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3095 printf("\n L2_PAYLOAD: "); 3096 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3097 printf("\n L3_PAYLOAD: "); 3098 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3099 printf("\n L4_PAYLOAD: "); 3100 else 3101 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3102 for (j = 0; j < num; j++) 3103 printf(" %-5u", cfg->src_offset[j]); 3104 } 3105 printf("\n"); 3106 } 3107 3108 static char * 3109 flowtype_to_str(uint16_t flow_type) 3110 { 3111 struct flow_type_info { 3112 char str[32]; 3113 uint16_t ftype; 3114 }; 3115 3116 uint8_t i; 3117 static struct flow_type_info flowtype_str_table[] = { 3118 {"raw", RTE_ETH_FLOW_RAW}, 3119 {"ipv4", RTE_ETH_FLOW_IPV4}, 3120 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3121 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3122 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3123 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3124 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3125 {"ipv6", RTE_ETH_FLOW_IPV6}, 3126 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3127 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3128 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3129 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3130 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3131 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3132 {"port", RTE_ETH_FLOW_PORT}, 3133 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3134 {"geneve", RTE_ETH_FLOW_GENEVE}, 3135 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3136 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3137 }; 3138 3139 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3140 if (flowtype_str_table[i].ftype == flow_type) 3141 return flowtype_str_table[i].str; 3142 } 3143 3144 return NULL; 3145 } 3146 3147 static inline void 3148 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3149 { 3150 struct rte_eth_fdir_flex_mask *mask; 3151 uint32_t i, j; 3152 char *p; 3153 3154 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3155 mask = &flex_conf->flex_mask[i]; 3156 p = flowtype_to_str(mask->flow_type); 3157 printf("\n %s:\t", p ? p : "unknown"); 3158 for (j = 0; j < num; j++) 3159 printf(" %02x", mask->mask[j]); 3160 } 3161 printf("\n"); 3162 } 3163 3164 static inline void 3165 print_fdir_flow_type(uint32_t flow_types_mask) 3166 { 3167 int i; 3168 char *p; 3169 3170 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3171 if (!(flow_types_mask & (1 << i))) 3172 continue; 3173 p = flowtype_to_str(i); 3174 if (p) 3175 printf(" %s", p); 3176 else 3177 printf(" unknown"); 3178 } 3179 printf("\n"); 3180 } 3181 3182 void 3183 fdir_get_infos(portid_t port_id) 3184 { 3185 struct rte_eth_fdir_stats fdir_stat; 3186 struct rte_eth_fdir_info fdir_info; 3187 int ret; 3188 3189 static const char *fdir_stats_border = "########################"; 3190 3191 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3192 return; 3193 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3194 if (ret < 0) { 3195 printf("\n FDIR is not supported on port %-2d\n", 3196 port_id); 3197 return; 3198 } 3199 3200 memset(&fdir_info, 0, sizeof(fdir_info)); 3201 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3202 RTE_ETH_FILTER_INFO, &fdir_info); 3203 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3204 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3205 RTE_ETH_FILTER_STATS, &fdir_stat); 3206 printf("\n %s FDIR infos for port %-2d %s\n", 3207 fdir_stats_border, port_id, fdir_stats_border); 3208 printf(" MODE: "); 3209 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3210 printf(" PERFECT\n"); 3211 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3212 printf(" PERFECT-MAC-VLAN\n"); 3213 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3214 printf(" PERFECT-TUNNEL\n"); 3215 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3216 printf(" SIGNATURE\n"); 3217 else 3218 printf(" DISABLE\n"); 3219 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3220 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3221 printf(" SUPPORTED FLOW TYPE: "); 3222 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3223 } 3224 printf(" FLEX PAYLOAD INFO:\n"); 3225 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3226 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3227 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3228 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3229 fdir_info.flex_payload_unit, 3230 fdir_info.max_flex_payload_segment_num, 3231 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3232 printf(" MASK: "); 3233 print_fdir_mask(&fdir_info.mask); 3234 if (fdir_info.flex_conf.nb_payloads > 0) { 3235 printf(" FLEX PAYLOAD SRC OFFSET:"); 3236 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3237 } 3238 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3239 printf(" FLEX MASK CFG:"); 3240 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3241 } 3242 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3243 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3244 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3245 fdir_info.guarant_spc, fdir_info.best_spc); 3246 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3247 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3248 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3249 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3250 fdir_stat.collision, fdir_stat.free, 3251 fdir_stat.maxhash, fdir_stat.maxlen, 3252 fdir_stat.add, fdir_stat.remove, 3253 fdir_stat.f_add, fdir_stat.f_remove); 3254 printf(" %s############################%s\n", 3255 fdir_stats_border, fdir_stats_border); 3256 } 3257 3258 void 3259 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3260 { 3261 struct rte_port *port; 3262 struct rte_eth_fdir_flex_conf *flex_conf; 3263 int i, idx = 0; 3264 3265 port = &ports[port_id]; 3266 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3267 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3268 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3269 idx = i; 3270 break; 3271 } 3272 } 3273 if (i >= RTE_ETH_FLOW_MAX) { 3274 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3275 idx = flex_conf->nb_flexmasks; 3276 flex_conf->nb_flexmasks++; 3277 } else { 3278 printf("The flex mask table is full. Can not set flex" 3279 " mask for flow_type(%u).", cfg->flow_type); 3280 return; 3281 } 3282 } 3283 rte_memcpy(&flex_conf->flex_mask[idx], 3284 cfg, 3285 sizeof(struct rte_eth_fdir_flex_mask)); 3286 } 3287 3288 void 3289 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3290 { 3291 struct rte_port *port; 3292 struct rte_eth_fdir_flex_conf *flex_conf; 3293 int i, idx = 0; 3294 3295 port = &ports[port_id]; 3296 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3297 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3298 if (cfg->type == flex_conf->flex_set[i].type) { 3299 idx = i; 3300 break; 3301 } 3302 } 3303 if (i >= RTE_ETH_PAYLOAD_MAX) { 3304 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3305 idx = flex_conf->nb_payloads; 3306 flex_conf->nb_payloads++; 3307 } else { 3308 printf("The flex payload table is full. Can not set" 3309 " flex payload for type(%u).", cfg->type); 3310 return; 3311 } 3312 } 3313 rte_memcpy(&flex_conf->flex_set[idx], 3314 cfg, 3315 sizeof(struct rte_eth_flex_payload_cfg)); 3316 3317 } 3318 3319 void 3320 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3321 { 3322 #ifdef RTE_LIBRTE_IXGBE_PMD 3323 int diag; 3324 3325 if (is_rx) 3326 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3327 else 3328 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3329 3330 if (diag == 0) 3331 return; 3332 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3333 is_rx ? "rx" : "tx", port_id, diag); 3334 return; 3335 #endif 3336 printf("VF %s setting not supported for port %d\n", 3337 is_rx ? "Rx" : "Tx", port_id); 3338 RTE_SET_USED(vf); 3339 RTE_SET_USED(on); 3340 } 3341 3342 int 3343 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3344 { 3345 int diag; 3346 struct rte_eth_link link; 3347 3348 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3349 return 1; 3350 rte_eth_link_get_nowait(port_id, &link); 3351 if (rate > link.link_speed) { 3352 printf("Invalid rate value:%u bigger than link speed: %u\n", 3353 rate, link.link_speed); 3354 return 1; 3355 } 3356 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3357 if (diag == 0) 3358 return diag; 3359 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3360 port_id, diag); 3361 return diag; 3362 } 3363 3364 int 3365 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3366 { 3367 int diag = -ENOTSUP; 3368 3369 RTE_SET_USED(vf); 3370 RTE_SET_USED(rate); 3371 RTE_SET_USED(q_msk); 3372 3373 #ifdef RTE_LIBRTE_IXGBE_PMD 3374 if (diag == -ENOTSUP) 3375 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3376 q_msk); 3377 #endif 3378 #ifdef RTE_LIBRTE_BNXT_PMD 3379 if (diag == -ENOTSUP) 3380 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3381 #endif 3382 if (diag == 0) 3383 return diag; 3384 3385 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3386 port_id, diag); 3387 return diag; 3388 } 3389 3390 /* 3391 * Functions to manage the set of filtered Multicast MAC addresses. 3392 * 3393 * A pool of filtered multicast MAC addresses is associated with each port. 3394 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3395 * The address of the pool and the number of valid multicast MAC addresses 3396 * recorded in the pool are stored in the fields "mc_addr_pool" and 3397 * "mc_addr_nb" of the "rte_port" data structure. 3398 * 3399 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3400 * to be supplied a contiguous array of multicast MAC addresses. 3401 * To comply with this constraint, the set of multicast addresses recorded 3402 * into the pool are systematically compacted at the beginning of the pool. 3403 * Hence, when a multicast address is removed from the pool, all following 3404 * addresses, if any, are copied back to keep the set contiguous. 3405 */ 3406 #define MCAST_POOL_INC 32 3407 3408 static int 3409 mcast_addr_pool_extend(struct rte_port *port) 3410 { 3411 struct ether_addr *mc_pool; 3412 size_t mc_pool_size; 3413 3414 /* 3415 * If a free entry is available at the end of the pool, just 3416 * increment the number of recorded multicast addresses. 3417 */ 3418 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3419 port->mc_addr_nb++; 3420 return 0; 3421 } 3422 3423 /* 3424 * [re]allocate a pool with MCAST_POOL_INC more entries. 3425 * The previous test guarantees that port->mc_addr_nb is a multiple 3426 * of MCAST_POOL_INC. 3427 */ 3428 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3429 MCAST_POOL_INC); 3430 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3431 mc_pool_size); 3432 if (mc_pool == NULL) { 3433 printf("allocation of pool of %u multicast addresses failed\n", 3434 port->mc_addr_nb + MCAST_POOL_INC); 3435 return -ENOMEM; 3436 } 3437 3438 port->mc_addr_pool = mc_pool; 3439 port->mc_addr_nb++; 3440 return 0; 3441 3442 } 3443 3444 static void 3445 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3446 { 3447 port->mc_addr_nb--; 3448 if (addr_idx == port->mc_addr_nb) { 3449 /* No need to recompact the set of multicast addressses. */ 3450 if (port->mc_addr_nb == 0) { 3451 /* free the pool of multicast addresses. */ 3452 free(port->mc_addr_pool); 3453 port->mc_addr_pool = NULL; 3454 } 3455 return; 3456 } 3457 memmove(&port->mc_addr_pool[addr_idx], 3458 &port->mc_addr_pool[addr_idx + 1], 3459 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3460 } 3461 3462 static void 3463 eth_port_multicast_addr_list_set(portid_t port_id) 3464 { 3465 struct rte_port *port; 3466 int diag; 3467 3468 port = &ports[port_id]; 3469 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3470 port->mc_addr_nb); 3471 if (diag == 0) 3472 return; 3473 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3474 port->mc_addr_nb, port_id, -diag); 3475 } 3476 3477 void 3478 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr) 3479 { 3480 struct rte_port *port; 3481 uint32_t i; 3482 3483 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3484 return; 3485 3486 port = &ports[port_id]; 3487 3488 /* 3489 * Check that the added multicast MAC address is not already recorded 3490 * in the pool of multicast addresses. 3491 */ 3492 for (i = 0; i < port->mc_addr_nb; i++) { 3493 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3494 printf("multicast address already filtered by port\n"); 3495 return; 3496 } 3497 } 3498 3499 if (mcast_addr_pool_extend(port) != 0) 3500 return; 3501 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3502 eth_port_multicast_addr_list_set(port_id); 3503 } 3504 3505 void 3506 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr) 3507 { 3508 struct rte_port *port; 3509 uint32_t i; 3510 3511 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3512 return; 3513 3514 port = &ports[port_id]; 3515 3516 /* 3517 * Search the pool of multicast MAC addresses for the removed address. 3518 */ 3519 for (i = 0; i < port->mc_addr_nb; i++) { 3520 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3521 break; 3522 } 3523 if (i == port->mc_addr_nb) { 3524 printf("multicast address not filtered by port %d\n", port_id); 3525 return; 3526 } 3527 3528 mcast_addr_pool_remove(port, i); 3529 eth_port_multicast_addr_list_set(port_id); 3530 } 3531 3532 void 3533 port_dcb_info_display(portid_t port_id) 3534 { 3535 struct rte_eth_dcb_info dcb_info; 3536 uint16_t i; 3537 int ret; 3538 static const char *border = "================"; 3539 3540 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3541 return; 3542 3543 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3544 if (ret) { 3545 printf("\n Failed to get dcb infos on port %-2d\n", 3546 port_id); 3547 return; 3548 } 3549 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3550 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3551 printf("\n TC : "); 3552 for (i = 0; i < dcb_info.nb_tcs; i++) 3553 printf("\t%4d", i); 3554 printf("\n Priority : "); 3555 for (i = 0; i < dcb_info.nb_tcs; i++) 3556 printf("\t%4d", dcb_info.prio_tc[i]); 3557 printf("\n BW percent :"); 3558 for (i = 0; i < dcb_info.nb_tcs; i++) 3559 printf("\t%4d%%", dcb_info.tc_bws[i]); 3560 printf("\n RXQ base : "); 3561 for (i = 0; i < dcb_info.nb_tcs; i++) 3562 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3563 printf("\n RXQ number :"); 3564 for (i = 0; i < dcb_info.nb_tcs; i++) 3565 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3566 printf("\n TXQ base : "); 3567 for (i = 0; i < dcb_info.nb_tcs; i++) 3568 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3569 printf("\n TXQ number :"); 3570 for (i = 0; i < dcb_info.nb_tcs; i++) 3571 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3572 printf("\n"); 3573 } 3574 3575 uint8_t * 3576 open_file(const char *file_path, uint32_t *size) 3577 { 3578 int fd = open(file_path, O_RDONLY); 3579 off_t pkg_size; 3580 uint8_t *buf = NULL; 3581 int ret = 0; 3582 struct stat st_buf; 3583 3584 if (size) 3585 *size = 0; 3586 3587 if (fd == -1) { 3588 printf("%s: Failed to open %s\n", __func__, file_path); 3589 return buf; 3590 } 3591 3592 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3593 close(fd); 3594 printf("%s: File operations failed\n", __func__); 3595 return buf; 3596 } 3597 3598 pkg_size = st_buf.st_size; 3599 if (pkg_size < 0) { 3600 close(fd); 3601 printf("%s: File operations failed\n", __func__); 3602 return buf; 3603 } 3604 3605 buf = (uint8_t *)malloc(pkg_size); 3606 if (!buf) { 3607 close(fd); 3608 printf("%s: Failed to malloc memory\n", __func__); 3609 return buf; 3610 } 3611 3612 ret = read(fd, buf, pkg_size); 3613 if (ret < 0) { 3614 close(fd); 3615 printf("%s: File read operation failed\n", __func__); 3616 close_file(buf); 3617 return NULL; 3618 } 3619 3620 if (size) 3621 *size = pkg_size; 3622 3623 close(fd); 3624 3625 return buf; 3626 } 3627 3628 int 3629 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3630 { 3631 FILE *fh = fopen(file_path, "wb"); 3632 3633 if (fh == NULL) { 3634 printf("%s: Failed to open %s\n", __func__, file_path); 3635 return -1; 3636 } 3637 3638 if (fwrite(buf, 1, size, fh) != size) { 3639 fclose(fh); 3640 printf("%s: File write operation failed\n", __func__); 3641 return -1; 3642 } 3643 3644 fclose(fh); 3645 3646 return 0; 3647 } 3648 3649 int 3650 close_file(uint8_t *buf) 3651 { 3652 if (buf) { 3653 free((void *)buf); 3654 return 0; 3655 } 3656 3657 return -1; 3658 } 3659 3660 void 3661 port_queue_region_info_display(portid_t port_id, void *buf) 3662 { 3663 #ifdef RTE_LIBRTE_I40E_PMD 3664 uint16_t i, j; 3665 struct rte_pmd_i40e_queue_regions *info = 3666 (struct rte_pmd_i40e_queue_regions *)buf; 3667 static const char *queue_region_info_stats_border = "-------"; 3668 3669 if (!info->queue_region_number) 3670 printf("there is no region has been set before"); 3671 3672 printf("\n %s All queue region info for port=%2d %s", 3673 queue_region_info_stats_border, port_id, 3674 queue_region_info_stats_border); 3675 printf("\n queue_region_number: %-14u \n", 3676 info->queue_region_number); 3677 3678 for (i = 0; i < info->queue_region_number; i++) { 3679 printf("\n region_id: %-14u queue_number: %-14u " 3680 "queue_start_index: %-14u \n", 3681 info->region[i].region_id, 3682 info->region[i].queue_num, 3683 info->region[i].queue_start_index); 3684 3685 printf(" user_priority_num is %-14u :", 3686 info->region[i].user_priority_num); 3687 for (j = 0; j < info->region[i].user_priority_num; j++) 3688 printf(" %-14u ", info->region[i].user_priority[j]); 3689 3690 printf("\n flowtype_num is %-14u :", 3691 info->region[i].flowtype_num); 3692 for (j = 0; j < info->region[i].flowtype_num; j++) 3693 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3694 } 3695 #else 3696 RTE_SET_USED(port_id); 3697 RTE_SET_USED(buf); 3698 #endif 3699 3700 printf("\n\n"); 3701 } 3702