/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <cmdline_parse_etheraddr.h>

#include "testpmd.h"

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ NULL, 0 },
};
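
/*
 * Illustrative note: the table above is scanned linearly by name, e.g.
 * port_rss_hash_conf_show() maps the user-supplied string "ipv4-tcp" to
 * ETH_RSS_NONFRAG_IPV4_TCP; the { NULL, 0 } entry terminates the scan.
 */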

static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
	portid_t pid;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64
		       " RX-bytes: %-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64
		       " TX-bytes: %-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	} else {
		printf(" RX-packets: %10"PRIu64" RX-missed: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i],
			       stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}
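
	/*
	 * Illustrative note: the throughput below is computed from TSC
	 * deltas. E.g. with rte_get_tsc_hz() == 2e9 and 2e9 cycles elapsed
	 * since the previous call (one second), a delta of 10,000,000 RX
	 * packets yields 10e6 * 2e9 / 2e9 = 10,000,000 pps.
	 */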
	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n",
	       mpps_rx, mpps_tx);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n NIC statistics for port %d cleared\n", port_id);
}
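
/*
 * Illustrative note: like several ethdev "get" APIs, xstats retrieval
 * below is a two-pass protocol. A first call with a NULL buffer returns
 * the required element count, the caller allocates, and a second call
 * fills the array; a count mismatch between the two calls is an error.
 */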

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	rte_eth_xstats_reset(port_id);
}

void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;
	portid_t pid;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf(" RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf(" TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf(" %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "RX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "TX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	portid_t pid;
	uint16_t mtu;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link);
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	printf("\nDriver name: %s", dev_info.driver_name);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload:\n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on\n");
		else
			printf("  strip off\n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on\n");
		else
			printf("  filter off\n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on\n");
		else
			printf("  qinq(extend) off\n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT;
		     i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			if (p)
				printf("  %s\n", p);
			else
				printf("  user defined %d\n", i);
		}
	}

	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
	printf("Maximum configurable length of RX packet: %u\n",
	       dev_info.max_rx_pktlen);
	if (dev_info.max_vfs)
		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
	if (dev_info.max_vmdq_pools)
		printf("Maximum number of VMDq pools: %u\n",
		       dev_info.max_vmdq_pools);

	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
}

void
port_offload_cap_display(portid_t port_id)
{
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_info_get(port_id, &dev_info);

	printf("\n%s Port %d supported offload features: %s\n",
	       info_border, port_id, info_border);

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_QINQ_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}
checksum: "); 556 if (ports[port_id].dev_conf.rxmode.offloads & 557 DEV_RX_OFFLOAD_IPV4_CKSUM) 558 printf("on\n"); 559 else 560 printf("off\n"); 561 } 562 563 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 564 printf("RX UDP checksum: "); 565 if (ports[port_id].dev_conf.rxmode.offloads & 566 DEV_RX_OFFLOAD_UDP_CKSUM) 567 printf("on\n"); 568 else 569 printf("off\n"); 570 } 571 572 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 573 printf("RX TCP checksum: "); 574 if (ports[port_id].dev_conf.rxmode.offloads & 575 DEV_RX_OFFLOAD_TCP_CKSUM) 576 printf("on\n"); 577 else 578 printf("off\n"); 579 } 580 581 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 582 printf("RX Outer IPv4 checksum: "); 583 if (ports[port_id].dev_conf.rxmode.offloads & 584 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 585 printf("on\n"); 586 else 587 printf("off\n"); 588 } 589 590 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 591 printf("Large receive offload: "); 592 if (ports[port_id].dev_conf.rxmode.offloads & 593 DEV_RX_OFFLOAD_TCP_LRO) 594 printf("on\n"); 595 else 596 printf("off\n"); 597 } 598 599 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 600 printf("VLAN insert: "); 601 if (ports[port_id].dev_conf.txmode.offloads & 602 DEV_TX_OFFLOAD_VLAN_INSERT) 603 printf("on\n"); 604 else 605 printf("off\n"); 606 } 607 608 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 609 printf("HW timestamp: "); 610 if (ports[port_id].dev_conf.rxmode.offloads & 611 DEV_RX_OFFLOAD_TIMESTAMP) 612 printf("on\n"); 613 else 614 printf("off\n"); 615 } 616 617 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 618 printf("Double VLANs insert: "); 619 if (ports[port_id].dev_conf.txmode.offloads & 620 DEV_TX_OFFLOAD_QINQ_INSERT) 621 printf("on\n"); 622 else 623 printf("off\n"); 624 } 625 626 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 627 printf("TX IPv4 checksum: "); 628 if (ports[port_id].dev_conf.txmode.offloads & 629 DEV_TX_OFFLOAD_IPV4_CKSUM) 630 printf("on\n"); 631 else 632 printf("off\n"); 633 } 634 635 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 636 printf("TX UDP checksum: "); 637 if (ports[port_id].dev_conf.txmode.offloads & 638 DEV_TX_OFFLOAD_UDP_CKSUM) 639 printf("on\n"); 640 else 641 printf("off\n"); 642 } 643 644 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 645 printf("TX TCP checksum: "); 646 if (ports[port_id].dev_conf.txmode.offloads & 647 DEV_TX_OFFLOAD_TCP_CKSUM) 648 printf("on\n"); 649 else 650 printf("off\n"); 651 } 652 653 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 654 printf("TX SCTP checksum: "); 655 if (ports[port_id].dev_conf.txmode.offloads & 656 DEV_TX_OFFLOAD_SCTP_CKSUM) 657 printf("on\n"); 658 else 659 printf("off\n"); 660 } 661 662 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 663 printf("TX Outer IPv4 checksum: "); 664 if (ports[port_id].dev_conf.txmode.offloads & 665 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 666 printf("on\n"); 667 else 668 printf("off\n"); 669 } 670 671 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 672 printf("TX TCP segmentation: "); 673 if (ports[port_id].dev_conf.txmode.offloads & 674 DEV_TX_OFFLOAD_TCP_TSO) 675 printf("on\n"); 676 else 677 printf("off\n"); 678 } 679 680 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 681 printf("TX UDP segmentation: "); 682 if (ports[port_id].dev_conf.txmode.offloads & 683 DEV_TX_OFFLOAD_UDP_TSO) 684 printf("on\n"); 685 else 686 printf("off\n"); 687 } 688 689 if (dev_info.tx_offload_capa & 

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
		printf("TSO for VXLAN tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
		printf("TSO for GRE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GRE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
		printf("TSO for IPIP tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPIP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
		printf("TSO for GENEVE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
		printf("IP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
		printf("UDP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}
}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}

	if (!ports[port_id].dev_info.device) {
		printf("Invalid device\n");
		return 0;
	}

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
	} else {
		printf("Not a PCI device\n");
		return 1;
	}

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
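
/*
 * Worked example (illustrative): setting bits [4, 7] of a register to
 * value 0x9 with port_reg_bit_field_set() computes
 * max_v = (1 << 4) - 1 = 0xF, clears the field with
 * reg_v &= ~(0xF << 4), then ORs in (0x9 << 4) = 0x90.
 */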

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}

/* Generic flow management functions. */

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
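
/*
 * Illustrative expansion: MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth))
 * produces the designated initializer
 *   [RTE_FLOW_ITEM_TYPE_ETH] = { .name = "ETH",
 *                                .size = sizeof(struct rte_flow_item_eth) },
 * so flow_item[] can be indexed directly by item type.
 */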

/** Information about known flow pattern items. */
static const struct {
	const char *name;
	size_t size;
} flow_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};

/** Pattern item specification types. */
enum item_spec_type {
	ITEM_SPEC,
	ITEM_LAST,
	ITEM_MASK,
};

/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
		    enum item_spec_type type)
{
	size_t size = 0;
	const void *item_spec =
		type == ITEM_SPEC ? item->spec :
		type == ITEM_LAST ? item->last :
		type == ITEM_MASK ? item->mask :
		NULL;

	if (!item_spec)
		goto empty;
	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t off;

	case RTE_FLOW_ITEM_TYPE_RAW:
		src.raw = item_spec;
		dst.raw = buf;
		off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
				     sizeof(*src.raw->pattern));
		size = off + src.raw->length * sizeof(*src.raw->pattern);
		if (dst.raw) {
			memcpy(dst.raw, src.raw, sizeof(*src.raw));
			dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
						  src.raw->pattern,
						  size - off);
		}
		break;
	default:
		size = flow_item[item->type].size;
		if (buf)
			memcpy(buf, item_spec, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct {
	const char *name;
	size_t size;
} flow_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
};

/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
	size_t size = 0;

	if (!action->conf)
		goto empty;
	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} src;
		union {
			struct rte_flow_action_rss *rss;
		} dst;
		size_t off;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		off = 0;
		if (dst.rss)
			*dst.rss = (struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			};
		off += sizeof(*src.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->key) * src.rss->key_len;
			if (dst.rss)
				dst.rss->key = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, size);
			off += size;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (dst.rss)
				dst.rss->queue = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, size);
			off += size;
		}
		size = off;
		break;
	default:
		size = flow_action[action->type].size;
		if (buf)
			memcpy(buf, action->conf, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}
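
/*
 * port_flow_new() below runs its body twice: on the first pass pf is
 * NULL, so the copy helpers above only accumulate the byte counts off1
 * (the item/action structures) and off2 (their spec/last/mask or conf
 * payloads); once the buffer is allocated, the "goto store" re-runs the
 * same code to actually copy the data into pf->data[].
 */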

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	struct port_flow *pf = NULL;
	size_t tmp;
	size_t off1 = 0;
	size_t off2 = 0;
	int err = ENOTSUP;

store:
	item = pattern;
	if (pf)
		pf->pattern = (void *)&pf->data[off1];
	do {
		struct rte_flow_item *dst = NULL;

		if ((unsigned int)item->type >= RTE_DIM(flow_item) ||
		    !flow_item[item->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, item, sizeof(*item));
		off1 += sizeof(*item);
		if (item->spec) {
			if (pf)
				dst->spec = pf->data + off2;
			off2 += flow_item_spec_copy
				(pf ? pf->data + off2 : NULL, item, ITEM_SPEC);
		}
		if (item->last) {
			if (pf)
				dst->last = pf->data + off2;
			off2 += flow_item_spec_copy
				(pf ? pf->data + off2 : NULL, item, ITEM_LAST);
		}
		if (item->mask) {
			if (pf)
				dst->mask = pf->data + off2;
			off2 += flow_item_spec_copy
				(pf ? pf->data + off2 : NULL, item, ITEM_MASK);
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	action = actions;
	if (pf)
		pf->actions = (void *)&pf->data[off1];
	do {
		struct rte_flow_action *dst = NULL;

		if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
		    !flow_action[action->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, action, sizeof(*action));
		off1 += sizeof(*action);
		if (action->conf) {
			if (pf)
				dst->conf = pf->data + off2;
			off2 += flow_action_conf_copy
				(pf ? pf->data + off2 : NULL, action);
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	if (pf != NULL)
		return pf;
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double));
	pf = calloc(1, tmp + off1 + off2);
	if (pf == NULL)
		err = errno;
	else {
		*pf = (const struct port_flow){
			.size = tmp + off1 + off2,
			.attr = *attr,
		};
		tmp -= offsetof(struct port_flow, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
notsup:
	rte_errno = err;
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("Caught error type %d (%s): %s%s\n",
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)");
	return -err;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}

/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id;
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		return port_flow_complain(&error);
	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first");
			rte_flow_destroy(port_id, flow, NULL);
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	} else
		id = 0;
	pf = port_flow_new(attr, pattern, actions);
	if (!pf) {
		int err = rte_errno;

		printf("Cannot allocate flow: %s\n", rte_strerror(err));
		rte_flow_destroy(port_id, flow, NULL);
		return -err;
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}
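
/*
 * Minimal usage sketch (illustrative only, never compiled): how a
 * caller could feed port_flow_create(). Port number 0 and RX queue
 * index 1 are assumptions for the example.
 */
#if 0
static int
example_flow_ipv4_to_queue(void)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 1 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Equivalent to testpmd's "flow create 0 ingress pattern eth /
	 * ipv4 / end actions queue index 1 / end". */
	return port_flow_create(0, &attr, pattern, actions);
}
#endif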

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		ret = port_flow_complain(&error);
		if (port_id_is_invalid(port_id, DISABLED_WARN) ||
		    port_id == (portid_t)RTE_PORT_ALL)
			return ret;
	}
	port = &ports[port_id];
	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		enum rte_flow_action_type action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
	} query;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	if ((unsigned int)action >= RTE_DIM(flow_action) ||
	    !flow_action[action].name)
		name = "unknown";
	else
		name = flow_action[action].name;
	switch (action) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		printf("Cannot query action type %d (%s)\n", action, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action, name);
		break;
	}
	return 0;
}

/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (pf->attr.group == group[i])
					break;
			if (i == n)
				continue;
		}
		tmp = &list;
		while (*tmp &&
		       (pf->attr.group > (*tmp)->attr.group ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority > (*tmp)->attr.priority) ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority == (*tmp)->attr.priority &&
			 pf->id > (*tmp)->id)))
			tmp = &(*tmp)->tmp;
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->pattern;
		const struct rte_flow_action *action = pf->actions;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
		       pf->id,
		       pf->attr.group,
		       pf->attr.priority,
		       pf->attr.ingress ? 'i' : '-',
		       pf->attr.egress ? 'e' : '-',
		       pf->attr.transfer ? 't' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", flow_item[item->type].name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", flow_action[action->type].name);
			++action;
		}
		printf("\n");
	}
}

/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
	       port_id,
	       set ? "now restricted" : "not restricted anymore");
	return 0;
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	/* Zone name, e.g. "net_ixgbe_rx_ring_0_1" for port 0, queue 1. */
	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}

void
rxtx_config_display(void)
{
	portid_t pid;
	queueid_t qid;

	printf(" %s packet forwarding%s packets/burst=%d\n",
	       cur_fwd_eng->fwd_mode_name,
	       retry_enabled == 0 ? "" : " with retry",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
		printf(" packet len=%u - nb packet segments=%d\n",
		       (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
		struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];

		/* per port config */
		printf(" port %d: RX queue number: %d Tx queue number: %d\n",
		       (unsigned int)pid, nb_rxq, nb_txq);

		printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
		       ports[pid].dev_conf.rxmode.offloads,
		       ports[pid].dev_conf.txmode.offloads);

		/* per rx queue config only for first queue to be less verbose */
		for (qid = 0; qid < 1; qid++) {
			printf(" RX queue: %d\n", qid);
			printf(" RX desc=%d - RX free threshold=%d\n",
			       nb_rx_desc[qid], rx_conf[qid].rx_free_thresh);
			printf(" RX threshold registers: pthresh=%d hthresh=%d "
			       " wthresh=%d\n",
			       rx_conf[qid].rx_thresh.pthresh,
			       rx_conf[qid].rx_thresh.hthresh,
			       rx_conf[qid].rx_thresh.wthresh);
			printf(" RX Offloads=0x%"PRIx64"\n",
			       rx_conf[qid].offloads);
		}

		/* per tx queue config only for first queue to be less verbose */
		for (qid = 0; qid < 1; qid++) {
			printf(" TX queue: %d\n", qid);
			printf(" TX desc=%d - TX free threshold=%d\n",
			       nb_tx_desc[qid], tx_conf[qid].tx_free_thresh);
			printf(" TX threshold registers: pthresh=%d hthresh=%d "
			       " wthresh=%d\n",
			       tx_conf[qid].tx_thresh.pthresh,
			       tx_conf[qid].tx_thresh.hthresh,
			       tx_conf[qid].tx_thresh.wthresh);
			printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
			       tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
		}
	}
}

void
port_rss_reta_info(portid_t port_id,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t nb_entries)
{
	uint16_t i, idx, shift;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	for (i = 0; i < nb_entries; i++) {
		/*
		 * Entries are grouped 64 per rte_eth_rss_reta_entry64:
		 * e.g. entry 75 is reta_conf[75 / 64].reta[75 % 64] and is
		 * only reported when bit 11 of reta_conf[1].mask is set.
		 */
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
		       i, reta_conf[idx].reta[shift]);
	}
}

/*
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
 * key of the port.
 */
void
port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
{
	struct rte_eth_rss_conf rss_conf;
	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
	uint64_t rss_hf;
	uint8_t i;
	int diag;
	struct rte_eth_dev_info dev_info;
	uint8_t hash_key_size;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.hash_key_size > 0 &&
	    dev_info.hash_key_size <= sizeof(rss_key))
		hash_key_size = dev_info.hash_key_size;
	else {
		printf("dev_info did not provide a valid hash key size\n");
		return;
	}

	rss_conf.rss_hf = 0;
	for (i = 0; rss_type_table[i].str; i++) {
		if (!strcmp(rss_info, rss_type_table[i].str))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	rss_conf.rss_key_len = hash_key_size;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		switch (diag) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		default:
			printf("operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	printf("RSS functions:\n ");
	for (i = 0; rss_type_table[i].str; i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < hash_key_size; i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}

void
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
			 unsigned int hash_key_len)
{
	struct rte_eth_rss_conf rss_conf;
	int diag;
	unsigned int i;

	rss_conf.rss_key = NULL;
	rss_conf.rss_key_len = hash_key_len;
	rss_conf.rss_hf = 0;
	for (i = 0; rss_type_table[i].str; i++) {
		if (!strcmp(rss_type_table[i].str, rss_type))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag == 0) {
		rss_conf.rss_key = hash_key;
		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
	if (diag == 0)
		return;

	switch (diag) {
	case -ENODEV:
		printf("port index %d invalid\n", port_id);
		break;
	case -ENOTSUP:
		printf("operation not supported by device\n");
		break;
	default:
		printf("operation failed - diag=%d\n", diag);
		break;
	}
}
/*
 * Setup the forwarding configuration for each logical core: streams are
 * spread as evenly as possible, e.g. 10 streams over 4 lcores gives
 * 2, 2, 3 and 3 streams respectively.
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t nb_extra;
	lcoreid_t nb_fc;
	lcoreid_t nb_lc;
	lcoreid_t lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}

static portid_t
fwd_topology_tx_port_get(portid_t rxp)
{
	static int warning_once = 1;

	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);

	switch (port_topology) {
	default:
	case PORT_TOPOLOGY_PAIRED:
		if ((rxp & 0x1) == 0) {
			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
				return rxp + 1;
			if (warning_once) {
				printf("\nWarning! port-topology=paired"
				       " with an odd number of forwarding"
				       " ports; the last port will pair"
				       " with itself.\n\n");
				warning_once = 0;
			}
			return rxp;
		}
		return rxp - 1;
	case PORT_TOPOLOGY_CHAINED:
		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
	case PORT_TOPOLOGY_LOOP:
		return rxp;
	}
}

static void
simple_fwd_config_setup(void)
{
	portid_t i;

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower than or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		fwd_streams[i]->rx_port = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue = 0;
		fwd_streams[i]->tx_port =
			fwd_ports_ids[fwd_topology_tx_port_get(i)];
		fwd_streams[i]->tx_queue = 0;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;
	}
}
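/*
 * Worked example for fwd_topology_tx_port_get(): with 4 forwarding ports
 * in the default "paired" topology, RX port 0 transmits on port 1 (and
 * vice versa) and RX port 2 transmits on port 3 (and vice versa). In the
 * "chained" topology, port i transmits on port (i + 1) % 4; in the "loop"
 * topology, each port transmits on itself.
 */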
/**
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of an RX queue to poll on an RX port for input
 * packets, associated with a TX queue of a TX port where to send the
 * forwarded packets.
 */
static void
rss_fwd_config_setup(void)
{
	portid_t rxp;
	portid_t txp;
	queueid_t rxq;
	queueid_t nb_q;
	streamid_t sm_id;

	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[sm_id];
		txp = fwd_topology_tx_port_get(rxp);
		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		fs->retry_enabled = retry_enabled;
		rxq = (queueid_t) (rxq + 1);
		if (rxq < nb_q)
			continue;
		/*
		 * rxq == nb_q: restart from RX queue 0 on the next RX port.
		 */
		rxq = 0;
		rxp++;
	}
}
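/*
 * Worked example for rss_fwd_config_setup(): with 2 forwarding ports and
 * nb_q = 4 (the smaller of nb_rxq and nb_txq), 8 streams are created.
 * Streams 0-3 poll port 0, queues 0-3, and streams 4-7 poll port 1,
 * queues 0-3; each stream transmits on the same queue index of the
 * topology-selected TX port.
 */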
/**
 * For the DCB forwarding test, each core is assigned to one traffic class.
 *
 * Each core is assigned multiple streams, each stream being composed of
 * an RX queue to poll on an RX port for input packets, associated with
 * a TX queue of a TX port where to send the forwarded packets. All RX and
 * TX queues map to the same traffic class.
 * If VMDQ and DCB co-exist, each traffic class on the different pools
 * shares the same core.
 */
static void
dcb_fwd_config_setup(void)
{
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;
	lcoreid_t lc_id;
	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;
	uint8_t tc = 0;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();
	sm_id = 0;
	txp = 1;
	/* get the dcb info on the first RX and TX ports */
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		fwd_lcores[lc_id]->stream_nb = 0;
		fwd_lcores[lc_id]->stream_idx = sm_id;
		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* if the nb_queue is zero, this tc is not enabled
			 * on the pool
			 */
			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
				break;
			k = fwd_lcores[lc_id]->stream_nb +
				fwd_lcores[lc_id]->stream_idx;
			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
			for (j = 0; j < nb_rx_queue; j++) {
				struct fwd_stream *fs;

				fs = fwd_streams[k + j];
				fs->rx_port = fwd_ports_ids[rxp];
				fs->rx_queue = rxq + j;
				fs->tx_port = fwd_ports_ids[txp];
				fs->tx_queue = txq + j % nb_tx_queue;
				fs->peer_addr = fs->tx_port;
				fs->retry_enabled = retry_enabled;
			}
			fwd_lcores[lc_id]->stream_nb +=
				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
		}
		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

		tc++;
		if (tc < rxp_dcb_info.nb_tcs)
			continue;
		/* Restart from TC 0 on next RX port */
		tc = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp++;
		if (rxp >= nb_fwd_ports)
			return;
		/* get the dcb information on next RX and TX ports */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
	}
}

static void
icmp_echo_config_setup(void)
{
	portid_t rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t sm_id;

	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __func__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf(" core=%d:\n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;

			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fs->rx_port;
			fs->tx_queue = rxq;
			fs->peer_addr = fs->tx_port;
			fs->retry_enabled = retry_enabled;
			if (verbose_level > 0)
				printf(" stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}

void
fwd_config_setup(void)
{
	cur_fwd_config.fwd_eng = cur_fwd_eng;
	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
		icmp_echo_config_setup();
		return;
	}
	if ((nb_rxq > 1) && (nb_txq > 1)) {
		if (dcb_config)
			dcb_fwd_config_setup();
		else
			rss_fwd_config_setup();
	} else {
		simple_fwd_config_setup();
	}
}
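/*
 * Note on the dispatch above: "icmpecho" always uses its dedicated setup.
 * Otherwise, multi-queue configurations (nb_rxq > 1 and nb_txq > 1) use
 * the DCB setup when DCB has been configured and the RSS setup when it
 * has not; everything else falls back to one stream per port.
 */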
"enabled" : "disabled"); 2291 2292 if (retry_enabled) 2293 printf("TX retry num: %u, delay between TX retries: %uus\n", 2294 burst_tx_retry_num, burst_tx_delay_time); 2295 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2296 printf("Logical Core %u (socket %u) forwards packets on " 2297 "%d streams:", 2298 fwd_lcores_cpuids[lc_id], 2299 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2300 fwd_lcores[lc_id]->stream_nb); 2301 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2302 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2303 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2304 "P=%d/Q=%d (socket %u) ", 2305 fs->rx_port, fs->rx_queue, 2306 ports[fs->rx_port].socket_id, 2307 fs->tx_port, fs->tx_queue, 2308 ports[fs->tx_port].socket_id); 2309 print_ethaddr("peer=", 2310 &peer_eth_addrs[fs->peer_addr]); 2311 } 2312 printf("\n"); 2313 } 2314 printf("\n"); 2315 } 2316 2317 void 2318 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2319 { 2320 uint8_t c, new_peer_addr[6]; 2321 if (!rte_eth_dev_is_valid_port(port_id)) { 2322 printf("Error: Invalid port number %i\n", port_id); 2323 return; 2324 } 2325 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2326 sizeof(new_peer_addr)) < 0) { 2327 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2328 return; 2329 } 2330 for (c = 0; c < 6; c++) 2331 peer_eth_addrs[port_id].addr_bytes[c] = 2332 new_peer_addr[c]; 2333 } 2334 2335 int 2336 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2337 { 2338 unsigned int i; 2339 unsigned int lcore_cpuid; 2340 int record_now; 2341 2342 record_now = 0; 2343 again: 2344 for (i = 0; i < nb_lc; i++) { 2345 lcore_cpuid = lcorelist[i]; 2346 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2347 printf("lcore %u not enabled\n", lcore_cpuid); 2348 return -1; 2349 } 2350 if (lcore_cpuid == rte_get_master_lcore()) { 2351 printf("lcore %u cannot be masked on for running " 2352 "packet forwarding, which is the master lcore " 2353 "and reserved for command line parsing only\n", 2354 lcore_cpuid); 2355 return -1; 2356 } 2357 if (record_now) 2358 fwd_lcores_cpuids[i] = lcore_cpuid; 2359 } 2360 if (record_now == 0) { 2361 record_now = 1; 2362 goto again; 2363 } 2364 nb_cfg_lcores = (lcoreid_t) nb_lc; 2365 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2366 printf("previous number of forwarding cores %u - changed to " 2367 "number of configured cores %u\n", 2368 (unsigned int) nb_fwd_lcores, nb_lc); 2369 nb_fwd_lcores = (lcoreid_t) nb_lc; 2370 } 2371 2372 return 0; 2373 } 2374 2375 int 2376 set_fwd_lcores_mask(uint64_t lcoremask) 2377 { 2378 unsigned int lcorelist[64]; 2379 unsigned int nb_lc; 2380 unsigned int i; 2381 2382 if (lcoremask == 0) { 2383 printf("Invalid NULL mask of cores\n"); 2384 return -1; 2385 } 2386 nb_lc = 0; 2387 for (i = 0; i < 64; i++) { 2388 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2389 continue; 2390 lcorelist[nb_lc++] = i; 2391 } 2392 return set_fwd_lcores_list(lcorelist, nb_lc); 2393 } 2394 2395 void 2396 set_fwd_lcores_number(uint16_t nb_lc) 2397 { 2398 if (nb_lc > nb_cfg_lcores) { 2399 printf("nb fwd cores %u > %u (max. 
void
set_fwd_lcores_number(uint16_t nb_lc)
{
	if (nb_lc > nb_cfg_lcores) {
		printf("nb fwd cores %u > %u (max. number of configured "
		       "lcores) - ignored\n",
		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
		return;
	}
	nb_fwd_lcores = (lcoreid_t) nb_lc;
	printf("Number of forwarding cores set to %u\n",
	       (unsigned int) nb_fwd_lcores);
}

void
set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
{
	unsigned int i;
	portid_t port_id;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_pt; i++) {
		port_id = (portid_t) portlist[i];
		if (port_id_is_invalid(port_id, ENABLED_WARN))
			return;
		if (record_now)
			fwd_ports_ids[i] = port_id;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_ports = (portid_t) nb_pt;
	if (nb_fwd_ports != (portid_t) nb_pt) {
		printf("previous number of forwarding ports %u - changed to "
		       "number of configured ports %u\n",
		       (unsigned int) nb_fwd_ports, nb_pt);
		nb_fwd_ports = (portid_t) nb_pt;
	}
}

void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int portlist[64];
	unsigned int nb_pt;
	unsigned int i;

	if (portmask == 0) {
		printf("Invalid NULL mask of ports\n");
		return;
	}
	nb_pt = 0;
	RTE_ETH_FOREACH_DEV(i) {
		if (!((uint64_t)(1ULL << i) & portmask))
			continue;
		portlist[nb_pt++] = i;
	}
	set_fwd_ports_list(portlist, nb_pt);
}

void
set_fwd_ports_number(uint16_t nb_pt)
{
	if (nb_pt > nb_cfg_ports) {
		printf("nb fwd ports %u > %u (number of configured "
		       "ports) - ignored\n",
		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
		return;
	}
	nb_fwd_ports = (portid_t) nb_pt;
	printf("Number of forwarding ports set to %u\n",
	       (unsigned int) nb_fwd_ports);
}

int
port_is_forwarding(portid_t port_id)
{
	unsigned int i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return -1;

	for (i = 0; i < nb_fwd_ports; i++) {
		if (fwd_ports_ids[i] == port_id)
			return 1;
	}

	return 0;
}

void
set_nb_pkt_per_burst(uint16_t nb)
{
	if (nb > MAX_PKT_BURST) {
		printf("nb pkt per burst: %u > %u (maximum packets per "
		       "burst) - ignored\n",
		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
		return;
	}
	nb_pkt_per_burst = nb;
	printf("Number of packets per burst set to %u\n",
	       (unsigned int) nb_pkt_per_burst);
}

static const char *
tx_split_get_name(enum tx_pkt_split split)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (tx_split_name[i].split == split)
			return tx_split_name[i].name;
	}
	return NULL;
}

void
set_tx_pkt_split(const char *name)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (strcmp(tx_split_name[i].name, name) == 0) {
			tx_pkt_split = tx_split_name[i].split;
			return;
		}
	}
	printf("unknown value: \"%s\"\n", name);
}

void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}
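/*
 * Illustrative sketch for set_tx_pkt_segments() below: splitting a
 * 128-byte test packet into two 64-byte segments. Each length must fit
 * in the mbuf data size and the total must cover at least an empty
 * Ethernet + IPv4 + UDP packet:
 *
 *	unsigned int lengths[2] = { 64, 64 };
 *
 *	set_tx_pkt_segments(lengths, 2);
 */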
void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length is less than or equal to the
	 * mbuf data size.
	 * Check also that the total packet length is greater than or equal
	 * to the size of an empty UDP/IP packet
	 * (sizeof(struct ether_hdr) + 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
		       (unsigned) tx_pkt_len,
		       (int)(sizeof(struct ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}

void
setup_gro(const char *onoff, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("invalid port id %u\n", port_id);
		return;
	}
	if (test_done == 0) {
		printf("Before enabling/disabling GRO,"
		       " please stop forwarding first\n");
		return;
	}
	if (strcmp(onoff, "on") == 0) {
		if (gro_ports[port_id].enable != 0) {
			printf("Port %u already has GRO enabled. Please"
			       " disable GRO first\n", port_id);
			return;
		}
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
			gro_ports[port_id].param.max_flow_num =
				GRO_DEFAULT_FLOW_NUM;
			gro_ports[port_id].param.max_item_per_flow =
				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
		}
		gro_ports[port_id].enable = 1;
	} else {
		if (gro_ports[port_id].enable == 0) {
			printf("Port %u already has GRO disabled\n", port_id);
			return;
		}
		gro_ports[port_id].enable = 0;
	}
}
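/*
 * Illustrative sketch: enabling TCP/IPv4 GRO on port 0 while forwarding
 * is stopped (which installs the default flow/item limits), then
 * disabling it again:
 *
 *	setup_gro("on", 0);
 *	setup_gro("off", 0);
 */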
void
setup_gro_flush_cycles(uint8_t cycles)
{
	if (test_done == 0) {
		printf("Before changing the flush interval for GRO,"
		       " please stop forwarding first.\n");
		return;
	}

	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
			GRO_DEFAULT_FLUSH_CYCLES) {
		printf("The flushing cycle must be in the range"
		       " of 1 to %u. Reverting to the default"
		       " value %u.\n",
		       GRO_MAX_FLUSH_CYCLES,
		       GRO_DEFAULT_FLUSH_CYCLES);
		cycles = GRO_DEFAULT_FLUSH_CYCLES;
	}

	gro_flush_cycles = cycles;
}

void
show_gro(portid_t port_id)
{
	struct rte_gro_param *param;
	uint32_t max_pkts_num;

	param = &gro_ports[port_id].param;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Invalid port id %u.\n", port_id);
		return;
	}
	if (gro_ports[port_id].enable) {
		printf("GRO type: TCP/IPv4\n");
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			max_pkts_num = param->max_flow_num *
				param->max_item_per_flow;
		} else {
			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
		}
		printf("Max number of packets to perform GRO: %u\n",
		       max_pkts_num);
		printf("Flushing cycles: %u\n", gro_flush_cycles);
	} else {
		printf("GRO is not enabled on port %u.\n", port_id);
	}
}

void
setup_gso(const char *mode, portid_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("invalid port id %u\n", port_id);
		return;
	}
	if (strcmp(mode, "on") == 0) {
		if (test_done == 0) {
			printf("before enabling GSO,"
			       " please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 1;
	} else if (strcmp(mode, "off") == 0) {
		if (test_done == 0) {
			printf("before disabling GSO,"
			       " please stop forwarding first\n");
			return;
		}
		gso_ports[port_id].enable = 0;
	}
}

char *
list_pkt_forwarding_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) - strlen(fwd_modes) - 1);
		}
		/* Drop the trailing separator. */
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}

char *
list_pkt_forwarding_retry_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			if (fwd_eng == &rx_only_engine)
				continue;
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
				sizeof(fwd_modes) -
				strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
				sizeof(fwd_modes) -
				strlen(fwd_modes) - 1);
		}
		/* Drop the trailing separator. */
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	return fwd_modes;
}
"" : " with retry"); 2751 cur_fwd_eng = fwd_eng; 2752 return; 2753 } 2754 i++; 2755 } 2756 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2757 } 2758 2759 void 2760 set_verbose_level(uint16_t vb_level) 2761 { 2762 printf("Change verbose level from %u to %u\n", 2763 (unsigned int) verbose_level, (unsigned int) vb_level); 2764 verbose_level = vb_level; 2765 } 2766 2767 void 2768 vlan_extend_set(portid_t port_id, int on) 2769 { 2770 int diag; 2771 int vlan_offload; 2772 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2773 2774 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2775 return; 2776 2777 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2778 2779 if (on) { 2780 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2781 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2782 } else { 2783 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2784 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2785 } 2786 2787 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2788 if (diag < 0) 2789 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2790 "diag=%d\n", port_id, on, diag); 2791 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2792 } 2793 2794 void 2795 rx_vlan_strip_set(portid_t port_id, int on) 2796 { 2797 int diag; 2798 int vlan_offload; 2799 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2800 2801 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2802 return; 2803 2804 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2805 2806 if (on) { 2807 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2808 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2809 } else { 2810 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2811 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2812 } 2813 2814 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2815 if (diag < 0) 2816 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2817 "diag=%d\n", port_id, on, diag); 2818 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2819 } 2820 2821 void 2822 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2823 { 2824 int diag; 2825 2826 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2827 return; 2828 2829 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2830 if (diag < 0) 2831 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2832 "diag=%d\n", port_id, queue_id, on, diag); 2833 } 2834 2835 void 2836 rx_vlan_filter_set(portid_t port_id, int on) 2837 { 2838 int diag; 2839 int vlan_offload; 2840 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2841 2842 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2843 return; 2844 2845 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2846 2847 if (on) { 2848 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2849 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2850 } else { 2851 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2852 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2853 } 2854 2855 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2856 if (diag < 0) 2857 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2858 "diag=%d\n", port_id, on, diag); 2859 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2860 } 2861 2862 int 2863 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2864 { 2865 int diag; 2866 2867 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2868 return 1; 2869 if (vlan_id_is_invalid(vlan_id)) 2870 return 1; 2871 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2872 if (diag == 0) 2873 return 0; 
2874 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2875 "diag=%d\n", 2876 port_id, vlan_id, on, diag); 2877 return -1; 2878 } 2879 2880 void 2881 rx_vlan_all_filter_set(portid_t port_id, int on) 2882 { 2883 uint16_t vlan_id; 2884 2885 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2886 return; 2887 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2888 if (rx_vft_set(port_id, vlan_id, on)) 2889 break; 2890 } 2891 } 2892 2893 void 2894 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2895 { 2896 int diag; 2897 2898 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2899 return; 2900 2901 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2902 if (diag == 0) 2903 return; 2904 2905 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2906 "diag=%d\n", 2907 port_id, vlan_type, tp_id, diag); 2908 } 2909 2910 void 2911 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2912 { 2913 int vlan_offload; 2914 struct rte_eth_dev_info dev_info; 2915 2916 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2917 return; 2918 if (vlan_id_is_invalid(vlan_id)) 2919 return; 2920 2921 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2922 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2923 printf("Error, as QinQ has been enabled.\n"); 2924 return; 2925 } 2926 rte_eth_dev_info_get(port_id, &dev_info); 2927 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 2928 printf("Error: vlan insert is not supported by port %d\n", 2929 port_id); 2930 return; 2931 } 2932 2933 tx_vlan_reset(port_id); 2934 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 2935 ports[port_id].tx_vlan_id = vlan_id; 2936 } 2937 2938 void 2939 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2940 { 2941 int vlan_offload; 2942 struct rte_eth_dev_info dev_info; 2943 2944 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2945 return; 2946 if (vlan_id_is_invalid(vlan_id)) 2947 return; 2948 if (vlan_id_is_invalid(vlan_id_outer)) 2949 return; 2950 2951 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2952 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2953 printf("Error, as QinQ hasn't been enabled.\n"); 2954 return; 2955 } 2956 rte_eth_dev_info_get(port_id, &dev_info); 2957 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 2958 printf("Error: qinq insert not supported by port %d\n", 2959 port_id); 2960 return; 2961 } 2962 2963 tx_vlan_reset(port_id); 2964 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; 2965 ports[port_id].tx_vlan_id = vlan_id; 2966 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 2967 } 2968 2969 void 2970 tx_vlan_reset(portid_t port_id) 2971 { 2972 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2973 return; 2974 ports[port_id].dev_conf.txmode.offloads &= 2975 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 2976 DEV_TX_OFFLOAD_QINQ_INSERT); 2977 ports[port_id].tx_vlan_id = 0; 2978 ports[port_id].tx_vlan_id_outer = 0; 2979 } 2980 2981 void 2982 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 2983 { 2984 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2985 return; 2986 2987 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 2988 } 2989 2990 void 2991 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 2992 { 2993 uint16_t i; 2994 uint8_t existing_mapping_found = 0; 2995 2996 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2997 return; 2998 2999 if (is_rx ? 
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) :
	    (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* tx */
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
				tx_queue_stats_mappings[i].stats_counter_id =
					map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	} else { /* rx */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id =
					map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}

void
set_xstats_hide_zero(uint8_t on_off)
{
	xstats_hide_zero = on_off;
}

static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
		       " tunnel_id: 0x%08x",
		       mask->mac_addr_byte_mask, mask->tunnel_type_mask,
		       rte_be_to_cpu_32(mask->tunnel_id_mask));
	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
		       rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
		       rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

		printf("\n src_port: 0x%04x, dst_port: 0x%04x",
		       rte_be_to_cpu_16(mask->src_port_mask),
		       rte_be_to_cpu_16(mask->dst_port_mask));

		printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

		printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
	}

	printf("\n");
}
static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n L4_PAYLOAD: ");
		else
			printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf(" %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}

static char *
flowtype_to_str(uint16_t flow_type)
{
	struct flow_type_info {
		char str[32];
		uint16_t ftype;
	};

	uint8_t i;
	static struct flow_type_info flowtype_str_table[] = {
		{"raw", RTE_ETH_FLOW_RAW},
		{"ipv4", RTE_ETH_FLOW_IPV4},
		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
		{"ipv6", RTE_ETH_FLOW_IPV6},
		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
		{"port", RTE_ETH_FLOW_PORT},
		{"vxlan", RTE_ETH_FLOW_VXLAN},
		{"geneve", RTE_ETH_FLOW_GENEVE},
		{"nvgre", RTE_ETH_FLOW_NVGRE},
	};

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}
p : "unknown"); 3156 for (j = 0; j < num; j++) 3157 printf(" %02x", mask->mask[j]); 3158 } 3159 printf("\n"); 3160 } 3161 3162 static inline void 3163 print_fdir_flow_type(uint32_t flow_types_mask) 3164 { 3165 int i; 3166 char *p; 3167 3168 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3169 if (!(flow_types_mask & (1 << i))) 3170 continue; 3171 p = flowtype_to_str(i); 3172 if (p) 3173 printf(" %s", p); 3174 else 3175 printf(" unknown"); 3176 } 3177 printf("\n"); 3178 } 3179 3180 void 3181 fdir_get_infos(portid_t port_id) 3182 { 3183 struct rte_eth_fdir_stats fdir_stat; 3184 struct rte_eth_fdir_info fdir_info; 3185 int ret; 3186 3187 static const char *fdir_stats_border = "########################"; 3188 3189 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3190 return; 3191 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3192 if (ret < 0) { 3193 printf("\n FDIR is not supported on port %-2d\n", 3194 port_id); 3195 return; 3196 } 3197 3198 memset(&fdir_info, 0, sizeof(fdir_info)); 3199 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3200 RTE_ETH_FILTER_INFO, &fdir_info); 3201 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3202 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3203 RTE_ETH_FILTER_STATS, &fdir_stat); 3204 printf("\n %s FDIR infos for port %-2d %s\n", 3205 fdir_stats_border, port_id, fdir_stats_border); 3206 printf(" MODE: "); 3207 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3208 printf(" PERFECT\n"); 3209 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3210 printf(" PERFECT-MAC-VLAN\n"); 3211 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3212 printf(" PERFECT-TUNNEL\n"); 3213 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3214 printf(" SIGNATURE\n"); 3215 else 3216 printf(" DISABLE\n"); 3217 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3218 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3219 printf(" SUPPORTED FLOW TYPE: "); 3220 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3221 } 3222 printf(" FLEX PAYLOAD INFO:\n"); 3223 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3224 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3225 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3226 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3227 fdir_info.flex_payload_unit, 3228 fdir_info.max_flex_payload_segment_num, 3229 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3230 printf(" MASK: "); 3231 print_fdir_mask(&fdir_info.mask); 3232 if (fdir_info.flex_conf.nb_payloads > 0) { 3233 printf(" FLEX PAYLOAD SRC OFFSET:"); 3234 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3235 } 3236 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3237 printf(" FLEX MASK CFG:"); 3238 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3239 } 3240 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3241 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3242 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3243 fdir_info.guarant_spc, fdir_info.best_spc); 3244 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3245 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3246 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3247 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3248 fdir_stat.collision, fdir_stat.free, 3249 fdir_stat.maxhash, fdir_stat.maxlen, 3250 fdir_stat.add, fdir_stat.remove, 3251 fdir_stat.f_add, fdir_stat.f_remove); 3252 printf(" %s############################%s\n", 3253 fdir_stats_border, fdir_stats_border); 
void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Cannot set flex"
			       " mask for flow_type(%u).", cfg->flow_type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_mask[idx],
		   cfg,
		   sizeof(struct rte_eth_fdir_flex_mask));
}

void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Cannot set"
			       " flex payload for type(%u).", cfg->type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_set[idx],
		   cfg,
		   sizeof(struct rte_eth_flex_payload_cfg));
}
"Rx" : "Tx", port_id); 3336 RTE_SET_USED(vf); 3337 RTE_SET_USED(on); 3338 } 3339 3340 int 3341 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3342 { 3343 int diag; 3344 struct rte_eth_link link; 3345 3346 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3347 return 1; 3348 rte_eth_link_get_nowait(port_id, &link); 3349 if (rate > link.link_speed) { 3350 printf("Invalid rate value:%u bigger than link speed: %u\n", 3351 rate, link.link_speed); 3352 return 1; 3353 } 3354 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3355 if (diag == 0) 3356 return diag; 3357 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3358 port_id, diag); 3359 return diag; 3360 } 3361 3362 int 3363 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3364 { 3365 int diag = -ENOTSUP; 3366 3367 RTE_SET_USED(vf); 3368 RTE_SET_USED(rate); 3369 RTE_SET_USED(q_msk); 3370 3371 #ifdef RTE_LIBRTE_IXGBE_PMD 3372 if (diag == -ENOTSUP) 3373 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3374 q_msk); 3375 #endif 3376 #ifdef RTE_LIBRTE_BNXT_PMD 3377 if (diag == -ENOTSUP) 3378 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3379 #endif 3380 if (diag == 0) 3381 return diag; 3382 3383 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3384 port_id, diag); 3385 return diag; 3386 } 3387 3388 /* 3389 * Functions to manage the set of filtered Multicast MAC addresses. 3390 * 3391 * A pool of filtered multicast MAC addresses is associated with each port. 3392 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3393 * The address of the pool and the number of valid multicast MAC addresses 3394 * recorded in the pool are stored in the fields "mc_addr_pool" and 3395 * "mc_addr_nb" of the "rte_port" data structure. 3396 * 3397 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3398 * to be supplied a contiguous array of multicast MAC addresses. 3399 * To comply with this constraint, the set of multicast addresses recorded 3400 * into the pool are systematically compacted at the beginning of the pool. 3401 * Hence, when a multicast address is removed from the pool, all following 3402 * addresses, if any, are copied back to keep the set contiguous. 3403 */ 3404 #define MCAST_POOL_INC 32 3405 3406 static int 3407 mcast_addr_pool_extend(struct rte_port *port) 3408 { 3409 struct ether_addr *mc_pool; 3410 size_t mc_pool_size; 3411 3412 /* 3413 * If a free entry is available at the end of the pool, just 3414 * increment the number of recorded multicast addresses. 3415 */ 3416 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3417 port->mc_addr_nb++; 3418 return 0; 3419 } 3420 3421 /* 3422 * [re]allocate a pool with MCAST_POOL_INC more entries. 3423 * The previous test guarantees that port->mc_addr_nb is a multiple 3424 * of MCAST_POOL_INC. 
/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API requires
 * a contiguous array of multicast MAC addresses to be supplied.
 * To comply with this constraint, the set of multicast addresses recorded
 * into the pool is systematically compacted at the beginning of the pool.
 * Hence, when a multicast address is removed from the pool, all following
 * addresses, if any, are copied back to keep the set contiguous.
 */
#define MCAST_POOL_INC 32

static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
						    MCAST_POOL_INC);
	mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
						mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
}

static void
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag == 0)
		return;
	printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
	       port_id, port->mc_addr_nb, -diag);
}

void
mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	if (mcast_addr_pool_extend(port) != 0)
		return;
	ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
	eth_port_multicast_addr_list_set(port_id);
}
void
mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	eth_port_multicast_addr_list_set(port_id);
}

void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get dcb infos on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n TC :        ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base :  ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	int ret = 0;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		printf("%s: Failed to malloc memory\n", __func__);
		return buf;
	}

	ret = read(fd, buf, pkg_size);
	if (ret < 0) {
		close(fd);
		printf("%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}

int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}
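/*
 * Illustrative sketch: round-tripping a package/profile blob through the
 * helpers above (the path is an arbitrary example):
 *
 *	uint32_t size;
 *	uint8_t *buf = open_file("/tmp/pkg.bin", &size);
 *
 *	if (buf != NULL) {
 *		save_file("/tmp/pkg.copy.bin", buf, size);
 *		close_file(buf);
 *	}
 */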
void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_LIBRTE_I40E_PMD
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("no queue region has been set before");

	printf("\n %s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u\n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u\n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}