1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <cmdline_parse_etheraddr.h> 53 #include <rte_config.h> 54 55 #include "testpmd.h" 56 57 static char *flowtype_to_str(uint16_t flow_type); 58 59 static const struct { 60 enum tx_pkt_split split; 61 const char *name; 62 } tx_split_name[] = { 63 { 64 .split = TX_PKT_SPLIT_OFF, 65 .name = "off", 66 }, 67 { 68 .split = TX_PKT_SPLIT_ON, 69 .name = "on", 70 }, 71 { 72 .split = TX_PKT_SPLIT_RND, 73 .name = "rand", 74 }, 75 }; 76 77 const struct rss_type_info rss_type_table[] = { 78 { "all", ETH_RSS_IP | ETH_RSS_TCP | 79 ETH_RSS_UDP | ETH_RSS_SCTP | 80 ETH_RSS_L2_PAYLOAD }, 81 { "none", 0 }, 82 { "ipv4", ETH_RSS_IPV4 }, 83 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 84 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 85 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 86 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 87 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 88 { "ipv6", ETH_RSS_IPV6 }, 89 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 90 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 91 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 92 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 93 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 94 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 95 { "ipv6-ex", ETH_RSS_IPV6_EX }, 96 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 97 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 98 { "port", ETH_RSS_PORT }, 99 { "vxlan", ETH_RSS_VXLAN }, 100 { "geneve", ETH_RSS_GENEVE }, 101 { "nvgre", ETH_RSS_NVGRE }, 102 { "ip", ETH_RSS_IP }, 103 { "udp", ETH_RSS_UDP }, 104 { "tcp", ETH_RSS_TCP }, 105 { "sctp", ETH_RSS_SCTP }, 106 { "tunnel", ETH_RSS_TUNNEL }, 107 { NULL, 0 }, 108 }; 109 110 static void 111 print_ethaddr(const char *name, struct ether_addr *eth_addr) 112 { 113 char buf[ETHER_ADDR_FMT_SIZE]; 114 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 115 printf("%s%s", name, buf); 116 } 117 118 void 119 nic_stats_display(portid_t port_id) 120 { 121 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 122 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 123 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 124 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 125 uint64_t mpps_rx, mpps_tx; 126 struct rte_eth_stats stats; 127 struct rte_port *port = &ports[port_id]; 128 uint8_t i; 129 130 static const char *nic_stats_border = "########################"; 131 132 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 133 print_valid_ports(); 134 return; 135 } 136 rte_eth_stats_get(port_id, &stats); 137 printf("\n %s NIC statistics for port %-2d %s\n", 138 nic_stats_border, port_id, nic_stats_border); 139 140 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 141 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 142 "%-"PRIu64"\n", 143 stats.ipackets, stats.imissed, stats.ibytes); 144 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 145 printf(" RX-nombuf: %-10"PRIu64"\n", 146 stats.rx_nombuf); 147 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 148 "%-"PRIu64"\n", 149 stats.opackets, stats.oerrors, stats.obytes); 150 } 151 else { 152 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 153 " RX-bytes: %10"PRIu64"\n", 154 stats.ipackets, stats.ierrors, stats.ibytes); 155 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 156 printf(" RX-nombuf: %10"PRIu64"\n", 157 stats.rx_nombuf); 158 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 159 " TX-bytes: %10"PRIu64"\n", 160 stats.opackets, stats.oerrors, stats.obytes); 161 } 162 163 if (port->rx_queue_stats_mapping_enabled) { 164 printf("\n"); 165 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 166 printf(" Stats reg %2d RX-packets: %10"PRIu64 167 " RX-errors: %10"PRIu64 168 " RX-bytes: %10"PRIu64"\n", 169 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 170 } 171 } 172 if (port->tx_queue_stats_mapping_enabled) { 173 printf("\n"); 174 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 175 printf(" Stats reg %2d TX-packets: %10"PRIu64 176 " TX-bytes: %10"PRIu64"\n", 177 i, stats.q_opackets[i], stats.q_obytes[i]); 178 } 179 } 180 181 diff_cycles = prev_cycles[port_id]; 182 prev_cycles[port_id] = rte_rdtsc(); 183 if (diff_cycles > 0) 184 diff_cycles = prev_cycles[port_id] - diff_cycles; 185 186 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 187 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 188 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 189 (stats.opackets - prev_pkts_tx[port_id]) : 0; 190 prev_pkts_rx[port_id] = stats.ipackets; 191 prev_pkts_tx[port_id] = stats.opackets; 192 mpps_rx = diff_cycles > 0 ? 193 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 194 mpps_tx = diff_cycles > 0 ? 195 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 196 printf("\n Throughput (since last show)\n"); 197 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 198 mpps_rx, mpps_tx); 199 200 printf(" %s############################%s\n", 201 nic_stats_border, nic_stats_border); 202 } 203 204 void 205 nic_stats_clear(portid_t port_id) 206 { 207 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 208 print_valid_ports(); 209 return; 210 } 211 rte_eth_stats_reset(port_id); 212 printf("\n NIC statistics for port %d cleared\n", port_id); 213 } 214 215 void 216 nic_xstats_display(portid_t port_id) 217 { 218 struct rte_eth_xstat *xstats; 219 int cnt_xstats, idx_xstat; 220 struct rte_eth_xstat_name *xstats_names; 221 222 printf("###### NIC extended statistics for port %-2d\n", port_id); 223 if (!rte_eth_dev_is_valid_port(port_id)) { 224 printf("Error: Invalid port number %i\n", port_id); 225 return; 226 } 227 228 /* Get count */ 229 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 230 if (cnt_xstats < 0) { 231 printf("Error: Cannot get count of xstats\n"); 232 return; 233 } 234 235 /* Get id-name lookup table */ 236 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 237 if (xstats_names == NULL) { 238 printf("Cannot allocate memory for xstats lookup\n"); 239 return; 240 } 241 if (cnt_xstats != rte_eth_xstats_get_names( 242 port_id, xstats_names, cnt_xstats)) { 243 printf("Error: Cannot get xstats lookup\n"); 244 free(xstats_names); 245 return; 246 } 247 248 /* Get stats themselves */ 249 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 250 if (xstats == NULL) { 251 printf("Cannot allocate memory for xstats\n"); 252 free(xstats_names); 253 return; 254 } 255 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 256 printf("Error: Unable to get xstats\n"); 257 free(xstats_names); 258 free(xstats); 259 return; 260 } 261 262 /* Display xstats */ 263 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 264 if (xstats_hide_zero && !xstats[idx_xstat].value) 265 continue; 266 printf("%s: %"PRIu64"\n", 267 xstats_names[idx_xstat].name, 268 xstats[idx_xstat].value); 269 } 270 free(xstats_names); 271 free(xstats); 272 } 273 274 void 275 nic_xstats_clear(portid_t port_id) 276 { 277 rte_eth_xstats_reset(port_id); 278 } 279 280 void 281 nic_stats_mapping_display(portid_t port_id) 282 { 283 struct rte_port *port = &ports[port_id]; 284 uint16_t i; 285 286 static const char *nic_stats_mapping_border = "########################"; 287 288 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 289 print_valid_ports(); 290 return; 291 } 292 293 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 294 printf("Port id %d - either does not support queue statistic mapping or" 295 " no queue statistic mapping set\n", port_id); 296 return; 297 } 298 299 printf("\n %s NIC statistics mapping for port %-2d %s\n", 300 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 301 302 if (port->rx_queue_stats_mapping_enabled) { 303 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 304 if (rx_queue_stats_mappings[i].port_id == port_id) { 305 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 306 rx_queue_stats_mappings[i].queue_id, 307 rx_queue_stats_mappings[i].stats_counter_id); 308 } 309 } 310 printf("\n"); 311 } 312 313 314 if (port->tx_queue_stats_mapping_enabled) { 315 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 316 if (tx_queue_stats_mappings[i].port_id == port_id) { 317 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 318 tx_queue_stats_mappings[i].queue_id, 319 tx_queue_stats_mappings[i].stats_counter_id); 320 } 321 } 322 } 323 324 printf(" %s####################################%s\n", 325 nic_stats_mapping_border, nic_stats_mapping_border); 326 } 327 328 void 329 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 330 { 331 struct rte_eth_rxq_info qinfo; 332 int32_t rc; 333 static const char *info_border = "*********************"; 334 335 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 336 if (rc != 0) { 337 printf("Failed to retrieve information for port: %u, " 338 "RX queue: %hu\nerror desc: %s(%d)\n", 339 port_id, queue_id, strerror(-rc), rc); 340 return; 341 } 342 343 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 344 info_border, port_id, queue_id, info_border); 345 346 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 347 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 348 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 349 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 350 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 351 printf("\nRX drop packets: %s", 352 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 353 printf("\nRX deferred start: %s", 354 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 355 printf("\nRX scattered packets: %s", 356 (qinfo.scattered_rx != 0) ? "on" : "off"); 357 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 358 printf("\n"); 359 } 360 361 void 362 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 363 { 364 struct rte_eth_txq_info qinfo; 365 int32_t rc; 366 static const char *info_border = "*********************"; 367 368 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 369 if (rc != 0) { 370 printf("Failed to retrieve information for port: %u, " 371 "TX queue: %hu\nerror desc: %s(%d)\n", 372 port_id, queue_id, strerror(-rc), rc); 373 return; 374 } 375 376 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 377 info_border, port_id, queue_id, info_border); 378 379 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 380 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 381 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 382 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 383 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 384 printf("\nTX deferred start: %s", 385 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 386 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 387 printf("\n"); 388 } 389 390 void 391 port_infos_display(portid_t port_id) 392 { 393 struct rte_port *port; 394 struct ether_addr mac_addr; 395 struct rte_eth_link link; 396 struct rte_eth_dev_info dev_info; 397 int vlan_offload; 398 struct rte_mempool * mp; 399 static const char *info_border = "*********************"; 400 uint16_t mtu; 401 char name[RTE_ETH_NAME_MAX_LEN]; 402 403 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 404 print_valid_ports(); 405 return; 406 } 407 port = &ports[port_id]; 408 rte_eth_link_get_nowait(port_id, &link); 409 memset(&dev_info, 0, sizeof(dev_info)); 410 rte_eth_dev_info_get(port_id, &dev_info); 411 printf("\n%s Infos for port %-2d %s\n", 412 info_border, port_id, info_border); 413 rte_eth_macaddr_get(port_id, &mac_addr); 414 print_ethaddr("MAC address: ", &mac_addr); 415 rte_eth_dev_get_name_by_port(port_id, name); 416 printf("\nDevice name: %s", name); 417 printf("\nDriver name: %s", dev_info.driver_name); 418 if (dev_info.device->devargs && dev_info.device->devargs->args) 419 printf("\nDevargs: %s", dev_info.device->devargs->args); 420 printf("\nConnect to socket: %u", port->socket_id); 421 422 if (port_numa[port_id] != NUMA_NO_CONFIG) { 423 mp = mbuf_pool_find(port_numa[port_id]); 424 if (mp) 425 printf("\nmemory allocation on the socket: %d", 426 port_numa[port_id]); 427 } else 428 printf("\nmemory allocation on the socket: %u",port->socket_id); 429 430 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 431 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 432 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 433 ("full-duplex") : ("half-duplex")); 434 435 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 436 printf("MTU: %u\n", mtu); 437 438 printf("Promiscuous mode: %s\n", 439 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 440 printf("Allmulticast mode: %s\n", 441 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 442 printf("Maximum number of MAC addresses: %u\n", 443 (unsigned int)(port->dev_info.max_mac_addrs)); 444 printf("Maximum number of MAC addresses of hash filtering: %u\n", 445 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 446 447 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 448 if (vlan_offload >= 0){ 449 printf("VLAN offload: \n"); 450 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 451 printf(" strip on \n"); 452 else 453 printf(" strip off \n"); 454 455 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 456 printf(" filter on \n"); 457 else 458 printf(" filter off \n"); 459 460 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 461 printf(" qinq(extend) on \n"); 462 else 463 printf(" qinq(extend) off \n"); 464 } 465 466 if (dev_info.hash_key_size > 0) 467 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 468 if (dev_info.reta_size > 0) 469 printf("Redirection table size: %u\n", dev_info.reta_size); 470 if (!dev_info.flow_type_rss_offloads) 471 printf("No RSS offload flow type is supported.\n"); 472 else { 473 uint16_t i; 474 char *p; 475 476 printf("Supported RSS offload flow types:\n"); 477 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 478 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 479 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 480 continue; 481 p = flowtype_to_str(i); 482 if (p) 483 printf(" %s\n", p); 484 else 485 printf(" user defined %d\n", i); 486 } 487 } 488 489 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 490 printf("Maximum configurable length of RX packet: %u\n", 491 dev_info.max_rx_pktlen); 492 if (dev_info.max_vfs) 493 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 494 if (dev_info.max_vmdq_pools) 495 printf("Maximum number of VMDq pools: %u\n", 496 dev_info.max_vmdq_pools); 497 498 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 499 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 500 printf("Max possible number of RXDs per queue: %hu\n", 501 dev_info.rx_desc_lim.nb_max); 502 printf("Min possible number of RXDs per queue: %hu\n", 503 dev_info.rx_desc_lim.nb_min); 504 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 505 506 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 507 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 508 printf("Max possible number of TXDs per queue: %hu\n", 509 dev_info.tx_desc_lim.nb_max); 510 printf("Min possible number of TXDs per queue: %hu\n", 511 dev_info.tx_desc_lim.nb_min); 512 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 513 514 /* Show switch info only if valid switch domain and port id is set */ 515 if (dev_info.switch_info.domain_id != 516 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 517 if (dev_info.switch_info.name) 518 printf("Switch name: %s\n", dev_info.switch_info.name); 519 520 printf("Switch domain Id: %u\n", 521 dev_info.switch_info.domain_id); 522 printf("Switch Port Id: %u\n", 523 dev_info.switch_info.port_id); 524 } 525 } 526 527 void 528 port_summary_header_display(void) 529 { 530 uint16_t port_number; 531 532 port_number = rte_eth_dev_count_avail(); 533 printf("Number of available ports: %i\n", port_number); 534 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 535 "Driver", "Status", "Link"); 536 } 537 538 void 539 port_summary_display(portid_t port_id) 540 { 541 struct ether_addr mac_addr; 542 struct rte_eth_link link; 543 struct rte_eth_dev_info dev_info; 544 char name[RTE_ETH_NAME_MAX_LEN]; 545 546 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 547 print_valid_ports(); 548 return; 549 } 550 551 rte_eth_link_get_nowait(port_id, &link); 552 rte_eth_dev_info_get(port_id, &dev_info); 553 rte_eth_dev_get_name_by_port(port_id, name); 554 rte_eth_macaddr_get(port_id, &mac_addr); 555 556 printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n", 557 port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 558 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 559 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, 560 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 561 (unsigned int) link.link_speed); 562 } 563 564 void 565 port_offload_cap_display(portid_t port_id) 566 { 567 struct rte_eth_dev_info dev_info; 568 static const char *info_border = "************"; 569 570 if (port_id_is_invalid(port_id, ENABLED_WARN)) 571 return; 572 573 rte_eth_dev_info_get(port_id, &dev_info); 574 575 printf("\n%s Port %d supported offload features: %s\n", 576 info_border, port_id, info_border); 577 578 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 579 printf("VLAN stripped: "); 580 if (ports[port_id].dev_conf.rxmode.offloads & 581 DEV_RX_OFFLOAD_VLAN_STRIP) 582 printf("on\n"); 583 else 584 printf("off\n"); 585 } 586 587 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 588 printf("Double VLANs stripped: "); 589 if (ports[port_id].dev_conf.rxmode.offloads & 590 DEV_RX_OFFLOAD_QINQ_STRIP) 591 printf("on\n"); 592 else 593 printf("off\n"); 594 } 595 596 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 597 printf("RX IPv4 checksum: "); 598 if (ports[port_id].dev_conf.rxmode.offloads & 599 DEV_RX_OFFLOAD_IPV4_CKSUM) 600 printf("on\n"); 601 else 602 printf("off\n"); 603 } 604 605 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 606 printf("RX UDP checksum: "); 607 if (ports[port_id].dev_conf.rxmode.offloads & 608 DEV_RX_OFFLOAD_UDP_CKSUM) 609 printf("on\n"); 610 else 611 printf("off\n"); 612 } 613 614 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 615 printf("RX TCP checksum: "); 616 if (ports[port_id].dev_conf.rxmode.offloads & 617 DEV_RX_OFFLOAD_TCP_CKSUM) 618 printf("on\n"); 619 else 620 printf("off\n"); 621 } 622 623 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { 624 printf("RX SCTP checksum: "); 625 if (ports[port_id].dev_conf.rxmode.offloads & 626 DEV_RX_OFFLOAD_SCTP_CKSUM) 627 printf("on\n"); 628 else 629 printf("off\n"); 630 } 631 632 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 633 printf("RX Outer IPv4 checksum: "); 634 if (ports[port_id].dev_conf.rxmode.offloads & 635 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 636 printf("on\n"); 637 else 638 printf("off\n"); 639 } 640 641 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { 642 printf("RX Outer UDP checksum: "); 643 if (ports[port_id].dev_conf.rxmode.offloads & 644 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) 645 printf("on\n"); 646 else 647 printf("off\n"); 648 } 649 650 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 651 printf("Large receive offload: "); 652 if (ports[port_id].dev_conf.rxmode.offloads & 653 DEV_RX_OFFLOAD_TCP_LRO) 654 printf("on\n"); 655 else 656 printf("off\n"); 657 } 658 659 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 660 printf("HW timestamp: "); 661 if (ports[port_id].dev_conf.rxmode.offloads & 662 DEV_RX_OFFLOAD_TIMESTAMP) 663 printf("on\n"); 664 else 665 printf("off\n"); 666 } 667 668 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { 669 printf("Rx Keep CRC: "); 670 if (ports[port_id].dev_conf.rxmode.offloads & 671 DEV_RX_OFFLOAD_KEEP_CRC) 672 printf("on\n"); 673 else 674 printf("off\n"); 675 } 676 677 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { 678 printf("RX offload security: "); 679 if (ports[port_id].dev_conf.rxmode.offloads & 680 DEV_RX_OFFLOAD_SECURITY) 681 printf("on\n"); 682 else 683 printf("off\n"); 684 } 685 686 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 687 printf("VLAN insert: "); 688 if (ports[port_id].dev_conf.txmode.offloads & 689 DEV_TX_OFFLOAD_VLAN_INSERT) 690 printf("on\n"); 691 else 692 printf("off\n"); 693 } 694 695 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 696 printf("Double VLANs insert: "); 697 if (ports[port_id].dev_conf.txmode.offloads & 698 DEV_TX_OFFLOAD_QINQ_INSERT) 699 printf("on\n"); 700 else 701 printf("off\n"); 702 } 703 704 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 705 printf("TX IPv4 checksum: "); 706 if (ports[port_id].dev_conf.txmode.offloads & 707 DEV_TX_OFFLOAD_IPV4_CKSUM) 708 printf("on\n"); 709 else 710 printf("off\n"); 711 } 712 713 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 714 printf("TX UDP checksum: "); 715 if (ports[port_id].dev_conf.txmode.offloads & 716 DEV_TX_OFFLOAD_UDP_CKSUM) 717 printf("on\n"); 718 else 719 printf("off\n"); 720 } 721 722 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 723 printf("TX TCP checksum: "); 724 if (ports[port_id].dev_conf.txmode.offloads & 725 DEV_TX_OFFLOAD_TCP_CKSUM) 726 printf("on\n"); 727 else 728 printf("off\n"); 729 } 730 731 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 732 printf("TX SCTP checksum: "); 733 if (ports[port_id].dev_conf.txmode.offloads & 734 DEV_TX_OFFLOAD_SCTP_CKSUM) 735 printf("on\n"); 736 else 737 printf("off\n"); 738 } 739 740 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 741 printf("TX Outer IPv4 checksum: "); 742 if (ports[port_id].dev_conf.txmode.offloads & 743 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 744 printf("on\n"); 745 else 746 printf("off\n"); 747 } 748 749 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 750 printf("TX TCP segmentation: "); 751 if (ports[port_id].dev_conf.txmode.offloads & 752 DEV_TX_OFFLOAD_TCP_TSO) 753 printf("on\n"); 754 else 755 printf("off\n"); 756 } 757 758 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 759 printf("TX UDP segmentation: "); 760 if (ports[port_id].dev_conf.txmode.offloads & 761 DEV_TX_OFFLOAD_UDP_TSO) 762 printf("on\n"); 763 else 764 printf("off\n"); 765 } 766 767 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 768 printf("TSO for VXLAN tunnel packet: "); 769 if (ports[port_id].dev_conf.txmode.offloads & 770 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 771 printf("on\n"); 772 else 773 printf("off\n"); 774 } 775 776 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 777 printf("TSO for GRE tunnel packet: "); 778 if (ports[port_id].dev_conf.txmode.offloads & 779 DEV_TX_OFFLOAD_GRE_TNL_TSO) 780 printf("on\n"); 781 else 782 printf("off\n"); 783 } 784 785 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 786 printf("TSO for IPIP tunnel packet: "); 787 if (ports[port_id].dev_conf.txmode.offloads & 788 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 789 printf("on\n"); 790 else 791 printf("off\n"); 792 } 793 794 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 795 printf("TSO for GENEVE tunnel packet: "); 796 if (ports[port_id].dev_conf.txmode.offloads & 797 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 798 printf("on\n"); 799 else 800 printf("off\n"); 801 } 802 803 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 804 printf("IP tunnel TSO: "); 805 if (ports[port_id].dev_conf.txmode.offloads & 806 DEV_TX_OFFLOAD_IP_TNL_TSO) 807 printf("on\n"); 808 else 809 printf("off\n"); 810 } 811 812 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 813 printf("UDP tunnel TSO: "); 814 if (ports[port_id].dev_conf.txmode.offloads & 815 DEV_TX_OFFLOAD_UDP_TNL_TSO) 816 printf("on\n"); 817 else 818 printf("off\n"); 819 } 820 821 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { 822 printf("TX Outer UDP checksum: "); 823 if (ports[port_id].dev_conf.txmode.offloads & 824 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 825 printf("on\n"); 826 else 827 printf("off\n"); 828 } 829 830 } 831 832 int 833 port_id_is_invalid(portid_t port_id, enum print_warning warning) 834 { 835 uint16_t pid; 836 837 if (port_id == (portid_t)RTE_PORT_ALL) 838 return 0; 839 840 RTE_ETH_FOREACH_DEV(pid) 841 if (port_id == pid) 842 return 0; 843 844 if (warning == ENABLED_WARN) 845 printf("Invalid port %d\n", port_id); 846 847 return 1; 848 } 849 850 void print_valid_ports(void) 851 { 852 portid_t pid; 853 854 printf("The valid ports array is ["); 855 RTE_ETH_FOREACH_DEV(pid) { 856 printf(" %d", pid); 857 } 858 printf(" ]\n"); 859 } 860 861 static int 862 vlan_id_is_invalid(uint16_t vlan_id) 863 { 864 if (vlan_id < 4096) 865 return 0; 866 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 867 return 1; 868 } 869 870 static int 871 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 872 { 873 const struct rte_pci_device *pci_dev; 874 const struct rte_bus *bus; 875 uint64_t pci_len; 876 877 if (reg_off & 0x3) { 878 printf("Port register offset 0x%X not aligned on a 4-byte " 879 "boundary\n", 880 (unsigned)reg_off); 881 return 1; 882 } 883 884 if (!ports[port_id].dev_info.device) { 885 printf("Invalid device\n"); 886 return 0; 887 } 888 889 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 890 if (bus && !strcmp(bus->name, "pci")) { 891 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 892 } else { 893 printf("Not a PCI device\n"); 894 return 1; 895 } 896 897 pci_len = pci_dev->mem_resource[0].len; 898 if (reg_off >= pci_len) { 899 printf("Port %d: register offset %u (0x%X) out of port PCI " 900 "resource (length=%"PRIu64")\n", 901 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 902 return 1; 903 } 904 return 0; 905 } 906 907 static int 908 reg_bit_pos_is_invalid(uint8_t bit_pos) 909 { 910 if (bit_pos <= 31) 911 return 0; 912 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 913 return 1; 914 } 915 916 #define display_port_and_reg_off(port_id, reg_off) \ 917 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 918 919 static inline void 920 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 921 { 922 display_port_and_reg_off(port_id, (unsigned)reg_off); 923 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 924 } 925 926 void 927 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 928 { 929 uint32_t reg_v; 930 931 932 if (port_id_is_invalid(port_id, ENABLED_WARN)) 933 return; 934 if (port_reg_off_is_invalid(port_id, reg_off)) 935 return; 936 if (reg_bit_pos_is_invalid(bit_x)) 937 return; 938 reg_v = port_id_pci_reg_read(port_id, reg_off); 939 display_port_and_reg_off(port_id, (unsigned)reg_off); 940 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 941 } 942 943 void 944 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 945 uint8_t bit1_pos, uint8_t bit2_pos) 946 { 947 uint32_t reg_v; 948 uint8_t l_bit; 949 uint8_t h_bit; 950 951 if (port_id_is_invalid(port_id, ENABLED_WARN)) 952 return; 953 if (port_reg_off_is_invalid(port_id, reg_off)) 954 return; 955 if (reg_bit_pos_is_invalid(bit1_pos)) 956 return; 957 if (reg_bit_pos_is_invalid(bit2_pos)) 958 return; 959 if (bit1_pos > bit2_pos) 960 l_bit = bit2_pos, h_bit = bit1_pos; 961 else 962 l_bit = bit1_pos, h_bit = bit2_pos; 963 964 reg_v = port_id_pci_reg_read(port_id, reg_off); 965 reg_v >>= l_bit; 966 if (h_bit < 31) 967 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 968 display_port_and_reg_off(port_id, (unsigned)reg_off); 969 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 970 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 971 } 972 973 void 974 port_reg_display(portid_t port_id, uint32_t reg_off) 975 { 976 uint32_t reg_v; 977 978 if (port_id_is_invalid(port_id, ENABLED_WARN)) 979 return; 980 if (port_reg_off_is_invalid(port_id, reg_off)) 981 return; 982 reg_v = port_id_pci_reg_read(port_id, reg_off); 983 display_port_reg_value(port_id, reg_off, reg_v); 984 } 985 986 void 987 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 988 uint8_t bit_v) 989 { 990 uint32_t reg_v; 991 992 if (port_id_is_invalid(port_id, ENABLED_WARN)) 993 return; 994 if (port_reg_off_is_invalid(port_id, reg_off)) 995 return; 996 if (reg_bit_pos_is_invalid(bit_pos)) 997 return; 998 if (bit_v > 1) { 999 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 1000 return; 1001 } 1002 reg_v = port_id_pci_reg_read(port_id, reg_off); 1003 if (bit_v == 0) 1004 reg_v &= ~(1 << bit_pos); 1005 else 1006 reg_v |= (1 << bit_pos); 1007 port_id_pci_reg_write(port_id, reg_off, reg_v); 1008 display_port_reg_value(port_id, reg_off, reg_v); 1009 } 1010 1011 void 1012 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1013 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1014 { 1015 uint32_t max_v; 1016 uint32_t reg_v; 1017 uint8_t l_bit; 1018 uint8_t h_bit; 1019 1020 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1021 return; 1022 if (port_reg_off_is_invalid(port_id, reg_off)) 1023 return; 1024 if (reg_bit_pos_is_invalid(bit1_pos)) 1025 return; 1026 if (reg_bit_pos_is_invalid(bit2_pos)) 1027 return; 1028 if (bit1_pos > bit2_pos) 1029 l_bit = bit2_pos, h_bit = bit1_pos; 1030 else 1031 l_bit = bit1_pos, h_bit = bit2_pos; 1032 1033 if ((h_bit - l_bit) < 31) 1034 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1035 else 1036 max_v = 0xFFFFFFFF; 1037 1038 if (value > max_v) { 1039 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 1040 (unsigned)value, (unsigned)value, 1041 (unsigned)max_v, (unsigned)max_v); 1042 return; 1043 } 1044 reg_v = port_id_pci_reg_read(port_id, reg_off); 1045 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1046 reg_v |= (value << l_bit); /* Set changed bits */ 1047 port_id_pci_reg_write(port_id, reg_off, reg_v); 1048 display_port_reg_value(port_id, reg_off, reg_v); 1049 } 1050 1051 void 1052 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1053 { 1054 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1055 return; 1056 if (port_reg_off_is_invalid(port_id, reg_off)) 1057 return; 1058 port_id_pci_reg_write(port_id, reg_off, reg_v); 1059 display_port_reg_value(port_id, reg_off, reg_v); 1060 } 1061 1062 void 1063 port_mtu_set(portid_t port_id, uint16_t mtu) 1064 { 1065 int diag; 1066 1067 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1068 return; 1069 diag = rte_eth_dev_set_mtu(port_id, mtu); 1070 if (diag == 0) 1071 return; 1072 printf("Set MTU failed. diag=%d\n", diag); 1073 } 1074 1075 /* Generic flow management functions. */ 1076 1077 /** Generate a port_flow entry from attributes/pattern/actions. */ 1078 static struct port_flow * 1079 port_flow_new(const struct rte_flow_attr *attr, 1080 const struct rte_flow_item *pattern, 1081 const struct rte_flow_action *actions, 1082 struct rte_flow_error *error) 1083 { 1084 const struct rte_flow_conv_rule rule = { 1085 .attr_ro = attr, 1086 .pattern_ro = pattern, 1087 .actions_ro = actions, 1088 }; 1089 struct port_flow *pf; 1090 int ret; 1091 1092 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1093 if (ret < 0) 1094 return NULL; 1095 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1096 if (!pf) { 1097 rte_flow_error_set 1098 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1099 "calloc() failed"); 1100 return NULL; 1101 } 1102 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1103 error) >= 0) 1104 return pf; 1105 free(pf); 1106 return NULL; 1107 } 1108 1109 /** Print a message out of a flow error. */ 1110 static int 1111 port_flow_complain(struct rte_flow_error *error) 1112 { 1113 static const char *const errstrlist[] = { 1114 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1115 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1116 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1117 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1118 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1119 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1120 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1121 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1122 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1123 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1124 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1125 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1126 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1127 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1128 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1129 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1130 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1131 }; 1132 const char *errstr; 1133 char buf[32]; 1134 int err = rte_errno; 1135 1136 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1137 !errstrlist[error->type]) 1138 errstr = "unknown type"; 1139 else 1140 errstr = errstrlist[error->type]; 1141 printf("Caught error type %d (%s): %s%s: %s\n", 1142 error->type, errstr, 1143 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1144 error->cause), buf) : "", 1145 error->message ? error->message : "(no stated reason)", 1146 rte_strerror(err)); 1147 return -err; 1148 } 1149 1150 /** Validate flow rule. */ 1151 int 1152 port_flow_validate(portid_t port_id, 1153 const struct rte_flow_attr *attr, 1154 const struct rte_flow_item *pattern, 1155 const struct rte_flow_action *actions) 1156 { 1157 struct rte_flow_error error; 1158 1159 /* Poisoning to make sure PMDs update it in case of error. */ 1160 memset(&error, 0x11, sizeof(error)); 1161 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1162 return port_flow_complain(&error); 1163 printf("Flow rule validated\n"); 1164 return 0; 1165 } 1166 1167 /** Create flow rule. */ 1168 int 1169 port_flow_create(portid_t port_id, 1170 const struct rte_flow_attr *attr, 1171 const struct rte_flow_item *pattern, 1172 const struct rte_flow_action *actions) 1173 { 1174 struct rte_flow *flow; 1175 struct rte_port *port; 1176 struct port_flow *pf; 1177 uint32_t id; 1178 struct rte_flow_error error; 1179 1180 /* Poisoning to make sure PMDs update it in case of error. */ 1181 memset(&error, 0x22, sizeof(error)); 1182 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1183 if (!flow) 1184 return port_flow_complain(&error); 1185 port = &ports[port_id]; 1186 if (port->flow_list) { 1187 if (port->flow_list->id == UINT32_MAX) { 1188 printf("Highest rule ID is already assigned, delete" 1189 " it first"); 1190 rte_flow_destroy(port_id, flow, NULL); 1191 return -ENOMEM; 1192 } 1193 id = port->flow_list->id + 1; 1194 } else 1195 id = 0; 1196 pf = port_flow_new(attr, pattern, actions, &error); 1197 if (!pf) { 1198 rte_flow_destroy(port_id, flow, NULL); 1199 return port_flow_complain(&error); 1200 } 1201 pf->next = port->flow_list; 1202 pf->id = id; 1203 pf->flow = flow; 1204 port->flow_list = pf; 1205 printf("Flow rule #%u created\n", pf->id); 1206 return 0; 1207 } 1208 1209 /** Destroy a number of flow rules. */ 1210 int 1211 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1212 { 1213 struct rte_port *port; 1214 struct port_flow **tmp; 1215 uint32_t c = 0; 1216 int ret = 0; 1217 1218 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1219 port_id == (portid_t)RTE_PORT_ALL) 1220 return -EINVAL; 1221 port = &ports[port_id]; 1222 tmp = &port->flow_list; 1223 while (*tmp) { 1224 uint32_t i; 1225 1226 for (i = 0; i != n; ++i) { 1227 struct rte_flow_error error; 1228 struct port_flow *pf = *tmp; 1229 1230 if (rule[i] != pf->id) 1231 continue; 1232 /* 1233 * Poisoning to make sure PMDs update it in case 1234 * of error. 1235 */ 1236 memset(&error, 0x33, sizeof(error)); 1237 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1238 ret = port_flow_complain(&error); 1239 continue; 1240 } 1241 printf("Flow rule #%u destroyed\n", pf->id); 1242 *tmp = pf->next; 1243 free(pf); 1244 break; 1245 } 1246 if (i == n) 1247 tmp = &(*tmp)->next; 1248 ++c; 1249 } 1250 return ret; 1251 } 1252 1253 /** Remove all flow rules. */ 1254 int 1255 port_flow_flush(portid_t port_id) 1256 { 1257 struct rte_flow_error error; 1258 struct rte_port *port; 1259 int ret = 0; 1260 1261 /* Poisoning to make sure PMDs update it in case of error. */ 1262 memset(&error, 0x44, sizeof(error)); 1263 if (rte_flow_flush(port_id, &error)) { 1264 ret = port_flow_complain(&error); 1265 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1266 port_id == (portid_t)RTE_PORT_ALL) 1267 return ret; 1268 } 1269 port = &ports[port_id]; 1270 while (port->flow_list) { 1271 struct port_flow *pf = port->flow_list->next; 1272 1273 free(port->flow_list); 1274 port->flow_list = pf; 1275 } 1276 return ret; 1277 } 1278 1279 /** Query a flow rule. */ 1280 int 1281 port_flow_query(portid_t port_id, uint32_t rule, 1282 const struct rte_flow_action *action) 1283 { 1284 struct rte_flow_error error; 1285 struct rte_port *port; 1286 struct port_flow *pf; 1287 const char *name; 1288 union { 1289 struct rte_flow_query_count count; 1290 } query; 1291 int ret; 1292 1293 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1294 port_id == (portid_t)RTE_PORT_ALL) 1295 return -EINVAL; 1296 port = &ports[port_id]; 1297 for (pf = port->flow_list; pf; pf = pf->next) 1298 if (pf->id == rule) 1299 break; 1300 if (!pf) { 1301 printf("Flow rule #%u not found\n", rule); 1302 return -ENOENT; 1303 } 1304 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1305 &name, sizeof(name), 1306 (void *)(uintptr_t)action->type, &error); 1307 if (ret < 0) 1308 return port_flow_complain(&error); 1309 switch (action->type) { 1310 case RTE_FLOW_ACTION_TYPE_COUNT: 1311 break; 1312 default: 1313 printf("Cannot query action type %d (%s)\n", 1314 action->type, name); 1315 return -ENOTSUP; 1316 } 1317 /* Poisoning to make sure PMDs update it in case of error. */ 1318 memset(&error, 0x55, sizeof(error)); 1319 memset(&query, 0, sizeof(query)); 1320 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1321 return port_flow_complain(&error); 1322 switch (action->type) { 1323 case RTE_FLOW_ACTION_TYPE_COUNT: 1324 printf("%s:\n" 1325 " hits_set: %u\n" 1326 " bytes_set: %u\n" 1327 " hits: %" PRIu64 "\n" 1328 " bytes: %" PRIu64 "\n", 1329 name, 1330 query.count.hits_set, 1331 query.count.bytes_set, 1332 query.count.hits, 1333 query.count.bytes); 1334 break; 1335 default: 1336 printf("Cannot display result for action type %d (%s)\n", 1337 action->type, name); 1338 break; 1339 } 1340 return 0; 1341 } 1342 1343 /** List flow rules. */ 1344 void 1345 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1346 { 1347 struct rte_port *port; 1348 struct port_flow *pf; 1349 struct port_flow *list = NULL; 1350 uint32_t i; 1351 1352 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1353 port_id == (portid_t)RTE_PORT_ALL) 1354 return; 1355 port = &ports[port_id]; 1356 if (!port->flow_list) 1357 return; 1358 /* Sort flows by group, priority and ID. */ 1359 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1360 struct port_flow **tmp; 1361 const struct rte_flow_attr *curr = pf->rule.attr; 1362 1363 if (n) { 1364 /* Filter out unwanted groups. */ 1365 for (i = 0; i != n; ++i) 1366 if (curr->group == group[i]) 1367 break; 1368 if (i == n) 1369 continue; 1370 } 1371 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 1372 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 1373 1374 if (curr->group > comp->group || 1375 (curr->group == comp->group && 1376 curr->priority > comp->priority) || 1377 (curr->group == comp->group && 1378 curr->priority == comp->priority && 1379 pf->id > (*tmp)->id)) 1380 continue; 1381 break; 1382 } 1383 pf->tmp = *tmp; 1384 *tmp = pf; 1385 } 1386 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1387 for (pf = list; pf != NULL; pf = pf->tmp) { 1388 const struct rte_flow_item *item = pf->rule.pattern; 1389 const struct rte_flow_action *action = pf->rule.actions; 1390 const char *name; 1391 1392 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1393 pf->id, 1394 pf->rule.attr->group, 1395 pf->rule.attr->priority, 1396 pf->rule.attr->ingress ? 'i' : '-', 1397 pf->rule.attr->egress ? 'e' : '-', 1398 pf->rule.attr->transfer ? 't' : '-'); 1399 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1400 if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 1401 &name, sizeof(name), 1402 (void *)(uintptr_t)item->type, 1403 NULL) <= 0) 1404 name = "[UNKNOWN]"; 1405 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1406 printf("%s ", name); 1407 ++item; 1408 } 1409 printf("=>"); 1410 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1411 if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 1412 &name, sizeof(name), 1413 (void *)(uintptr_t)action->type, 1414 NULL) <= 0) 1415 name = "[UNKNOWN]"; 1416 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1417 printf(" %s", name); 1418 ++action; 1419 } 1420 printf("\n"); 1421 } 1422 } 1423 1424 /** Restrict ingress traffic to the defined flow rules. */ 1425 int 1426 port_flow_isolate(portid_t port_id, int set) 1427 { 1428 struct rte_flow_error error; 1429 1430 /* Poisoning to make sure PMDs update it in case of error. */ 1431 memset(&error, 0x66, sizeof(error)); 1432 if (rte_flow_isolate(port_id, set, &error)) 1433 return port_flow_complain(&error); 1434 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1435 port_id, 1436 set ? "now restricted" : "not restricted anymore"); 1437 return 0; 1438 } 1439 1440 /* 1441 * RX/TX ring descriptors display functions. 1442 */ 1443 int 1444 rx_queue_id_is_invalid(queueid_t rxq_id) 1445 { 1446 if (rxq_id < nb_rxq) 1447 return 0; 1448 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1449 return 1; 1450 } 1451 1452 int 1453 tx_queue_id_is_invalid(queueid_t txq_id) 1454 { 1455 if (txq_id < nb_txq) 1456 return 0; 1457 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1458 return 1; 1459 } 1460 1461 static int 1462 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1463 { 1464 if (rxdesc_id < nb_rxd) 1465 return 0; 1466 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1467 rxdesc_id, nb_rxd); 1468 return 1; 1469 } 1470 1471 static int 1472 tx_desc_id_is_invalid(uint16_t txdesc_id) 1473 { 1474 if (txdesc_id < nb_txd) 1475 return 0; 1476 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1477 txdesc_id, nb_txd); 1478 return 1; 1479 } 1480 1481 static const struct rte_memzone * 1482 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1483 { 1484 char mz_name[RTE_MEMZONE_NAMESIZE]; 1485 const struct rte_memzone *mz; 1486 1487 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 1488 port_id, q_id, ring_name); 1489 mz = rte_memzone_lookup(mz_name); 1490 if (mz == NULL) 1491 printf("%s ring memory zoneof (port %d, queue %d) not" 1492 "found (zone name = %s\n", 1493 ring_name, port_id, q_id, mz_name); 1494 return mz; 1495 } 1496 1497 union igb_ring_dword { 1498 uint64_t dword; 1499 struct { 1500 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1501 uint32_t lo; 1502 uint32_t hi; 1503 #else 1504 uint32_t hi; 1505 uint32_t lo; 1506 #endif 1507 } words; 1508 }; 1509 1510 struct igb_ring_desc_32_bytes { 1511 union igb_ring_dword lo_dword; 1512 union igb_ring_dword hi_dword; 1513 union igb_ring_dword resv1; 1514 union igb_ring_dword resv2; 1515 }; 1516 1517 struct igb_ring_desc_16_bytes { 1518 union igb_ring_dword lo_dword; 1519 union igb_ring_dword hi_dword; 1520 }; 1521 1522 static void 1523 ring_rxd_display_dword(union igb_ring_dword dword) 1524 { 1525 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1526 (unsigned)dword.words.hi); 1527 } 1528 1529 static void 1530 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1531 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1532 portid_t port_id, 1533 #else 1534 __rte_unused portid_t port_id, 1535 #endif 1536 uint16_t desc_id) 1537 { 1538 struct igb_ring_desc_16_bytes *ring = 1539 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1540 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1541 struct rte_eth_dev_info dev_info; 1542 1543 memset(&dev_info, 0, sizeof(dev_info)); 1544 rte_eth_dev_info_get(port_id, &dev_info); 1545 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1546 /* 32 bytes RX descriptor, i40e only */ 1547 struct igb_ring_desc_32_bytes *ring = 1548 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1549 ring[desc_id].lo_dword.dword = 1550 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1551 ring_rxd_display_dword(ring[desc_id].lo_dword); 1552 ring[desc_id].hi_dword.dword = 1553 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1554 ring_rxd_display_dword(ring[desc_id].hi_dword); 1555 ring[desc_id].resv1.dword = 1556 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1557 ring_rxd_display_dword(ring[desc_id].resv1); 1558 ring[desc_id].resv2.dword = 1559 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1560 ring_rxd_display_dword(ring[desc_id].resv2); 1561 1562 return; 1563 } 1564 #endif 1565 /* 16 bytes RX descriptor */ 1566 ring[desc_id].lo_dword.dword = 1567 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1568 ring_rxd_display_dword(ring[desc_id].lo_dword); 1569 ring[desc_id].hi_dword.dword = 1570 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1571 ring_rxd_display_dword(ring[desc_id].hi_dword); 1572 } 1573 1574 static void 1575 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1576 { 1577 struct igb_ring_desc_16_bytes *ring; 1578 struct igb_ring_desc_16_bytes txd; 1579 1580 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1581 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1582 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1583 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1584 (unsigned)txd.lo_dword.words.lo, 1585 (unsigned)txd.lo_dword.words.hi, 1586 (unsigned)txd.hi_dword.words.lo, 1587 (unsigned)txd.hi_dword.words.hi); 1588 } 1589 1590 void 1591 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1592 { 1593 const struct rte_memzone *rx_mz; 1594 1595 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1596 return; 1597 if (rx_queue_id_is_invalid(rxq_id)) 1598 return; 1599 if (rx_desc_id_is_invalid(rxd_id)) 1600 return; 1601 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1602 if (rx_mz == NULL) 1603 return; 1604 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1605 } 1606 1607 void 1608 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1609 { 1610 const struct rte_memzone *tx_mz; 1611 1612 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1613 return; 1614 if (tx_queue_id_is_invalid(txq_id)) 1615 return; 1616 if (tx_desc_id_is_invalid(txd_id)) 1617 return; 1618 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1619 if (tx_mz == NULL) 1620 return; 1621 ring_tx_descriptor_display(tx_mz, txd_id); 1622 } 1623 1624 void 1625 fwd_lcores_config_display(void) 1626 { 1627 lcoreid_t lc_id; 1628 1629 printf("List of forwarding lcores:"); 1630 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1631 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1632 printf("\n"); 1633 } 1634 void 1635 rxtx_config_display(void) 1636 { 1637 portid_t pid; 1638 queueid_t qid; 1639 1640 printf(" %s packet forwarding%s packets/burst=%d\n", 1641 cur_fwd_eng->fwd_mode_name, 1642 retry_enabled == 0 ? "" : " with retry", 1643 nb_pkt_per_burst); 1644 1645 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1646 printf(" packet len=%u - nb packet segments=%d\n", 1647 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1648 1649 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1650 nb_fwd_lcores, nb_fwd_ports); 1651 1652 RTE_ETH_FOREACH_DEV(pid) { 1653 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1654 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1655 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1656 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1657 uint16_t nb_rx_desc_tmp; 1658 uint16_t nb_tx_desc_tmp; 1659 struct rte_eth_rxq_info rx_qinfo; 1660 struct rte_eth_txq_info tx_qinfo; 1661 int32_t rc; 1662 1663 /* per port config */ 1664 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1665 (unsigned int)pid, nb_rxq, nb_txq); 1666 1667 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1668 ports[pid].dev_conf.rxmode.offloads, 1669 ports[pid].dev_conf.txmode.offloads); 1670 1671 /* per rx queue config only for first queue to be less verbose */ 1672 for (qid = 0; qid < 1; qid++) { 1673 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1674 if (rc) 1675 nb_rx_desc_tmp = nb_rx_desc[qid]; 1676 else 1677 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1678 1679 printf(" RX queue: %d\n", qid); 1680 printf(" RX desc=%d - RX free threshold=%d\n", 1681 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1682 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1683 " wthresh=%d\n", 1684 rx_conf[qid].rx_thresh.pthresh, 1685 rx_conf[qid].rx_thresh.hthresh, 1686 rx_conf[qid].rx_thresh.wthresh); 1687 printf(" RX Offloads=0x%"PRIx64"\n", 1688 rx_conf[qid].offloads); 1689 } 1690 1691 /* per tx queue config only for first queue to be less verbose */ 1692 for (qid = 0; qid < 1; qid++) { 1693 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1694 if (rc) 1695 nb_tx_desc_tmp = nb_tx_desc[qid]; 1696 else 1697 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1698 1699 printf(" TX queue: %d\n", qid); 1700 printf(" TX desc=%d - TX free threshold=%d\n", 1701 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1702 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1703 " wthresh=%d\n", 1704 tx_conf[qid].tx_thresh.pthresh, 1705 tx_conf[qid].tx_thresh.hthresh, 1706 tx_conf[qid].tx_thresh.wthresh); 1707 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1708 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1709 } 1710 } 1711 } 1712 1713 void 1714 port_rss_reta_info(portid_t port_id, 1715 struct rte_eth_rss_reta_entry64 *reta_conf, 1716 uint16_t nb_entries) 1717 { 1718 uint16_t i, idx, shift; 1719 int ret; 1720 1721 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1722 return; 1723 1724 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1725 if (ret != 0) { 1726 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1727 return; 1728 } 1729 1730 for (i = 0; i < nb_entries; i++) { 1731 idx = i / RTE_RETA_GROUP_SIZE; 1732 shift = i % RTE_RETA_GROUP_SIZE; 1733 if (!(reta_conf[idx].mask & (1ULL << shift))) 1734 continue; 1735 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1736 i, reta_conf[idx].reta[shift]); 1737 } 1738 } 1739 1740 /* 1741 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1742 * key of the port. 1743 */ 1744 void 1745 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 1746 { 1747 struct rte_eth_rss_conf rss_conf = {0}; 1748 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1749 uint64_t rss_hf; 1750 uint8_t i; 1751 int diag; 1752 struct rte_eth_dev_info dev_info; 1753 uint8_t hash_key_size; 1754 1755 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1756 return; 1757 1758 rte_eth_dev_info_get(port_id, &dev_info); 1759 if (dev_info.hash_key_size > 0 && 1760 dev_info.hash_key_size <= sizeof(rss_key)) 1761 hash_key_size = dev_info.hash_key_size; 1762 else { 1763 printf("dev_info did not provide a valid hash key size\n"); 1764 return; 1765 } 1766 1767 /* Get RSS hash key if asked to display it */ 1768 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1769 rss_conf.rss_key_len = hash_key_size; 1770 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1771 if (diag != 0) { 1772 switch (diag) { 1773 case -ENODEV: 1774 printf("port index %d invalid\n", port_id); 1775 break; 1776 case -ENOTSUP: 1777 printf("operation not supported by device\n"); 1778 break; 1779 default: 1780 printf("operation failed - diag=%d\n", diag); 1781 break; 1782 } 1783 return; 1784 } 1785 rss_hf = rss_conf.rss_hf; 1786 if (rss_hf == 0) { 1787 printf("RSS disabled\n"); 1788 return; 1789 } 1790 printf("RSS functions:\n "); 1791 for (i = 0; rss_type_table[i].str; i++) { 1792 if (rss_hf & rss_type_table[i].rss_type) 1793 printf("%s ", rss_type_table[i].str); 1794 } 1795 printf("\n"); 1796 if (!show_rss_key) 1797 return; 1798 printf("RSS key:\n"); 1799 for (i = 0; i < hash_key_size; i++) 1800 printf("%02X", rss_key[i]); 1801 printf("\n"); 1802 } 1803 1804 void 1805 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1806 uint hash_key_len) 1807 { 1808 struct rte_eth_rss_conf rss_conf; 1809 int diag; 1810 unsigned int i; 1811 1812 rss_conf.rss_key = NULL; 1813 rss_conf.rss_key_len = hash_key_len; 1814 rss_conf.rss_hf = 0; 1815 for (i = 0; rss_type_table[i].str; i++) { 1816 if (!strcmp(rss_type_table[i].str, rss_type)) 1817 rss_conf.rss_hf = rss_type_table[i].rss_type; 1818 } 1819 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1820 if (diag == 0) { 1821 rss_conf.rss_key = hash_key; 1822 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1823 } 1824 if (diag == 0) 1825 return; 1826 1827 switch (diag) { 1828 case -ENODEV: 1829 printf("port index %d invalid\n", port_id); 1830 break; 1831 case -ENOTSUP: 1832 printf("operation not supported by device\n"); 1833 break; 1834 default: 1835 printf("operation failed - diag=%d\n", diag); 1836 break; 1837 } 1838 } 1839 1840 /* 1841 * Setup forwarding configuration for each logical core. 1842 */ 1843 static void 1844 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 1845 { 1846 streamid_t nb_fs_per_lcore; 1847 streamid_t nb_fs; 1848 streamid_t sm_id; 1849 lcoreid_t nb_extra; 1850 lcoreid_t nb_fc; 1851 lcoreid_t nb_lc; 1852 lcoreid_t lc_id; 1853 1854 nb_fs = cfg->nb_fwd_streams; 1855 nb_fc = cfg->nb_fwd_lcores; 1856 if (nb_fs <= nb_fc) { 1857 nb_fs_per_lcore = 1; 1858 nb_extra = 0; 1859 } else { 1860 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 1861 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 1862 } 1863 1864 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 1865 sm_id = 0; 1866 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 1867 fwd_lcores[lc_id]->stream_idx = sm_id; 1868 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 1869 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1870 } 1871 1872 /* 1873 * Assign extra remaining streams, if any. 1874 */ 1875 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 1876 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 1877 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 1878 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 1879 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 1880 } 1881 } 1882 1883 static portid_t 1884 fwd_topology_tx_port_get(portid_t rxp) 1885 { 1886 static int warning_once = 1; 1887 1888 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 1889 1890 switch (port_topology) { 1891 default: 1892 case PORT_TOPOLOGY_PAIRED: 1893 if ((rxp & 0x1) == 0) { 1894 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 1895 return rxp + 1; 1896 if (warning_once) { 1897 printf("\nWarning! port-topology=paired" 1898 " and odd forward ports number," 1899 " the last port will pair with" 1900 " itself.\n\n"); 1901 warning_once = 0; 1902 } 1903 return rxp; 1904 } 1905 return rxp - 1; 1906 case PORT_TOPOLOGY_CHAINED: 1907 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 1908 case PORT_TOPOLOGY_LOOP: 1909 return rxp; 1910 } 1911 } 1912 1913 static void 1914 simple_fwd_config_setup(void) 1915 { 1916 portid_t i; 1917 1918 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 1919 cur_fwd_config.nb_fwd_streams = 1920 (streamid_t) cur_fwd_config.nb_fwd_ports; 1921 1922 /* reinitialize forwarding streams */ 1923 init_fwd_streams(); 1924 1925 /* 1926 * In the simple forwarding test, the number of forwarding cores 1927 * must be lower or equal to the number of forwarding ports. 1928 */ 1929 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1930 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 1931 cur_fwd_config.nb_fwd_lcores = 1932 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 1933 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1934 1935 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1936 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 1937 fwd_streams[i]->rx_queue = 0; 1938 fwd_streams[i]->tx_port = 1939 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 1940 fwd_streams[i]->tx_queue = 0; 1941 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 1942 fwd_streams[i]->retry_enabled = retry_enabled; 1943 } 1944 } 1945 1946 /** 1947 * For the RSS forwarding test all streams distributed over lcores. Each stream 1948 * being composed of a RX queue to poll on a RX port for input messages, 1949 * associated with a TX queue of a TX port where to send forwarded packets. 1950 */ 1951 static void 1952 rss_fwd_config_setup(void) 1953 { 1954 portid_t rxp; 1955 portid_t txp; 1956 queueid_t rxq; 1957 queueid_t nb_q; 1958 streamid_t sm_id; 1959 1960 nb_q = nb_rxq; 1961 if (nb_q > nb_txq) 1962 nb_q = nb_txq; 1963 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 1964 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 1965 cur_fwd_config.nb_fwd_streams = 1966 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 1967 1968 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 1969 cur_fwd_config.nb_fwd_lcores = 1970 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 1971 1972 /* reinitialize forwarding streams */ 1973 init_fwd_streams(); 1974 1975 setup_fwd_config_of_each_lcore(&cur_fwd_config); 1976 rxp = 0; rxq = 0; 1977 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1978 struct fwd_stream *fs; 1979 1980 fs = fwd_streams[sm_id]; 1981 txp = fwd_topology_tx_port_get(rxp); 1982 fs->rx_port = fwd_ports_ids[rxp]; 1983 fs->rx_queue = rxq; 1984 fs->tx_port = fwd_ports_ids[txp]; 1985 fs->tx_queue = rxq; 1986 fs->peer_addr = fs->tx_port; 1987 fs->retry_enabled = retry_enabled; 1988 rxp++; 1989 if (rxp < nb_fwd_ports) 1990 continue; 1991 rxp = 0; 1992 rxq++; 1993 } 1994 } 1995 1996 /** 1997 * For the DCB forwarding test, each core is assigned on each traffic class. 1998 * 1999 * Each core is assigned a multi-stream, each stream being composed of 2000 * a RX queue to poll on a RX port for input messages, associated with 2001 * a TX queue of a TX port where to send forwarded packets. All RX and 2002 * TX queues are mapping to the same traffic class. 2003 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2004 * the same core 2005 */ 2006 static void 2007 dcb_fwd_config_setup(void) 2008 { 2009 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2010 portid_t txp, rxp = 0; 2011 queueid_t txq, rxq = 0; 2012 lcoreid_t lc_id; 2013 uint16_t nb_rx_queue, nb_tx_queue; 2014 uint16_t i, j, k, sm_id = 0; 2015 uint8_t tc = 0; 2016 2017 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2018 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2019 cur_fwd_config.nb_fwd_streams = 2020 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2021 2022 /* reinitialize forwarding streams */ 2023 init_fwd_streams(); 2024 sm_id = 0; 2025 txp = 1; 2026 /* get the dcb info on the first RX and TX ports */ 2027 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2028 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2029 2030 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2031 fwd_lcores[lc_id]->stream_nb = 0; 2032 fwd_lcores[lc_id]->stream_idx = sm_id; 2033 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2034 /* if the nb_queue is zero, means this tc is 2035 * not enabled on the POOL 2036 */ 2037 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2038 break; 2039 k = fwd_lcores[lc_id]->stream_nb + 2040 fwd_lcores[lc_id]->stream_idx; 2041 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2042 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2043 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2044 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2045 for (j = 0; j < nb_rx_queue; j++) { 2046 struct fwd_stream *fs; 2047 2048 fs = fwd_streams[k + j]; 2049 fs->rx_port = fwd_ports_ids[rxp]; 2050 fs->rx_queue = rxq + j; 2051 fs->tx_port = fwd_ports_ids[txp]; 2052 fs->tx_queue = txq + j % nb_tx_queue; 2053 fs->peer_addr = fs->tx_port; 2054 fs->retry_enabled = retry_enabled; 2055 } 2056 fwd_lcores[lc_id]->stream_nb += 2057 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2058 } 2059 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2060 2061 tc++; 2062 if (tc < rxp_dcb_info.nb_tcs) 2063 continue; 2064 /* Restart from TC 0 on next RX port */ 2065 tc = 0; 2066 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2067 rxp = (portid_t) 2068 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2069 else 2070 rxp++; 2071 if (rxp >= nb_fwd_ports) 2072 return; 2073 /* get the dcb information on next RX and TX ports */ 2074 if ((rxp & 0x1) == 0) 2075 txp = (portid_t) (rxp + 1); 2076 else 2077 txp = (portid_t) (rxp - 1); 2078 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2079 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2080 } 2081 } 2082 2083 static void 2084 icmp_echo_config_setup(void) 2085 { 2086 portid_t rxp; 2087 queueid_t rxq; 2088 lcoreid_t lc_id; 2089 uint16_t sm_id; 2090 2091 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2092 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2093 (nb_txq * nb_fwd_ports); 2094 else 2095 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2096 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2097 cur_fwd_config.nb_fwd_streams = 2098 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2099 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2100 cur_fwd_config.nb_fwd_lcores = 2101 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2102 if (verbose_level > 0) { 2103 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2104 __FUNCTION__, 2105 cur_fwd_config.nb_fwd_lcores, 2106 cur_fwd_config.nb_fwd_ports, 2107 cur_fwd_config.nb_fwd_streams); 2108 } 2109 2110 /* reinitialize forwarding streams */ 2111 init_fwd_streams(); 2112 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2113 rxp = 0; rxq = 0; 2114 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2115 if (verbose_level > 0) 2116 printf(" core=%d: \n", lc_id); 2117 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2118 struct fwd_stream *fs; 2119 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2120 fs->rx_port = fwd_ports_ids[rxp]; 2121 fs->rx_queue = rxq; 2122 fs->tx_port = fs->rx_port; 2123 fs->tx_queue = rxq; 2124 fs->peer_addr = fs->tx_port; 2125 fs->retry_enabled = retry_enabled; 2126 if (verbose_level > 0) 2127 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2128 sm_id, fs->rx_port, fs->rx_queue, 2129 fs->tx_queue); 2130 rxq = (queueid_t) (rxq + 1); 2131 if (rxq == nb_rxq) { 2132 rxq = 0; 2133 rxp = (portid_t) (rxp + 1); 2134 } 2135 } 2136 } 2137 } 2138 2139 #if defined RTE_LIBRTE_PMD_SOFTNIC 2140 static void 2141 softnic_fwd_config_setup(void) 2142 { 2143 struct rte_port *port; 2144 portid_t pid, softnic_portid; 2145 queueid_t i; 2146 uint8_t softnic_enable = 0; 2147 2148 RTE_ETH_FOREACH_DEV(pid) { 2149 port = &ports[pid]; 2150 const char *driver = port->dev_info.driver_name; 2151 2152 if (strcmp(driver, "net_softnic") == 0) { 2153 softnic_portid = pid; 2154 softnic_enable = 1; 2155 break; 2156 } 2157 } 2158 2159 if (softnic_enable == 0) { 2160 printf("Softnic mode not configured(%s)!\n", __func__); 2161 return; 2162 } 2163 2164 cur_fwd_config.nb_fwd_ports = 1; 2165 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; 2166 2167 /* Re-initialize forwarding streams */ 2168 init_fwd_streams(); 2169 2170 /* 2171 * In the softnic forwarding test, the number of forwarding cores 2172 * is set to one and remaining are used for softnic packet processing. 2173 */ 2174 cur_fwd_config.nb_fwd_lcores = 1; 2175 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2176 2177 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { 2178 fwd_streams[i]->rx_port = softnic_portid; 2179 fwd_streams[i]->rx_queue = i; 2180 fwd_streams[i]->tx_port = softnic_portid; 2181 fwd_streams[i]->tx_queue = i; 2182 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2183 fwd_streams[i]->retry_enabled = retry_enabled; 2184 } 2185 } 2186 #endif 2187 2188 void 2189 fwd_config_setup(void) 2190 { 2191 cur_fwd_config.fwd_eng = cur_fwd_eng; 2192 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2193 icmp_echo_config_setup(); 2194 return; 2195 } 2196 2197 #if defined RTE_LIBRTE_PMD_SOFTNIC 2198 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 2199 softnic_fwd_config_setup(); 2200 return; 2201 } 2202 #endif 2203 2204 if ((nb_rxq > 1) && (nb_txq > 1)){ 2205 if (dcb_config) 2206 dcb_fwd_config_setup(); 2207 else 2208 rss_fwd_config_setup(); 2209 } 2210 else 2211 simple_fwd_config_setup(); 2212 } 2213 2214 static const char * 2215 mp_alloc_to_str(uint8_t mode) 2216 { 2217 switch (mode) { 2218 case MP_ALLOC_NATIVE: 2219 return "native"; 2220 case MP_ALLOC_ANON: 2221 return "anon"; 2222 case MP_ALLOC_XMEM: 2223 return "xmem"; 2224 case MP_ALLOC_XMEM_HUGE: 2225 return "xmemhuge"; 2226 default: 2227 return "invalid"; 2228 } 2229 } 2230 2231 void 2232 pkt_fwd_config_display(struct fwd_config *cfg) 2233 { 2234 struct fwd_stream *fs; 2235 lcoreid_t lc_id; 2236 streamid_t sm_id; 2237 2238 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2239 "NUMA support %s, MP allocation mode: %s\n", 2240 cfg->fwd_eng->fwd_mode_name, 2241 retry_enabled == 0 ? "" : " with retry", 2242 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2243 numa_support == 1 ? "enabled" : "disabled", 2244 mp_alloc_to_str(mp_alloc_type)); 2245 2246 if (retry_enabled) 2247 printf("TX retry num: %u, delay between TX retries: %uus\n", 2248 burst_tx_retry_num, burst_tx_delay_time); 2249 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2250 printf("Logical Core %u (socket %u) forwards packets on " 2251 "%d streams:", 2252 fwd_lcores_cpuids[lc_id], 2253 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2254 fwd_lcores[lc_id]->stream_nb); 2255 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2256 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2257 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2258 "P=%d/Q=%d (socket %u) ", 2259 fs->rx_port, fs->rx_queue, 2260 ports[fs->rx_port].socket_id, 2261 fs->tx_port, fs->tx_queue, 2262 ports[fs->tx_port].socket_id); 2263 print_ethaddr("peer=", 2264 &peer_eth_addrs[fs->peer_addr]); 2265 } 2266 printf("\n"); 2267 } 2268 printf("\n"); 2269 } 2270 2271 void 2272 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2273 { 2274 uint8_t c, new_peer_addr[6]; 2275 if (!rte_eth_dev_is_valid_port(port_id)) { 2276 printf("Error: Invalid port number %i\n", port_id); 2277 return; 2278 } 2279 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2280 sizeof(new_peer_addr)) < 0) { 2281 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2282 return; 2283 } 2284 for (c = 0; c < 6; c++) 2285 peer_eth_addrs[port_id].addr_bytes[c] = 2286 new_peer_addr[c]; 2287 } 2288 2289 int 2290 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2291 { 2292 unsigned int i; 2293 unsigned int lcore_cpuid; 2294 int record_now; 2295 2296 record_now = 0; 2297 again: 2298 for (i = 0; i < nb_lc; i++) { 2299 lcore_cpuid = lcorelist[i]; 2300 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2301 printf("lcore %u not enabled\n", lcore_cpuid); 2302 return -1; 2303 } 2304 if (lcore_cpuid == rte_get_master_lcore()) { 2305 printf("lcore %u cannot be masked on for running " 2306 "packet forwarding, which is the master lcore " 2307 "and reserved for command line parsing only\n", 2308 lcore_cpuid); 2309 return -1; 2310 } 2311 if (record_now) 2312 fwd_lcores_cpuids[i] = lcore_cpuid; 2313 } 2314 if (record_now == 0) { 2315 record_now = 1; 2316 goto again; 2317 } 2318 nb_cfg_lcores = (lcoreid_t) nb_lc; 2319 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2320 printf("previous number of forwarding cores %u - changed to " 2321 "number of configured cores %u\n", 2322 (unsigned int) nb_fwd_lcores, nb_lc); 2323 nb_fwd_lcores = (lcoreid_t) nb_lc; 2324 } 2325 2326 return 0; 2327 } 2328 2329 int 2330 set_fwd_lcores_mask(uint64_t lcoremask) 2331 { 2332 unsigned int lcorelist[64]; 2333 unsigned int nb_lc; 2334 unsigned int i; 2335 2336 if (lcoremask == 0) { 2337 printf("Invalid NULL mask of cores\n"); 2338 return -1; 2339 } 2340 nb_lc = 0; 2341 for (i = 0; i < 64; i++) { 2342 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2343 continue; 2344 lcorelist[nb_lc++] = i; 2345 } 2346 return set_fwd_lcores_list(lcorelist, nb_lc); 2347 } 2348 2349 void 2350 set_fwd_lcores_number(uint16_t nb_lc) 2351 { 2352 if (nb_lc > nb_cfg_lcores) { 2353 printf("nb fwd cores %u > %u (max. number of configured " 2354 "lcores) - ignored\n", 2355 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2356 return; 2357 } 2358 nb_fwd_lcores = (lcoreid_t) nb_lc; 2359 printf("Number of forwarding cores set to %u\n", 2360 (unsigned int) nb_fwd_lcores); 2361 } 2362 2363 void 2364 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2365 { 2366 unsigned int i; 2367 portid_t port_id; 2368 int record_now; 2369 2370 record_now = 0; 2371 again: 2372 for (i = 0; i < nb_pt; i++) { 2373 port_id = (portid_t) portlist[i]; 2374 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2375 return; 2376 if (record_now) 2377 fwd_ports_ids[i] = port_id; 2378 } 2379 if (record_now == 0) { 2380 record_now = 1; 2381 goto again; 2382 } 2383 nb_cfg_ports = (portid_t) nb_pt; 2384 if (nb_fwd_ports != (portid_t) nb_pt) { 2385 printf("previous number of forwarding ports %u - changed to " 2386 "number of configured ports %u\n", 2387 (unsigned int) nb_fwd_ports, nb_pt); 2388 nb_fwd_ports = (portid_t) nb_pt; 2389 } 2390 } 2391 2392 void 2393 set_fwd_ports_mask(uint64_t portmask) 2394 { 2395 unsigned int portlist[64]; 2396 unsigned int nb_pt; 2397 unsigned int i; 2398 2399 if (portmask == 0) { 2400 printf("Invalid NULL mask of ports\n"); 2401 return; 2402 } 2403 nb_pt = 0; 2404 RTE_ETH_FOREACH_DEV(i) { 2405 if (! ((uint64_t)(1ULL << i) & portmask)) 2406 continue; 2407 portlist[nb_pt++] = i; 2408 } 2409 set_fwd_ports_list(portlist, nb_pt); 2410 } 2411 2412 void 2413 set_fwd_ports_number(uint16_t nb_pt) 2414 { 2415 if (nb_pt > nb_cfg_ports) { 2416 printf("nb fwd ports %u > %u (number of configured " 2417 "ports) - ignored\n", 2418 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2419 return; 2420 } 2421 nb_fwd_ports = (portid_t) nb_pt; 2422 printf("Number of forwarding ports set to %u\n", 2423 (unsigned int) nb_fwd_ports); 2424 } 2425 2426 int 2427 port_is_forwarding(portid_t port_id) 2428 { 2429 unsigned int i; 2430 2431 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2432 return -1; 2433 2434 for (i = 0; i < nb_fwd_ports; i++) { 2435 if (fwd_ports_ids[i] == port_id) 2436 return 1; 2437 } 2438 2439 return 0; 2440 } 2441 2442 void 2443 set_nb_pkt_per_burst(uint16_t nb) 2444 { 2445 if (nb > MAX_PKT_BURST) { 2446 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2447 " ignored\n", 2448 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2449 return; 2450 } 2451 nb_pkt_per_burst = nb; 2452 printf("Number of packets per burst set to %u\n", 2453 (unsigned int) nb_pkt_per_burst); 2454 } 2455 2456 static const char * 2457 tx_split_get_name(enum tx_pkt_split split) 2458 { 2459 uint32_t i; 2460 2461 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2462 if (tx_split_name[i].split == split) 2463 return tx_split_name[i].name; 2464 } 2465 return NULL; 2466 } 2467 2468 void 2469 set_tx_pkt_split(const char *name) 2470 { 2471 uint32_t i; 2472 2473 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2474 if (strcmp(tx_split_name[i].name, name) == 0) { 2475 tx_pkt_split = tx_split_name[i].split; 2476 return; 2477 } 2478 } 2479 printf("unknown value: \"%s\"\n", name); 2480 } 2481 2482 void 2483 show_tx_pkt_segments(void) 2484 { 2485 uint32_t i, n; 2486 const char *split; 2487 2488 n = tx_pkt_nb_segs; 2489 split = tx_split_get_name(tx_pkt_split); 2490 2491 printf("Number of segments: %u\n", n); 2492 printf("Segment sizes: "); 2493 for (i = 0; i != n - 1; i++) 2494 printf("%hu,", tx_pkt_seg_lengths[i]); 2495 printf("%hu\n", tx_pkt_seg_lengths[i]); 2496 printf("Split packet: %s\n", split); 2497 } 2498 2499 void 2500 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2501 { 2502 uint16_t tx_pkt_len; 2503 unsigned i; 2504 2505 if (nb_segs >= (unsigned) nb_txd) { 2506 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2507 nb_segs, (unsigned int) nb_txd); 2508 return; 2509 } 2510 2511 /* 2512 * Check that each segment length is greater or equal than 2513 * the mbuf data sise. 2514 * Check also that the total packet length is greater or equal than the 2515 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2516 */ 2517 tx_pkt_len = 0; 2518 for (i = 0; i < nb_segs; i++) { 2519 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2520 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2521 i, seg_lengths[i], (unsigned) mbuf_data_size); 2522 return; 2523 } 2524 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2525 } 2526 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2527 printf("total packet length=%u < %d - give up\n", 2528 (unsigned) tx_pkt_len, 2529 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2530 return; 2531 } 2532 2533 for (i = 0; i < nb_segs; i++) 2534 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2535 2536 tx_pkt_length = tx_pkt_len; 2537 tx_pkt_nb_segs = (uint8_t) nb_segs; 2538 } 2539 2540 void 2541 setup_gro(const char *onoff, portid_t port_id) 2542 { 2543 if (!rte_eth_dev_is_valid_port(port_id)) { 2544 printf("invalid port id %u\n", port_id); 2545 return; 2546 } 2547 if (test_done == 0) { 2548 printf("Before enable/disable GRO," 2549 " please stop forwarding first\n"); 2550 return; 2551 } 2552 if (strcmp(onoff, "on") == 0) { 2553 if (gro_ports[port_id].enable != 0) { 2554 printf("Port %u has enabled GRO. Please" 2555 " disable GRO first\n", port_id); 2556 return; 2557 } 2558 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2559 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2560 gro_ports[port_id].param.max_flow_num = 2561 GRO_DEFAULT_FLOW_NUM; 2562 gro_ports[port_id].param.max_item_per_flow = 2563 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2564 } 2565 gro_ports[port_id].enable = 1; 2566 } else { 2567 if (gro_ports[port_id].enable == 0) { 2568 printf("Port %u has disabled GRO\n", port_id); 2569 return; 2570 } 2571 gro_ports[port_id].enable = 0; 2572 } 2573 } 2574 2575 void 2576 setup_gro_flush_cycles(uint8_t cycles) 2577 { 2578 if (test_done == 0) { 2579 printf("Before change flush interval for GRO," 2580 " please stop forwarding first.\n"); 2581 return; 2582 } 2583 2584 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2585 GRO_DEFAULT_FLUSH_CYCLES) { 2586 printf("The flushing cycle be in the range" 2587 " of 1 to %u. Revert to the default" 2588 " value %u.\n", 2589 GRO_MAX_FLUSH_CYCLES, 2590 GRO_DEFAULT_FLUSH_CYCLES); 2591 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2592 } 2593 2594 gro_flush_cycles = cycles; 2595 } 2596 2597 void 2598 show_gro(portid_t port_id) 2599 { 2600 struct rte_gro_param *param; 2601 uint32_t max_pkts_num; 2602 2603 param = &gro_ports[port_id].param; 2604 2605 if (!rte_eth_dev_is_valid_port(port_id)) { 2606 printf("Invalid port id %u.\n", port_id); 2607 return; 2608 } 2609 if (gro_ports[port_id].enable) { 2610 printf("GRO type: TCP/IPv4\n"); 2611 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2612 max_pkts_num = param->max_flow_num * 2613 param->max_item_per_flow; 2614 } else 2615 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2616 printf("Max number of packets to perform GRO: %u\n", 2617 max_pkts_num); 2618 printf("Flushing cycles: %u\n", gro_flush_cycles); 2619 } else 2620 printf("Port %u doesn't enable GRO.\n", port_id); 2621 } 2622 2623 void 2624 setup_gso(const char *mode, portid_t port_id) 2625 { 2626 if (!rte_eth_dev_is_valid_port(port_id)) { 2627 printf("invalid port id %u\n", port_id); 2628 return; 2629 } 2630 if (strcmp(mode, "on") == 0) { 2631 if (test_done == 0) { 2632 printf("before enabling GSO," 2633 " please stop forwarding first\n"); 2634 return; 2635 } 2636 gso_ports[port_id].enable = 1; 2637 } else if (strcmp(mode, "off") == 0) { 2638 if (test_done == 0) { 2639 printf("before disabling GSO," 2640 " please stop forwarding first\n"); 2641 return; 2642 } 2643 gso_ports[port_id].enable = 0; 2644 } 2645 } 2646 2647 char* 2648 list_pkt_forwarding_modes(void) 2649 { 2650 static char fwd_modes[128] = ""; 2651 const char *separator = "|"; 2652 struct fwd_engine *fwd_eng; 2653 unsigned i = 0; 2654 2655 if (strlen (fwd_modes) == 0) { 2656 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2657 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2658 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2659 strncat(fwd_modes, separator, 2660 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2661 } 2662 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2663 } 2664 2665 return fwd_modes; 2666 } 2667 2668 char* 2669 list_pkt_forwarding_retry_modes(void) 2670 { 2671 static char fwd_modes[128] = ""; 2672 const char *separator = "|"; 2673 struct fwd_engine *fwd_eng; 2674 unsigned i = 0; 2675 2676 if (strlen(fwd_modes) == 0) { 2677 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2678 if (fwd_eng == &rx_only_engine) 2679 continue; 2680 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2681 sizeof(fwd_modes) - 2682 strlen(fwd_modes) - 1); 2683 strncat(fwd_modes, separator, 2684 sizeof(fwd_modes) - 2685 strlen(fwd_modes) - 1); 2686 } 2687 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2688 } 2689 2690 return fwd_modes; 2691 } 2692 2693 void 2694 set_pkt_forwarding_mode(const char *fwd_mode_name) 2695 { 2696 struct fwd_engine *fwd_eng; 2697 unsigned i; 2698 2699 i = 0; 2700 while ((fwd_eng = fwd_engines[i]) != NULL) { 2701 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2702 printf("Set %s packet forwarding mode%s\n", 2703 fwd_mode_name, 2704 retry_enabled == 0 ? "" : " with retry"); 2705 cur_fwd_eng = fwd_eng; 2706 return; 2707 } 2708 i++; 2709 } 2710 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2711 } 2712 2713 void 2714 add_rx_dump_callbacks(portid_t portid) 2715 { 2716 struct rte_eth_dev_info dev_info; 2717 uint16_t queue; 2718 2719 if (port_id_is_invalid(portid, ENABLED_WARN)) 2720 return; 2721 2722 rte_eth_dev_info_get(portid, &dev_info); 2723 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2724 if (!ports[portid].rx_dump_cb[queue]) 2725 ports[portid].rx_dump_cb[queue] = 2726 rte_eth_add_rx_callback(portid, queue, 2727 dump_rx_pkts, NULL); 2728 } 2729 2730 void 2731 add_tx_dump_callbacks(portid_t portid) 2732 { 2733 struct rte_eth_dev_info dev_info; 2734 uint16_t queue; 2735 2736 if (port_id_is_invalid(portid, ENABLED_WARN)) 2737 return; 2738 rte_eth_dev_info_get(portid, &dev_info); 2739 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2740 if (!ports[portid].tx_dump_cb[queue]) 2741 ports[portid].tx_dump_cb[queue] = 2742 rte_eth_add_tx_callback(portid, queue, 2743 dump_tx_pkts, NULL); 2744 } 2745 2746 void 2747 remove_rx_dump_callbacks(portid_t portid) 2748 { 2749 struct rte_eth_dev_info dev_info; 2750 uint16_t queue; 2751 2752 if (port_id_is_invalid(portid, ENABLED_WARN)) 2753 return; 2754 rte_eth_dev_info_get(portid, &dev_info); 2755 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 2756 if (ports[portid].rx_dump_cb[queue]) { 2757 rte_eth_remove_rx_callback(portid, queue, 2758 ports[portid].rx_dump_cb[queue]); 2759 ports[portid].rx_dump_cb[queue] = NULL; 2760 } 2761 } 2762 2763 void 2764 remove_tx_dump_callbacks(portid_t portid) 2765 { 2766 struct rte_eth_dev_info dev_info; 2767 uint16_t queue; 2768 2769 if (port_id_is_invalid(portid, ENABLED_WARN)) 2770 return; 2771 rte_eth_dev_info_get(portid, &dev_info); 2772 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 2773 if (ports[portid].tx_dump_cb[queue]) { 2774 rte_eth_remove_tx_callback(portid, queue, 2775 ports[portid].tx_dump_cb[queue]); 2776 ports[portid].tx_dump_cb[queue] = NULL; 2777 } 2778 } 2779 2780 void 2781 configure_rxtx_dump_callbacks(uint16_t verbose) 2782 { 2783 portid_t portid; 2784 2785 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 2786 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 2787 return; 2788 #endif 2789 2790 RTE_ETH_FOREACH_DEV(portid) 2791 { 2792 if (verbose == 1 || verbose > 2) 2793 add_rx_dump_callbacks(portid); 2794 else 2795 remove_rx_dump_callbacks(portid); 2796 if (verbose >= 2) 2797 add_tx_dump_callbacks(portid); 2798 else 2799 remove_tx_dump_callbacks(portid); 2800 } 2801 } 2802 2803 void 2804 set_verbose_level(uint16_t vb_level) 2805 { 2806 printf("Change verbose level from %u to %u\n", 2807 (unsigned int) verbose_level, (unsigned int) vb_level); 2808 verbose_level = vb_level; 2809 configure_rxtx_dump_callbacks(verbose_level); 2810 } 2811 2812 void 2813 vlan_extend_set(portid_t port_id, int on) 2814 { 2815 int diag; 2816 int vlan_offload; 2817 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2818 2819 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2820 return; 2821 2822 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2823 2824 if (on) { 2825 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2826 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2827 } else { 2828 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2829 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2830 } 2831 2832 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2833 if (diag < 0) 2834 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2835 "diag=%d\n", port_id, on, diag); 2836 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2837 } 2838 2839 void 2840 rx_vlan_strip_set(portid_t port_id, int on) 2841 { 2842 int diag; 2843 int vlan_offload; 2844 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2845 2846 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2847 return; 2848 2849 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2850 2851 if (on) { 2852 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2853 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2854 } else { 2855 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2856 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2857 } 2858 2859 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2860 if (diag < 0) 2861 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2862 "diag=%d\n", port_id, on, diag); 2863 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2864 } 2865 2866 void 2867 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2868 { 2869 int diag; 2870 2871 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2872 return; 2873 2874 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2875 if (diag < 0) 2876 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2877 "diag=%d\n", port_id, queue_id, on, diag); 2878 } 2879 2880 void 2881 rx_vlan_filter_set(portid_t port_id, int on) 2882 { 2883 int diag; 2884 int vlan_offload; 2885 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2886 2887 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2888 return; 2889 2890 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2891 2892 if (on) { 2893 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2894 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2895 } else { 2896 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2897 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2898 } 2899 2900 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2901 if (diag < 0) 2902 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2903 "diag=%d\n", port_id, on, diag); 2904 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2905 } 2906 2907 int 2908 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2909 { 2910 int diag; 2911 2912 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2913 return 1; 2914 if (vlan_id_is_invalid(vlan_id)) 2915 return 1; 2916 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2917 if (diag == 0) 2918 return 0; 2919 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2920 "diag=%d\n", 2921 port_id, vlan_id, on, diag); 2922 return -1; 2923 } 2924 2925 void 2926 rx_vlan_all_filter_set(portid_t port_id, int on) 2927 { 2928 uint16_t vlan_id; 2929 2930 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2931 return; 2932 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2933 if (rx_vft_set(port_id, vlan_id, on)) 2934 break; 2935 } 2936 } 2937 2938 void 2939 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2940 { 2941 int diag; 2942 2943 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2944 return; 2945 2946 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2947 if (diag == 0) 2948 return; 2949 2950 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2951 "diag=%d\n", 2952 port_id, vlan_type, tp_id, diag); 2953 } 2954 2955 void 2956 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2957 { 2958 int vlan_offload; 2959 struct rte_eth_dev_info dev_info; 2960 2961 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2962 return; 2963 if (vlan_id_is_invalid(vlan_id)) 2964 return; 2965 2966 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2967 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2968 printf("Error, as QinQ has been enabled.\n"); 2969 return; 2970 } 2971 rte_eth_dev_info_get(port_id, &dev_info); 2972 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 2973 printf("Error: vlan insert is not supported by port %d\n", 2974 port_id); 2975 return; 2976 } 2977 2978 tx_vlan_reset(port_id); 2979 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 2980 ports[port_id].tx_vlan_id = vlan_id; 2981 } 2982 2983 void 2984 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2985 { 2986 int vlan_offload; 2987 struct rte_eth_dev_info dev_info; 2988 2989 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2990 return; 2991 if (vlan_id_is_invalid(vlan_id)) 2992 return; 2993 if (vlan_id_is_invalid(vlan_id_outer)) 2994 return; 2995 2996 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2997 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2998 printf("Error, as QinQ hasn't been enabled.\n"); 2999 return; 3000 } 3001 rte_eth_dev_info_get(port_id, &dev_info); 3002 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3003 printf("Error: qinq insert not supported by port %d\n", 3004 port_id); 3005 return; 3006 } 3007 3008 tx_vlan_reset(port_id); 3009 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; 3010 ports[port_id].tx_vlan_id = vlan_id; 3011 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3012 } 3013 3014 void 3015 tx_vlan_reset(portid_t port_id) 3016 { 3017 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3018 return; 3019 ports[port_id].dev_conf.txmode.offloads &= 3020 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3021 DEV_TX_OFFLOAD_QINQ_INSERT); 3022 ports[port_id].tx_vlan_id = 0; 3023 ports[port_id].tx_vlan_id_outer = 0; 3024 } 3025 3026 void 3027 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3028 { 3029 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3030 return; 3031 3032 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3033 } 3034 3035 void 3036 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3037 { 3038 uint16_t i; 3039 uint8_t existing_mapping_found = 0; 3040 3041 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3042 return; 3043 3044 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3045 return; 3046 3047 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3048 printf("map_value not in required range 0..%d\n", 3049 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3050 return; 3051 } 3052 3053 if (!is_rx) { /*then tx*/ 3054 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3055 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3056 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3057 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3058 existing_mapping_found = 1; 3059 break; 3060 } 3061 } 3062 if (!existing_mapping_found) { /* A new additional mapping... */ 3063 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3064 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3065 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3066 nb_tx_queue_stats_mappings++; 3067 } 3068 } 3069 else { /*rx*/ 3070 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3071 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3072 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3073 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3074 existing_mapping_found = 1; 3075 break; 3076 } 3077 } 3078 if (!existing_mapping_found) { /* A new additional mapping... */ 3079 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3080 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3081 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3082 nb_rx_queue_stats_mappings++; 3083 } 3084 } 3085 } 3086 3087 void 3088 set_xstats_hide_zero(uint8_t on_off) 3089 { 3090 xstats_hide_zero = on_off; 3091 } 3092 3093 static inline void 3094 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3095 { 3096 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3097 3098 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3099 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3100 " tunnel_id: 0x%08x", 3101 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3102 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3103 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3104 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3105 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3106 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3107 3108 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3109 rte_be_to_cpu_16(mask->src_port_mask), 3110 rte_be_to_cpu_16(mask->dst_port_mask)); 3111 3112 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3113 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3114 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3115 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3116 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3117 3118 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3119 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3120 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3121 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3122 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3123 } 3124 3125 printf("\n"); 3126 } 3127 3128 static inline void 3129 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3130 { 3131 struct rte_eth_flex_payload_cfg *cfg; 3132 uint32_t i, j; 3133 3134 for (i = 0; i < flex_conf->nb_payloads; i++) { 3135 cfg = &flex_conf->flex_set[i]; 3136 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3137 printf("\n RAW: "); 3138 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3139 printf("\n L2_PAYLOAD: "); 3140 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3141 printf("\n L3_PAYLOAD: "); 3142 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3143 printf("\n L4_PAYLOAD: "); 3144 else 3145 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3146 for (j = 0; j < num; j++) 3147 printf(" %-5u", cfg->src_offset[j]); 3148 } 3149 printf("\n"); 3150 } 3151 3152 static char * 3153 flowtype_to_str(uint16_t flow_type) 3154 { 3155 struct flow_type_info { 3156 char str[32]; 3157 uint16_t ftype; 3158 }; 3159 3160 uint8_t i; 3161 static struct flow_type_info flowtype_str_table[] = { 3162 {"raw", RTE_ETH_FLOW_RAW}, 3163 {"ipv4", RTE_ETH_FLOW_IPV4}, 3164 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3165 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3166 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3167 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3168 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3169 {"ipv6", RTE_ETH_FLOW_IPV6}, 3170 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3171 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3172 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3173 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3174 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3175 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3176 {"port", RTE_ETH_FLOW_PORT}, 3177 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3178 {"geneve", RTE_ETH_FLOW_GENEVE}, 3179 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3180 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3181 }; 3182 3183 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3184 if (flowtype_str_table[i].ftype == flow_type) 3185 return flowtype_str_table[i].str; 3186 } 3187 3188 return NULL; 3189 } 3190 3191 static inline void 3192 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3193 { 3194 struct rte_eth_fdir_flex_mask *mask; 3195 uint32_t i, j; 3196 char *p; 3197 3198 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3199 mask = &flex_conf->flex_mask[i]; 3200 p = flowtype_to_str(mask->flow_type); 3201 printf("\n %s:\t", p ? p : "unknown"); 3202 for (j = 0; j < num; j++) 3203 printf(" %02x", mask->mask[j]); 3204 } 3205 printf("\n"); 3206 } 3207 3208 static inline void 3209 print_fdir_flow_type(uint32_t flow_types_mask) 3210 { 3211 int i; 3212 char *p; 3213 3214 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3215 if (!(flow_types_mask & (1 << i))) 3216 continue; 3217 p = flowtype_to_str(i); 3218 if (p) 3219 printf(" %s", p); 3220 else 3221 printf(" unknown"); 3222 } 3223 printf("\n"); 3224 } 3225 3226 void 3227 fdir_get_infos(portid_t port_id) 3228 { 3229 struct rte_eth_fdir_stats fdir_stat; 3230 struct rte_eth_fdir_info fdir_info; 3231 int ret; 3232 3233 static const char *fdir_stats_border = "########################"; 3234 3235 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3236 return; 3237 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3238 if (ret < 0) { 3239 printf("\n FDIR is not supported on port %-2d\n", 3240 port_id); 3241 return; 3242 } 3243 3244 memset(&fdir_info, 0, sizeof(fdir_info)); 3245 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3246 RTE_ETH_FILTER_INFO, &fdir_info); 3247 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3248 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3249 RTE_ETH_FILTER_STATS, &fdir_stat); 3250 printf("\n %s FDIR infos for port %-2d %s\n", 3251 fdir_stats_border, port_id, fdir_stats_border); 3252 printf(" MODE: "); 3253 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3254 printf(" PERFECT\n"); 3255 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3256 printf(" PERFECT-MAC-VLAN\n"); 3257 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3258 printf(" PERFECT-TUNNEL\n"); 3259 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3260 printf(" SIGNATURE\n"); 3261 else 3262 printf(" DISABLE\n"); 3263 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3264 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3265 printf(" SUPPORTED FLOW TYPE: "); 3266 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3267 } 3268 printf(" FLEX PAYLOAD INFO:\n"); 3269 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3270 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3271 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3272 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3273 fdir_info.flex_payload_unit, 3274 fdir_info.max_flex_payload_segment_num, 3275 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3276 printf(" MASK: "); 3277 print_fdir_mask(&fdir_info.mask); 3278 if (fdir_info.flex_conf.nb_payloads > 0) { 3279 printf(" FLEX PAYLOAD SRC OFFSET:"); 3280 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3281 } 3282 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3283 printf(" FLEX MASK CFG:"); 3284 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3285 } 3286 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3287 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3288 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3289 fdir_info.guarant_spc, fdir_info.best_spc); 3290 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3291 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3292 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3293 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3294 fdir_stat.collision, fdir_stat.free, 3295 fdir_stat.maxhash, fdir_stat.maxlen, 3296 fdir_stat.add, fdir_stat.remove, 3297 fdir_stat.f_add, fdir_stat.f_remove); 3298 printf(" %s############################%s\n", 3299 fdir_stats_border, fdir_stats_border); 3300 } 3301 3302 void 3303 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3304 { 3305 struct rte_port *port; 3306 struct rte_eth_fdir_flex_conf *flex_conf; 3307 int i, idx = 0; 3308 3309 port = &ports[port_id]; 3310 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3311 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3312 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3313 idx = i; 3314 break; 3315 } 3316 } 3317 if (i >= RTE_ETH_FLOW_MAX) { 3318 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3319 idx = flex_conf->nb_flexmasks; 3320 flex_conf->nb_flexmasks++; 3321 } else { 3322 printf("The flex mask table is full. Can not set flex" 3323 " mask for flow_type(%u).", cfg->flow_type); 3324 return; 3325 } 3326 } 3327 rte_memcpy(&flex_conf->flex_mask[idx], 3328 cfg, 3329 sizeof(struct rte_eth_fdir_flex_mask)); 3330 } 3331 3332 void 3333 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3334 { 3335 struct rte_port *port; 3336 struct rte_eth_fdir_flex_conf *flex_conf; 3337 int i, idx = 0; 3338 3339 port = &ports[port_id]; 3340 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3341 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3342 if (cfg->type == flex_conf->flex_set[i].type) { 3343 idx = i; 3344 break; 3345 } 3346 } 3347 if (i >= RTE_ETH_PAYLOAD_MAX) { 3348 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3349 idx = flex_conf->nb_payloads; 3350 flex_conf->nb_payloads++; 3351 } else { 3352 printf("The flex payload table is full. Can not set" 3353 " flex payload for type(%u).", cfg->type); 3354 return; 3355 } 3356 } 3357 rte_memcpy(&flex_conf->flex_set[idx], 3358 cfg, 3359 sizeof(struct rte_eth_flex_payload_cfg)); 3360 3361 } 3362 3363 void 3364 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3365 { 3366 #ifdef RTE_LIBRTE_IXGBE_PMD 3367 int diag; 3368 3369 if (is_rx) 3370 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3371 else 3372 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3373 3374 if (diag == 0) 3375 return; 3376 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3377 is_rx ? "rx" : "tx", port_id, diag); 3378 return; 3379 #endif 3380 printf("VF %s setting not supported for port %d\n", 3381 is_rx ? "Rx" : "Tx", port_id); 3382 RTE_SET_USED(vf); 3383 RTE_SET_USED(on); 3384 } 3385 3386 int 3387 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3388 { 3389 int diag; 3390 struct rte_eth_link link; 3391 3392 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3393 return 1; 3394 rte_eth_link_get_nowait(port_id, &link); 3395 if (rate > link.link_speed) { 3396 printf("Invalid rate value:%u bigger than link speed: %u\n", 3397 rate, link.link_speed); 3398 return 1; 3399 } 3400 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3401 if (diag == 0) 3402 return diag; 3403 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3404 port_id, diag); 3405 return diag; 3406 } 3407 3408 int 3409 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3410 { 3411 int diag = -ENOTSUP; 3412 3413 RTE_SET_USED(vf); 3414 RTE_SET_USED(rate); 3415 RTE_SET_USED(q_msk); 3416 3417 #ifdef RTE_LIBRTE_IXGBE_PMD 3418 if (diag == -ENOTSUP) 3419 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3420 q_msk); 3421 #endif 3422 #ifdef RTE_LIBRTE_BNXT_PMD 3423 if (diag == -ENOTSUP) 3424 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3425 #endif 3426 if (diag == 0) 3427 return diag; 3428 3429 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3430 port_id, diag); 3431 return diag; 3432 } 3433 3434 /* 3435 * Functions to manage the set of filtered Multicast MAC addresses. 3436 * 3437 * A pool of filtered multicast MAC addresses is associated with each port. 3438 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3439 * The address of the pool and the number of valid multicast MAC addresses 3440 * recorded in the pool are stored in the fields "mc_addr_pool" and 3441 * "mc_addr_nb" of the "rte_port" data structure. 3442 * 3443 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3444 * to be supplied a contiguous array of multicast MAC addresses. 3445 * To comply with this constraint, the set of multicast addresses recorded 3446 * into the pool are systematically compacted at the beginning of the pool. 3447 * Hence, when a multicast address is removed from the pool, all following 3448 * addresses, if any, are copied back to keep the set contiguous. 3449 */ 3450 #define MCAST_POOL_INC 32 3451 3452 static int 3453 mcast_addr_pool_extend(struct rte_port *port) 3454 { 3455 struct ether_addr *mc_pool; 3456 size_t mc_pool_size; 3457 3458 /* 3459 * If a free entry is available at the end of the pool, just 3460 * increment the number of recorded multicast addresses. 3461 */ 3462 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3463 port->mc_addr_nb++; 3464 return 0; 3465 } 3466 3467 /* 3468 * [re]allocate a pool with MCAST_POOL_INC more entries. 3469 * The previous test guarantees that port->mc_addr_nb is a multiple 3470 * of MCAST_POOL_INC. 3471 */ 3472 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3473 MCAST_POOL_INC); 3474 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3475 mc_pool_size); 3476 if (mc_pool == NULL) { 3477 printf("allocation of pool of %u multicast addresses failed\n", 3478 port->mc_addr_nb + MCAST_POOL_INC); 3479 return -ENOMEM; 3480 } 3481 3482 port->mc_addr_pool = mc_pool; 3483 port->mc_addr_nb++; 3484 return 0; 3485 3486 } 3487 3488 static void 3489 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3490 { 3491 port->mc_addr_nb--; 3492 if (addr_idx == port->mc_addr_nb) { 3493 /* No need to recompact the set of multicast addressses. */ 3494 if (port->mc_addr_nb == 0) { 3495 /* free the pool of multicast addresses. */ 3496 free(port->mc_addr_pool); 3497 port->mc_addr_pool = NULL; 3498 } 3499 return; 3500 } 3501 memmove(&port->mc_addr_pool[addr_idx], 3502 &port->mc_addr_pool[addr_idx + 1], 3503 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3504 } 3505 3506 static void 3507 eth_port_multicast_addr_list_set(portid_t port_id) 3508 { 3509 struct rte_port *port; 3510 int diag; 3511 3512 port = &ports[port_id]; 3513 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3514 port->mc_addr_nb); 3515 if (diag == 0) 3516 return; 3517 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3518 port->mc_addr_nb, port_id, -diag); 3519 } 3520 3521 void 3522 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr) 3523 { 3524 struct rte_port *port; 3525 uint32_t i; 3526 3527 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3528 return; 3529 3530 port = &ports[port_id]; 3531 3532 /* 3533 * Check that the added multicast MAC address is not already recorded 3534 * in the pool of multicast addresses. 3535 */ 3536 for (i = 0; i < port->mc_addr_nb; i++) { 3537 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3538 printf("multicast address already filtered by port\n"); 3539 return; 3540 } 3541 } 3542 3543 if (mcast_addr_pool_extend(port) != 0) 3544 return; 3545 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3546 eth_port_multicast_addr_list_set(port_id); 3547 } 3548 3549 void 3550 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr) 3551 { 3552 struct rte_port *port; 3553 uint32_t i; 3554 3555 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3556 return; 3557 3558 port = &ports[port_id]; 3559 3560 /* 3561 * Search the pool of multicast MAC addresses for the removed address. 3562 */ 3563 for (i = 0; i < port->mc_addr_nb; i++) { 3564 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3565 break; 3566 } 3567 if (i == port->mc_addr_nb) { 3568 printf("multicast address not filtered by port %d\n", port_id); 3569 return; 3570 } 3571 3572 mcast_addr_pool_remove(port, i); 3573 eth_port_multicast_addr_list_set(port_id); 3574 } 3575 3576 void 3577 port_dcb_info_display(portid_t port_id) 3578 { 3579 struct rte_eth_dcb_info dcb_info; 3580 uint16_t i; 3581 int ret; 3582 static const char *border = "================"; 3583 3584 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3585 return; 3586 3587 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3588 if (ret) { 3589 printf("\n Failed to get dcb infos on port %-2d\n", 3590 port_id); 3591 return; 3592 } 3593 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3594 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3595 printf("\n TC : "); 3596 for (i = 0; i < dcb_info.nb_tcs; i++) 3597 printf("\t%4d", i); 3598 printf("\n Priority : "); 3599 for (i = 0; i < dcb_info.nb_tcs; i++) 3600 printf("\t%4d", dcb_info.prio_tc[i]); 3601 printf("\n BW percent :"); 3602 for (i = 0; i < dcb_info.nb_tcs; i++) 3603 printf("\t%4d%%", dcb_info.tc_bws[i]); 3604 printf("\n RXQ base : "); 3605 for (i = 0; i < dcb_info.nb_tcs; i++) 3606 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3607 printf("\n RXQ number :"); 3608 for (i = 0; i < dcb_info.nb_tcs; i++) 3609 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3610 printf("\n TXQ base : "); 3611 for (i = 0; i < dcb_info.nb_tcs; i++) 3612 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3613 printf("\n TXQ number :"); 3614 for (i = 0; i < dcb_info.nb_tcs; i++) 3615 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3616 printf("\n"); 3617 } 3618 3619 uint8_t * 3620 open_file(const char *file_path, uint32_t *size) 3621 { 3622 int fd = open(file_path, O_RDONLY); 3623 off_t pkg_size; 3624 uint8_t *buf = NULL; 3625 int ret = 0; 3626 struct stat st_buf; 3627 3628 if (size) 3629 *size = 0; 3630 3631 if (fd == -1) { 3632 printf("%s: Failed to open %s\n", __func__, file_path); 3633 return buf; 3634 } 3635 3636 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3637 close(fd); 3638 printf("%s: File operations failed\n", __func__); 3639 return buf; 3640 } 3641 3642 pkg_size = st_buf.st_size; 3643 if (pkg_size < 0) { 3644 close(fd); 3645 printf("%s: File operations failed\n", __func__); 3646 return buf; 3647 } 3648 3649 buf = (uint8_t *)malloc(pkg_size); 3650 if (!buf) { 3651 close(fd); 3652 printf("%s: Failed to malloc memory\n", __func__); 3653 return buf; 3654 } 3655 3656 ret = read(fd, buf, pkg_size); 3657 if (ret < 0) { 3658 close(fd); 3659 printf("%s: File read operation failed\n", __func__); 3660 close_file(buf); 3661 return NULL; 3662 } 3663 3664 if (size) 3665 *size = pkg_size; 3666 3667 close(fd); 3668 3669 return buf; 3670 } 3671 3672 int 3673 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3674 { 3675 FILE *fh = fopen(file_path, "wb"); 3676 3677 if (fh == NULL) { 3678 printf("%s: Failed to open %s\n", __func__, file_path); 3679 return -1; 3680 } 3681 3682 if (fwrite(buf, 1, size, fh) != size) { 3683 fclose(fh); 3684 printf("%s: File write operation failed\n", __func__); 3685 return -1; 3686 } 3687 3688 fclose(fh); 3689 3690 return 0; 3691 } 3692 3693 int 3694 close_file(uint8_t *buf) 3695 { 3696 if (buf) { 3697 free((void *)buf); 3698 return 0; 3699 } 3700 3701 return -1; 3702 } 3703 3704 void 3705 port_queue_region_info_display(portid_t port_id, void *buf) 3706 { 3707 #ifdef RTE_LIBRTE_I40E_PMD 3708 uint16_t i, j; 3709 struct rte_pmd_i40e_queue_regions *info = 3710 (struct rte_pmd_i40e_queue_regions *)buf; 3711 static const char *queue_region_info_stats_border = "-------"; 3712 3713 if (!info->queue_region_number) 3714 printf("there is no region has been set before"); 3715 3716 printf("\n %s All queue region info for port=%2d %s", 3717 queue_region_info_stats_border, port_id, 3718 queue_region_info_stats_border); 3719 printf("\n queue_region_number: %-14u \n", 3720 info->queue_region_number); 3721 3722 for (i = 0; i < info->queue_region_number; i++) { 3723 printf("\n region_id: %-14u queue_number: %-14u " 3724 "queue_start_index: %-14u \n", 3725 info->region[i].region_id, 3726 info->region[i].queue_num, 3727 info->region[i].queue_start_index); 3728 3729 printf(" user_priority_num is %-14u :", 3730 info->region[i].user_priority_num); 3731 for (j = 0; j < info->region[i].user_priority_num; j++) 3732 printf(" %-14u ", info->region[i].user_priority[j]); 3733 3734 printf("\n flowtype_num is %-14u :", 3735 info->region[i].flowtype_num); 3736 for (j = 0; j < info->region[i].flowtype_num; j++) 3737 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3738 } 3739 #else 3740 RTE_SET_USED(port_id); 3741 RTE_SET_USED(buf); 3742 #endif 3743 3744 printf("\n\n"); 3745 } 3746