/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <cmdline_parse_etheraddr.h>

#include "testpmd.h"

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ NULL, 0 },
};

static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}
	else {
		printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}
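	/*
	 * Throughput is derived from the TSC: pps is
	 * delta_packets * tsc_hz / delta_cycles since the previous call.
	 * On the first call prev_cycles[] is still 0, so both rates print
	 * as 0.
	 */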
	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n",
	       mpps_rx, mpps_tx);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	rte_eth_xstats_reset(port_id);
}

void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf(" RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf(" TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf(" %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "RX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "TX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	uint16_t mtu;
	char name[RTE_ETH_NAME_MAX_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link);
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload: \n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf(" strip on \n");
		else
			printf(" strip off \n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf(" filter on \n");
		else
			printf(" filter off \n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf(" qinq(extend) on \n");
		else
			printf(" qinq(extend) off \n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			if (p)
				printf(" %s\n", p);
			else
				printf(" user defined %d\n", i);
		}
	}

	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
	printf("Maximum configurable length of RX packet: %u\n",
	       dev_info.max_rx_pktlen);
	if (dev_info.max_vfs)
		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
	if (dev_info.max_vmdq_pools)
		printf("Maximum number of VMDq pools: %u\n",
		       dev_info.max_vmdq_pools);

	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);

	/* Show switch info only if valid switch domain and port id is set */
	if (dev_info.switch_info.domain_id !=
	    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		if (dev_info.switch_info.name)
			printf("Switch name: %s\n", dev_info.switch_info.name);

		printf("Switch domain Id: %u\n",
		       dev_info.switch_info.domain_id);
		printf("Switch Port Id: %u\n",
		       dev_info.switch_info.port_id);
	}
}
/*
 * Each block below follows the same pattern: the rx/tx_offload_capa bit
 * says whether the device supports the offload at all, while the
 * corresponding dev_conf.rxmode/txmode.offloads bit says whether it is
 * currently enabled.
 */
void
port_offload_cap_display(portid_t port_id)
{
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_info_get(port_id, &dev_info);

	printf("\n%s Port %d supported offload features: %s\n",
	       info_border, port_id, info_border);

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_QINQ_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
		printf("RX UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
		printf("RX TCP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("RX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
		printf("Large receive offload: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_LRO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
		printf("VLAN insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VLAN_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
		printf("HW timestamp: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TIMESTAMP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Double VLANs insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_QINQ_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
		printf("TX IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
		printf("TX UDP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
		printf("TX TCP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
		printf("TX SCTP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("TX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
		printf("TX TCP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
		printf("TX UDP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
		printf("TSO for VXLAN tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
		printf("TSO for GRE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GRE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
		printf("TSO for IPIP tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPIP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
		printf("TSO for GENEVE tunnel packet: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
		printf("IP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
		printf("UDP tunnel TSO: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}
}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}

	if (!ports[port_id].dev_info.device) {
		printf("Invalid device\n");
		return 0;
	}

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
	} else {
		printf("Not a PCI device\n");
		return 1;
	}

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}
/*
 * The helpers below operate on 32-bit PCI registers: a field spanning
 * bits [l_bit, h_bit] is read by shifting right by l_bit and masking
 * with ((1 << (h_bit - l_bit + 1)) - 1), e.g. bits [4, 7] of 0x1234
 * yield (0x1234 >> 4) & 0xF == 0x3.
 */
static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}

/* Generic flow management functions. */

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
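/*
 * The table below is indexed directly by item type; e.g.
 * MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)) expands to
 * [RTE_FLOW_ITEM_TYPE_ETH] = { .name = "ETH",
 *                              .size = sizeof(struct rte_flow_item_eth) },
 */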
/** Information about known flow pattern items. */
static const struct {
	const char *name;
	size_t size;
} flow_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
};

/** Pattern item specification types. */
enum item_spec_type {
	ITEM_SPEC,
	ITEM_LAST,
	ITEM_MASK,
};
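/*
 * Note: flow_item_spec_copy() below is used in two passes. When buf is
 * NULL it only computes the (double-aligned) size the specification would
 * need, so callers can size a buffer first and copy into it on a second
 * call.
 */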
/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
		    enum item_spec_type type)
{
	size_t size = 0;
	const void *item_spec =
		type == ITEM_SPEC ? item->spec :
		type == ITEM_LAST ? item->last :
		type == ITEM_MASK ? item->mask :
		NULL;

	if (!item_spec)
		goto empty;
	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t off;

	case RTE_FLOW_ITEM_TYPE_RAW:
		src.raw = item_spec;
		dst.raw = buf;
		off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
				     sizeof(*src.raw->pattern));
		size = off + ((const struct rte_flow_item_raw *)item->spec)->
			length * sizeof(*src.raw->pattern);
		if (dst.raw) {
			memcpy(dst.raw, src.raw, sizeof(*src.raw));
			dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
						  src.raw->pattern,
						  size - off);
		}
		break;
	default:
		size = flow_item[item->type].size;
		if (buf)
			memcpy(buf, item_spec, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct {
	const char *name;
	size_t size;
} flow_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
};
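/*
 * The RSS action is the only one needing a deep copy below: its key and
 * queue arrays live outside struct rte_flow_action_rss, so the copy is
 * laid out as [struct][key bytes][queue ids], each part aligned to
 * sizeof(double).
 */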
/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
	size_t size = 0;

	if (!action->conf)
		goto empty;
	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} src;
		union {
			struct rte_flow_action_rss *rss;
		} dst;
		size_t off;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		off = 0;
		if (dst.rss)
			*dst.rss = (struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			};
		off += sizeof(*src.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->key) * src.rss->key_len;
			if (dst.rss)
				dst.rss->key = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, size);
			off += size;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (dst.rss)
				dst.rss->queue = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, size);
			off += size;
		}
		size = off;
		break;
	default:
		size = flow_action[action->type].size;
		if (buf)
			memcpy(buf, action->conf, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}
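/*
 * Two-pass "goto store" idiom: the first pass runs with pf == NULL and
 * only accumulates off1 (items/actions) and off2 (their specs/confs);
 * once both sizes are known, a single buffer is calloc'd and the same
 * code runs again to perform the actual copies.
 */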
/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	struct port_flow *pf = NULL;
	size_t tmp;
	size_t off1 = 0;
	size_t off2 = 0;
	int err = ENOTSUP;

store:
	item = pattern;
	if (pf)
		pf->pattern = (void *)&pf->data[off1];
	do {
		struct rte_flow_item *dst = NULL;

		if ((unsigned int)item->type >= RTE_DIM(flow_item) ||
		    !flow_item[item->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, item, sizeof(*item));
		off1 += sizeof(*item);
		if (item->spec) {
			if (pf)
				dst->spec = pf->data + off2;
			off2 += flow_item_spec_copy
				(pf ? pf->data + off2 : NULL, item, ITEM_SPEC);
		}
		if (item->last) {
			if (pf)
				dst->last = pf->data + off2;
			off2 += flow_item_spec_copy
				(pf ? pf->data + off2 : NULL, item, ITEM_LAST);
		}
		if (item->mask) {
			if (pf)
				dst->mask = pf->data + off2;
			off2 += flow_item_spec_copy
				(pf ? pf->data + off2 : NULL, item, ITEM_MASK);
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	action = actions;
	if (pf)
		pf->actions = (void *)&pf->data[off1];
	do {
		struct rte_flow_action *dst = NULL;

		if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
		    !flow_action[action->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, action, sizeof(*action));
		off1 += sizeof(*action);
		if (action->conf) {
			if (pf)
				dst->conf = pf->data + off2;
			off2 += flow_action_conf_copy
				(pf ? pf->data + off2 : NULL, action);
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	if (pf != NULL)
		return pf;
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double));
	pf = calloc(1, tmp + off1 + off2);
	if (pf == NULL)
		err = errno;
	else {
		*pf = (const struct port_flow){
			.size = tmp + off1 + off2,
			.attr = *attr,
		};
		tmp -= offsetof(struct port_flow, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
notsup:
	rte_errno = err;
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("Caught error type %d (%s): %s%s\n",
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)");
	return -err;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}
/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id;
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		return port_flow_complain(&error);
	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first");
			rte_flow_destroy(port_id, flow, NULL);
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	} else
		id = 0;
	pf = port_flow_new(attr, pattern, actions);
	if (!pf) {
		int err = rte_errno;

		printf("Cannot allocate flow: %s\n", rte_strerror(err));
		rte_flow_destroy(port_id, flow, NULL);
		return -err;
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		ret = port_flow_complain(&error);
		if (port_id_is_invalid(port_id, DISABLED_WARN) ||
		    port_id == (portid_t)RTE_PORT_ALL)
			return ret;
	}
	port = &ports[port_id];
	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
	} query;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
	    !flow_action[action->type].name)
		name = "unknown";
	else
		name = flow_action[action->type].name;
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		printf("Cannot query action type %d (%s)\n",
		       action->type, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action->type, name);
		break;
	}
	return 0;
}
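/*
 * Flows are listed in order without modifying flow_list: each port_flow
 * has a spare "tmp" link, and the loop below insertion-sorts through a
 * pointer-to-pointer cursor, ordering by group, then priority, then
 * rule ID.
 */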
/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (pf->attr.group == group[i])
					break;
			if (i == n)
				continue;
		}
		tmp = &list;
		while (*tmp &&
		       (pf->attr.group > (*tmp)->attr.group ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority > (*tmp)->attr.priority) ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority == (*tmp)->attr.priority &&
			 pf->id > (*tmp)->id)))
			tmp = &(*tmp)->tmp;
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->pattern;
		const struct rte_flow_action *action = pf->actions;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
		       pf->id,
		       pf->attr.group,
		       pf->attr.priority,
		       pf->attr.ingress ? 'i' : '-',
		       pf->attr.egress ? 'e' : '-',
		       pf->attr.transfer ? 't' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", flow_item[item->type].name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", flow_action[action->type].name);
			++action;
		}
		printf("\n");
	}
}

/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
	       port_id,
	       set ? "now restricted" : "not restricted anymore");
	return 0;
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}
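/*
 * Descriptors are dumped as raw 32-bit words after little-endian
 * conversion. Most NICs handled here use 16-byte descriptors; i40e
 * (unless built with RTE_LIBRTE_I40E_16BYTE_RX_DESC) uses 32-byte RX
 * descriptors, detected at run time from the driver name.
 */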
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}

void
rxtx_config_display(void)
{
	portid_t pid;
	queueid_t qid;

	printf(" %s packet forwarding%s packets/burst=%d\n",
	       cur_fwd_eng->fwd_mode_name,
	       retry_enabled == 0 ? "" : " with retry",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
		printf(" packet len=%u - nb packet segments=%d\n",
		       (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
		struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];

		/* per port config */
		printf(" port %d: RX queue number: %d Tx queue number: %d\n",
		       (unsigned int)pid, nb_rxq, nb_txq);

		printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
		       ports[pid].dev_conf.rxmode.offloads,
		       ports[pid].dev_conf.txmode.offloads);

		/* per rx queue config only for first queue to be less verbose */
		for (qid = 0; qid < 1; qid++) {
			printf(" RX queue: %d\n", qid);
			printf(" RX desc=%d - RX free threshold=%d\n",
			       nb_rx_desc[qid], rx_conf[qid].rx_free_thresh);
			printf(" RX threshold registers: pthresh=%d hthresh=%d "
			       " wthresh=%d\n",
			       rx_conf[qid].rx_thresh.pthresh,
			       rx_conf[qid].rx_thresh.hthresh,
			       rx_conf[qid].rx_thresh.wthresh);
			printf(" RX Offloads=0x%"PRIx64"\n",
			       rx_conf[qid].offloads);
		}

		/* per tx queue config only for first queue to be less verbose */
		for (qid = 0; qid < 1; qid++) {
			printf(" TX queue: %d\n", qid);
			printf(" TX desc=%d - TX free threshold=%d\n",
			       nb_tx_desc[qid], tx_conf[qid].tx_free_thresh);
			printf(" TX threshold registers: pthresh=%d hthresh=%d "
			       " wthresh=%d\n",
			       tx_conf[qid].tx_thresh.pthresh,
			       tx_conf[qid].tx_thresh.hthresh,
			       tx_conf[qid].tx_thresh.wthresh);
			printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
			       tx_conf[qid].offloads, tx_conf[qid].tx_rs_thresh);
		}
	}
}

void
port_rss_reta_info(portid_t port_id,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t nb_entries)
{
	uint16_t i, idx, shift;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	for (i = 0; i < nb_entries; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
		       i, reta_conf[idx].reta[shift]);
	}
}

/*
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
 * key of the port.
 */
1893 */ 1894 void 1895 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1896 { 1897 struct rte_eth_rss_conf rss_conf; 1898 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1899 uint64_t rss_hf; 1900 uint8_t i; 1901 int diag; 1902 struct rte_eth_dev_info dev_info; 1903 uint8_t hash_key_size; 1904 1905 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1906 return; 1907 1908 memset(&dev_info, 0, sizeof(dev_info)); 1909 rte_eth_dev_info_get(port_id, &dev_info); 1910 if (dev_info.hash_key_size > 0 && 1911 dev_info.hash_key_size <= sizeof(rss_key)) 1912 hash_key_size = dev_info.hash_key_size; 1913 else { 1914 printf("dev_info did not provide a valid hash key size\n"); 1915 return; 1916 } 1917 1918 rss_conf.rss_hf = 0; 1919 for (i = 0; rss_type_table[i].str; i++) { 1920 if (!strcmp(rss_info, rss_type_table[i].str)) 1921 rss_conf.rss_hf = rss_type_table[i].rss_type; 1922 } 1923 1924 /* Get RSS hash key if asked to display it */ 1925 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1926 rss_conf.rss_key_len = hash_key_size; 1927 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1928 if (diag != 0) { 1929 switch (diag) { 1930 case -ENODEV: 1931 printf("port index %d invalid\n", port_id); 1932 break; 1933 case -ENOTSUP: 1934 printf("operation not supported by device\n"); 1935 break; 1936 default: 1937 printf("operation failed - diag=%d\n", diag); 1938 break; 1939 } 1940 return; 1941 } 1942 rss_hf = rss_conf.rss_hf; 1943 if (rss_hf == 0) { 1944 printf("RSS disabled\n"); 1945 return; 1946 } 1947 printf("RSS functions:\n "); 1948 for (i = 0; rss_type_table[i].str; i++) { 1949 if (rss_hf & rss_type_table[i].rss_type) 1950 printf("%s ", rss_type_table[i].str); 1951 } 1952 printf("\n"); 1953 if (!show_rss_key) 1954 return; 1955 printf("RSS key:\n"); 1956 for (i = 0; i < hash_key_size; i++) 1957 printf("%02X", rss_key[i]); 1958 printf("\n"); 1959 } 1960 1961 void 1962 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1963 uint hash_key_len) 1964 { 1965 struct rte_eth_rss_conf rss_conf; 1966 int diag; 1967 unsigned int i; 1968 1969 rss_conf.rss_key = NULL; 1970 rss_conf.rss_key_len = hash_key_len; 1971 rss_conf.rss_hf = 0; 1972 for (i = 0; rss_type_table[i].str; i++) { 1973 if (!strcmp(rss_type_table[i].str, rss_type)) 1974 rss_conf.rss_hf = rss_type_table[i].rss_type; 1975 } 1976 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1977 if (diag == 0) { 1978 rss_conf.rss_key = hash_key; 1979 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1980 } 1981 if (diag == 0) 1982 return; 1983 1984 switch (diag) { 1985 case -ENODEV: 1986 printf("port index %d invalid\n", port_id); 1987 break; 1988 case -ENOTSUP: 1989 printf("operation not supported by device\n"); 1990 break; 1991 default: 1992 printf("operation failed - diag=%d\n", diag); 1993 break; 1994 } 1995 } 1996 1997 /* 1998 * Setup forwarding configuration for each logical core. 
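 * Streams are spread as evenly as possible: every lcore first gets
 * nb_fwd_streams / nb_fwd_lcores streams, and the remainder is handed out,
 * one extra stream each, to the last lcores. For example, 10 streams over
 * 4 lcores yields a 2/2/3/3 split.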
1999 */
2000 static void
2001 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2002 {
2003 streamid_t nb_fs_per_lcore;
2004 streamid_t nb_fs;
2005 streamid_t sm_id;
2006 lcoreid_t nb_extra;
2007 lcoreid_t nb_fc;
2008 lcoreid_t nb_lc;
2009 lcoreid_t lc_id;
2010
2011 nb_fs = cfg->nb_fwd_streams;
2012 nb_fc = cfg->nb_fwd_lcores;
2013 if (nb_fs <= nb_fc) {
2014 nb_fs_per_lcore = 1;
2015 nb_extra = 0;
2016 } else {
2017 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2018 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2019 }
2020
2021 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
2022 sm_id = 0;
2023 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2024 fwd_lcores[lc_id]->stream_idx = sm_id;
2025 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2026 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2027 }
2028
2029 /*
2030 * Assign extra remaining streams, if any.
2031 */
2032 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2033 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2034 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2035 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2036 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2037 }
2038 }
2039
2040 static portid_t
2041 fwd_topology_tx_port_get(portid_t rxp)
2042 {
2043 static int warning_once = 1;
2044
2045 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2046
2047 switch (port_topology) {
2048 default:
2049 case PORT_TOPOLOGY_PAIRED:
2050 if ((rxp & 0x1) == 0) {
2051 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2052 return rxp + 1;
2053 if (warning_once) {
2054 printf("\nWarning! port-topology=paired"
2055 " with an odd number of forwarding"
2056 " ports; the last port will pair"
2057 " with itself.\n\n");
2058 warning_once = 0;
2059 }
2060 return rxp;
2061 }
2062 return rxp - 1;
2063 case PORT_TOPOLOGY_CHAINED:
2064 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2065 case PORT_TOPOLOGY_LOOP:
2066 return rxp;
2067 }
2068 }
2069
2070 static void
2071 simple_fwd_config_setup(void)
2072 {
2073 portid_t i;
2074
2075 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2076 cur_fwd_config.nb_fwd_streams =
2077 (streamid_t) cur_fwd_config.nb_fwd_ports;
2078
2079 /* reinitialize forwarding streams */
2080 init_fwd_streams();
2081
2082 /*
2083 * In the simple forwarding test, the number of forwarding cores
2084 * must be less than or equal to the number of forwarding ports.
2085 */
2086 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2087 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2088 cur_fwd_config.nb_fwd_lcores =
2089 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2090 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2091
2092 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2093 fwd_streams[i]->rx_port = fwd_ports_ids[i];
2094 fwd_streams[i]->rx_queue = 0;
2095 fwd_streams[i]->tx_port =
2096 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2097 fwd_streams[i]->tx_queue = 0;
2098 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2099 fwd_streams[i]->retry_enabled = retry_enabled;
2100 }
2101 }
2102
2103 /**
2104 * For the RSS forwarding test, all streams are distributed over the lcores.
2105 * Each stream is composed of an RX queue to poll on an RX port for input
2106 * packets, associated with a TX queue of a TX port to which forwarded packets are sent.
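 * With nb_q = min(nb_rxq, nb_txq) queues used per port, stream n polls
 * RX queue (n % nb_q) of forwarding port (n / nb_q) and transmits on the
 * TX queue with the same index on the peer port chosen by the topology.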
2107 */
2108 static void
2109 rss_fwd_config_setup(void)
2110 {
2111 portid_t rxp;
2112 portid_t txp;
2113 queueid_t rxq;
2114 queueid_t nb_q;
2115 streamid_t sm_id;
2116
2117 nb_q = nb_rxq;
2118 if (nb_q > nb_txq)
2119 nb_q = nb_txq;
2120 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2121 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2122 cur_fwd_config.nb_fwd_streams =
2123 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2124
2125 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2126 cur_fwd_config.nb_fwd_lcores =
2127 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2128
2129 /* reinitialize forwarding streams */
2130 init_fwd_streams();
2131
2132 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2133 rxp = 0; rxq = 0;
2134 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2135 struct fwd_stream *fs;
2136
2137 fs = fwd_streams[sm_id];
2138 txp = fwd_topology_tx_port_get(rxp);
2139 fs->rx_port = fwd_ports_ids[rxp];
2140 fs->rx_queue = rxq;
2141 fs->tx_port = fwd_ports_ids[txp];
2142 fs->tx_queue = rxq;
2143 fs->peer_addr = fs->tx_port;
2144 fs->retry_enabled = retry_enabled;
2145 rxq = (queueid_t) (rxq + 1);
2146 if (rxq < nb_q)
2147 continue;
2148 /*
2149 * rxq == nb_q
2150 * Restart from RX queue 0 on next RX port
2151 */
2152 rxq = 0;
2153 rxp++;
2154 }
2155 }
2156
2157 /**
2158 * For the DCB forwarding test, each core is assigned to one traffic class.
2159 *
2160 * Each core is assigned multiple streams, each stream being composed of
2161 * an RX queue to poll on an RX port for input packets, associated with
2162 * a TX queue of a TX port to which forwarded packets are sent. All the RX
2163 * and TX queues of a stream map to the same traffic class.
2164 * If VMDQ and DCB co-exist, the traffic classes of the different pools
2165 * share the same core.
2166 */
2167 static void
2168 dcb_fwd_config_setup(void)
2169 {
2170 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2171 portid_t txp, rxp = 0;
2172 queueid_t txq, rxq = 0;
2173 lcoreid_t lc_id;
2174 uint16_t nb_rx_queue, nb_tx_queue;
2175 uint16_t i, j, k, sm_id = 0;
2176 uint8_t tc = 0;
2177
2178 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2179 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2180 cur_fwd_config.nb_fwd_streams =
2181 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2182
2183 /* reinitialize forwarding streams */
2184 init_fwd_streams();
2185 sm_id = 0;
2186 txp = 1;
2187 /* get the dcb info on the first RX and TX ports */
2188 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2189 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2190
2191 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2192 fwd_lcores[lc_id]->stream_nb = 0;
2193 fwd_lcores[lc_id]->stream_idx = sm_id;
2194 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2195 /* if nb_queue is zero, this TC is not enabled
2196 * on the pool
2197 */
2198 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2199 break;
2200 k = fwd_lcores[lc_id]->stream_nb +
2201 fwd_lcores[lc_id]->stream_idx;
2202 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2203 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2204 nb_rx_queue = rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2205 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2206 for (j = 0; j < nb_rx_queue; j++) {
2207 struct fwd_stream *fs;
2208
2209 fs = fwd_streams[k + j];
2210 fs->rx_port = fwd_ports_ids[rxp];
2211 fs->rx_queue = rxq + j;
2212 fs->tx_port = fwd_ports_ids[txp];
2213 fs->tx_queue = txq + j % nb_tx_queue;
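/* j wraps modulo nb_tx_queue: when a traffic class has fewer TX
 * than RX queues, several RX queues share the same TX queue.
 */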
fs->peer_addr = fs->tx_port;
2215 fs->retry_enabled = retry_enabled;
2216 }
2217 fwd_lcores[lc_id]->stream_nb +=
2218 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2219 }
2220 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2221
2222 tc++;
2223 if (tc < rxp_dcb_info.nb_tcs)
2224 continue;
2225 /* Restart from TC 0 on next RX port */
2226 tc = 0;
2227 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2228 rxp = (portid_t)
2229 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2230 else
2231 rxp++;
2232 if (rxp >= nb_fwd_ports)
2233 return;
2234 /* get the dcb information on next RX and TX ports */
2235 if ((rxp & 0x1) == 0)
2236 txp = (portid_t) (rxp + 1);
2237 else
2238 txp = (portid_t) (rxp - 1);
2239 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2240 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2241 }
2242 }
2243
2244 static void
2245 icmp_echo_config_setup(void)
2246 {
2247 portid_t rxp;
2248 queueid_t rxq;
2249 lcoreid_t lc_id;
2250 uint16_t sm_id;
2251
2252 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2253 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2254 (nb_txq * nb_fwd_ports);
2255 else
2256 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2257 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2258 cur_fwd_config.nb_fwd_streams =
2259 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2260 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2261 cur_fwd_config.nb_fwd_lcores =
2262 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2263 if (verbose_level > 0) {
2264 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2265 __func__,
2266 cur_fwd_config.nb_fwd_lcores,
2267 cur_fwd_config.nb_fwd_ports,
2268 cur_fwd_config.nb_fwd_streams);
2269 }
2270
2271 /* reinitialize forwarding streams */
2272 init_fwd_streams();
2273 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2274 rxp = 0; rxq = 0;
2275 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2276 if (verbose_level > 0)
2277 printf(" core=%d:\n", lc_id);
2278 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2279 struct fwd_stream *fs;
2280 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2281 fs->rx_port = fwd_ports_ids[rxp];
2282 fs->rx_queue = rxq;
2283 fs->tx_port = fs->rx_port;
2284 fs->tx_queue = rxq;
2285 fs->peer_addr = fs->tx_port;
2286 fs->retry_enabled = retry_enabled;
2287 if (verbose_level > 0)
2288 printf(" stream=%d port=%d rxq=%d txq=%d\n",
2289 sm_id, fs->rx_port, fs->rx_queue,
2290 fs->tx_queue);
2291 rxq = (queueid_t) (rxq + 1);
2292 if (rxq == nb_rxq) {
2293 rxq = 0;
2294 rxp = (portid_t) (rxp + 1);
2295 }
2296 }
2297 }
2298 }
2299
2300 void
2301 fwd_config_setup(void)
2302 {
2303 cur_fwd_config.fwd_eng = cur_fwd_eng;
2304 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2305 icmp_echo_config_setup();
2306 return;
2307 }
2308 if ((nb_rxq > 1) && (nb_txq > 1)) {
2309 if (dcb_config)
2310 dcb_fwd_config_setup();
2311 else
2312 rss_fwd_config_setup();
2313 }
2314 else
2315 simple_fwd_config_setup();
2316 }
2317
2318 void
2319 pkt_fwd_config_display(struct fwd_config *cfg)
2320 {
2321 struct fwd_stream *fs;
2322 lcoreid_t lc_id;
2323 streamid_t sm_id;
2324
2325 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2326 "NUMA support %s, MP over anonymous pages %s\n",
2327 cfg->fwd_eng->fwd_mode_name,
2328 retry_enabled == 0 ? "" : " with retry",
2329 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2330 numa_support == 1 ? "enabled" : "disabled",
2331 mp_anon != 0 ?
"enabled" : "disabled");
2332
2333 if (retry_enabled)
2334 printf("TX retry num: %u, delay between TX retries: %uus\n",
2335 burst_tx_retry_num, burst_tx_delay_time);
2336 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2337 printf("Logical Core %u (socket %u) forwards packets on "
2338 "%d streams:",
2339 fwd_lcores_cpuids[lc_id],
2340 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2341 fwd_lcores[lc_id]->stream_nb);
2342 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2343 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2344 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
2345 "P=%d/Q=%d (socket %u) ",
2346 fs->rx_port, fs->rx_queue,
2347 ports[fs->rx_port].socket_id,
2348 fs->tx_port, fs->tx_queue,
2349 ports[fs->tx_port].socket_id);
2350 print_ethaddr("peer=",
2351 &peer_eth_addrs[fs->peer_addr]);
2352 }
2353 printf("\n");
2354 }
2355 printf("\n");
2356 }
2357
2358 void
2359 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2360 {
2361 uint8_t c, new_peer_addr[6];
2362 if (!rte_eth_dev_is_valid_port(port_id)) {
2363 printf("Error: Invalid port number %i\n", port_id);
2364 return;
2365 }
2366 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr,
2367 sizeof(new_peer_addr)) < 0) {
2368 printf("Error: Invalid Ethernet address: %s\n", peer_addr);
2369 return;
2370 }
2371 for (c = 0; c < 6; c++)
2372 peer_eth_addrs[port_id].addr_bytes[c] =
2373 new_peer_addr[c];
2374 }
2375
2376 int
2377 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2378 {
2379 unsigned int i;
2380 unsigned int lcore_cpuid;
2381 int record_now;
2382
2383 record_now = 0;
2384 again:
2385 for (i = 0; i < nb_lc; i++) {
2386 lcore_cpuid = lcorelist[i];
2387 if (!rte_lcore_is_enabled(lcore_cpuid)) {
2388 printf("lcore %u not enabled\n", lcore_cpuid);
2389 return -1;
2390 }
2391 if (lcore_cpuid == rte_get_master_lcore()) {
2392 printf("lcore %u cannot be used for packet "
2393 "forwarding: it is the master lcore, "
2394 "reserved for command line parsing only\n",
2395 lcore_cpuid);
2396 return -1;
2397 }
2398 if (record_now)
2399 fwd_lcores_cpuids[i] = lcore_cpuid;
2400 }
2401 if (record_now == 0) {
2402 record_now = 1;
2403 goto again;
2404 }
2405 nb_cfg_lcores = (lcoreid_t) nb_lc;
2406 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2407 printf("previous number of forwarding cores %u - changed to "
2408 "number of configured cores %u\n",
2409 (unsigned int) nb_fwd_lcores, nb_lc);
2410 nb_fwd_lcores = (lcoreid_t) nb_lc;
2411 }
2412
2413 return 0;
2414 }
2415
2416 int
2417 set_fwd_lcores_mask(uint64_t lcoremask)
2418 {
2419 unsigned int lcorelist[64];
2420 unsigned int nb_lc;
2421 unsigned int i;
2422
2423 if (lcoremask == 0) {
2424 printf("Invalid empty mask of cores\n");
2425 return -1;
2426 }
2427 nb_lc = 0;
2428 for (i = 0; i < 64; i++) {
2429 if (!((uint64_t)(1ULL << i) & lcoremask))
2430 continue;
2431 lcorelist[nb_lc++] = i;
2432 }
2433 return set_fwd_lcores_list(lcorelist, nb_lc);
2434 }
2435
2436 void
2437 set_fwd_lcores_number(uint16_t nb_lc)
2438 {
2439 if (nb_lc > nb_cfg_lcores) {
2440 printf("nb fwd cores %u > %u (max.
number of configured " 2441 "lcores) - ignored\n", 2442 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2443 return; 2444 } 2445 nb_fwd_lcores = (lcoreid_t) nb_lc; 2446 printf("Number of forwarding cores set to %u\n", 2447 (unsigned int) nb_fwd_lcores); 2448 } 2449 2450 void 2451 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2452 { 2453 unsigned int i; 2454 portid_t port_id; 2455 int record_now; 2456 2457 record_now = 0; 2458 again: 2459 for (i = 0; i < nb_pt; i++) { 2460 port_id = (portid_t) portlist[i]; 2461 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2462 return; 2463 if (record_now) 2464 fwd_ports_ids[i] = port_id; 2465 } 2466 if (record_now == 0) { 2467 record_now = 1; 2468 goto again; 2469 } 2470 nb_cfg_ports = (portid_t) nb_pt; 2471 if (nb_fwd_ports != (portid_t) nb_pt) { 2472 printf("previous number of forwarding ports %u - changed to " 2473 "number of configured ports %u\n", 2474 (unsigned int) nb_fwd_ports, nb_pt); 2475 nb_fwd_ports = (portid_t) nb_pt; 2476 } 2477 } 2478 2479 void 2480 set_fwd_ports_mask(uint64_t portmask) 2481 { 2482 unsigned int portlist[64]; 2483 unsigned int nb_pt; 2484 unsigned int i; 2485 2486 if (portmask == 0) { 2487 printf("Invalid NULL mask of ports\n"); 2488 return; 2489 } 2490 nb_pt = 0; 2491 RTE_ETH_FOREACH_DEV(i) { 2492 if (! ((uint64_t)(1ULL << i) & portmask)) 2493 continue; 2494 portlist[nb_pt++] = i; 2495 } 2496 set_fwd_ports_list(portlist, nb_pt); 2497 } 2498 2499 void 2500 set_fwd_ports_number(uint16_t nb_pt) 2501 { 2502 if (nb_pt > nb_cfg_ports) { 2503 printf("nb fwd ports %u > %u (number of configured " 2504 "ports) - ignored\n", 2505 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2506 return; 2507 } 2508 nb_fwd_ports = (portid_t) nb_pt; 2509 printf("Number of forwarding ports set to %u\n", 2510 (unsigned int) nb_fwd_ports); 2511 } 2512 2513 int 2514 port_is_forwarding(portid_t port_id) 2515 { 2516 unsigned int i; 2517 2518 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2519 return -1; 2520 2521 for (i = 0; i < nb_fwd_ports; i++) { 2522 if (fwd_ports_ids[i] == port_id) 2523 return 1; 2524 } 2525 2526 return 0; 2527 } 2528 2529 void 2530 set_nb_pkt_per_burst(uint16_t nb) 2531 { 2532 if (nb > MAX_PKT_BURST) { 2533 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2534 " ignored\n", 2535 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2536 return; 2537 } 2538 nb_pkt_per_burst = nb; 2539 printf("Number of packets per burst set to %u\n", 2540 (unsigned int) nb_pkt_per_burst); 2541 } 2542 2543 static const char * 2544 tx_split_get_name(enum tx_pkt_split split) 2545 { 2546 uint32_t i; 2547 2548 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2549 if (tx_split_name[i].split == split) 2550 return tx_split_name[i].name; 2551 } 2552 return NULL; 2553 } 2554 2555 void 2556 set_tx_pkt_split(const char *name) 2557 { 2558 uint32_t i; 2559 2560 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2561 if (strcmp(tx_split_name[i].name, name) == 0) { 2562 tx_pkt_split = tx_split_name[i].split; 2563 return; 2564 } 2565 } 2566 printf("unknown value: \"%s\"\n", name); 2567 } 2568 2569 void 2570 show_tx_pkt_segments(void) 2571 { 2572 uint32_t i, n; 2573 const char *split; 2574 2575 n = tx_pkt_nb_segs; 2576 split = tx_split_get_name(tx_pkt_split); 2577 2578 printf("Number of segments: %u\n", n); 2579 printf("Segment sizes: "); 2580 for (i = 0; i != n - 1; i++) 2581 printf("%hu,", tx_pkt_seg_lengths[i]); 2582 printf("%hu\n", tx_pkt_seg_lengths[i]); 2583 printf("Split packet: %s\n", split); 2584 } 2585 2586 void 2587 
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2588 {
2589 uint16_t tx_pkt_len;
2590 unsigned i;
2591
2592 if (nb_segs >= (unsigned) nb_txd) {
2593 printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n",
2594 nb_segs, (unsigned int) nb_txd);
2595 return;
2596 }
2597
2598 /*
2599 * Check that each segment length is less than or equal to
2600 * the mbuf data size.
2601 * Check also that the total packet length is greater than or equal to
2602 * the size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
2603 */
2604 tx_pkt_len = 0;
2605 for (i = 0; i < nb_segs; i++) {
2606 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2607 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2608 i, seg_lengths[i], (unsigned) mbuf_data_size);
2609 return;
2610 }
2611 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2612 }
2613 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
2614 printf("total packet length=%u < %d - give up\n",
2615 (unsigned) tx_pkt_len,
2616 (int)(sizeof(struct ether_hdr) + 20 + 8));
2617 return;
2618 }
2619
2620 for (i = 0; i < nb_segs; i++)
2621 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2622
2623 tx_pkt_length = tx_pkt_len;
2624 tx_pkt_nb_segs = (uint8_t) nb_segs;
2625 }
2626
2627 void
2628 setup_gro(const char *onoff, portid_t port_id)
2629 {
2630 if (!rte_eth_dev_is_valid_port(port_id)) {
2631 printf("invalid port id %u\n", port_id);
2632 return;
2633 }
2634 if (test_done == 0) {
2635 printf("Before enabling/disabling GRO,"
2636 " please stop forwarding first\n");
2637 return;
2638 }
2639 if (strcmp(onoff, "on") == 0) {
2640 if (gro_ports[port_id].enable != 0) {
2641 printf("GRO is already enabled on port %u. Please"
2642 " disable it first\n", port_id);
2643 return;
2644 }
2645 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2646 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2647 gro_ports[port_id].param.max_flow_num =
2648 GRO_DEFAULT_FLOW_NUM;
2649 gro_ports[port_id].param.max_item_per_flow =
2650 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2651 }
2652 gro_ports[port_id].enable = 1;
2653 } else {
2654 if (gro_ports[port_id].enable == 0) {
2655 printf("GRO is not enabled on port %u\n", port_id);
2656 return;
2657 }
2658 gro_ports[port_id].enable = 0;
2659 }
2660 }
2661
2662 void
2663 setup_gro_flush_cycles(uint8_t cycles)
2664 {
2665 if (test_done == 0) {
2666 printf("Before changing the GRO flush interval,"
2667 " please stop forwarding first.\n");
2668 return;
2669 }
2670
2671 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2672 GRO_DEFAULT_FLUSH_CYCLES) {
2673 printf("The flushing cycle must be in the range"
2674 " of 1 to %u.
Revert to the default"
2675 " value %u.\n",
2676 GRO_MAX_FLUSH_CYCLES,
2677 GRO_DEFAULT_FLUSH_CYCLES);
2678 cycles = GRO_DEFAULT_FLUSH_CYCLES;
2679 }
2680
2681 gro_flush_cycles = cycles;
2682 }
2683
2684 void
2685 show_gro(portid_t port_id)
2686 {
2687 struct rte_gro_param *param;
2688 uint32_t max_pkts_num;
2689
2690 param = &gro_ports[port_id].param;
2691
2692 if (!rte_eth_dev_is_valid_port(port_id)) {
2693 printf("Invalid port id %u.\n", port_id);
2694 return;
2695 }
2696 if (gro_ports[port_id].enable) {
2697 printf("GRO type: TCP/IPv4\n");
2698 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2699 max_pkts_num = param->max_flow_num *
2700 param->max_item_per_flow;
2701 } else
2702 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
2703 printf("Max number of packets to perform GRO: %u\n",
2704 max_pkts_num);
2705 printf("Flushing cycles: %u\n", gro_flush_cycles);
2706 } else
2707 printf("GRO is not enabled on port %u.\n", port_id);
2708 }
2709
2710 void
2711 setup_gso(const char *mode, portid_t port_id)
2712 {
2713 if (!rte_eth_dev_is_valid_port(port_id)) {
2714 printf("invalid port id %u\n", port_id);
2715 return;
2716 }
2717 if (strcmp(mode, "on") == 0) {
2718 if (test_done == 0) {
2719 printf("before enabling GSO,"
2720 " please stop forwarding first\n");
2721 return;
2722 }
2723 gso_ports[port_id].enable = 1;
2724 } else if (strcmp(mode, "off") == 0) {
2725 if (test_done == 0) {
2726 printf("before disabling GSO,"
2727 " please stop forwarding first\n");
2728 return;
2729 }
2730 gso_ports[port_id].enable = 0;
2731 }
2732 }
2733
2734 char *
2735 list_pkt_forwarding_modes(void)
2736 {
2737 static char fwd_modes[128] = "";
2738 const char *separator = "|";
2739 struct fwd_engine *fwd_eng;
2740 unsigned i = 0;
2741
2742 if (strlen(fwd_modes) == 0) {
2743 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2744 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2745 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2746 strncat(fwd_modes, separator,
2747 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2748 }
2749 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2750 }
2751
2752 return fwd_modes;
2753 }
2754
2755 char *
2756 list_pkt_forwarding_retry_modes(void)
2757 {
2758 static char fwd_modes[128] = "";
2759 const char *separator = "|";
2760 struct fwd_engine *fwd_eng;
2761 unsigned i = 0;
2762
2763 if (strlen(fwd_modes) == 0) {
2764 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2765 if (fwd_eng == &rx_only_engine)
2766 continue;
2767 strncat(fwd_modes, fwd_eng->fwd_mode_name,
2768 sizeof(fwd_modes) -
2769 strlen(fwd_modes) - 1);
2770 strncat(fwd_modes, separator,
2771 sizeof(fwd_modes) -
2772 strlen(fwd_modes) - 1);
2773 }
2774 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2775 }
2776
2777 return fwd_modes;
2778 }
2779
2780 void
2781 set_pkt_forwarding_mode(const char *fwd_mode_name)
2782 {
2783 struct fwd_engine *fwd_eng;
2784 unsigned i;
2785
2786 i = 0;
2787 while ((fwd_eng = fwd_engines[i]) != NULL) {
2788 if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
2789 printf("Set %s packet forwarding mode%s\n",
2790 fwd_mode_name,
2791 retry_enabled == 0 ?
"" : " with retry");
2792 cur_fwd_eng = fwd_eng;
2793 return;
2794 }
2795 i++;
2796 }
2797 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2798 }
2799
2800 void
2801 set_verbose_level(uint16_t vb_level)
2802 {
2803 printf("Change verbose level from %u to %u\n",
2804 (unsigned int) verbose_level, (unsigned int) vb_level);
2805 verbose_level = vb_level;
2806 }
2807
2808 void
2809 vlan_extend_set(portid_t port_id, int on)
2810 {
2811 int diag;
2812 int vlan_offload;
2813 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2814
2815 if (port_id_is_invalid(port_id, ENABLED_WARN))
2816 return;
2817
2818 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2819
2820 if (on) {
2821 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
2822 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2823 } else {
2824 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
2825 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2826 }
2827
2828 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2829 if (diag < 0)
2830 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
2831 "diag=%d\n", port_id, on, diag);
2832 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2833 }
2834
2835 void
2836 rx_vlan_strip_set(portid_t port_id, int on)
2837 {
2838 int diag;
2839 int vlan_offload;
2840 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2841
2842 if (port_id_is_invalid(port_id, ENABLED_WARN))
2843 return;
2844
2845 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2846
2847 if (on) {
2848 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
2849 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2850 } else {
2851 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
2852 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2853 }
2854
2855 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2856 if (diag < 0)
2857 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
2858 "diag=%d\n", port_id, on, diag);
2859 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2860 }
2861
2862 void
2863 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
2864 {
2865 int diag;
2866
2867 if (port_id_is_invalid(port_id, ENABLED_WARN))
2868 return;
2869
2870 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
2871 if (diag < 0)
2872 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
2873 "diag=%d\n", port_id, queue_id, on, diag);
2874 }
2875
2876 void
2877 rx_vlan_filter_set(portid_t port_id, int on)
2878 {
2879 int diag;
2880 int vlan_offload;
2881 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2882
2883 if (port_id_is_invalid(port_id, ENABLED_WARN))
2884 return;
2885
2886 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2887
2888 if (on) {
2889 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
2890 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2891 } else {
2892 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
2893 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
2894 }
2895
2896 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2897 if (diag < 0)
2898 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
2899 "diag=%d\n", port_id, on, diag);
2900 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2901 }
2902
2903 int
2904 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
2905 {
2906 int diag;
2907
2908 if (port_id_is_invalid(port_id, ENABLED_WARN))
2909 return 1;
2910 if (vlan_id_is_invalid(vlan_id))
2911 return 1;
2912 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2913 if (diag == 0)
2914 return 0;
2915 printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
2916 "diag=%d\n",
2917 port_id, vlan_id, on, diag);
2918 return -1;
2919 }
2920
2921 void
2922 rx_vlan_all_filter_set(portid_t port_id, int on)
2923 {
2924 uint16_t vlan_id;
2925
2926 if (port_id_is_invalid(port_id, ENABLED_WARN))
2927 return;
2928 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
2929 if (rx_vft_set(port_id, vlan_id, on))
2930 break;
2931 }
2932 }
2933
2934 void
2935 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
2936 {
2937 int diag;
2938
2939 if (port_id_is_invalid(port_id, ENABLED_WARN))
2940 return;
2941
2942 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
2943 if (diag == 0)
2944 return;
2945
2946 printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
2947 "diag=%d\n",
2948 port_id, vlan_type, tp_id, diag);
2949 }
2950
2951 void
2952 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
2953 {
2954 int vlan_offload;
2955 struct rte_eth_dev_info dev_info;
2956
2957 if (port_id_is_invalid(port_id, ENABLED_WARN))
2958 return;
2959 if (vlan_id_is_invalid(vlan_id))
2960 return;
2961
2962 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2963 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
2964 printf("Error: single VLAN insert not allowed, as QinQ has been enabled.\n");
2965 return;
2966 }
2967 rte_eth_dev_info_get(port_id, &dev_info);
2968 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
2969 printf("Error: VLAN insert is not supported by port %d\n",
2970 port_id);
2971 return;
2972 }
2973
2974 tx_vlan_reset(port_id);
2975 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
2976 ports[port_id].tx_vlan_id = vlan_id;
2977 }
2978
2979 void
2980 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
2981 {
2982 int vlan_offload;
2983 struct rte_eth_dev_info dev_info;
2984
2985 if (port_id_is_invalid(port_id, ENABLED_WARN))
2986 return;
2987 if (vlan_id_is_invalid(vlan_id))
2988 return;
2989 if (vlan_id_is_invalid(vlan_id_outer))
2990 return;
2991
2992 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2993 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
2994 printf("Error: QinQ insert not allowed, as QinQ has not been enabled.\n");
2995 return;
2996 }
2997 rte_eth_dev_info_get(port_id, &dev_info);
2998 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
2999 printf("Error: QinQ insert not supported by port %d\n",
3000 port_id);
3001 return;
3002 }
3003
3004 tx_vlan_reset(port_id);
3005 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT;
3006 ports[port_id].tx_vlan_id = vlan_id;
3007 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3008 }
3009
3010 void
3011 tx_vlan_reset(portid_t port_id)
3012 {
3013 if (port_id_is_invalid(port_id, ENABLED_WARN))
3014 return;
3015 ports[port_id].dev_conf.txmode.offloads &=
3016 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3017 DEV_TX_OFFLOAD_QINQ_INSERT);
3018 ports[port_id].tx_vlan_id = 0;
3019 ports[port_id].tx_vlan_id_outer = 0;
3020 }
3021
3022 void
3023 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3024 {
3025 if (port_id_is_invalid(port_id, ENABLED_WARN))
3026 return;
3027
3028 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
3029 }
3030
3031 void
3032 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3033 {
3034 uint16_t i;
3035 uint8_t existing_mapping_found = 0;
3036
3037 if (port_id_is_invalid(port_id, ENABLED_WARN))
3038 return;
3039
3040 if (is_rx ?
(rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3041 return; 3042 3043 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3044 printf("map_value not in required range 0..%d\n", 3045 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3046 return; 3047 } 3048 3049 if (!is_rx) { /*then tx*/ 3050 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3051 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3052 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3053 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3054 existing_mapping_found = 1; 3055 break; 3056 } 3057 } 3058 if (!existing_mapping_found) { /* A new additional mapping... */ 3059 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3060 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3061 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3062 nb_tx_queue_stats_mappings++; 3063 } 3064 } 3065 else { /*rx*/ 3066 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3067 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3068 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3069 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3070 existing_mapping_found = 1; 3071 break; 3072 } 3073 } 3074 if (!existing_mapping_found) { /* A new additional mapping... */ 3075 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3076 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3077 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3078 nb_rx_queue_stats_mappings++; 3079 } 3080 } 3081 } 3082 3083 void 3084 set_xstats_hide_zero(uint8_t on_off) 3085 { 3086 xstats_hide_zero = on_off; 3087 } 3088 3089 static inline void 3090 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3091 { 3092 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3093 3094 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3095 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3096 " tunnel_id: 0x%08x", 3097 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3098 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3099 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3100 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3101 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3102 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3103 3104 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3105 rte_be_to_cpu_16(mask->src_port_mask), 3106 rte_be_to_cpu_16(mask->dst_port_mask)); 3107 3108 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3109 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3110 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3111 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3112 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3113 3114 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3115 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3116 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3117 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3118 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3119 } 3120 3121 printf("\n"); 3122 } 3123 3124 static inline void 3125 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3126 { 3127 struct rte_eth_flex_payload_cfg *cfg; 3128 uint32_t i, j; 3129 3130 for (i = 0; i < flex_conf->nb_payloads; i++) { 3131 cfg = &flex_conf->flex_set[i]; 3132 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3133 printf("\n RAW: "); 3134 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3135 printf("\n L2_PAYLOAD: "); 3136 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3137 printf("\n 
L3_PAYLOAD: "); 3138 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3139 printf("\n L4_PAYLOAD: "); 3140 else 3141 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3142 for (j = 0; j < num; j++) 3143 printf(" %-5u", cfg->src_offset[j]); 3144 } 3145 printf("\n"); 3146 } 3147 3148 static char * 3149 flowtype_to_str(uint16_t flow_type) 3150 { 3151 struct flow_type_info { 3152 char str[32]; 3153 uint16_t ftype; 3154 }; 3155 3156 uint8_t i; 3157 static struct flow_type_info flowtype_str_table[] = { 3158 {"raw", RTE_ETH_FLOW_RAW}, 3159 {"ipv4", RTE_ETH_FLOW_IPV4}, 3160 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3161 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3162 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3163 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3164 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3165 {"ipv6", RTE_ETH_FLOW_IPV6}, 3166 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3167 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3168 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3169 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3170 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3171 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3172 {"port", RTE_ETH_FLOW_PORT}, 3173 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3174 {"geneve", RTE_ETH_FLOW_GENEVE}, 3175 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3176 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3177 }; 3178 3179 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3180 if (flowtype_str_table[i].ftype == flow_type) 3181 return flowtype_str_table[i].str; 3182 } 3183 3184 return NULL; 3185 } 3186 3187 static inline void 3188 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3189 { 3190 struct rte_eth_fdir_flex_mask *mask; 3191 uint32_t i, j; 3192 char *p; 3193 3194 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3195 mask = &flex_conf->flex_mask[i]; 3196 p = flowtype_to_str(mask->flow_type); 3197 printf("\n %s:\t", p ? 
p : "unknown"); 3198 for (j = 0; j < num; j++) 3199 printf(" %02x", mask->mask[j]); 3200 } 3201 printf("\n"); 3202 } 3203 3204 static inline void 3205 print_fdir_flow_type(uint32_t flow_types_mask) 3206 { 3207 int i; 3208 char *p; 3209 3210 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3211 if (!(flow_types_mask & (1 << i))) 3212 continue; 3213 p = flowtype_to_str(i); 3214 if (p) 3215 printf(" %s", p); 3216 else 3217 printf(" unknown"); 3218 } 3219 printf("\n"); 3220 } 3221 3222 void 3223 fdir_get_infos(portid_t port_id) 3224 { 3225 struct rte_eth_fdir_stats fdir_stat; 3226 struct rte_eth_fdir_info fdir_info; 3227 int ret; 3228 3229 static const char *fdir_stats_border = "########################"; 3230 3231 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3232 return; 3233 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3234 if (ret < 0) { 3235 printf("\n FDIR is not supported on port %-2d\n", 3236 port_id); 3237 return; 3238 } 3239 3240 memset(&fdir_info, 0, sizeof(fdir_info)); 3241 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3242 RTE_ETH_FILTER_INFO, &fdir_info); 3243 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3244 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3245 RTE_ETH_FILTER_STATS, &fdir_stat); 3246 printf("\n %s FDIR infos for port %-2d %s\n", 3247 fdir_stats_border, port_id, fdir_stats_border); 3248 printf(" MODE: "); 3249 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3250 printf(" PERFECT\n"); 3251 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3252 printf(" PERFECT-MAC-VLAN\n"); 3253 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3254 printf(" PERFECT-TUNNEL\n"); 3255 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3256 printf(" SIGNATURE\n"); 3257 else 3258 printf(" DISABLE\n"); 3259 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3260 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3261 printf(" SUPPORTED FLOW TYPE: "); 3262 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3263 } 3264 printf(" FLEX PAYLOAD INFO:\n"); 3265 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3266 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3267 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3268 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3269 fdir_info.flex_payload_unit, 3270 fdir_info.max_flex_payload_segment_num, 3271 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3272 printf(" MASK: "); 3273 print_fdir_mask(&fdir_info.mask); 3274 if (fdir_info.flex_conf.nb_payloads > 0) { 3275 printf(" FLEX PAYLOAD SRC OFFSET:"); 3276 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3277 } 3278 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3279 printf(" FLEX MASK CFG:"); 3280 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3281 } 3282 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3283 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3284 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3285 fdir_info.guarant_spc, fdir_info.best_spc); 3286 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3287 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3288 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3289 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3290 fdir_stat.collision, fdir_stat.free, 3291 fdir_stat.maxhash, fdir_stat.maxlen, 3292 fdir_stat.add, fdir_stat.remove, 3293 fdir_stat.f_add, fdir_stat.f_remove); 3294 printf(" %s############################%s\n", 3295 fdir_stats_border, fdir_stats_border); 
3296 }
3297
3298 void
3299 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3300 {
3301 struct rte_port *port;
3302 struct rte_eth_fdir_flex_conf *flex_conf;
3303 int i, idx = 0;
3304
3305 port = &ports[port_id];
3306 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3307 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3308 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3309 idx = i;
3310 break;
3311 }
3312 }
3313 if (i >= RTE_ETH_FLOW_MAX) {
3314 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3315 idx = flex_conf->nb_flexmasks;
3316 flex_conf->nb_flexmasks++;
3317 } else {
3318 printf("The flex mask table is full. Cannot set flex"
3319 " mask for flow_type(%u).\n", cfg->flow_type);
3320 return;
3321 }
3322 }
3323 rte_memcpy(&flex_conf->flex_mask[idx],
3324 cfg,
3325 sizeof(struct rte_eth_fdir_flex_mask));
3326 }
3327
3328 void
3329 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3330 {
3331 struct rte_port *port;
3332 struct rte_eth_fdir_flex_conf *flex_conf;
3333 int i, idx = 0;
3334
3335 port = &ports[port_id];
3336 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3337 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3338 if (cfg->type == flex_conf->flex_set[i].type) {
3339 idx = i;
3340 break;
3341 }
3342 }
3343 if (i >= RTE_ETH_PAYLOAD_MAX) {
3344 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3345 idx = flex_conf->nb_payloads;
3346 flex_conf->nb_payloads++;
3347 } else {
3348 printf("The flex payload table is full. Cannot set"
3349 " flex payload for type(%u).\n", cfg->type);
3350 return;
3351 }
3352 }
3353 rte_memcpy(&flex_conf->flex_set[idx],
3354 cfg,
3355 sizeof(struct rte_eth_flex_payload_cfg));
3356
3357 }
3358
3359 void
3360 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3361 {
3362 #ifdef RTE_LIBRTE_IXGBE_PMD
3363 int diag;
3364
3365 if (is_rx)
3366 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3367 else
3368 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3369
3370 if (diag == 0)
3371 return;
3372 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3373 is_rx ? "rx" : "tx", port_id, diag);
3374 return;
3375 #endif
3376 printf("VF %s setting not supported for port %d\n",
3377 is_rx ? "Rx" : "Tx", port_id);
3378 RTE_SET_USED(vf);
3379 RTE_SET_USED(on);
3380 }
3381
3382 int
3383 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3384 {
3385 int diag;
3386 struct rte_eth_link link;
3387
3388 if (port_id_is_invalid(port_id, ENABLED_WARN))
3389 return 1;
3390 rte_eth_link_get_nowait(port_id, &link);
3391 if (rate > link.link_speed) {
3392 printf("Invalid rate value: %u, bigger than link speed: %u\n",
3393 rate, link.link_speed);
3394 return 1;
3395 }
3396 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3397 if (diag == 0)
3398 return diag;
3399 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3400 port_id, diag);
3401 return diag;
3402 }
3403
3404 int
3405 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3406 {
3407 int diag = -ENOTSUP;
3408
3409 RTE_SET_USED(vf);
3410 RTE_SET_USED(rate);
3411 RTE_SET_USED(q_msk);
3412
3413 #ifdef RTE_LIBRTE_IXGBE_PMD
3414 if (diag == -ENOTSUP)
3415 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3416 q_msk);
3417 #endif
3418 #ifdef RTE_LIBRTE_BNXT_PMD
3419 if (diag == -ENOTSUP)
3420 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3421 #endif
3422 if (diag == 0)
3423 return diag;
3424
3425 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3426 port_id, diag);
3427 return diag;
3428 }
3429
3430 /*
3431 * Functions to manage the set of filtered Multicast MAC addresses.
3432 *
3433 * A pool of filtered multicast MAC addresses is associated with each port.
3434 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3435 * The address of the pool and the number of valid multicast MAC addresses
3436 * recorded in the pool are stored in the fields "mc_addr_pool" and
3437 * "mc_addr_nb" of the "rte_port" data structure.
3438 *
3439 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
3440 * being supplied a contiguous array of multicast MAC addresses.
3441 * To comply with this constraint, the set of multicast addresses recorded
3442 * in the pool is systematically compacted at the beginning of the pool.
3443 * Hence, when a multicast address is removed from the pool, all following
3444 * addresses, if any, are copied back to keep the set contiguous.
3445 */
3446 #define MCAST_POOL_INC 32
3447
3448 static int
3449 mcast_addr_pool_extend(struct rte_port *port)
3450 {
3451 struct ether_addr *mc_pool;
3452 size_t mc_pool_size;
3453
3454 /*
3455 * If a free entry is available at the end of the pool, just
3456 * increment the number of recorded multicast addresses.
3457 */
3458 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3459 port->mc_addr_nb++;
3460 return 0;
3461 }
3462
3463 /*
3464 * [re]allocate a pool with MCAST_POOL_INC more entries.
3465 * The previous test guarantees that port->mc_addr_nb is a multiple
3466 * of MCAST_POOL_INC.
3467 */
3468 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
3469 MCAST_POOL_INC);
3470 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
3471 mc_pool_size);
3472 if (mc_pool == NULL) {
3473 printf("allocation of pool of %u multicast addresses failed\n",
3474 port->mc_addr_nb + MCAST_POOL_INC);
3475 return -ENOMEM;
3476 }
3477
3478 port->mc_addr_pool = mc_pool;
3479 port->mc_addr_nb++;
3480 return 0;
3481
3482 }
3483
3484 static void
3485 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3486 {
3487 port->mc_addr_nb--;
3488 if (addr_idx == port->mc_addr_nb) {
3489 /* No need to recompact the set of multicast addresses. */
3490 if (port->mc_addr_nb == 0) {
3491 /* free the pool of multicast addresses. */
3492 free(port->mc_addr_pool);
3493 port->mc_addr_pool = NULL;
3494 }
3495 return;
3496 }
3497 memmove(&port->mc_addr_pool[addr_idx],
3498 &port->mc_addr_pool[addr_idx + 1],
3499 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
3500 }
3501
3502 static void
3503 eth_port_multicast_addr_list_set(portid_t port_id)
3504 {
3505 struct rte_port *port;
3506 int diag;
3507
3508 port = &ports[port_id];
3509 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3510 port->mc_addr_nb);
3511 if (diag == 0)
3512 return;
3513 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3514 port_id, port->mc_addr_nb, -diag);
3515 }
3516
3517 void
3518 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr)
3519 {
3520 struct rte_port *port;
3521 uint32_t i;
3522
3523 if (port_id_is_invalid(port_id, ENABLED_WARN))
3524 return;
3525
3526 port = &ports[port_id];
3527
3528 /*
3529 * Check that the added multicast MAC address is not already recorded
3530 * in the pool of multicast addresses.
3531 */
3532 for (i = 0; i < port->mc_addr_nb; i++) {
3533 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3534 printf("multicast address already filtered by port\n");
3535 return;
3536 }
3537 }
3538
3539 if (mcast_addr_pool_extend(port) != 0)
3540 return;
3541 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3542 eth_port_multicast_addr_list_set(port_id);
3543 }
3544
3545 void
3546 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr)
3547 {
3548 struct rte_port *port;
3549 uint32_t i;
3550
3551 if (port_id_is_invalid(port_id, ENABLED_WARN))
3552 return;
3553
3554 port = &ports[port_id];
3555
3556 /*
3557 * Search the pool of multicast MAC addresses for the removed address.
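 * A linear scan is sufficient, as the pool is kept contiguous.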
3558 */ 3559 for (i = 0; i < port->mc_addr_nb; i++) { 3560 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3561 break; 3562 } 3563 if (i == port->mc_addr_nb) { 3564 printf("multicast address not filtered by port %d\n", port_id); 3565 return; 3566 } 3567 3568 mcast_addr_pool_remove(port, i); 3569 eth_port_multicast_addr_list_set(port_id); 3570 } 3571 3572 void 3573 port_dcb_info_display(portid_t port_id) 3574 { 3575 struct rte_eth_dcb_info dcb_info; 3576 uint16_t i; 3577 int ret; 3578 static const char *border = "================"; 3579 3580 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3581 return; 3582 3583 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3584 if (ret) { 3585 printf("\n Failed to get dcb infos on port %-2d\n", 3586 port_id); 3587 return; 3588 } 3589 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3590 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3591 printf("\n TC : "); 3592 for (i = 0; i < dcb_info.nb_tcs; i++) 3593 printf("\t%4d", i); 3594 printf("\n Priority : "); 3595 for (i = 0; i < dcb_info.nb_tcs; i++) 3596 printf("\t%4d", dcb_info.prio_tc[i]); 3597 printf("\n BW percent :"); 3598 for (i = 0; i < dcb_info.nb_tcs; i++) 3599 printf("\t%4d%%", dcb_info.tc_bws[i]); 3600 printf("\n RXQ base : "); 3601 for (i = 0; i < dcb_info.nb_tcs; i++) 3602 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3603 printf("\n RXQ number :"); 3604 for (i = 0; i < dcb_info.nb_tcs; i++) 3605 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3606 printf("\n TXQ base : "); 3607 for (i = 0; i < dcb_info.nb_tcs; i++) 3608 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3609 printf("\n TXQ number :"); 3610 for (i = 0; i < dcb_info.nb_tcs; i++) 3611 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3612 printf("\n"); 3613 } 3614 3615 uint8_t * 3616 open_file(const char *file_path, uint32_t *size) 3617 { 3618 int fd = open(file_path, O_RDONLY); 3619 off_t pkg_size; 3620 uint8_t *buf = NULL; 3621 int ret = 0; 3622 struct stat st_buf; 3623 3624 if (size) 3625 *size = 0; 3626 3627 if (fd == -1) { 3628 printf("%s: Failed to open %s\n", __func__, file_path); 3629 return buf; 3630 } 3631 3632 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3633 close(fd); 3634 printf("%s: File operations failed\n", __func__); 3635 return buf; 3636 } 3637 3638 pkg_size = st_buf.st_size; 3639 if (pkg_size < 0) { 3640 close(fd); 3641 printf("%s: File operations failed\n", __func__); 3642 return buf; 3643 } 3644 3645 buf = (uint8_t *)malloc(pkg_size); 3646 if (!buf) { 3647 close(fd); 3648 printf("%s: Failed to malloc memory\n", __func__); 3649 return buf; 3650 } 3651 3652 ret = read(fd, buf, pkg_size); 3653 if (ret < 0) { 3654 close(fd); 3655 printf("%s: File read operation failed\n", __func__); 3656 close_file(buf); 3657 return NULL; 3658 } 3659 3660 if (size) 3661 *size = pkg_size; 3662 3663 close(fd); 3664 3665 return buf; 3666 } 3667 3668 int 3669 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3670 { 3671 FILE *fh = fopen(file_path, "wb"); 3672 3673 if (fh == NULL) { 3674 printf("%s: Failed to open %s\n", __func__, file_path); 3675 return -1; 3676 } 3677 3678 if (fwrite(buf, 1, size, fh) != size) { 3679 fclose(fh); 3680 printf("%s: File write operation failed\n", __func__); 3681 return -1; 3682 } 3683 3684 fclose(fh); 3685 3686 return 0; 3687 } 3688 3689 int 3690 close_file(uint8_t *buf) 3691 { 3692 if (buf) { 3693 free((void *)buf); 3694 return 0; 3695 } 3696 3697 return -1; 3698 } 3699 3700 void 3701 
port_queue_region_info_display(portid_t port_id, void *buf)
3702 {
3703 #ifdef RTE_LIBRTE_I40E_PMD
3704 uint16_t i, j;
3705 struct rte_pmd_i40e_queue_regions *info =
3706 (struct rte_pmd_i40e_queue_regions *)buf;
3707 static const char *queue_region_info_stats_border = "-------";
3708
3709 if (!info->queue_region_number)
3710 printf("no queue region has been set before");
3711
3712 printf("\n %s All queue region info for port=%2d %s",
3713 queue_region_info_stats_border, port_id,
3714 queue_region_info_stats_border);
3715 printf("\n queue_region_number: %-14u\n",
3716 info->queue_region_number);
3717
3718 for (i = 0; i < info->queue_region_number; i++) {
3719 printf("\n region_id: %-14u queue_number: %-14u "
3720 "queue_start_index: %-14u\n",
3721 info->region[i].region_id,
3722 info->region[i].queue_num,
3723 info->region[i].queue_start_index);
3724
3725 printf(" user_priority_num is %-14u :",
3726 info->region[i].user_priority_num);
3727 for (j = 0; j < info->region[i].user_priority_num; j++)
3728 printf(" %-14u ", info->region[i].user_priority[j]);
3729
3730 printf("\n flowtype_num is %-14u :",
3731 info->region[i].flowtype_num);
3732 for (j = 0; j < info->region[i].flowtype_num; j++)
3733 printf(" %-14u ", info->region[i].hw_flowtype[j]);
3734 }
3735 #else
3736 RTE_SET_USED(port_id);
3737 RTE_SET_USED(buf);
3738 #endif
3739
3740 printf("\n\n");
3741 }
3742