1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <cmdline_parse_etheraddr.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 const struct rss_type_info rss_type_table[] = { 77 { "ipv4", ETH_RSS_IPV4 }, 78 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 79 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 80 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 81 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 82 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 83 { "ipv6", ETH_RSS_IPV6 }, 84 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 85 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 86 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 87 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 88 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 89 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 90 { "ipv6-ex", ETH_RSS_IPV6_EX }, 91 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 92 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 93 { "port", ETH_RSS_PORT }, 94 { "vxlan", ETH_RSS_VXLAN }, 95 { "geneve", ETH_RSS_GENEVE }, 96 { "nvgre", ETH_RSS_NVGRE }, 97 { "ip", ETH_RSS_IP }, 98 { "udp", ETH_RSS_UDP }, 99 { "tcp", ETH_RSS_TCP }, 100 { "sctp", ETH_RSS_SCTP }, 101 { "tunnel", ETH_RSS_TUNNEL }, 102 { NULL, 0 }, 103 }; 104 105 static void 106 print_ethaddr(const char *name, struct ether_addr *eth_addr) 107 { 108 char buf[ETHER_ADDR_FMT_SIZE]; 109 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 110 printf("%s%s", name, buf); 111 } 112 113 void 114 nic_stats_display(portid_t port_id) 115 { 116 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 117 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 118 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 119 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 120 uint64_t mpps_rx, mpps_tx; 121 struct rte_eth_stats stats; 122 struct rte_port *port = &ports[port_id]; 123 uint8_t i; 124 portid_t pid; 125 126 static const char *nic_stats_border = "########################"; 127 128 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 129 printf("Valid port range is [0"); 130 RTE_ETH_FOREACH_DEV(pid) 131 printf(", %d", pid); 132 printf("]\n"); 133 return; 134 } 135 rte_eth_stats_get(port_id, &stats); 136 printf("\n %s NIC statistics for port %-2d %s\n", 137 nic_stats_border, port_id, nic_stats_border); 138 139 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 140 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 141 "%-"PRIu64"\n", 142 stats.ipackets, stats.imissed, stats.ibytes); 143 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 144 printf(" RX-nombuf: %-10"PRIu64"\n", 145 stats.rx_nombuf); 146 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 147 "%-"PRIu64"\n", 148 stats.opackets, stats.oerrors, stats.obytes); 149 } 150 else { 151 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 152 " RX-bytes: %10"PRIu64"\n", 153 stats.ipackets, stats.ierrors, stats.ibytes); 154 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 155 printf(" RX-nombuf: %10"PRIu64"\n", 156 stats.rx_nombuf); 157 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 158 " TX-bytes: %10"PRIu64"\n", 159 stats.opackets, stats.oerrors, stats.obytes); 160 } 161 162 if (port->rx_queue_stats_mapping_enabled) { 163 printf("\n"); 164 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 165 printf(" Stats reg %2d RX-packets: %10"PRIu64 166 " RX-errors: %10"PRIu64 167 " RX-bytes: %10"PRIu64"\n", 168 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 169 } 170 } 171 if (port->tx_queue_stats_mapping_enabled) { 172 printf("\n"); 173 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 174 printf(" Stats reg %2d TX-packets: %10"PRIu64 175 " TX-bytes: %10"PRIu64"\n", 176 i, stats.q_opackets[i], stats.q_obytes[i]); 177 } 178 } 179 180 diff_cycles = prev_cycles[port_id]; 181 prev_cycles[port_id] = rte_rdtsc(); 182 if (diff_cycles > 0) 183 diff_cycles = prev_cycles[port_id] - diff_cycles; 184 185 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 186 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 187 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 188 (stats.opackets - prev_pkts_tx[port_id]) : 0; 189 prev_pkts_rx[port_id] = stats.ipackets; 190 prev_pkts_tx[port_id] = stats.opackets; 191 mpps_rx = diff_cycles > 0 ? 192 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 193 mpps_tx = diff_cycles > 0 ? 194 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 195 printf("\n Throughput (since last show)\n"); 196 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 197 mpps_rx, mpps_tx); 198 199 printf(" %s############################%s\n", 200 nic_stats_border, nic_stats_border); 201 } 202 203 void 204 nic_stats_clear(portid_t port_id) 205 { 206 portid_t pid; 207 208 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 209 printf("Valid port range is [0"); 210 RTE_ETH_FOREACH_DEV(pid) 211 printf(", %d", pid); 212 printf("]\n"); 213 return; 214 } 215 rte_eth_stats_reset(port_id); 216 printf("\n NIC statistics for port %d cleared\n", port_id); 217 } 218 219 void 220 nic_xstats_display(portid_t port_id) 221 { 222 struct rte_eth_xstat *xstats; 223 int cnt_xstats, idx_xstat; 224 struct rte_eth_xstat_name *xstats_names; 225 226 printf("###### NIC extended statistics for port %-2d\n", port_id); 227 if (!rte_eth_dev_is_valid_port(port_id)) { 228 printf("Error: Invalid port number %i\n", port_id); 229 return; 230 } 231 232 /* Get count */ 233 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 234 if (cnt_xstats < 0) { 235 printf("Error: Cannot get count of xstats\n"); 236 return; 237 } 238 239 /* Get id-name lookup table */ 240 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 241 if (xstats_names == NULL) { 242 printf("Cannot allocate memory for xstats lookup\n"); 243 return; 244 } 245 if (cnt_xstats != rte_eth_xstats_get_names( 246 port_id, xstats_names, cnt_xstats)) { 247 printf("Error: Cannot get xstats lookup\n"); 248 free(xstats_names); 249 return; 250 } 251 252 /* Get stats themselves */ 253 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 254 if (xstats == NULL) { 255 printf("Cannot allocate memory for xstats\n"); 256 free(xstats_names); 257 return; 258 } 259 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 260 printf("Error: Unable to get xstats\n"); 261 free(xstats_names); 262 free(xstats); 263 return; 264 } 265 266 /* Display xstats */ 267 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 268 if (xstats_hide_zero && !xstats[idx_xstat].value) 269 continue; 270 printf("%s: %"PRIu64"\n", 271 xstats_names[idx_xstat].name, 272 xstats[idx_xstat].value); 273 } 274 free(xstats_names); 275 free(xstats); 276 } 277 278 void 279 nic_xstats_clear(portid_t port_id) 280 { 281 rte_eth_xstats_reset(port_id); 282 } 283 284 void 285 nic_stats_mapping_display(portid_t port_id) 286 { 287 struct rte_port *port = &ports[port_id]; 288 uint16_t i; 289 portid_t pid; 290 291 static const char *nic_stats_mapping_border = "########################"; 292 293 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 294 printf("Valid port range is [0"); 295 RTE_ETH_FOREACH_DEV(pid) 296 printf(", %d", pid); 297 printf("]\n"); 298 return; 299 } 300 301 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 302 printf("Port id %d - either does not support queue statistic mapping or" 303 " no queue statistic mapping set\n", port_id); 304 return; 305 } 306 307 printf("\n %s NIC statistics mapping for port %-2d %s\n", 308 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 309 310 if (port->rx_queue_stats_mapping_enabled) { 311 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 312 if (rx_queue_stats_mappings[i].port_id == port_id) { 313 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 314 rx_queue_stats_mappings[i].queue_id, 315 rx_queue_stats_mappings[i].stats_counter_id); 316 } 317 } 318 printf("\n"); 319 } 320 321 322 if (port->tx_queue_stats_mapping_enabled) { 323 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 324 if (tx_queue_stats_mappings[i].port_id == port_id) { 325 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 326 tx_queue_stats_mappings[i].queue_id, 327 tx_queue_stats_mappings[i].stats_counter_id); 328 } 329 } 330 } 331 332 printf(" %s####################################%s\n", 333 nic_stats_mapping_border, nic_stats_mapping_border); 334 } 335 336 void 337 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 338 { 339 struct rte_eth_rxq_info qinfo; 340 int32_t rc; 341 static const char *info_border = "*********************"; 342 343 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 344 if (rc != 0) { 345 printf("Failed to retrieve information for port: %u, " 346 "RX queue: %hu\nerror desc: %s(%d)\n", 347 port_id, queue_id, strerror(-rc), rc); 348 return; 349 } 350 351 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 352 info_border, port_id, queue_id, info_border); 353 354 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 355 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 356 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 357 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 358 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 359 printf("\nRX drop packets: %s", 360 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 361 printf("\nRX deferred start: %s", 362 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 363 printf("\nRX scattered packets: %s", 364 (qinfo.scattered_rx != 0) ? "on" : "off"); 365 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 366 printf("\n"); 367 } 368 369 void 370 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 371 { 372 struct rte_eth_txq_info qinfo; 373 int32_t rc; 374 static const char *info_border = "*********************"; 375 376 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 377 if (rc != 0) { 378 printf("Failed to retrieve information for port: %u, " 379 "TX queue: %hu\nerror desc: %s(%d)\n", 380 port_id, queue_id, strerror(-rc), rc); 381 return; 382 } 383 384 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 385 info_border, port_id, queue_id, info_border); 386 387 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 388 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 389 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 390 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 391 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 392 printf("\nTX deferred start: %s", 393 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 394 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 395 printf("\n"); 396 } 397 398 void 399 port_infos_display(portid_t port_id) 400 { 401 struct rte_port *port; 402 struct ether_addr mac_addr; 403 struct rte_eth_link link; 404 struct rte_eth_dev_info dev_info; 405 int vlan_offload; 406 struct rte_mempool * mp; 407 static const char *info_border = "*********************"; 408 portid_t pid; 409 uint16_t mtu; 410 char name[RTE_ETH_NAME_MAX_LEN]; 411 412 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 413 printf("Valid port range is [0"); 414 RTE_ETH_FOREACH_DEV(pid) 415 printf(", %d", pid); 416 printf("]\n"); 417 return; 418 } 419 port = &ports[port_id]; 420 rte_eth_link_get_nowait(port_id, &link); 421 memset(&dev_info, 0, sizeof(dev_info)); 422 rte_eth_dev_info_get(port_id, &dev_info); 423 printf("\n%s Infos for port %-2d %s\n", 424 info_border, port_id, info_border); 425 rte_eth_macaddr_get(port_id, &mac_addr); 426 print_ethaddr("MAC address: ", &mac_addr); 427 rte_eth_dev_get_name_by_port(port_id, name); 428 printf("\nDevice name: %s", name); 429 printf("\nDriver name: %s", dev_info.driver_name); 430 printf("\nConnect to socket: %u", port->socket_id); 431 432 if (port_numa[port_id] != NUMA_NO_CONFIG) { 433 mp = mbuf_pool_find(port_numa[port_id]); 434 if (mp) 435 printf("\nmemory allocation on the socket: %d", 436 port_numa[port_id]); 437 } else 438 printf("\nmemory allocation on the socket: %u",port->socket_id); 439 440 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 441 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 442 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 443 ("full-duplex") : ("half-duplex")); 444 445 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 446 printf("MTU: %u\n", mtu); 447 448 printf("Promiscuous mode: %s\n", 449 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 450 printf("Allmulticast mode: %s\n", 451 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 452 printf("Maximum number of MAC addresses: %u\n", 453 (unsigned int)(port->dev_info.max_mac_addrs)); 454 printf("Maximum number of MAC addresses of hash filtering: %u\n", 455 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 456 457 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 458 if (vlan_offload >= 0){ 459 printf("VLAN offload: \n"); 460 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 461 printf(" strip on \n"); 462 else 463 printf(" strip off \n"); 464 465 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 466 printf(" filter on \n"); 467 else 468 printf(" filter off \n"); 469 470 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 471 printf(" qinq(extend) on \n"); 472 else 473 printf(" qinq(extend) off \n"); 474 } 475 476 if (dev_info.hash_key_size > 0) 477 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 478 if (dev_info.reta_size > 0) 479 printf("Redirection table size: %u\n", dev_info.reta_size); 480 if (!dev_info.flow_type_rss_offloads) 481 printf("No flow type is supported.\n"); 482 else { 483 uint16_t i; 484 char *p; 485 486 printf("Supported flow types:\n"); 487 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 488 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 489 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 490 continue; 491 p = flowtype_to_str(i); 492 if (p) 493 printf(" %s\n", p); 494 else 495 printf(" user defined %d\n", i); 496 } 497 } 498 499 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 500 printf("Maximum configurable length of RX packet: %u\n", 501 dev_info.max_rx_pktlen); 502 if (dev_info.max_vfs) 503 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 504 if (dev_info.max_vmdq_pools) 505 printf("Maximum number of VMDq pools: %u\n", 506 dev_info.max_vmdq_pools); 507 508 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 509 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 510 printf("Max possible number of RXDs per queue: %hu\n", 511 dev_info.rx_desc_lim.nb_max); 512 printf("Min possible number of RXDs per queue: %hu\n", 513 dev_info.rx_desc_lim.nb_min); 514 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 515 516 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 517 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 518 printf("Max possible number of TXDs per queue: %hu\n", 519 dev_info.tx_desc_lim.nb_max); 520 printf("Min possible number of TXDs per queue: %hu\n", 521 dev_info.tx_desc_lim.nb_min); 522 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 523 524 /* Show switch info only if valid switch domain and port id is set */ 525 if (dev_info.switch_info.domain_id != 526 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 527 if (dev_info.switch_info.name) 528 printf("Switch name: %s\n", dev_info.switch_info.name); 529 530 printf("Switch domain Id: %u\n", 531 dev_info.switch_info.domain_id); 532 printf("Switch Port Id: %u\n", 533 dev_info.switch_info.port_id); 534 } 535 } 536 537 void 538 port_offload_cap_display(portid_t port_id) 539 { 540 struct rte_eth_dev_info dev_info; 541 static const char *info_border = "************"; 542 543 if (port_id_is_invalid(port_id, ENABLED_WARN)) 544 return; 545 546 rte_eth_dev_info_get(port_id, &dev_info); 547 548 printf("\n%s Port %d supported offload features: %s\n", 549 info_border, port_id, info_border); 550 551 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 552 printf("VLAN stripped: "); 553 if (ports[port_id].dev_conf.rxmode.offloads & 554 DEV_RX_OFFLOAD_VLAN_STRIP) 555 printf("on\n"); 556 else 557 printf("off\n"); 558 } 559 560 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 561 printf("Double VLANs stripped: "); 562 if (ports[port_id].dev_conf.rxmode.offloads & 563 DEV_RX_OFFLOAD_VLAN_EXTEND) 564 printf("on\n"); 565 else 566 printf("off\n"); 567 } 568 569 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 570 printf("RX IPv4 checksum: "); 571 if (ports[port_id].dev_conf.rxmode.offloads & 572 DEV_RX_OFFLOAD_IPV4_CKSUM) 573 printf("on\n"); 574 else 575 printf("off\n"); 576 } 577 578 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 579 printf("RX UDP checksum: "); 580 if (ports[port_id].dev_conf.rxmode.offloads & 581 DEV_RX_OFFLOAD_UDP_CKSUM) 582 printf("on\n"); 583 else 584 printf("off\n"); 585 } 586 587 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 588 printf("RX TCP checksum: "); 589 if (ports[port_id].dev_conf.rxmode.offloads & 590 DEV_RX_OFFLOAD_TCP_CKSUM) 591 printf("on\n"); 592 else 593 printf("off\n"); 594 } 595 596 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 597 printf("RX Outer IPv4 checksum: "); 598 if (ports[port_id].dev_conf.rxmode.offloads & 599 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 600 printf("on\n"); 601 else 602 printf("off\n"); 603 } 604 605 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 606 printf("Large receive offload: "); 607 if (ports[port_id].dev_conf.rxmode.offloads & 608 DEV_RX_OFFLOAD_TCP_LRO) 609 printf("on\n"); 610 else 611 printf("off\n"); 612 } 613 614 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 615 printf("VLAN insert: "); 616 if (ports[port_id].dev_conf.txmode.offloads & 617 DEV_TX_OFFLOAD_VLAN_INSERT) 618 printf("on\n"); 619 else 620 printf("off\n"); 621 } 622 623 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 624 printf("HW timestamp: "); 625 if (ports[port_id].dev_conf.rxmode.offloads & 626 DEV_RX_OFFLOAD_TIMESTAMP) 627 printf("on\n"); 628 else 629 printf("off\n"); 630 } 631 632 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 633 printf("Double VLANs insert: "); 634 if (ports[port_id].dev_conf.txmode.offloads & 635 DEV_TX_OFFLOAD_QINQ_INSERT) 636 printf("on\n"); 637 else 638 printf("off\n"); 639 } 640 641 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 642 printf("TX IPv4 checksum: "); 643 if (ports[port_id].dev_conf.txmode.offloads & 644 DEV_TX_OFFLOAD_IPV4_CKSUM) 645 printf("on\n"); 646 else 647 printf("off\n"); 648 } 649 650 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 651 printf("TX UDP checksum: "); 652 if (ports[port_id].dev_conf.txmode.offloads & 653 DEV_TX_OFFLOAD_UDP_CKSUM) 654 printf("on\n"); 655 else 656 printf("off\n"); 657 } 658 659 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 660 printf("TX TCP checksum: "); 661 if (ports[port_id].dev_conf.txmode.offloads & 662 DEV_TX_OFFLOAD_TCP_CKSUM) 663 printf("on\n"); 664 else 665 printf("off\n"); 666 } 667 668 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 669 printf("TX SCTP checksum: "); 670 if (ports[port_id].dev_conf.txmode.offloads & 671 DEV_TX_OFFLOAD_SCTP_CKSUM) 672 printf("on\n"); 673 else 674 printf("off\n"); 675 } 676 677 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 678 printf("TX Outer IPv4 checksum: "); 679 if (ports[port_id].dev_conf.txmode.offloads & 680 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 681 printf("on\n"); 682 else 683 printf("off\n"); 684 } 685 686 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 687 printf("TX TCP segmentation: "); 688 if (ports[port_id].dev_conf.txmode.offloads & 689 DEV_TX_OFFLOAD_TCP_TSO) 690 printf("on\n"); 691 else 692 printf("off\n"); 693 } 694 695 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 696 printf("TX UDP segmentation: "); 697 if (ports[port_id].dev_conf.txmode.offloads & 698 DEV_TX_OFFLOAD_UDP_TSO) 699 printf("on\n"); 700 else 701 printf("off\n"); 702 } 703 704 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 705 printf("TSO for VXLAN tunnel packet: "); 706 if (ports[port_id].dev_conf.txmode.offloads & 707 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 708 printf("on\n"); 709 else 710 printf("off\n"); 711 } 712 713 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 714 printf("TSO for GRE tunnel packet: "); 715 if (ports[port_id].dev_conf.txmode.offloads & 716 DEV_TX_OFFLOAD_GRE_TNL_TSO) 717 printf("on\n"); 718 else 719 printf("off\n"); 720 } 721 722 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 723 printf("TSO for IPIP tunnel packet: "); 724 if (ports[port_id].dev_conf.txmode.offloads & 725 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 726 printf("on\n"); 727 else 728 printf("off\n"); 729 } 730 731 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 732 printf("TSO for GENEVE tunnel packet: "); 733 if (ports[port_id].dev_conf.txmode.offloads & 734 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 735 printf("on\n"); 736 else 737 printf("off\n"); 738 } 739 740 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 741 printf("IP tunnel TSO: "); 742 if (ports[port_id].dev_conf.txmode.offloads & 743 DEV_TX_OFFLOAD_IP_TNL_TSO) 744 printf("on\n"); 745 else 746 printf("off\n"); 747 } 748 749 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 750 printf("UDP tunnel TSO: "); 751 if (ports[port_id].dev_conf.txmode.offloads & 752 DEV_TX_OFFLOAD_UDP_TNL_TSO) 753 printf("on\n"); 754 else 755 printf("off\n"); 756 } 757 } 758 759 int 760 port_id_is_invalid(portid_t port_id, enum print_warning warning) 761 { 762 uint16_t pid; 763 764 if (port_id == (portid_t)RTE_PORT_ALL) 765 return 0; 766 767 RTE_ETH_FOREACH_DEV(pid) 768 if (port_id == pid) 769 return 0; 770 771 if (warning == ENABLED_WARN) 772 printf("Invalid port %d\n", port_id); 773 774 return 1; 775 } 776 777 static int 778 vlan_id_is_invalid(uint16_t vlan_id) 779 { 780 if (vlan_id < 4096) 781 return 0; 782 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 783 return 1; 784 } 785 786 static int 787 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 788 { 789 const struct rte_pci_device *pci_dev; 790 const struct rte_bus *bus; 791 uint64_t pci_len; 792 793 if (reg_off & 0x3) { 794 printf("Port register offset 0x%X not aligned on a 4-byte " 795 "boundary\n", 796 (unsigned)reg_off); 797 return 1; 798 } 799 800 if (!ports[port_id].dev_info.device) { 801 printf("Invalid device\n"); 802 return 0; 803 } 804 805 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 806 if (bus && !strcmp(bus->name, "pci")) { 807 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 808 } else { 809 printf("Not a PCI device\n"); 810 return 1; 811 } 812 813 pci_len = pci_dev->mem_resource[0].len; 814 if (reg_off >= pci_len) { 815 printf("Port %d: register offset %u (0x%X) out of port PCI " 816 "resource (length=%"PRIu64")\n", 817 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 818 return 1; 819 } 820 return 0; 821 } 822 823 static int 824 reg_bit_pos_is_invalid(uint8_t bit_pos) 825 { 826 if (bit_pos <= 31) 827 return 0; 828 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 829 return 1; 830 } 831 832 #define display_port_and_reg_off(port_id, reg_off) \ 833 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 834 835 static inline void 836 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 837 { 838 display_port_and_reg_off(port_id, (unsigned)reg_off); 839 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 840 } 841 842 void 843 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 844 { 845 uint32_t reg_v; 846 847 848 if (port_id_is_invalid(port_id, ENABLED_WARN)) 849 return; 850 if (port_reg_off_is_invalid(port_id, reg_off)) 851 return; 852 if (reg_bit_pos_is_invalid(bit_x)) 853 return; 854 reg_v = port_id_pci_reg_read(port_id, reg_off); 855 display_port_and_reg_off(port_id, (unsigned)reg_off); 856 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 857 } 858 859 void 860 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 861 uint8_t bit1_pos, uint8_t bit2_pos) 862 { 863 uint32_t reg_v; 864 uint8_t l_bit; 865 uint8_t h_bit; 866 867 if (port_id_is_invalid(port_id, ENABLED_WARN)) 868 return; 869 if (port_reg_off_is_invalid(port_id, reg_off)) 870 return; 871 if (reg_bit_pos_is_invalid(bit1_pos)) 872 return; 873 if (reg_bit_pos_is_invalid(bit2_pos)) 874 return; 875 if (bit1_pos > bit2_pos) 876 l_bit = bit2_pos, h_bit = bit1_pos; 877 else 878 l_bit = bit1_pos, h_bit = bit2_pos; 879 880 reg_v = port_id_pci_reg_read(port_id, reg_off); 881 reg_v >>= l_bit; 882 if (h_bit < 31) 883 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 884 display_port_and_reg_off(port_id, (unsigned)reg_off); 885 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 886 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 887 } 888 889 void 890 port_reg_display(portid_t port_id, uint32_t reg_off) 891 { 892 uint32_t reg_v; 893 894 if (port_id_is_invalid(port_id, ENABLED_WARN)) 895 return; 896 if (port_reg_off_is_invalid(port_id, reg_off)) 897 return; 898 reg_v = port_id_pci_reg_read(port_id, reg_off); 899 display_port_reg_value(port_id, reg_off, reg_v); 900 } 901 902 void 903 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 904 uint8_t bit_v) 905 { 906 uint32_t reg_v; 907 908 if (port_id_is_invalid(port_id, ENABLED_WARN)) 909 return; 910 if (port_reg_off_is_invalid(port_id, reg_off)) 911 return; 912 if (reg_bit_pos_is_invalid(bit_pos)) 913 return; 914 if (bit_v > 1) { 915 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 916 return; 917 } 918 reg_v = port_id_pci_reg_read(port_id, reg_off); 919 if (bit_v == 0) 920 reg_v &= ~(1 << bit_pos); 921 else 922 reg_v |= (1 << bit_pos); 923 port_id_pci_reg_write(port_id, reg_off, reg_v); 924 display_port_reg_value(port_id, reg_off, reg_v); 925 } 926 927 void 928 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 929 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 930 { 931 uint32_t max_v; 932 uint32_t reg_v; 933 uint8_t l_bit; 934 uint8_t h_bit; 935 936 if (port_id_is_invalid(port_id, ENABLED_WARN)) 937 return; 938 if (port_reg_off_is_invalid(port_id, reg_off)) 939 return; 940 if (reg_bit_pos_is_invalid(bit1_pos)) 941 return; 942 if (reg_bit_pos_is_invalid(bit2_pos)) 943 return; 944 if (bit1_pos > bit2_pos) 945 l_bit = bit2_pos, h_bit = bit1_pos; 946 else 947 l_bit = bit1_pos, h_bit = bit2_pos; 948 949 if ((h_bit - l_bit) < 31) 950 max_v = (1 << (h_bit - l_bit + 1)) - 1; 951 else 952 max_v = 0xFFFFFFFF; 953 954 if (value > max_v) { 955 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 956 (unsigned)value, (unsigned)value, 957 (unsigned)max_v, (unsigned)max_v); 958 return; 959 } 960 reg_v = port_id_pci_reg_read(port_id, reg_off); 961 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 962 reg_v |= (value << l_bit); /* Set changed bits */ 963 port_id_pci_reg_write(port_id, reg_off, reg_v); 964 display_port_reg_value(port_id, reg_off, reg_v); 965 } 966 967 void 968 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 969 { 970 if (port_id_is_invalid(port_id, ENABLED_WARN)) 971 return; 972 if (port_reg_off_is_invalid(port_id, reg_off)) 973 return; 974 port_id_pci_reg_write(port_id, reg_off, reg_v); 975 display_port_reg_value(port_id, reg_off, reg_v); 976 } 977 978 void 979 port_mtu_set(portid_t port_id, uint16_t mtu) 980 { 981 int diag; 982 983 if (port_id_is_invalid(port_id, ENABLED_WARN)) 984 return; 985 diag = rte_eth_dev_set_mtu(port_id, mtu); 986 if (diag == 0) 987 return; 988 printf("Set MTU failed. diag=%d\n", diag); 989 } 990 991 /* Generic flow management functions. */ 992 993 /** Generate flow_item[] entry. */ 994 #define MK_FLOW_ITEM(t, s) \ 995 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 996 .name = # t, \ 997 .size = s, \ 998 } 999 1000 /** Information about known flow pattern items. */ 1001 static const struct { 1002 const char *name; 1003 size_t size; 1004 } flow_item[] = { 1005 MK_FLOW_ITEM(END, 0), 1006 MK_FLOW_ITEM(VOID, 0), 1007 MK_FLOW_ITEM(INVERT, 0), 1008 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 1009 MK_FLOW_ITEM(PF, 0), 1010 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 1011 MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)), 1012 MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)), 1013 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), 1014 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 1015 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 1016 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 1017 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 1018 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 1019 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 1020 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 1021 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 1022 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 1023 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), 1024 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), 1025 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 1026 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 1027 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)), 1028 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), 1029 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), 1030 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), 1031 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), 1032 MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)), 1033 MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)), 1034 MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)), 1035 MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)), 1036 MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)), 1037 MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)), 1038 MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)), 1039 MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH, 1040 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)), 1041 MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH, 1042 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)), 1043 }; 1044 1045 /** Pattern item specification types. */ 1046 enum item_spec_type { 1047 ITEM_SPEC, 1048 ITEM_LAST, 1049 ITEM_MASK, 1050 }; 1051 1052 /** Compute storage space needed by item specification and copy it. */ 1053 static size_t 1054 flow_item_spec_copy(void *buf, const struct rte_flow_item *item, 1055 enum item_spec_type type) 1056 { 1057 size_t size = 0; 1058 const void *item_spec = 1059 type == ITEM_SPEC ? item->spec : 1060 type == ITEM_LAST ? item->last : 1061 type == ITEM_MASK ? item->mask : 1062 NULL; 1063 1064 if (!item_spec) 1065 goto empty; 1066 switch (item->type) { 1067 union { 1068 const struct rte_flow_item_raw *raw; 1069 } src; 1070 union { 1071 struct rte_flow_item_raw *raw; 1072 } dst; 1073 size_t off; 1074 1075 case RTE_FLOW_ITEM_TYPE_RAW: 1076 src.raw = item_spec; 1077 dst.raw = buf; 1078 off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw), 1079 sizeof(*src.raw->pattern)); 1080 size = off + src.raw->length * sizeof(*src.raw->pattern); 1081 if (dst.raw) { 1082 memcpy(dst.raw, src.raw, sizeof(*src.raw)); 1083 dst.raw->pattern = memcpy((uint8_t *)dst.raw + off, 1084 src.raw->pattern, 1085 size - off); 1086 } 1087 break; 1088 default: 1089 size = flow_item[item->type].size; 1090 if (buf) 1091 memcpy(buf, item_spec, size); 1092 break; 1093 } 1094 empty: 1095 return RTE_ALIGN_CEIL(size, sizeof(double)); 1096 } 1097 1098 /** Generate flow_action[] entry. */ 1099 #define MK_FLOW_ACTION(t, s) \ 1100 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 1101 .name = # t, \ 1102 .size = s, \ 1103 } 1104 1105 /** Information about known flow actions. */ 1106 static const struct { 1107 const char *name; 1108 size_t size; 1109 } flow_action[] = { 1110 MK_FLOW_ACTION(END, 0), 1111 MK_FLOW_ACTION(VOID, 0), 1112 MK_FLOW_ACTION(PASSTHRU, 0), 1113 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 1114 MK_FLOW_ACTION(FLAG, 0), 1115 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 1116 MK_FLOW_ACTION(DROP, 0), 1117 MK_FLOW_ACTION(COUNT, 0), 1118 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), 1119 MK_FLOW_ACTION(PF, 0), 1120 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 1121 MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)), 1122 MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)), 1123 MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)), 1124 MK_FLOW_ACTION(OF_SET_MPLS_TTL, 1125 sizeof(struct rte_flow_action_of_set_mpls_ttl)), 1126 MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0), 1127 MK_FLOW_ACTION(OF_SET_NW_TTL, 1128 sizeof(struct rte_flow_action_of_set_nw_ttl)), 1129 MK_FLOW_ACTION(OF_DEC_NW_TTL, 0), 1130 MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0), 1131 MK_FLOW_ACTION(OF_COPY_TTL_IN, 0), 1132 MK_FLOW_ACTION(OF_POP_VLAN, 0), 1133 MK_FLOW_ACTION(OF_PUSH_VLAN, 1134 sizeof(struct rte_flow_action_of_push_vlan)), 1135 MK_FLOW_ACTION(OF_SET_VLAN_VID, 1136 sizeof(struct rte_flow_action_of_set_vlan_vid)), 1137 MK_FLOW_ACTION(OF_SET_VLAN_PCP, 1138 sizeof(struct rte_flow_action_of_set_vlan_pcp)), 1139 MK_FLOW_ACTION(OF_POP_MPLS, 1140 sizeof(struct rte_flow_action_of_pop_mpls)), 1141 MK_FLOW_ACTION(OF_PUSH_MPLS, 1142 sizeof(struct rte_flow_action_of_push_mpls)), 1143 }; 1144 1145 /** Compute storage space needed by action configuration and copy it. */ 1146 static size_t 1147 flow_action_conf_copy(void *buf, const struct rte_flow_action *action) 1148 { 1149 size_t size = 0; 1150 1151 if (!action->conf) 1152 goto empty; 1153 switch (action->type) { 1154 union { 1155 const struct rte_flow_action_rss *rss; 1156 } src; 1157 union { 1158 struct rte_flow_action_rss *rss; 1159 } dst; 1160 size_t off; 1161 1162 case RTE_FLOW_ACTION_TYPE_RSS: 1163 src.rss = action->conf; 1164 dst.rss = buf; 1165 off = 0; 1166 if (dst.rss) 1167 *dst.rss = (struct rte_flow_action_rss){ 1168 .func = src.rss->func, 1169 .level = src.rss->level, 1170 .types = src.rss->types, 1171 .key_len = src.rss->key_len, 1172 .queue_num = src.rss->queue_num, 1173 }; 1174 off += sizeof(*src.rss); 1175 if (src.rss->key_len) { 1176 off = RTE_ALIGN_CEIL(off, sizeof(double)); 1177 size = sizeof(*src.rss->key) * src.rss->key_len; 1178 if (dst.rss) 1179 dst.rss->key = memcpy 1180 ((void *)((uintptr_t)dst.rss + off), 1181 src.rss->key, size); 1182 off += size; 1183 } 1184 if (src.rss->queue_num) { 1185 off = RTE_ALIGN_CEIL(off, sizeof(double)); 1186 size = sizeof(*src.rss->queue) * src.rss->queue_num; 1187 if (dst.rss) 1188 dst.rss->queue = memcpy 1189 ((void *)((uintptr_t)dst.rss + off), 1190 src.rss->queue, size); 1191 off += size; 1192 } 1193 size = off; 1194 break; 1195 default: 1196 size = flow_action[action->type].size; 1197 if (buf) 1198 memcpy(buf, action->conf, size); 1199 break; 1200 } 1201 empty: 1202 return RTE_ALIGN_CEIL(size, sizeof(double)); 1203 } 1204 1205 /** Generate a port_flow entry from attributes/pattern/actions. */ 1206 static struct port_flow * 1207 port_flow_new(const struct rte_flow_attr *attr, 1208 const struct rte_flow_item *pattern, 1209 const struct rte_flow_action *actions) 1210 { 1211 const struct rte_flow_item *item; 1212 const struct rte_flow_action *action; 1213 struct port_flow *pf = NULL; 1214 size_t tmp; 1215 size_t off1 = 0; 1216 size_t off2 = 0; 1217 int err = ENOTSUP; 1218 1219 store: 1220 item = pattern; 1221 if (pf) 1222 pf->pattern = (void *)&pf->data[off1]; 1223 do { 1224 struct rte_flow_item *dst = NULL; 1225 1226 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 1227 !flow_item[item->type].name) 1228 goto notsup; 1229 if (pf) 1230 dst = memcpy(pf->data + off1, item, sizeof(*item)); 1231 off1 += sizeof(*item); 1232 if (item->spec) { 1233 if (pf) 1234 dst->spec = pf->data + off2; 1235 off2 += flow_item_spec_copy 1236 (pf ? pf->data + off2 : NULL, item, ITEM_SPEC); 1237 } 1238 if (item->last) { 1239 if (pf) 1240 dst->last = pf->data + off2; 1241 off2 += flow_item_spec_copy 1242 (pf ? pf->data + off2 : NULL, item, ITEM_LAST); 1243 } 1244 if (item->mask) { 1245 if (pf) 1246 dst->mask = pf->data + off2; 1247 off2 += flow_item_spec_copy 1248 (pf ? pf->data + off2 : NULL, item, ITEM_MASK); 1249 } 1250 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1251 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 1252 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1253 action = actions; 1254 if (pf) 1255 pf->actions = (void *)&pf->data[off1]; 1256 do { 1257 struct rte_flow_action *dst = NULL; 1258 1259 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1260 !flow_action[action->type].name) 1261 goto notsup; 1262 if (pf) 1263 dst = memcpy(pf->data + off1, action, sizeof(*action)); 1264 off1 += sizeof(*action); 1265 if (action->conf) { 1266 if (pf) 1267 dst->conf = pf->data + off2; 1268 off2 += flow_action_conf_copy 1269 (pf ? pf->data + off2 : NULL, action); 1270 } 1271 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1272 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 1273 if (pf != NULL) 1274 return pf; 1275 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1276 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 1277 pf = calloc(1, tmp + off1 + off2); 1278 if (pf == NULL) 1279 err = errno; 1280 else { 1281 *pf = (const struct port_flow){ 1282 .size = tmp + off1 + off2, 1283 .attr = *attr, 1284 }; 1285 tmp -= offsetof(struct port_flow, data); 1286 off2 = tmp + off1; 1287 off1 = tmp; 1288 goto store; 1289 } 1290 notsup: 1291 rte_errno = err; 1292 return NULL; 1293 } 1294 1295 /** Print a message out of a flow error. */ 1296 static int 1297 port_flow_complain(struct rte_flow_error *error) 1298 { 1299 static const char *const errstrlist[] = { 1300 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1301 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1302 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1303 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1304 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1305 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1306 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1307 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1308 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1309 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1310 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1311 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1312 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1313 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1314 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1315 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1316 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1317 }; 1318 const char *errstr; 1319 char buf[32]; 1320 int err = rte_errno; 1321 1322 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1323 !errstrlist[error->type]) 1324 errstr = "unknown type"; 1325 else 1326 errstr = errstrlist[error->type]; 1327 printf("Caught error type %d (%s): %s%s\n", 1328 error->type, errstr, 1329 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1330 error->cause), buf) : "", 1331 error->message ? error->message : "(no stated reason)"); 1332 return -err; 1333 } 1334 1335 /** Validate flow rule. */ 1336 int 1337 port_flow_validate(portid_t port_id, 1338 const struct rte_flow_attr *attr, 1339 const struct rte_flow_item *pattern, 1340 const struct rte_flow_action *actions) 1341 { 1342 struct rte_flow_error error; 1343 1344 /* Poisoning to make sure PMDs update it in case of error. */ 1345 memset(&error, 0x11, sizeof(error)); 1346 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1347 return port_flow_complain(&error); 1348 printf("Flow rule validated\n"); 1349 return 0; 1350 } 1351 1352 /** Create flow rule. */ 1353 int 1354 port_flow_create(portid_t port_id, 1355 const struct rte_flow_attr *attr, 1356 const struct rte_flow_item *pattern, 1357 const struct rte_flow_action *actions) 1358 { 1359 struct rte_flow *flow; 1360 struct rte_port *port; 1361 struct port_flow *pf; 1362 uint32_t id; 1363 struct rte_flow_error error; 1364 1365 /* Poisoning to make sure PMDs update it in case of error. */ 1366 memset(&error, 0x22, sizeof(error)); 1367 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1368 if (!flow) 1369 return port_flow_complain(&error); 1370 port = &ports[port_id]; 1371 if (port->flow_list) { 1372 if (port->flow_list->id == UINT32_MAX) { 1373 printf("Highest rule ID is already assigned, delete" 1374 " it first"); 1375 rte_flow_destroy(port_id, flow, NULL); 1376 return -ENOMEM; 1377 } 1378 id = port->flow_list->id + 1; 1379 } else 1380 id = 0; 1381 pf = port_flow_new(attr, pattern, actions); 1382 if (!pf) { 1383 int err = rte_errno; 1384 1385 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1386 rte_flow_destroy(port_id, flow, NULL); 1387 return -err; 1388 } 1389 pf->next = port->flow_list; 1390 pf->id = id; 1391 pf->flow = flow; 1392 port->flow_list = pf; 1393 printf("Flow rule #%u created\n", pf->id); 1394 return 0; 1395 } 1396 1397 /** Destroy a number of flow rules. */ 1398 int 1399 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1400 { 1401 struct rte_port *port; 1402 struct port_flow **tmp; 1403 uint32_t c = 0; 1404 int ret = 0; 1405 1406 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1407 port_id == (portid_t)RTE_PORT_ALL) 1408 return -EINVAL; 1409 port = &ports[port_id]; 1410 tmp = &port->flow_list; 1411 while (*tmp) { 1412 uint32_t i; 1413 1414 for (i = 0; i != n; ++i) { 1415 struct rte_flow_error error; 1416 struct port_flow *pf = *tmp; 1417 1418 if (rule[i] != pf->id) 1419 continue; 1420 /* 1421 * Poisoning to make sure PMDs update it in case 1422 * of error. 1423 */ 1424 memset(&error, 0x33, sizeof(error)); 1425 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1426 ret = port_flow_complain(&error); 1427 continue; 1428 } 1429 printf("Flow rule #%u destroyed\n", pf->id); 1430 *tmp = pf->next; 1431 free(pf); 1432 break; 1433 } 1434 if (i == n) 1435 tmp = &(*tmp)->next; 1436 ++c; 1437 } 1438 return ret; 1439 } 1440 1441 /** Remove all flow rules. */ 1442 int 1443 port_flow_flush(portid_t port_id) 1444 { 1445 struct rte_flow_error error; 1446 struct rte_port *port; 1447 int ret = 0; 1448 1449 /* Poisoning to make sure PMDs update it in case of error. */ 1450 memset(&error, 0x44, sizeof(error)); 1451 if (rte_flow_flush(port_id, &error)) { 1452 ret = port_flow_complain(&error); 1453 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1454 port_id == (portid_t)RTE_PORT_ALL) 1455 return ret; 1456 } 1457 port = &ports[port_id]; 1458 while (port->flow_list) { 1459 struct port_flow *pf = port->flow_list->next; 1460 1461 free(port->flow_list); 1462 port->flow_list = pf; 1463 } 1464 return ret; 1465 } 1466 1467 /** Query a flow rule. */ 1468 int 1469 port_flow_query(portid_t port_id, uint32_t rule, 1470 enum rte_flow_action_type action) 1471 { 1472 struct rte_flow_error error; 1473 struct rte_port *port; 1474 struct port_flow *pf; 1475 const char *name; 1476 union { 1477 struct rte_flow_query_count count; 1478 } query; 1479 1480 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1481 port_id == (portid_t)RTE_PORT_ALL) 1482 return -EINVAL; 1483 port = &ports[port_id]; 1484 for (pf = port->flow_list; pf; pf = pf->next) 1485 if (pf->id == rule) 1486 break; 1487 if (!pf) { 1488 printf("Flow rule #%u not found\n", rule); 1489 return -ENOENT; 1490 } 1491 if ((unsigned int)action >= RTE_DIM(flow_action) || 1492 !flow_action[action].name) 1493 name = "unknown"; 1494 else 1495 name = flow_action[action].name; 1496 switch (action) { 1497 case RTE_FLOW_ACTION_TYPE_COUNT: 1498 break; 1499 default: 1500 printf("Cannot query action type %d (%s)\n", action, name); 1501 return -ENOTSUP; 1502 } 1503 /* Poisoning to make sure PMDs update it in case of error. */ 1504 memset(&error, 0x55, sizeof(error)); 1505 memset(&query, 0, sizeof(query)); 1506 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1507 return port_flow_complain(&error); 1508 switch (action) { 1509 case RTE_FLOW_ACTION_TYPE_COUNT: 1510 printf("%s:\n" 1511 " hits_set: %u\n" 1512 " bytes_set: %u\n" 1513 " hits: %" PRIu64 "\n" 1514 " bytes: %" PRIu64 "\n", 1515 name, 1516 query.count.hits_set, 1517 query.count.bytes_set, 1518 query.count.hits, 1519 query.count.bytes); 1520 break; 1521 default: 1522 printf("Cannot display result for action type %d (%s)\n", 1523 action, name); 1524 break; 1525 } 1526 return 0; 1527 } 1528 1529 /** List flow rules. */ 1530 void 1531 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1532 { 1533 struct rte_port *port; 1534 struct port_flow *pf; 1535 struct port_flow *list = NULL; 1536 uint32_t i; 1537 1538 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1539 port_id == (portid_t)RTE_PORT_ALL) 1540 return; 1541 port = &ports[port_id]; 1542 if (!port->flow_list) 1543 return; 1544 /* Sort flows by group, priority and ID. */ 1545 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1546 struct port_flow **tmp; 1547 1548 if (n) { 1549 /* Filter out unwanted groups. */ 1550 for (i = 0; i != n; ++i) 1551 if (pf->attr.group == group[i]) 1552 break; 1553 if (i == n) 1554 continue; 1555 } 1556 tmp = &list; 1557 while (*tmp && 1558 (pf->attr.group > (*tmp)->attr.group || 1559 (pf->attr.group == (*tmp)->attr.group && 1560 pf->attr.priority > (*tmp)->attr.priority) || 1561 (pf->attr.group == (*tmp)->attr.group && 1562 pf->attr.priority == (*tmp)->attr.priority && 1563 pf->id > (*tmp)->id))) 1564 tmp = &(*tmp)->tmp; 1565 pf->tmp = *tmp; 1566 *tmp = pf; 1567 } 1568 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1569 for (pf = list; pf != NULL; pf = pf->tmp) { 1570 const struct rte_flow_item *item = pf->pattern; 1571 const struct rte_flow_action *action = pf->actions; 1572 1573 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1574 pf->id, 1575 pf->attr.group, 1576 pf->attr.priority, 1577 pf->attr.ingress ? 'i' : '-', 1578 pf->attr.egress ? 'e' : '-', 1579 pf->attr.transfer ? 't' : '-'); 1580 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1581 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1582 printf("%s ", flow_item[item->type].name); 1583 ++item; 1584 } 1585 printf("=>"); 1586 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1587 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1588 printf(" %s", flow_action[action->type].name); 1589 ++action; 1590 } 1591 printf("\n"); 1592 } 1593 } 1594 1595 /** Restrict ingress traffic to the defined flow rules. */ 1596 int 1597 port_flow_isolate(portid_t port_id, int set) 1598 { 1599 struct rte_flow_error error; 1600 1601 /* Poisoning to make sure PMDs update it in case of error. */ 1602 memset(&error, 0x66, sizeof(error)); 1603 if (rte_flow_isolate(port_id, set, &error)) 1604 return port_flow_complain(&error); 1605 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1606 port_id, 1607 set ? "now restricted" : "not restricted anymore"); 1608 return 0; 1609 } 1610 1611 /* 1612 * RX/TX ring descriptors display functions. 1613 */ 1614 int 1615 rx_queue_id_is_invalid(queueid_t rxq_id) 1616 { 1617 if (rxq_id < nb_rxq) 1618 return 0; 1619 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1620 return 1; 1621 } 1622 1623 int 1624 tx_queue_id_is_invalid(queueid_t txq_id) 1625 { 1626 if (txq_id < nb_txq) 1627 return 0; 1628 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1629 return 1; 1630 } 1631 1632 static int 1633 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1634 { 1635 if (rxdesc_id < nb_rxd) 1636 return 0; 1637 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1638 rxdesc_id, nb_rxd); 1639 return 1; 1640 } 1641 1642 static int 1643 tx_desc_id_is_invalid(uint16_t txdesc_id) 1644 { 1645 if (txdesc_id < nb_txd) 1646 return 0; 1647 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1648 txdesc_id, nb_txd); 1649 return 1; 1650 } 1651 1652 static const struct rte_memzone * 1653 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1654 { 1655 char mz_name[RTE_MEMZONE_NAMESIZE]; 1656 const struct rte_memzone *mz; 1657 1658 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1659 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1660 mz = rte_memzone_lookup(mz_name); 1661 if (mz == NULL) 1662 printf("%s ring memory zoneof (port %d, queue %d) not" 1663 "found (zone name = %s\n", 1664 ring_name, port_id, q_id, mz_name); 1665 return mz; 1666 } 1667 1668 union igb_ring_dword { 1669 uint64_t dword; 1670 struct { 1671 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1672 uint32_t lo; 1673 uint32_t hi; 1674 #else 1675 uint32_t hi; 1676 uint32_t lo; 1677 #endif 1678 } words; 1679 }; 1680 1681 struct igb_ring_desc_32_bytes { 1682 union igb_ring_dword lo_dword; 1683 union igb_ring_dword hi_dword; 1684 union igb_ring_dword resv1; 1685 union igb_ring_dword resv2; 1686 }; 1687 1688 struct igb_ring_desc_16_bytes { 1689 union igb_ring_dword lo_dword; 1690 union igb_ring_dword hi_dword; 1691 }; 1692 1693 static void 1694 ring_rxd_display_dword(union igb_ring_dword dword) 1695 { 1696 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1697 (unsigned)dword.words.hi); 1698 } 1699 1700 static void 1701 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1702 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1703 portid_t port_id, 1704 #else 1705 __rte_unused portid_t port_id, 1706 #endif 1707 uint16_t desc_id) 1708 { 1709 struct igb_ring_desc_16_bytes *ring = 1710 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1711 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1712 struct rte_eth_dev_info dev_info; 1713 1714 memset(&dev_info, 0, sizeof(dev_info)); 1715 rte_eth_dev_info_get(port_id, &dev_info); 1716 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1717 /* 32 bytes RX descriptor, i40e only */ 1718 struct igb_ring_desc_32_bytes *ring = 1719 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1720 ring[desc_id].lo_dword.dword = 1721 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1722 ring_rxd_display_dword(ring[desc_id].lo_dword); 1723 ring[desc_id].hi_dword.dword = 1724 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1725 ring_rxd_display_dword(ring[desc_id].hi_dword); 1726 ring[desc_id].resv1.dword = 1727 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1728 ring_rxd_display_dword(ring[desc_id].resv1); 1729 ring[desc_id].resv2.dword = 1730 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1731 ring_rxd_display_dword(ring[desc_id].resv2); 1732 1733 return; 1734 } 1735 #endif 1736 /* 16 bytes RX descriptor */ 1737 ring[desc_id].lo_dword.dword = 1738 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1739 ring_rxd_display_dword(ring[desc_id].lo_dword); 1740 ring[desc_id].hi_dword.dword = 1741 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1742 ring_rxd_display_dword(ring[desc_id].hi_dword); 1743 } 1744 1745 static void 1746 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1747 { 1748 struct igb_ring_desc_16_bytes *ring; 1749 struct igb_ring_desc_16_bytes txd; 1750 1751 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1752 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1753 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1754 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1755 (unsigned)txd.lo_dword.words.lo, 1756 (unsigned)txd.lo_dword.words.hi, 1757 (unsigned)txd.hi_dword.words.lo, 1758 (unsigned)txd.hi_dword.words.hi); 1759 } 1760 1761 void 1762 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1763 { 1764 const struct rte_memzone *rx_mz; 1765 1766 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1767 return; 1768 if (rx_queue_id_is_invalid(rxq_id)) 1769 return; 1770 if (rx_desc_id_is_invalid(rxd_id)) 1771 return; 1772 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1773 if (rx_mz == NULL) 1774 return; 1775 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1776 } 1777 1778 void 1779 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1780 { 1781 const struct rte_memzone *tx_mz; 1782 1783 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1784 return; 1785 if (tx_queue_id_is_invalid(txq_id)) 1786 return; 1787 if (tx_desc_id_is_invalid(txd_id)) 1788 return; 1789 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1790 if (tx_mz == NULL) 1791 return; 1792 ring_tx_descriptor_display(tx_mz, txd_id); 1793 } 1794 1795 void 1796 fwd_lcores_config_display(void) 1797 { 1798 lcoreid_t lc_id; 1799 1800 printf("List of forwarding lcores:"); 1801 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1802 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1803 printf("\n"); 1804 } 1805 void 1806 rxtx_config_display(void) 1807 { 1808 portid_t pid; 1809 queueid_t qid; 1810 1811 printf(" %s packet forwarding%s packets/burst=%d\n", 1812 cur_fwd_eng->fwd_mode_name, 1813 retry_enabled == 0 ? "" : " with retry", 1814 nb_pkt_per_burst); 1815 1816 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1817 printf(" packet len=%u - nb packet segments=%d\n", 1818 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1819 1820 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1821 nb_fwd_lcores, nb_fwd_ports); 1822 1823 RTE_ETH_FOREACH_DEV(pid) { 1824 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1825 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1826 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1827 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1828 1829 /* per port config */ 1830 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1831 (unsigned int)pid, nb_rxq, nb_txq); 1832 1833 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1834 ports[pid].dev_conf.rxmode.offloads, 1835 ports[pid].dev_conf.txmode.offloads); 1836 1837 /* per rx queue config only for first queue to be less verbose */ 1838 for (qid = 0; qid < 1; qid++) { 1839 printf(" RX queue: %d\n", qid); 1840 printf(" RX desc=%d - RX free threshold=%d\n", 1841 nb_rx_desc[qid], rx_conf[qid].rx_free_thresh); 1842 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1843 " wthresh=%d\n", 1844 rx_conf[qid].rx_thresh.pthresh, 1845 rx_conf[qid].rx_thresh.hthresh, 1846 rx_conf[qid].rx_thresh.wthresh); 1847 printf(" RX Offloads=0x%"PRIx64"\n", 1848 rx_conf[qid].offloads); 1849 } 1850 1851 /* per tx queue config only for first queue to be less verbose */ 1852 for (qid = 0; qid < 1; qid++) { 1853 printf(" TX queue: %d\n", qid); 1854 printf(" TX desc=%d - TX free threshold=%d\n", 1855 nb_tx_desc[qid], tx_conf[qid].tx_free_thresh); 1856 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1857 " wthresh=%d\n", 1858 tx_conf[qid].tx_thresh.pthresh, 1859 tx_conf[qid].tx_thresh.hthresh, 1860 tx_conf[qid].tx_thresh.wthresh); 1861 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1862 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1863 } 1864 } 1865 } 1866 1867 void 1868 port_rss_reta_info(portid_t port_id, 1869 struct rte_eth_rss_reta_entry64 *reta_conf, 1870 uint16_t nb_entries) 1871 { 1872 uint16_t i, idx, shift; 1873 int ret; 1874 1875 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1876 return; 1877 1878 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1879 if (ret != 0) { 1880 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1881 return; 1882 } 1883 1884 for (i = 0; i < nb_entries; i++) { 1885 idx = i / RTE_RETA_GROUP_SIZE; 1886 shift = i % RTE_RETA_GROUP_SIZE; 1887 if (!(reta_conf[idx].mask & (1ULL << shift))) 1888 continue; 1889 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1890 i, reta_conf[idx].reta[shift]); 1891 } 1892 } 1893 1894 /* 1895 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1896 * key of the port. 1897 */ 1898 void 1899 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1900 { 1901 struct rte_eth_rss_conf rss_conf; 1902 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1903 uint64_t rss_hf; 1904 uint8_t i; 1905 int diag; 1906 struct rte_eth_dev_info dev_info; 1907 uint8_t hash_key_size; 1908 1909 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1910 return; 1911 1912 memset(&dev_info, 0, sizeof(dev_info)); 1913 rte_eth_dev_info_get(port_id, &dev_info); 1914 if (dev_info.hash_key_size > 0 && 1915 dev_info.hash_key_size <= sizeof(rss_key)) 1916 hash_key_size = dev_info.hash_key_size; 1917 else { 1918 printf("dev_info did not provide a valid hash key size\n"); 1919 return; 1920 } 1921 1922 rss_conf.rss_hf = 0; 1923 for (i = 0; rss_type_table[i].str; i++) { 1924 if (!strcmp(rss_info, rss_type_table[i].str)) 1925 rss_conf.rss_hf = rss_type_table[i].rss_type; 1926 } 1927 1928 /* Get RSS hash key if asked to display it */ 1929 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1930 rss_conf.rss_key_len = hash_key_size; 1931 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1932 if (diag != 0) { 1933 switch (diag) { 1934 case -ENODEV: 1935 printf("port index %d invalid\n", port_id); 1936 break; 1937 case -ENOTSUP: 1938 printf("operation not supported by device\n"); 1939 break; 1940 default: 1941 printf("operation failed - diag=%d\n", diag); 1942 break; 1943 } 1944 return; 1945 } 1946 rss_hf = rss_conf.rss_hf; 1947 if (rss_hf == 0) { 1948 printf("RSS disabled\n"); 1949 return; 1950 } 1951 printf("RSS functions:\n "); 1952 for (i = 0; rss_type_table[i].str; i++) { 1953 if (rss_hf & rss_type_table[i].rss_type) 1954 printf("%s ", rss_type_table[i].str); 1955 } 1956 printf("\n"); 1957 if (!show_rss_key) 1958 return; 1959 printf("RSS key:\n"); 1960 for (i = 0; i < hash_key_size; i++) 1961 printf("%02X", rss_key[i]); 1962 printf("\n"); 1963 } 1964 1965 void 1966 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1967 uint hash_key_len) 1968 { 1969 struct rte_eth_rss_conf rss_conf; 1970 int diag; 1971 unsigned int i; 1972 1973 rss_conf.rss_key = NULL; 1974 rss_conf.rss_key_len = hash_key_len; 1975 rss_conf.rss_hf = 0; 1976 for (i = 0; rss_type_table[i].str; i++) { 1977 if (!strcmp(rss_type_table[i].str, rss_type)) 1978 rss_conf.rss_hf = rss_type_table[i].rss_type; 1979 } 1980 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1981 if (diag == 0) { 1982 rss_conf.rss_key = hash_key; 1983 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 1984 } 1985 if (diag == 0) 1986 return; 1987 1988 switch (diag) { 1989 case -ENODEV: 1990 printf("port index %d invalid\n", port_id); 1991 break; 1992 case -ENOTSUP: 1993 printf("operation not supported by device\n"); 1994 break; 1995 default: 1996 printf("operation failed - diag=%d\n", diag); 1997 break; 1998 } 1999 } 2000 2001 /* 2002 * Setup forwarding configuration for each logical core. 2003 */ 2004 static void 2005 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2006 { 2007 streamid_t nb_fs_per_lcore; 2008 streamid_t nb_fs; 2009 streamid_t sm_id; 2010 lcoreid_t nb_extra; 2011 lcoreid_t nb_fc; 2012 lcoreid_t nb_lc; 2013 lcoreid_t lc_id; 2014 2015 nb_fs = cfg->nb_fwd_streams; 2016 nb_fc = cfg->nb_fwd_lcores; 2017 if (nb_fs <= nb_fc) { 2018 nb_fs_per_lcore = 1; 2019 nb_extra = 0; 2020 } else { 2021 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2022 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2023 } 2024 2025 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2026 sm_id = 0; 2027 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2028 fwd_lcores[lc_id]->stream_idx = sm_id; 2029 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2030 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2031 } 2032 2033 /* 2034 * Assign extra remaining streams, if any. 2035 */ 2036 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2037 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2038 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2039 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2040 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2041 } 2042 } 2043 2044 static portid_t 2045 fwd_topology_tx_port_get(portid_t rxp) 2046 { 2047 static int warning_once = 1; 2048 2049 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2050 2051 switch (port_topology) { 2052 default: 2053 case PORT_TOPOLOGY_PAIRED: 2054 if ((rxp & 0x1) == 0) { 2055 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2056 return rxp + 1; 2057 if (warning_once) { 2058 printf("\nWarning! port-topology=paired" 2059 " and odd forward ports number," 2060 " the last port will pair with" 2061 " itself.\n\n"); 2062 warning_once = 0; 2063 } 2064 return rxp; 2065 } 2066 return rxp - 1; 2067 case PORT_TOPOLOGY_CHAINED: 2068 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2069 case PORT_TOPOLOGY_LOOP: 2070 return rxp; 2071 } 2072 } 2073 2074 static void 2075 simple_fwd_config_setup(void) 2076 { 2077 portid_t i; 2078 2079 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2080 cur_fwd_config.nb_fwd_streams = 2081 (streamid_t) cur_fwd_config.nb_fwd_ports; 2082 2083 /* reinitialize forwarding streams */ 2084 init_fwd_streams(); 2085 2086 /* 2087 * In the simple forwarding test, the number of forwarding cores 2088 * must be lower or equal to the number of forwarding ports. 2089 */ 2090 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2091 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2092 cur_fwd_config.nb_fwd_lcores = 2093 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2094 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2095 2096 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2097 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2098 fwd_streams[i]->rx_queue = 0; 2099 fwd_streams[i]->tx_port = 2100 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2101 fwd_streams[i]->tx_queue = 0; 2102 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2103 fwd_streams[i]->retry_enabled = retry_enabled; 2104 } 2105 } 2106 2107 /** 2108 * For the RSS forwarding test all streams distributed over lcores. Each stream 2109 * being composed of a RX queue to poll on a RX port for input messages, 2110 * associated with a TX queue of a TX port where to send forwarded packets. 2111 */ 2112 static void 2113 rss_fwd_config_setup(void) 2114 { 2115 portid_t rxp; 2116 portid_t txp; 2117 queueid_t rxq; 2118 queueid_t nb_q; 2119 streamid_t sm_id; 2120 2121 nb_q = nb_rxq; 2122 if (nb_q > nb_txq) 2123 nb_q = nb_txq; 2124 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2125 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2126 cur_fwd_config.nb_fwd_streams = 2127 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2128 2129 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2130 cur_fwd_config.nb_fwd_lcores = 2131 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2132 2133 /* reinitialize forwarding streams */ 2134 init_fwd_streams(); 2135 2136 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2137 rxp = 0; rxq = 0; 2138 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2139 struct fwd_stream *fs; 2140 2141 fs = fwd_streams[sm_id]; 2142 txp = fwd_topology_tx_port_get(rxp); 2143 fs->rx_port = fwd_ports_ids[rxp]; 2144 fs->rx_queue = rxq; 2145 fs->tx_port = fwd_ports_ids[txp]; 2146 fs->tx_queue = rxq; 2147 fs->peer_addr = fs->tx_port; 2148 fs->retry_enabled = retry_enabled; 2149 rxq = (queueid_t) (rxq + 1); 2150 if (rxq < nb_q) 2151 continue; 2152 /* 2153 * rxq == nb_q 2154 * Restart from RX queue 0 on next RX port 2155 */ 2156 rxq = 0; 2157 rxp++; 2158 } 2159 } 2160 2161 /** 2162 * For the DCB forwarding test, each core is assigned on each traffic class. 2163 * 2164 * Each core is assigned a multi-stream, each stream being composed of 2165 * a RX queue to poll on a RX port for input messages, associated with 2166 * a TX queue of a TX port where to send forwarded packets. All RX and 2167 * TX queues are mapping to the same traffic class. 2168 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2169 * the same core 2170 */ 2171 static void 2172 dcb_fwd_config_setup(void) 2173 { 2174 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2175 portid_t txp, rxp = 0; 2176 queueid_t txq, rxq = 0; 2177 lcoreid_t lc_id; 2178 uint16_t nb_rx_queue, nb_tx_queue; 2179 uint16_t i, j, k, sm_id = 0; 2180 uint8_t tc = 0; 2181 2182 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2183 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2184 cur_fwd_config.nb_fwd_streams = 2185 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2186 2187 /* reinitialize forwarding streams */ 2188 init_fwd_streams(); 2189 sm_id = 0; 2190 txp = 1; 2191 /* get the dcb info on the first RX and TX ports */ 2192 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2193 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2194 2195 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2196 fwd_lcores[lc_id]->stream_nb = 0; 2197 fwd_lcores[lc_id]->stream_idx = sm_id; 2198 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2199 /* if the nb_queue is zero, means this tc is 2200 * not enabled on the POOL 2201 */ 2202 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2203 break; 2204 k = fwd_lcores[lc_id]->stream_nb + 2205 fwd_lcores[lc_id]->stream_idx; 2206 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2207 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2208 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2209 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2210 for (j = 0; j < nb_rx_queue; j++) { 2211 struct fwd_stream *fs; 2212 2213 fs = fwd_streams[k + j]; 2214 fs->rx_port = fwd_ports_ids[rxp]; 2215 fs->rx_queue = rxq + j; 2216 fs->tx_port = fwd_ports_ids[txp]; 2217 fs->tx_queue = txq + j % nb_tx_queue; 2218 fs->peer_addr = fs->tx_port; 2219 fs->retry_enabled = retry_enabled; 2220 } 2221 fwd_lcores[lc_id]->stream_nb += 2222 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2223 } 2224 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2225 2226 tc++; 2227 if (tc < rxp_dcb_info.nb_tcs) 2228 continue; 2229 /* Restart from TC 0 on next RX port */ 2230 tc = 0; 2231 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2232 rxp = (portid_t) 2233 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2234 else 2235 rxp++; 2236 if (rxp >= nb_fwd_ports) 2237 return; 2238 /* get the dcb information on next RX and TX ports */ 2239 if ((rxp & 0x1) == 0) 2240 txp = (portid_t) (rxp + 1); 2241 else 2242 txp = (portid_t) (rxp - 1); 2243 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2244 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2245 } 2246 } 2247 2248 static void 2249 icmp_echo_config_setup(void) 2250 { 2251 portid_t rxp; 2252 queueid_t rxq; 2253 lcoreid_t lc_id; 2254 uint16_t sm_id; 2255 2256 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2257 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2258 (nb_txq * nb_fwd_ports); 2259 else 2260 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2261 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2262 cur_fwd_config.nb_fwd_streams = 2263 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2264 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2265 cur_fwd_config.nb_fwd_lcores = 2266 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2267 if (verbose_level > 0) { 2268 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2269 __FUNCTION__, 2270 cur_fwd_config.nb_fwd_lcores, 2271 cur_fwd_config.nb_fwd_ports, 2272 cur_fwd_config.nb_fwd_streams); 2273 } 2274 2275 /* reinitialize forwarding streams */ 2276 init_fwd_streams(); 2277 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2278 rxp = 0; rxq = 0; 2279 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2280 if (verbose_level > 0) 2281 printf(" core=%d: \n", lc_id); 2282 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2283 struct fwd_stream *fs; 2284 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2285 fs->rx_port = fwd_ports_ids[rxp]; 2286 fs->rx_queue = rxq; 2287 fs->tx_port = fs->rx_port; 2288 fs->tx_queue = rxq; 2289 fs->peer_addr = fs->tx_port; 2290 fs->retry_enabled = retry_enabled; 2291 if (verbose_level > 0) 2292 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2293 sm_id, fs->rx_port, fs->rx_queue, 2294 fs->tx_queue); 2295 rxq = (queueid_t) (rxq + 1); 2296 if (rxq == nb_rxq) { 2297 rxq = 0; 2298 rxp = (portid_t) (rxp + 1); 2299 } 2300 } 2301 } 2302 } 2303 2304 void 2305 fwd_config_setup(void) 2306 { 2307 cur_fwd_config.fwd_eng = cur_fwd_eng; 2308 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2309 icmp_echo_config_setup(); 2310 return; 2311 } 2312 if ((nb_rxq > 1) && (nb_txq > 1)){ 2313 if (dcb_config) 2314 dcb_fwd_config_setup(); 2315 else 2316 rss_fwd_config_setup(); 2317 } 2318 else 2319 simple_fwd_config_setup(); 2320 } 2321 2322 void 2323 pkt_fwd_config_display(struct fwd_config *cfg) 2324 { 2325 struct fwd_stream *fs; 2326 lcoreid_t lc_id; 2327 streamid_t sm_id; 2328 2329 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2330 "NUMA support %s, MP over anonymous pages %s\n", 2331 cfg->fwd_eng->fwd_mode_name, 2332 retry_enabled == 0 ? "" : " with retry", 2333 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2334 numa_support == 1 ? "enabled" : "disabled", 2335 mp_anon != 0 ? "enabled" : "disabled"); 2336 2337 if (retry_enabled) 2338 printf("TX retry num: %u, delay between TX retries: %uus\n", 2339 burst_tx_retry_num, burst_tx_delay_time); 2340 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2341 printf("Logical Core %u (socket %u) forwards packets on " 2342 "%d streams:", 2343 fwd_lcores_cpuids[lc_id], 2344 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2345 fwd_lcores[lc_id]->stream_nb); 2346 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2347 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2348 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2349 "P=%d/Q=%d (socket %u) ", 2350 fs->rx_port, fs->rx_queue, 2351 ports[fs->rx_port].socket_id, 2352 fs->tx_port, fs->tx_queue, 2353 ports[fs->tx_port].socket_id); 2354 print_ethaddr("peer=", 2355 &peer_eth_addrs[fs->peer_addr]); 2356 } 2357 printf("\n"); 2358 } 2359 printf("\n"); 2360 } 2361 2362 void 2363 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2364 { 2365 uint8_t c, new_peer_addr[6]; 2366 if (!rte_eth_dev_is_valid_port(port_id)) { 2367 printf("Error: Invalid port number %i\n", port_id); 2368 return; 2369 } 2370 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2371 sizeof(new_peer_addr)) < 0) { 2372 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2373 return; 2374 } 2375 for (c = 0; c < 6; c++) 2376 peer_eth_addrs[port_id].addr_bytes[c] = 2377 new_peer_addr[c]; 2378 } 2379 2380 int 2381 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2382 { 2383 unsigned int i; 2384 unsigned int lcore_cpuid; 2385 int record_now; 2386 2387 record_now = 0; 2388 again: 2389 for (i = 0; i < nb_lc; i++) { 2390 lcore_cpuid = lcorelist[i]; 2391 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2392 printf("lcore %u not enabled\n", lcore_cpuid); 2393 return -1; 2394 } 2395 if (lcore_cpuid == rte_get_master_lcore()) { 2396 printf("lcore %u cannot be masked on for running " 2397 "packet forwarding, which is the master lcore " 2398 "and reserved for command line parsing only\n", 2399 lcore_cpuid); 2400 return -1; 2401 } 2402 if (record_now) 2403 fwd_lcores_cpuids[i] = lcore_cpuid; 2404 } 2405 if (record_now == 0) { 2406 record_now = 1; 2407 goto again; 2408 } 2409 nb_cfg_lcores = (lcoreid_t) nb_lc; 2410 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2411 printf("previous number of forwarding cores %u - changed to " 2412 "number of configured cores %u\n", 2413 (unsigned int) nb_fwd_lcores, nb_lc); 2414 nb_fwd_lcores = (lcoreid_t) nb_lc; 2415 } 2416 2417 return 0; 2418 } 2419 2420 int 2421 set_fwd_lcores_mask(uint64_t lcoremask) 2422 { 2423 unsigned int lcorelist[64]; 2424 unsigned int nb_lc; 2425 unsigned int i; 2426 2427 if (lcoremask == 0) { 2428 printf("Invalid NULL mask of cores\n"); 2429 return -1; 2430 } 2431 nb_lc = 0; 2432 for (i = 0; i < 64; i++) { 2433 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2434 continue; 2435 lcorelist[nb_lc++] = i; 2436 } 2437 return set_fwd_lcores_list(lcorelist, nb_lc); 2438 } 2439 2440 void 2441 set_fwd_lcores_number(uint16_t nb_lc) 2442 { 2443 if (nb_lc > nb_cfg_lcores) { 2444 printf("nb fwd cores %u > %u (max. number of configured " 2445 "lcores) - ignored\n", 2446 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2447 return; 2448 } 2449 nb_fwd_lcores = (lcoreid_t) nb_lc; 2450 printf("Number of forwarding cores set to %u\n", 2451 (unsigned int) nb_fwd_lcores); 2452 } 2453 2454 void 2455 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2456 { 2457 unsigned int i; 2458 portid_t port_id; 2459 int record_now; 2460 2461 record_now = 0; 2462 again: 2463 for (i = 0; i < nb_pt; i++) { 2464 port_id = (portid_t) portlist[i]; 2465 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2466 return; 2467 if (record_now) 2468 fwd_ports_ids[i] = port_id; 2469 } 2470 if (record_now == 0) { 2471 record_now = 1; 2472 goto again; 2473 } 2474 nb_cfg_ports = (portid_t) nb_pt; 2475 if (nb_fwd_ports != (portid_t) nb_pt) { 2476 printf("previous number of forwarding ports %u - changed to " 2477 "number of configured ports %u\n", 2478 (unsigned int) nb_fwd_ports, nb_pt); 2479 nb_fwd_ports = (portid_t) nb_pt; 2480 } 2481 } 2482 2483 void 2484 set_fwd_ports_mask(uint64_t portmask) 2485 { 2486 unsigned int portlist[64]; 2487 unsigned int nb_pt; 2488 unsigned int i; 2489 2490 if (portmask == 0) { 2491 printf("Invalid NULL mask of ports\n"); 2492 return; 2493 } 2494 nb_pt = 0; 2495 RTE_ETH_FOREACH_DEV(i) { 2496 if (! ((uint64_t)(1ULL << i) & portmask)) 2497 continue; 2498 portlist[nb_pt++] = i; 2499 } 2500 set_fwd_ports_list(portlist, nb_pt); 2501 } 2502 2503 void 2504 set_fwd_ports_number(uint16_t nb_pt) 2505 { 2506 if (nb_pt > nb_cfg_ports) { 2507 printf("nb fwd ports %u > %u (number of configured " 2508 "ports) - ignored\n", 2509 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2510 return; 2511 } 2512 nb_fwd_ports = (portid_t) nb_pt; 2513 printf("Number of forwarding ports set to %u\n", 2514 (unsigned int) nb_fwd_ports); 2515 } 2516 2517 int 2518 port_is_forwarding(portid_t port_id) 2519 { 2520 unsigned int i; 2521 2522 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2523 return -1; 2524 2525 for (i = 0; i < nb_fwd_ports; i++) { 2526 if (fwd_ports_ids[i] == port_id) 2527 return 1; 2528 } 2529 2530 return 0; 2531 } 2532 2533 void 2534 set_nb_pkt_per_burst(uint16_t nb) 2535 { 2536 if (nb > MAX_PKT_BURST) { 2537 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2538 " ignored\n", 2539 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2540 return; 2541 } 2542 nb_pkt_per_burst = nb; 2543 printf("Number of packets per burst set to %u\n", 2544 (unsigned int) nb_pkt_per_burst); 2545 } 2546 2547 static const char * 2548 tx_split_get_name(enum tx_pkt_split split) 2549 { 2550 uint32_t i; 2551 2552 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2553 if (tx_split_name[i].split == split) 2554 return tx_split_name[i].name; 2555 } 2556 return NULL; 2557 } 2558 2559 void 2560 set_tx_pkt_split(const char *name) 2561 { 2562 uint32_t i; 2563 2564 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2565 if (strcmp(tx_split_name[i].name, name) == 0) { 2566 tx_pkt_split = tx_split_name[i].split; 2567 return; 2568 } 2569 } 2570 printf("unknown value: \"%s\"\n", name); 2571 } 2572 2573 void 2574 show_tx_pkt_segments(void) 2575 { 2576 uint32_t i, n; 2577 const char *split; 2578 2579 n = tx_pkt_nb_segs; 2580 split = tx_split_get_name(tx_pkt_split); 2581 2582 printf("Number of segments: %u\n", n); 2583 printf("Segment sizes: "); 2584 for (i = 0; i != n - 1; i++) 2585 printf("%hu,", tx_pkt_seg_lengths[i]); 2586 printf("%hu\n", tx_pkt_seg_lengths[i]); 2587 printf("Split packet: %s\n", split); 2588 } 2589 2590 void 2591 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2592 { 2593 uint16_t tx_pkt_len; 2594 unsigned i; 2595 2596 if (nb_segs >= (unsigned) nb_txd) { 2597 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2598 nb_segs, (unsigned int) nb_txd); 2599 return; 2600 } 2601 2602 /* 2603 * Check that each segment length is greater or equal than 2604 * the mbuf data sise. 2605 * Check also that the total packet length is greater or equal than the 2606 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2607 */ 2608 tx_pkt_len = 0; 2609 for (i = 0; i < nb_segs; i++) { 2610 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2611 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2612 i, seg_lengths[i], (unsigned) mbuf_data_size); 2613 return; 2614 } 2615 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2616 } 2617 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2618 printf("total packet length=%u < %d - give up\n", 2619 (unsigned) tx_pkt_len, 2620 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2621 return; 2622 } 2623 2624 for (i = 0; i < nb_segs; i++) 2625 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2626 2627 tx_pkt_length = tx_pkt_len; 2628 tx_pkt_nb_segs = (uint8_t) nb_segs; 2629 } 2630 2631 void 2632 setup_gro(const char *onoff, portid_t port_id) 2633 { 2634 if (!rte_eth_dev_is_valid_port(port_id)) { 2635 printf("invalid port id %u\n", port_id); 2636 return; 2637 } 2638 if (test_done == 0) { 2639 printf("Before enable/disable GRO," 2640 " please stop forwarding first\n"); 2641 return; 2642 } 2643 if (strcmp(onoff, "on") == 0) { 2644 if (gro_ports[port_id].enable != 0) { 2645 printf("Port %u has enabled GRO. Please" 2646 " disable GRO first\n", port_id); 2647 return; 2648 } 2649 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2650 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2651 gro_ports[port_id].param.max_flow_num = 2652 GRO_DEFAULT_FLOW_NUM; 2653 gro_ports[port_id].param.max_item_per_flow = 2654 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2655 } 2656 gro_ports[port_id].enable = 1; 2657 } else { 2658 if (gro_ports[port_id].enable == 0) { 2659 printf("Port %u has disabled GRO\n", port_id); 2660 return; 2661 } 2662 gro_ports[port_id].enable = 0; 2663 } 2664 } 2665 2666 void 2667 setup_gro_flush_cycles(uint8_t cycles) 2668 { 2669 if (test_done == 0) { 2670 printf("Before change flush interval for GRO," 2671 " please stop forwarding first.\n"); 2672 return; 2673 } 2674 2675 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2676 GRO_DEFAULT_FLUSH_CYCLES) { 2677 printf("The flushing cycle be in the range" 2678 " of 1 to %u. Revert to the default" 2679 " value %u.\n", 2680 GRO_MAX_FLUSH_CYCLES, 2681 GRO_DEFAULT_FLUSH_CYCLES); 2682 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2683 } 2684 2685 gro_flush_cycles = cycles; 2686 } 2687 2688 void 2689 show_gro(portid_t port_id) 2690 { 2691 struct rte_gro_param *param; 2692 uint32_t max_pkts_num; 2693 2694 param = &gro_ports[port_id].param; 2695 2696 if (!rte_eth_dev_is_valid_port(port_id)) { 2697 printf("Invalid port id %u.\n", port_id); 2698 return; 2699 } 2700 if (gro_ports[port_id].enable) { 2701 printf("GRO type: TCP/IPv4\n"); 2702 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2703 max_pkts_num = param->max_flow_num * 2704 param->max_item_per_flow; 2705 } else 2706 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2707 printf("Max number of packets to perform GRO: %u\n", 2708 max_pkts_num); 2709 printf("Flushing cycles: %u\n", gro_flush_cycles); 2710 } else 2711 printf("Port %u doesn't enable GRO.\n", port_id); 2712 } 2713 2714 void 2715 setup_gso(const char *mode, portid_t port_id) 2716 { 2717 if (!rte_eth_dev_is_valid_port(port_id)) { 2718 printf("invalid port id %u\n", port_id); 2719 return; 2720 } 2721 if (strcmp(mode, "on") == 0) { 2722 if (test_done == 0) { 2723 printf("before enabling GSO," 2724 " please stop forwarding first\n"); 2725 return; 2726 } 2727 gso_ports[port_id].enable = 1; 2728 } else if (strcmp(mode, "off") == 0) { 2729 if (test_done == 0) { 2730 printf("before disabling GSO," 2731 " please stop forwarding first\n"); 2732 return; 2733 } 2734 gso_ports[port_id].enable = 0; 2735 } 2736 } 2737 2738 char* 2739 list_pkt_forwarding_modes(void) 2740 { 2741 static char fwd_modes[128] = ""; 2742 const char *separator = "|"; 2743 struct fwd_engine *fwd_eng; 2744 unsigned i = 0; 2745 2746 if (strlen (fwd_modes) == 0) { 2747 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2748 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2749 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2750 strncat(fwd_modes, separator, 2751 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2752 } 2753 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2754 } 2755 2756 return fwd_modes; 2757 } 2758 2759 char* 2760 list_pkt_forwarding_retry_modes(void) 2761 { 2762 static char fwd_modes[128] = ""; 2763 const char *separator = "|"; 2764 struct fwd_engine *fwd_eng; 2765 unsigned i = 0; 2766 2767 if (strlen(fwd_modes) == 0) { 2768 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2769 if (fwd_eng == &rx_only_engine) 2770 continue; 2771 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2772 sizeof(fwd_modes) - 2773 strlen(fwd_modes) - 1); 2774 strncat(fwd_modes, separator, 2775 sizeof(fwd_modes) - 2776 strlen(fwd_modes) - 1); 2777 } 2778 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2779 } 2780 2781 return fwd_modes; 2782 } 2783 2784 void 2785 set_pkt_forwarding_mode(const char *fwd_mode_name) 2786 { 2787 struct fwd_engine *fwd_eng; 2788 unsigned i; 2789 2790 i = 0; 2791 while ((fwd_eng = fwd_engines[i]) != NULL) { 2792 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2793 printf("Set %s packet forwarding mode%s\n", 2794 fwd_mode_name, 2795 retry_enabled == 0 ? "" : " with retry"); 2796 cur_fwd_eng = fwd_eng; 2797 return; 2798 } 2799 i++; 2800 } 2801 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2802 } 2803 2804 void 2805 set_verbose_level(uint16_t vb_level) 2806 { 2807 printf("Change verbose level from %u to %u\n", 2808 (unsigned int) verbose_level, (unsigned int) vb_level); 2809 verbose_level = vb_level; 2810 } 2811 2812 void 2813 vlan_extend_set(portid_t port_id, int on) 2814 { 2815 int diag; 2816 int vlan_offload; 2817 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2818 2819 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2820 return; 2821 2822 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2823 2824 if (on) { 2825 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2826 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2827 } else { 2828 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2829 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2830 } 2831 2832 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2833 if (diag < 0) 2834 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2835 "diag=%d\n", port_id, on, diag); 2836 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2837 } 2838 2839 void 2840 rx_vlan_strip_set(portid_t port_id, int on) 2841 { 2842 int diag; 2843 int vlan_offload; 2844 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2845 2846 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2847 return; 2848 2849 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2850 2851 if (on) { 2852 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2853 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2854 } else { 2855 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2856 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2857 } 2858 2859 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2860 if (diag < 0) 2861 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2862 "diag=%d\n", port_id, on, diag); 2863 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2864 } 2865 2866 void 2867 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2868 { 2869 int diag; 2870 2871 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2872 return; 2873 2874 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2875 if (diag < 0) 2876 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2877 "diag=%d\n", port_id, queue_id, on, diag); 2878 } 2879 2880 void 2881 rx_vlan_filter_set(portid_t port_id, int on) 2882 { 2883 int diag; 2884 int vlan_offload; 2885 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2886 2887 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2888 return; 2889 2890 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2891 2892 if (on) { 2893 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2894 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2895 } else { 2896 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2897 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2898 } 2899 2900 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2901 if (diag < 0) 2902 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2903 "diag=%d\n", port_id, on, diag); 2904 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2905 } 2906 2907 int 2908 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2909 { 2910 int diag; 2911 2912 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2913 return 1; 2914 if (vlan_id_is_invalid(vlan_id)) 2915 return 1; 2916 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 2917 if (diag == 0) 2918 return 0; 2919 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 2920 "diag=%d\n", 2921 port_id, vlan_id, on, diag); 2922 return -1; 2923 } 2924 2925 void 2926 rx_vlan_all_filter_set(portid_t port_id, int on) 2927 { 2928 uint16_t vlan_id; 2929 2930 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2931 return; 2932 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 2933 if (rx_vft_set(port_id, vlan_id, on)) 2934 break; 2935 } 2936 } 2937 2938 void 2939 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 2940 { 2941 int diag; 2942 2943 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2944 return; 2945 2946 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 2947 if (diag == 0) 2948 return; 2949 2950 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 2951 "diag=%d\n", 2952 port_id, vlan_type, tp_id, diag); 2953 } 2954 2955 void 2956 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 2957 { 2958 int vlan_offload; 2959 struct rte_eth_dev_info dev_info; 2960 2961 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2962 return; 2963 if (vlan_id_is_invalid(vlan_id)) 2964 return; 2965 2966 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2967 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 2968 printf("Error, as QinQ has been enabled.\n"); 2969 return; 2970 } 2971 rte_eth_dev_info_get(port_id, &dev_info); 2972 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 2973 printf("Error: vlan insert is not supported by port %d\n", 2974 port_id); 2975 return; 2976 } 2977 2978 tx_vlan_reset(port_id); 2979 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 2980 ports[port_id].tx_vlan_id = vlan_id; 2981 } 2982 2983 void 2984 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 2985 { 2986 int vlan_offload; 2987 struct rte_eth_dev_info dev_info; 2988 2989 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2990 return; 2991 if (vlan_id_is_invalid(vlan_id)) 2992 return; 2993 if (vlan_id_is_invalid(vlan_id_outer)) 2994 return; 2995 2996 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2997 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 2998 printf("Error, as QinQ hasn't been enabled.\n"); 2999 return; 3000 } 3001 rte_eth_dev_info_get(port_id, &dev_info); 3002 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3003 printf("Error: qinq insert not supported by port %d\n", 3004 port_id); 3005 return; 3006 } 3007 3008 tx_vlan_reset(port_id); 3009 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; 3010 ports[port_id].tx_vlan_id = vlan_id; 3011 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3012 } 3013 3014 void 3015 tx_vlan_reset(portid_t port_id) 3016 { 3017 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3018 return; 3019 ports[port_id].dev_conf.txmode.offloads &= 3020 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3021 DEV_TX_OFFLOAD_QINQ_INSERT); 3022 ports[port_id].tx_vlan_id = 0; 3023 ports[port_id].tx_vlan_id_outer = 0; 3024 } 3025 3026 void 3027 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3028 { 3029 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3030 return; 3031 3032 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3033 } 3034 3035 void 3036 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3037 { 3038 uint16_t i; 3039 uint8_t existing_mapping_found = 0; 3040 3041 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3042 return; 3043 3044 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3045 return; 3046 3047 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3048 printf("map_value not in required range 0..%d\n", 3049 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3050 return; 3051 } 3052 3053 if (!is_rx) { /*then tx*/ 3054 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3055 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3056 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3057 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3058 existing_mapping_found = 1; 3059 break; 3060 } 3061 } 3062 if (!existing_mapping_found) { /* A new additional mapping... */ 3063 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3064 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3065 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3066 nb_tx_queue_stats_mappings++; 3067 } 3068 } 3069 else { /*rx*/ 3070 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3071 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3072 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3073 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3074 existing_mapping_found = 1; 3075 break; 3076 } 3077 } 3078 if (!existing_mapping_found) { /* A new additional mapping... */ 3079 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3080 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3081 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3082 nb_rx_queue_stats_mappings++; 3083 } 3084 } 3085 } 3086 3087 void 3088 set_xstats_hide_zero(uint8_t on_off) 3089 { 3090 xstats_hide_zero = on_off; 3091 } 3092 3093 static inline void 3094 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3095 { 3096 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3097 3098 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3099 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3100 " tunnel_id: 0x%08x", 3101 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3102 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3103 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3104 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3105 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3106 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3107 3108 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3109 rte_be_to_cpu_16(mask->src_port_mask), 3110 rte_be_to_cpu_16(mask->dst_port_mask)); 3111 3112 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3113 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3114 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3115 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3116 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3117 3118 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3119 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3120 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3121 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3122 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3123 } 3124 3125 printf("\n"); 3126 } 3127 3128 static inline void 3129 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3130 { 3131 struct rte_eth_flex_payload_cfg *cfg; 3132 uint32_t i, j; 3133 3134 for (i = 0; i < flex_conf->nb_payloads; i++) { 3135 cfg = &flex_conf->flex_set[i]; 3136 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3137 printf("\n RAW: "); 3138 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3139 printf("\n L2_PAYLOAD: "); 3140 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3141 printf("\n L3_PAYLOAD: "); 3142 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3143 printf("\n L4_PAYLOAD: "); 3144 else 3145 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3146 for (j = 0; j < num; j++) 3147 printf(" %-5u", cfg->src_offset[j]); 3148 } 3149 printf("\n"); 3150 } 3151 3152 static char * 3153 flowtype_to_str(uint16_t flow_type) 3154 { 3155 struct flow_type_info { 3156 char str[32]; 3157 uint16_t ftype; 3158 }; 3159 3160 uint8_t i; 3161 static struct flow_type_info flowtype_str_table[] = { 3162 {"raw", RTE_ETH_FLOW_RAW}, 3163 {"ipv4", RTE_ETH_FLOW_IPV4}, 3164 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3165 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3166 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3167 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3168 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3169 {"ipv6", RTE_ETH_FLOW_IPV6}, 3170 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3171 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3172 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3173 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3174 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3175 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3176 {"port", RTE_ETH_FLOW_PORT}, 3177 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3178 {"geneve", RTE_ETH_FLOW_GENEVE}, 3179 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3180 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3181 }; 3182 3183 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3184 if (flowtype_str_table[i].ftype == flow_type) 3185 return flowtype_str_table[i].str; 3186 } 3187 3188 return NULL; 3189 } 3190 3191 static inline void 3192 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3193 { 3194 struct rte_eth_fdir_flex_mask *mask; 3195 uint32_t i, j; 3196 char *p; 3197 3198 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3199 mask = &flex_conf->flex_mask[i]; 3200 p = flowtype_to_str(mask->flow_type); 3201 printf("\n %s:\t", p ? p : "unknown"); 3202 for (j = 0; j < num; j++) 3203 printf(" %02x", mask->mask[j]); 3204 } 3205 printf("\n"); 3206 } 3207 3208 static inline void 3209 print_fdir_flow_type(uint32_t flow_types_mask) 3210 { 3211 int i; 3212 char *p; 3213 3214 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3215 if (!(flow_types_mask & (1 << i))) 3216 continue; 3217 p = flowtype_to_str(i); 3218 if (p) 3219 printf(" %s", p); 3220 else 3221 printf(" unknown"); 3222 } 3223 printf("\n"); 3224 } 3225 3226 void 3227 fdir_get_infos(portid_t port_id) 3228 { 3229 struct rte_eth_fdir_stats fdir_stat; 3230 struct rte_eth_fdir_info fdir_info; 3231 int ret; 3232 3233 static const char *fdir_stats_border = "########################"; 3234 3235 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3236 return; 3237 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3238 if (ret < 0) { 3239 printf("\n FDIR is not supported on port %-2d\n", 3240 port_id); 3241 return; 3242 } 3243 3244 memset(&fdir_info, 0, sizeof(fdir_info)); 3245 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3246 RTE_ETH_FILTER_INFO, &fdir_info); 3247 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3248 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3249 RTE_ETH_FILTER_STATS, &fdir_stat); 3250 printf("\n %s FDIR infos for port %-2d %s\n", 3251 fdir_stats_border, port_id, fdir_stats_border); 3252 printf(" MODE: "); 3253 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3254 printf(" PERFECT\n"); 3255 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3256 printf(" PERFECT-MAC-VLAN\n"); 3257 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3258 printf(" PERFECT-TUNNEL\n"); 3259 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3260 printf(" SIGNATURE\n"); 3261 else 3262 printf(" DISABLE\n"); 3263 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3264 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3265 printf(" SUPPORTED FLOW TYPE: "); 3266 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3267 } 3268 printf(" FLEX PAYLOAD INFO:\n"); 3269 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3270 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3271 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3272 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3273 fdir_info.flex_payload_unit, 3274 fdir_info.max_flex_payload_segment_num, 3275 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3276 printf(" MASK: "); 3277 print_fdir_mask(&fdir_info.mask); 3278 if (fdir_info.flex_conf.nb_payloads > 0) { 3279 printf(" FLEX PAYLOAD SRC OFFSET:"); 3280 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3281 } 3282 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3283 printf(" FLEX MASK CFG:"); 3284 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3285 } 3286 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3287 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3288 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3289 fdir_info.guarant_spc, fdir_info.best_spc); 3290 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3291 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3292 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3293 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3294 fdir_stat.collision, fdir_stat.free, 3295 fdir_stat.maxhash, fdir_stat.maxlen, 3296 fdir_stat.add, fdir_stat.remove, 3297 fdir_stat.f_add, fdir_stat.f_remove); 3298 printf(" %s############################%s\n", 3299 fdir_stats_border, fdir_stats_border); 3300 } 3301 3302 void 3303 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3304 { 3305 struct rte_port *port; 3306 struct rte_eth_fdir_flex_conf *flex_conf; 3307 int i, idx = 0; 3308 3309 port = &ports[port_id]; 3310 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3311 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3312 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3313 idx = i; 3314 break; 3315 } 3316 } 3317 if (i >= RTE_ETH_FLOW_MAX) { 3318 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3319 idx = flex_conf->nb_flexmasks; 3320 flex_conf->nb_flexmasks++; 3321 } else { 3322 printf("The flex mask table is full. Can not set flex" 3323 " mask for flow_type(%u).", cfg->flow_type); 3324 return; 3325 } 3326 } 3327 rte_memcpy(&flex_conf->flex_mask[idx], 3328 cfg, 3329 sizeof(struct rte_eth_fdir_flex_mask)); 3330 } 3331 3332 void 3333 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3334 { 3335 struct rte_port *port; 3336 struct rte_eth_fdir_flex_conf *flex_conf; 3337 int i, idx = 0; 3338 3339 port = &ports[port_id]; 3340 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3341 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3342 if (cfg->type == flex_conf->flex_set[i].type) { 3343 idx = i; 3344 break; 3345 } 3346 } 3347 if (i >= RTE_ETH_PAYLOAD_MAX) { 3348 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3349 idx = flex_conf->nb_payloads; 3350 flex_conf->nb_payloads++; 3351 } else { 3352 printf("The flex payload table is full. Can not set" 3353 " flex payload for type(%u).", cfg->type); 3354 return; 3355 } 3356 } 3357 rte_memcpy(&flex_conf->flex_set[idx], 3358 cfg, 3359 sizeof(struct rte_eth_flex_payload_cfg)); 3360 3361 } 3362 3363 void 3364 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3365 { 3366 #ifdef RTE_LIBRTE_IXGBE_PMD 3367 int diag; 3368 3369 if (is_rx) 3370 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3371 else 3372 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3373 3374 if (diag == 0) 3375 return; 3376 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3377 is_rx ? "rx" : "tx", port_id, diag); 3378 return; 3379 #endif 3380 printf("VF %s setting not supported for port %d\n", 3381 is_rx ? "Rx" : "Tx", port_id); 3382 RTE_SET_USED(vf); 3383 RTE_SET_USED(on); 3384 } 3385 3386 int 3387 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3388 { 3389 int diag; 3390 struct rte_eth_link link; 3391 3392 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3393 return 1; 3394 rte_eth_link_get_nowait(port_id, &link); 3395 if (rate > link.link_speed) { 3396 printf("Invalid rate value:%u bigger than link speed: %u\n", 3397 rate, link.link_speed); 3398 return 1; 3399 } 3400 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3401 if (diag == 0) 3402 return diag; 3403 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3404 port_id, diag); 3405 return diag; 3406 } 3407 3408 int 3409 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3410 { 3411 int diag = -ENOTSUP; 3412 3413 RTE_SET_USED(vf); 3414 RTE_SET_USED(rate); 3415 RTE_SET_USED(q_msk); 3416 3417 #ifdef RTE_LIBRTE_IXGBE_PMD 3418 if (diag == -ENOTSUP) 3419 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3420 q_msk); 3421 #endif 3422 #ifdef RTE_LIBRTE_BNXT_PMD 3423 if (diag == -ENOTSUP) 3424 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3425 #endif 3426 if (diag == 0) 3427 return diag; 3428 3429 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3430 port_id, diag); 3431 return diag; 3432 } 3433 3434 /* 3435 * Functions to manage the set of filtered Multicast MAC addresses. 3436 * 3437 * A pool of filtered multicast MAC addresses is associated with each port. 3438 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3439 * The address of the pool and the number of valid multicast MAC addresses 3440 * recorded in the pool are stored in the fields "mc_addr_pool" and 3441 * "mc_addr_nb" of the "rte_port" data structure. 3442 * 3443 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3444 * to be supplied a contiguous array of multicast MAC addresses. 3445 * To comply with this constraint, the set of multicast addresses recorded 3446 * into the pool are systematically compacted at the beginning of the pool. 3447 * Hence, when a multicast address is removed from the pool, all following 3448 * addresses, if any, are copied back to keep the set contiguous. 3449 */ 3450 #define MCAST_POOL_INC 32 3451 3452 static int 3453 mcast_addr_pool_extend(struct rte_port *port) 3454 { 3455 struct ether_addr *mc_pool; 3456 size_t mc_pool_size; 3457 3458 /* 3459 * If a free entry is available at the end of the pool, just 3460 * increment the number of recorded multicast addresses. 3461 */ 3462 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3463 port->mc_addr_nb++; 3464 return 0; 3465 } 3466 3467 /* 3468 * [re]allocate a pool with MCAST_POOL_INC more entries. 3469 * The previous test guarantees that port->mc_addr_nb is a multiple 3470 * of MCAST_POOL_INC. 3471 */ 3472 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3473 MCAST_POOL_INC); 3474 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3475 mc_pool_size); 3476 if (mc_pool == NULL) { 3477 printf("allocation of pool of %u multicast addresses failed\n", 3478 port->mc_addr_nb + MCAST_POOL_INC); 3479 return -ENOMEM; 3480 } 3481 3482 port->mc_addr_pool = mc_pool; 3483 port->mc_addr_nb++; 3484 return 0; 3485 3486 } 3487 3488 static void 3489 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3490 { 3491 port->mc_addr_nb--; 3492 if (addr_idx == port->mc_addr_nb) { 3493 /* No need to recompact the set of multicast addressses. */ 3494 if (port->mc_addr_nb == 0) { 3495 /* free the pool of multicast addresses. */ 3496 free(port->mc_addr_pool); 3497 port->mc_addr_pool = NULL; 3498 } 3499 return; 3500 } 3501 memmove(&port->mc_addr_pool[addr_idx], 3502 &port->mc_addr_pool[addr_idx + 1], 3503 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3504 } 3505 3506 static void 3507 eth_port_multicast_addr_list_set(portid_t port_id) 3508 { 3509 struct rte_port *port; 3510 int diag; 3511 3512 port = &ports[port_id]; 3513 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3514 port->mc_addr_nb); 3515 if (diag == 0) 3516 return; 3517 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3518 port->mc_addr_nb, port_id, -diag); 3519 } 3520 3521 void 3522 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr) 3523 { 3524 struct rte_port *port; 3525 uint32_t i; 3526 3527 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3528 return; 3529 3530 port = &ports[port_id]; 3531 3532 /* 3533 * Check that the added multicast MAC address is not already recorded 3534 * in the pool of multicast addresses. 3535 */ 3536 for (i = 0; i < port->mc_addr_nb; i++) { 3537 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3538 printf("multicast address already filtered by port\n"); 3539 return; 3540 } 3541 } 3542 3543 if (mcast_addr_pool_extend(port) != 0) 3544 return; 3545 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3546 eth_port_multicast_addr_list_set(port_id); 3547 } 3548 3549 void 3550 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr) 3551 { 3552 struct rte_port *port; 3553 uint32_t i; 3554 3555 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3556 return; 3557 3558 port = &ports[port_id]; 3559 3560 /* 3561 * Search the pool of multicast MAC addresses for the removed address. 3562 */ 3563 for (i = 0; i < port->mc_addr_nb; i++) { 3564 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3565 break; 3566 } 3567 if (i == port->mc_addr_nb) { 3568 printf("multicast address not filtered by port %d\n", port_id); 3569 return; 3570 } 3571 3572 mcast_addr_pool_remove(port, i); 3573 eth_port_multicast_addr_list_set(port_id); 3574 } 3575 3576 void 3577 port_dcb_info_display(portid_t port_id) 3578 { 3579 struct rte_eth_dcb_info dcb_info; 3580 uint16_t i; 3581 int ret; 3582 static const char *border = "================"; 3583 3584 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3585 return; 3586 3587 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3588 if (ret) { 3589 printf("\n Failed to get dcb infos on port %-2d\n", 3590 port_id); 3591 return; 3592 } 3593 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3594 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3595 printf("\n TC : "); 3596 for (i = 0; i < dcb_info.nb_tcs; i++) 3597 printf("\t%4d", i); 3598 printf("\n Priority : "); 3599 for (i = 0; i < dcb_info.nb_tcs; i++) 3600 printf("\t%4d", dcb_info.prio_tc[i]); 3601 printf("\n BW percent :"); 3602 for (i = 0; i < dcb_info.nb_tcs; i++) 3603 printf("\t%4d%%", dcb_info.tc_bws[i]); 3604 printf("\n RXQ base : "); 3605 for (i = 0; i < dcb_info.nb_tcs; i++) 3606 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3607 printf("\n RXQ number :"); 3608 for (i = 0; i < dcb_info.nb_tcs; i++) 3609 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3610 printf("\n TXQ base : "); 3611 for (i = 0; i < dcb_info.nb_tcs; i++) 3612 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3613 printf("\n TXQ number :"); 3614 for (i = 0; i < dcb_info.nb_tcs; i++) 3615 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3616 printf("\n"); 3617 } 3618 3619 uint8_t * 3620 open_file(const char *file_path, uint32_t *size) 3621 { 3622 int fd = open(file_path, O_RDONLY); 3623 off_t pkg_size; 3624 uint8_t *buf = NULL; 3625 int ret = 0; 3626 struct stat st_buf; 3627 3628 if (size) 3629 *size = 0; 3630 3631 if (fd == -1) { 3632 printf("%s: Failed to open %s\n", __func__, file_path); 3633 return buf; 3634 } 3635 3636 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3637 close(fd); 3638 printf("%s: File operations failed\n", __func__); 3639 return buf; 3640 } 3641 3642 pkg_size = st_buf.st_size; 3643 if (pkg_size < 0) { 3644 close(fd); 3645 printf("%s: File operations failed\n", __func__); 3646 return buf; 3647 } 3648 3649 buf = (uint8_t *)malloc(pkg_size); 3650 if (!buf) { 3651 close(fd); 3652 printf("%s: Failed to malloc memory\n", __func__); 3653 return buf; 3654 } 3655 3656 ret = read(fd, buf, pkg_size); 3657 if (ret < 0) { 3658 close(fd); 3659 printf("%s: File read operation failed\n", __func__); 3660 close_file(buf); 3661 return NULL; 3662 } 3663 3664 if (size) 3665 *size = pkg_size; 3666 3667 close(fd); 3668 3669 return buf; 3670 } 3671 3672 int 3673 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3674 { 3675 FILE *fh = fopen(file_path, "wb"); 3676 3677 if (fh == NULL) { 3678 printf("%s: Failed to open %s\n", __func__, file_path); 3679 return -1; 3680 } 3681 3682 if (fwrite(buf, 1, size, fh) != size) { 3683 fclose(fh); 3684 printf("%s: File write operation failed\n", __func__); 3685 return -1; 3686 } 3687 3688 fclose(fh); 3689 3690 return 0; 3691 } 3692 3693 int 3694 close_file(uint8_t *buf) 3695 { 3696 if (buf) { 3697 free((void *)buf); 3698 return 0; 3699 } 3700 3701 return -1; 3702 } 3703 3704 void 3705 port_queue_region_info_display(portid_t port_id, void *buf) 3706 { 3707 #ifdef RTE_LIBRTE_I40E_PMD 3708 uint16_t i, j; 3709 struct rte_pmd_i40e_queue_regions *info = 3710 (struct rte_pmd_i40e_queue_regions *)buf; 3711 static const char *queue_region_info_stats_border = "-------"; 3712 3713 if (!info->queue_region_number) 3714 printf("there is no region has been set before"); 3715 3716 printf("\n %s All queue region info for port=%2d %s", 3717 queue_region_info_stats_border, port_id, 3718 queue_region_info_stats_border); 3719 printf("\n queue_region_number: %-14u \n", 3720 info->queue_region_number); 3721 3722 for (i = 0; i < info->queue_region_number; i++) { 3723 printf("\n region_id: %-14u queue_number: %-14u " 3724 "queue_start_index: %-14u \n", 3725 info->region[i].region_id, 3726 info->region[i].queue_num, 3727 info->region[i].queue_start_index); 3728 3729 printf(" user_priority_num is %-14u :", 3730 info->region[i].user_priority_num); 3731 for (j = 0; j < info->region[i].user_priority_num; j++) 3732 printf(" %-14u ", info->region[i].user_priority[j]); 3733 3734 printf("\n flowtype_num is %-14u :", 3735 info->region[i].flowtype_num); 3736 for (j = 0; j < info->region[i].flowtype_num; j++) 3737 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3738 } 3739 #else 3740 RTE_SET_USED(port_id); 3741 RTE_SET_USED(buf); 3742 #endif 3743 3744 printf("\n\n"); 3745 } 3746