/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <cmdline_parse_etheraddr.h>

#include "testpmd.h"

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ NULL, 0 },
};

static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
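
/*
 * Usage sketch (illustrative only, kept as a comment): rss_type_table is
 * scanned linearly up to its NULL sentinel to translate a user-visible
 * name into an ETH_RSS_* hash flag, exactly as the RSS commands further
 * below do:
 *
 *	uint64_t rss_hf = 0;
 *	unsigned int i;
 *
 *	for (i = 0; rss_type_table[i].str != NULL; i++)
 *		if (strcmp("ipv4-tcp", rss_type_table[i].str) == 0)
 *			rss_hf = rss_type_table[i].rss_type;
 *	// rss_hf is now ETH_RSS_NONFRAG_IPV4_TCP
 */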

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
	portid_t pid;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}
	else {
		printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
		printf(" RX-nombuf: %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
	printf("\n Throughput (since last show)\n");
	printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n",
	       mpps_rx, mpps_tx);

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
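
/*
 * The throughput lines printed above derive packets-per-second from two
 * TSC samples: pps = delta_pkts * rte_get_tsc_hz() / delta_cycles. A
 * worked example, assuming (hypothetically) a 2 GHz TSC:
 *
 *	uint64_t delta_pkts = 10000000;     // packets since last show
 *	uint64_t delta_cycles = 2000000000; // elapsed TSC ticks (1 s @ 2 GHz)
 *	uint64_t pps = delta_pkts * rte_get_tsc_hz() / delta_cycles;
 *	// with rte_get_tsc_hz() == 2000000000, pps == 10000000
 */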

void
nic_stats_clear(portid_t port_id)
{
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	rte_eth_xstats_reset(port_id);
}
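
/*
 * nic_xstats_display() above uses the standard two-call xstats pattern:
 * size the arrays with a NULL buffer first, then fetch. A condensed
 * sketch (error handling elided, port 0 assumed to be valid):
 *
 *	int n = rte_eth_xstats_get_names(0, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(sizeof(*names) * n);
 *	struct rte_eth_xstat *vals = malloc(sizeof(*vals) * n);
 *
 *	rte_eth_xstats_get_names(0, names, n);
 *	rte_eth_xstats_get(0, vals, n);
 *	// ... use names[i].name / vals[i].value ...
 *	free(names);
 *	free(vals);
 */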

void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;
	portid_t pid;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf(" RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf(" TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf(" %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
		       "RX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
	printf("\n");
}
"on" : "off"); 394 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 395 printf("\n"); 396 } 397 398 void 399 port_infos_display(portid_t port_id) 400 { 401 struct rte_port *port; 402 struct ether_addr mac_addr; 403 struct rte_eth_link link; 404 struct rte_eth_dev_info dev_info; 405 int vlan_offload; 406 struct rte_mempool * mp; 407 static const char *info_border = "*********************"; 408 portid_t pid; 409 uint16_t mtu; 410 char name[RTE_ETH_NAME_MAX_LEN]; 411 412 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 413 printf("Valid port range is [0"); 414 RTE_ETH_FOREACH_DEV(pid) 415 printf(", %d", pid); 416 printf("]\n"); 417 return; 418 } 419 port = &ports[port_id]; 420 rte_eth_link_get_nowait(port_id, &link); 421 memset(&dev_info, 0, sizeof(dev_info)); 422 rte_eth_dev_info_get(port_id, &dev_info); 423 printf("\n%s Infos for port %-2d %s\n", 424 info_border, port_id, info_border); 425 rte_eth_macaddr_get(port_id, &mac_addr); 426 print_ethaddr("MAC address: ", &mac_addr); 427 rte_eth_dev_get_name_by_port(port_id, name); 428 printf("\nDevice name: %s", name); 429 printf("\nDriver name: %s", dev_info.driver_name); 430 printf("\nConnect to socket: %u", port->socket_id); 431 432 if (port_numa[port_id] != NUMA_NO_CONFIG) { 433 mp = mbuf_pool_find(port_numa[port_id]); 434 if (mp) 435 printf("\nmemory allocation on the socket: %d", 436 port_numa[port_id]); 437 } else 438 printf("\nmemory allocation on the socket: %u",port->socket_id); 439 440 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 441 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 442 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 443 ("full-duplex") : ("half-duplex")); 444 445 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 446 printf("MTU: %u\n", mtu); 447 448 printf("Promiscuous mode: %s\n", 449 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 450 printf("Allmulticast mode: %s\n", 451 rte_eth_allmulticast_get(port_id) ? 
"enabled" : "disabled"); 452 printf("Maximum number of MAC addresses: %u\n", 453 (unsigned int)(port->dev_info.max_mac_addrs)); 454 printf("Maximum number of MAC addresses of hash filtering: %u\n", 455 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 456 457 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 458 if (vlan_offload >= 0){ 459 printf("VLAN offload: \n"); 460 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 461 printf(" strip on \n"); 462 else 463 printf(" strip off \n"); 464 465 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 466 printf(" filter on \n"); 467 else 468 printf(" filter off \n"); 469 470 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 471 printf(" qinq(extend) on \n"); 472 else 473 printf(" qinq(extend) off \n"); 474 } 475 476 if (dev_info.hash_key_size > 0) 477 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 478 if (dev_info.reta_size > 0) 479 printf("Redirection table size: %u\n", dev_info.reta_size); 480 if (!dev_info.flow_type_rss_offloads) 481 printf("No flow type is supported.\n"); 482 else { 483 uint16_t i; 484 char *p; 485 486 printf("Supported flow types:\n"); 487 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 488 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 489 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 490 continue; 491 p = flowtype_to_str(i); 492 if (p) 493 printf(" %s\n", p); 494 else 495 printf(" user defined %d\n", i); 496 } 497 } 498 499 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 500 printf("Maximum configurable length of RX packet: %u\n", 501 dev_info.max_rx_pktlen); 502 if (dev_info.max_vfs) 503 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 504 if (dev_info.max_vmdq_pools) 505 printf("Maximum number of VMDq pools: %u\n", 506 dev_info.max_vmdq_pools); 507 508 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 509 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 510 printf("Max possible number of RXDs per queue: %hu\n", 511 dev_info.rx_desc_lim.nb_max); 512 printf("Min possible number of RXDs per queue: %hu\n", 513 dev_info.rx_desc_lim.nb_min); 514 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 515 516 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 517 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 518 printf("Max possible number of TXDs per queue: %hu\n", 519 dev_info.tx_desc_lim.nb_max); 520 printf("Min possible number of TXDs per queue: %hu\n", 521 dev_info.tx_desc_lim.nb_min); 522 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 523 524 /* Show switch info only if valid switch domain and port id is set */ 525 if (dev_info.switch_info.domain_id != 526 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 527 if (dev_info.switch_info.name) 528 printf("Switch name: %s\n", dev_info.switch_info.name); 529 530 printf("Switch domain Id: %u\n", 531 dev_info.switch_info.domain_id); 532 printf("Switch Port Id: %u\n", 533 dev_info.switch_info.port_id); 534 } 535 } 536 537 void 538 port_offload_cap_display(portid_t port_id) 539 { 540 struct rte_eth_dev_info dev_info; 541 static const char *info_border = "************"; 542 543 if (port_id_is_invalid(port_id, ENABLED_WARN)) 544 return; 545 546 rte_eth_dev_info_get(port_id, &dev_info); 547 548 printf("\n%s Port %d supported offload features: %s\n", 549 info_border, port_id, info_border); 550 551 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 552 printf("VLAN stripped: "); 553 if (ports[port_id].dev_conf.rxmode.offloads & 554 

void
port_offload_cap_display(portid_t port_id)
{
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_info_get(port_id, &dev_info);

	printf("\n%s Port %d supported offload features: %s\n",
	       info_border, port_id, info_border);

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_QINQ_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
		printf("RX UDP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
		printf("RX TCP checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("RX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
		printf("Large receive offload: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_LRO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
		printf("VLAN insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VLAN_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
		printf("HW timestamp: ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TIMESTAMP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Double VLANs insert: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_QINQ_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
		printf("TX IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
		printf("TX UDP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
		printf("TX TCP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
		printf("TX SCTP checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("TX Outer IPv4 checksum: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
		printf("TX TCP segmentation: ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}
"); 688 if (ports[port_id].dev_conf.txmode.offloads & 689 DEV_TX_OFFLOAD_TCP_TSO) 690 printf("on\n"); 691 else 692 printf("off\n"); 693 } 694 695 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 696 printf("TX UDP segmentation: "); 697 if (ports[port_id].dev_conf.txmode.offloads & 698 DEV_TX_OFFLOAD_UDP_TSO) 699 printf("on\n"); 700 else 701 printf("off\n"); 702 } 703 704 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 705 printf("TSO for VXLAN tunnel packet: "); 706 if (ports[port_id].dev_conf.txmode.offloads & 707 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 708 printf("on\n"); 709 else 710 printf("off\n"); 711 } 712 713 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 714 printf("TSO for GRE tunnel packet: "); 715 if (ports[port_id].dev_conf.txmode.offloads & 716 DEV_TX_OFFLOAD_GRE_TNL_TSO) 717 printf("on\n"); 718 else 719 printf("off\n"); 720 } 721 722 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 723 printf("TSO for IPIP tunnel packet: "); 724 if (ports[port_id].dev_conf.txmode.offloads & 725 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 726 printf("on\n"); 727 else 728 printf("off\n"); 729 } 730 731 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 732 printf("TSO for GENEVE tunnel packet: "); 733 if (ports[port_id].dev_conf.txmode.offloads & 734 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 735 printf("on\n"); 736 else 737 printf("off\n"); 738 } 739 740 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 741 printf("IP tunnel TSO: "); 742 if (ports[port_id].dev_conf.txmode.offloads & 743 DEV_TX_OFFLOAD_IP_TNL_TSO) 744 printf("on\n"); 745 else 746 printf("off\n"); 747 } 748 749 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 750 printf("UDP tunnel TSO: "); 751 if (ports[port_id].dev_conf.txmode.offloads & 752 DEV_TX_OFFLOAD_UDP_TNL_TSO) 753 printf("on\n"); 754 else 755 printf("off\n"); 756 } 757 } 758 759 int 760 port_id_is_invalid(portid_t port_id, enum print_warning warning) 761 { 762 uint16_t pid; 763 764 if (port_id == (portid_t)RTE_PORT_ALL) 765 return 0; 766 767 RTE_ETH_FOREACH_DEV(pid) 768 if (port_id == pid) 769 return 0; 770 771 if (warning == ENABLED_WARN) 772 printf("Invalid port %d\n", port_id); 773 774 return 1; 775 } 776 777 static int 778 vlan_id_is_invalid(uint16_t vlan_id) 779 { 780 if (vlan_id < 4096) 781 return 0; 782 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 783 return 1; 784 } 785 786 static int 787 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 788 { 789 const struct rte_pci_device *pci_dev; 790 const struct rte_bus *bus; 791 uint64_t pci_len; 792 793 if (reg_off & 0x3) { 794 printf("Port register offset 0x%X not aligned on a 4-byte " 795 "boundary\n", 796 (unsigned)reg_off); 797 return 1; 798 } 799 800 if (!ports[port_id].dev_info.device) { 801 printf("Invalid device\n"); 802 return 0; 803 } 804 805 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 806 if (bus && !strcmp(bus->name, "pci")) { 807 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 808 } else { 809 printf("Not a PCI device\n"); 810 return 1; 811 } 812 813 pci_len = pci_dev->mem_resource[0].len; 814 if (reg_off >= pci_len) { 815 printf("Port %d: register offset %u (0x%X) out of port PCI " 816 "resource (length=%"PRIu64")\n", 817 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 818 return 1; 819 } 820 return 0; 821 } 822 823 static int 824 reg_bit_pos_is_invalid(uint8_t bit_pos) 825 { 826 if (bit_pos <= 31) 827 return 0; 828 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 829 return 1; 830 } 

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
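
/*
 * Worked example of the bit-field arithmetic used by
 * port_reg_bit_field_display() above and port_reg_bit_field_set() below,
 * for bits [l_bit, h_bit] = [4, 7]:
 *
 *	uint32_t reg_v = 0xABCD;
 *	uint32_t mask = (1 << (7 - 4 + 1)) - 1;    // 0xF
 *	uint32_t field = (reg_v >> 4) & mask;      // 0xC
 *	// writing back: reg_v = (reg_v & ~(mask << 4)) | (value << 4);
 */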

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t l_bit;
	uint8_t h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}

/* Generic flow management functions. */

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
static const struct {
	const char *name;
	size_t size;
} flow_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
};
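
/*
 * For reference, MK_FLOW_ITEM relies on designated array initializers and
 * the # stringizing operator, so an entry such as
 * MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)) expands to:
 *
 *	[RTE_FLOW_ITEM_TYPE_ETH] = {
 *		.name = "ETH",
 *		.size = sizeof(struct rte_flow_item_eth),
 *	}
 *
 * which keeps flow_item[] directly indexable by item type.
 */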

/** Pattern item specification types. */
enum item_spec_type {
	ITEM_SPEC,
	ITEM_LAST,
	ITEM_MASK,
};

/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
		    enum item_spec_type type)
{
	size_t size = 0;
	const void *item_spec =
		type == ITEM_SPEC ? item->spec :
		type == ITEM_LAST ? item->last :
		type == ITEM_MASK ? item->mask :
		NULL;

	if (!item_spec)
		goto empty;
	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t off;

	case RTE_FLOW_ITEM_TYPE_RAW:
		src.raw = item_spec;
		dst.raw = buf;
		off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
				     sizeof(*src.raw->pattern));
		size = off + src.raw->length * sizeof(*src.raw->pattern);
		if (dst.raw) {
			memcpy(dst.raw, src.raw, sizeof(*src.raw));
			dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
						  src.raw->pattern,
						  size - off);
		}
		break;
	default:
		size = flow_item[item->type].size;
		if (buf)
			memcpy(buf, item_spec, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}
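
/*
 * Sizing example for flow_item_spec_copy(): a RAW spec is stored as the
 * rte_flow_item_raw header followed by the pattern bytes, and the result
 * is rounded up to a sizeof(double) boundary so the next copied object
 * stays aligned. For a RAW spec with length == 5:
 *
 *	size_t off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
 *				    sizeof(uint8_t));
 *	size_t size = off + 5;                       // header + 5 bytes
 *	size = RTE_ALIGN_CEIL(size, sizeof(double)); // returned value
 */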

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct {
	const char *name;
	size_t size;
} flow_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
};

/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
	size_t size = 0;

	if (!action->conf)
		goto empty;
	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} src;
		union {
			struct rte_flow_action_rss *rss;
		} dst;
		size_t off;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		off = 0;
		if (dst.rss)
			*dst.rss = (struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			};
		off += sizeof(*src.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->key) * src.rss->key_len;
			if (dst.rss)
				dst.rss->key = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, size);
			off += size;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (dst.rss)
				dst.rss->queue = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, size);
			off += size;
		}
		size = off;
		break;
	default:
		size = flow_action[action->type].size;
		if (buf)
			memcpy(buf, action->conf, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}
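
/*
 * flow_item_spec_copy() and flow_action_conf_copy() turn into pure size
 * computations when given a NULL destination; port_flow_new() below
 * exploits this by running the same code twice, first to size the
 * allocation and then to fill it:
 *
 *	size_t need = flow_action_conf_copy(NULL, action); // pass 1: size
 *	void *buf = calloc(1, need);
 *
 *	if (buf != NULL)
 *		flow_action_conf_copy(buf, action);        // pass 2: copy
 */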

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	struct port_flow *pf = NULL;
	size_t tmp;
	size_t off1 = 0;
	size_t off2 = 0;
	int err = ENOTSUP;

store:
	item = pattern;
	if (pf)
		pf->pattern = (void *)&pf->data[off1];
	do {
		struct rte_flow_item *dst = NULL;

		if ((unsigned int)item->type >= RTE_DIM(flow_item) ||
		    !flow_item[item->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, item, sizeof(*item));
		off1 += sizeof(*item);
		if (item->spec) {
			if (pf)
				dst->spec = pf->data + off2;
			off2 += flow_item_spec_copy
				(pf ? pf->data + off2 : NULL, item, ITEM_SPEC);
		}
		if (item->last) {
			if (pf)
				dst->last = pf->data + off2;
			off2 += flow_item_spec_copy
				(pf ? pf->data + off2 : NULL, item, ITEM_LAST);
		}
		if (item->mask) {
			if (pf)
				dst->mask = pf->data + off2;
			off2 += flow_item_spec_copy
				(pf ? pf->data + off2 : NULL, item, ITEM_MASK);
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	action = actions;
	if (pf)
		pf->actions = (void *)&pf->data[off1];
	do {
		struct rte_flow_action *dst = NULL;

		if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
		    !flow_action[action->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, action, sizeof(*action));
		off1 += sizeof(*action);
		if (action->conf) {
			if (pf)
				dst->conf = pf->data + off2;
			off2 += flow_action_conf_copy
				(pf ? pf->data + off2 : NULL, action);
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	if (pf != NULL)
		return pf;
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double));
	pf = calloc(1, tmp + off1 + off2);
	if (pf == NULL)
		err = errno;
	else {
		*pf = (const struct port_flow){
			.size = tmp + off1 + off2,
			.attr = *attr,
		};
		tmp -= offsetof(struct port_flow, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
notsup:
	rte_errno = err;
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("Caught error type %d (%s): %s%s\n",
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)");
	return -err;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}
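
/*
 * Minimal usage sketch for port_flow_validate() (hypothetical rule:
 * match every Ethernet frame ingressing port 0 and drop it):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	port_flow_validate(0, &attr, pattern, actions);
 */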

/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id;
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		return port_flow_complain(&error);
	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first\n");
			rte_flow_destroy(port_id, flow, NULL);
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	} else
		id = 0;
	pf = port_flow_new(attr, pattern, actions);
	if (!pf) {
		int err = rte_errno;

		printf("Cannot allocate flow: %s\n", rte_strerror(err));
		rte_flow_destroy(port_id, flow, NULL);
		return -err;
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		ret = port_flow_complain(&error);
		if (port_id_is_invalid(port_id, DISABLED_WARN) ||
		    port_id == (portid_t)RTE_PORT_ALL)
			return ret;
	}
	port = &ports[port_id];
	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
	} query;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
	    !flow_action[action->type].name)
		name = "unknown";
	else
		name = flow_action[action->type].name;
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		printf("Cannot query action type %d (%s)\n",
		       action->type, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action->type, name);
		break;
	}
	return 0;
}
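
/*
 * Only COUNT is queryable above. A sketch of querying rule #0 on port 0
 * (the action passed in selects what to read back, not what to match):
 *
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *
 *	port_flow_query(0, 0, &count_action); // prints hits/bytes counters
 */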

/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (pf->attr.group == group[i])
					break;
			if (i == n)
				continue;
		}
		tmp = &list;
		while (*tmp &&
		       (pf->attr.group > (*tmp)->attr.group ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority > (*tmp)->attr.priority) ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority == (*tmp)->attr.priority &&
			 pf->id > (*tmp)->id)))
			tmp = &(*tmp)->tmp;
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->pattern;
		const struct rte_flow_action *action = pf->actions;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
		       pf->id,
		       pf->attr.group,
		       pf->attr.priority,
		       pf->attr.ingress ? 'i' : '-',
		       pf->attr.egress ? 'e' : '-',
		       pf->attr.transfer ? 't' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", flow_item[item->type].name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", flow_action[action->type].name);
			++action;
		}
		printf("\n");
	}
}

/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
	       port_id,
	       set ? "now restricted" : "not restricted anymore");
	return 0;
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}
"" : " with retry", 1815 nb_pkt_per_burst); 1816 1817 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1818 printf(" packet len=%u - nb packet segments=%d\n", 1819 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1820 1821 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1822 nb_fwd_lcores, nb_fwd_ports); 1823 1824 RTE_ETH_FOREACH_DEV(pid) { 1825 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1826 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1827 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1828 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1829 1830 /* per port config */ 1831 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1832 (unsigned int)pid, nb_rxq, nb_txq); 1833 1834 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1835 ports[pid].dev_conf.rxmode.offloads, 1836 ports[pid].dev_conf.txmode.offloads); 1837 1838 /* per rx queue config only for first queue to be less verbose */ 1839 for (qid = 0; qid < 1; qid++) { 1840 printf(" RX queue: %d\n", qid); 1841 printf(" RX desc=%d - RX free threshold=%d\n", 1842 nb_rx_desc[qid], rx_conf[qid].rx_free_thresh); 1843 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1844 " wthresh=%d\n", 1845 rx_conf[qid].rx_thresh.pthresh, 1846 rx_conf[qid].rx_thresh.hthresh, 1847 rx_conf[qid].rx_thresh.wthresh); 1848 printf(" RX Offloads=0x%"PRIx64"\n", 1849 rx_conf[qid].offloads); 1850 } 1851 1852 /* per tx queue config only for first queue to be less verbose */ 1853 for (qid = 0; qid < 1; qid++) { 1854 printf(" TX queue: %d\n", qid); 1855 printf(" TX desc=%d - TX free threshold=%d\n", 1856 nb_tx_desc[qid], tx_conf[qid].tx_free_thresh); 1857 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1858 " wthresh=%d\n", 1859 tx_conf[qid].tx_thresh.pthresh, 1860 tx_conf[qid].tx_thresh.hthresh, 1861 tx_conf[qid].tx_thresh.wthresh); 1862 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1863 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1864 } 1865 } 1866 } 1867 1868 void 1869 port_rss_reta_info(portid_t port_id, 1870 struct rte_eth_rss_reta_entry64 *reta_conf, 1871 uint16_t nb_entries) 1872 { 1873 uint16_t i, idx, shift; 1874 int ret; 1875 1876 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1877 return; 1878 1879 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1880 if (ret != 0) { 1881 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1882 return; 1883 } 1884 1885 for (i = 0; i < nb_entries; i++) { 1886 idx = i / RTE_RETA_GROUP_SIZE; 1887 shift = i % RTE_RETA_GROUP_SIZE; 1888 if (!(reta_conf[idx].mask & (1ULL << shift))) 1889 continue; 1890 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1891 i, reta_conf[idx].reta[shift]); 1892 } 1893 } 1894 1895 /* 1896 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1897 * key of the port. 
/*
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
 * key of the port.
 */
void
port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
{
    struct rte_eth_rss_conf rss_conf;
    uint8_t rss_key[RSS_HASH_KEY_LENGTH];
    uint64_t rss_hf;
    uint8_t i;
    int diag;
    struct rte_eth_dev_info dev_info;
    uint8_t hash_key_size;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    memset(&dev_info, 0, sizeof(dev_info));
    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.hash_key_size > 0 &&
        dev_info.hash_key_size <= sizeof(rss_key)) {
        hash_key_size = dev_info.hash_key_size;
    } else {
        printf("dev_info did not provide a valid hash key size\n");
        return;
    }

    rss_conf.rss_hf = 0;
    for (i = 0; rss_type_table[i].str; i++) {
        if (!strcmp(rss_info, rss_type_table[i].str))
            rss_conf.rss_hf = rss_type_table[i].rss_type;
    }

    /* Get RSS hash key if asked to display it */
    rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
    rss_conf.rss_key_len = hash_key_size;
    diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
    if (diag != 0) {
        switch (diag) {
        case -ENODEV:
            printf("port index %d invalid\n", port_id);
            break;
        case -ENOTSUP:
            printf("operation not supported by device\n");
            break;
        default:
            printf("operation failed - diag=%d\n", diag);
            break;
        }
        return;
    }
    rss_hf = rss_conf.rss_hf;
    if (rss_hf == 0) {
        printf("RSS disabled\n");
        return;
    }
    printf("RSS functions:\n ");
    for (i = 0; rss_type_table[i].str; i++) {
        if (rss_hf & rss_type_table[i].rss_type)
            printf("%s ", rss_type_table[i].str);
    }
    printf("\n");
    if (!show_rss_key)
        return;
    printf("RSS key:\n");
    for (i = 0; i < hash_key_size; i++)
        printf("%02X", rss_key[i]);
    printf("\n");
}

void
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
                         unsigned int hash_key_len)
{
    struct rte_eth_rss_conf rss_conf;
    int diag;
    unsigned int i;

    rss_conf.rss_key = NULL;
    rss_conf.rss_key_len = hash_key_len;
    rss_conf.rss_hf = 0;
    for (i = 0; rss_type_table[i].str; i++) {
        if (!strcmp(rss_type_table[i].str, rss_type))
            rss_conf.rss_hf = rss_type_table[i].rss_type;
    }
    diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
    if (diag == 0) {
        rss_conf.rss_key = hash_key;
        diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
    }
    if (diag == 0)
        return;

    switch (diag) {
    case -ENODEV:
        printf("port index %d invalid\n", port_id);
        break;
    case -ENOTSUP:
        printf("operation not supported by device\n");
        break;
    default:
        printf("operation failed - diag=%d\n", diag);
        break;
    }
}

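/*
 * Usage sketch for port_rss_hash_key_update() above (illustrative only;
 * the 40-byte key length matches many Intel NICs but the real length
 * must come from dev_info.hash_key_size, and the toy key below is an
 * assumption, not a recommended value):
 *
 *     uint8_t key[40];
 *
 *     memset(key, 0x6d, sizeof(key)); // toy key; use a proper random key
 *     port_rss_hash_key_update(0, "ipv4-tcp", key, sizeof(key));
 */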
/*
 * Setup forwarding configuration for each logical core.
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
    streamid_t nb_fs_per_lcore;
    streamid_t nb_fs;
    streamid_t sm_id;
    lcoreid_t nb_extra;
    lcoreid_t nb_fc;
    lcoreid_t nb_lc;
    lcoreid_t lc_id;

    nb_fs = cfg->nb_fwd_streams;
    nb_fc = cfg->nb_fwd_lcores;
    if (nb_fs <= nb_fc) {
        nb_fs_per_lcore = 1;
        nb_extra = 0;
    } else {
        nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
        nb_extra = (lcoreid_t) (nb_fs % nb_fc);
    }

    nb_lc = (lcoreid_t) (nb_fc - nb_extra);
    sm_id = 0;
    for (lc_id = 0; lc_id < nb_lc; lc_id++) {
        fwd_lcores[lc_id]->stream_idx = sm_id;
        fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
        sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
    }

    /*
     * Assign extra remaining streams, if any.
     */
    nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
    for (lc_id = 0; lc_id < nb_extra; lc_id++) {
        fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
        fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
        sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
    }
}

static portid_t
fwd_topology_tx_port_get(portid_t rxp)
{
    static int warning_once = 1;

    RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);

    switch (port_topology) {
    default:
    case PORT_TOPOLOGY_PAIRED:
        if ((rxp & 0x1) == 0) {
            if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
                return rxp + 1;
            if (warning_once) {
                printf("\nWarning! port-topology=paired"
                       " with an odd number of forwarding"
                       " ports; the last port will pair"
                       " with itself.\n\n");
                warning_once = 0;
            }
            return rxp;
        }
        return rxp - 1;
    case PORT_TOPOLOGY_CHAINED:
        return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
    case PORT_TOPOLOGY_LOOP:
        return rxp;
    }
}

static void
simple_fwd_config_setup(void)
{
    portid_t i;

    cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
    cur_fwd_config.nb_fwd_streams =
        (streamid_t) cur_fwd_config.nb_fwd_ports;

    /* reinitialize forwarding streams */
    init_fwd_streams();

    /*
     * In the simple forwarding test, the number of forwarding cores
     * must be less than or equal to the number of forwarding ports.
     */
    cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
    if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
        cur_fwd_config.nb_fwd_lcores =
            (lcoreid_t) cur_fwd_config.nb_fwd_ports;
    setup_fwd_config_of_each_lcore(&cur_fwd_config);

    for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
        fwd_streams[i]->rx_port = fwd_ports_ids[i];
        fwd_streams[i]->rx_queue = 0;
        fwd_streams[i]->tx_port =
            fwd_ports_ids[fwd_topology_tx_port_get(i)];
        fwd_streams[i]->tx_queue = 0;
        fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
        fwd_streams[i]->retry_enabled = retry_enabled;
    }
}

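/*
 * Worked example for setup_fwd_config_of_each_lcore() above: with
 * nb_fwd_streams = 10 and nb_fwd_lcores = 4, nb_fs_per_lcore = 2 and
 * nb_extra = 2, so lcores 0-1 get the stream slices {0,1} and {2,3},
 * while lcores 2-3 get the three-stream slices {4,5,6} and {7,8,9}.
 * Likewise, with port-topology=paired and 4 ports,
 * simple_fwd_config_setup() builds the stream map
 * P0->P1, P1->P0, P2->P3, P3->P2.
 */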
/**
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of a RX queue to poll on a RX port for input
 * messages, associated with a TX queue of a TX port to which the forwarded
 * packets are sent. For example, with 2 forwarding ports and nb_q = 4
 * queues per port, 8 streams are created and stream i polls queue (i % 4)
 * of port (i / 4).
 */
static void
rss_fwd_config_setup(void)
{
    portid_t rxp;
    portid_t txp;
    queueid_t rxq;
    queueid_t nb_q;
    streamid_t sm_id;

    nb_q = nb_rxq;
    if (nb_q > nb_txq)
        nb_q = nb_txq;
    cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
    cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
    cur_fwd_config.nb_fwd_streams =
        (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

    if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
        cur_fwd_config.nb_fwd_lcores =
            (lcoreid_t)cur_fwd_config.nb_fwd_streams;

    /* reinitialize forwarding streams */
    init_fwd_streams();

    setup_fwd_config_of_each_lcore(&cur_fwd_config);
    rxp = 0; rxq = 0;
    for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
        struct fwd_stream *fs;

        fs = fwd_streams[sm_id];
        txp = fwd_topology_tx_port_get(rxp);
        fs->rx_port = fwd_ports_ids[rxp];
        fs->rx_queue = rxq;
        fs->tx_port = fwd_ports_ids[txp];
        fs->tx_queue = rxq;
        fs->peer_addr = fs->tx_port;
        fs->retry_enabled = retry_enabled;
        rxq = (queueid_t) (rxq + 1);
        if (rxq < nb_q)
            continue;
        /*
         * rxq == nb_q:
         * Restart from RX queue 0 on the next RX port.
         */
        rxq = 0;
        rxp++;
    }
}

/**
 * For the DCB forwarding test, each core is assigned to one traffic class
 * at a time.
 *
 * Each core is assigned multiple streams, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port to which forwarded packets are sent. All RX and
 * TX queues of a stream map to the same traffic class.
 * If VMDQ and DCB co-exist, the streams of a given traffic class in all
 * VMDQ pools share the same core.
 */
static void
dcb_fwd_config_setup(void)
{
    struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
    portid_t txp, rxp = 0;
    queueid_t txq, rxq = 0;
    lcoreid_t lc_id;
    uint16_t nb_rx_queue, nb_tx_queue;
    uint16_t i, j, k, sm_id = 0;
    uint8_t tc = 0;

    cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
    cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
    cur_fwd_config.nb_fwd_streams =
        (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

    /* reinitialize forwarding streams */
    init_fwd_streams();
    sm_id = 0;
    txp = 1;
    /* get the DCB info on the first RX and TX ports */
    (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
    (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

    for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
        fwd_lcores[lc_id]->stream_nb = 0;
        fwd_lcores[lc_id]->stream_idx = sm_id;
        for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
            /* if the nb_queue is zero, this tc is
             * not enabled on the pool
             */
            if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
                break;
            k = fwd_lcores[lc_id]->stream_nb +
                fwd_lcores[lc_id]->stream_idx;
            rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
            txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
            /* the RX queue count comes from the RX port's DCB info */
            nb_rx_queue = rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
            nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
            for (j = 0; j < nb_rx_queue; j++) {
                struct fwd_stream *fs;

                fs = fwd_streams[k + j];
                fs->rx_port = fwd_ports_ids[rxp];
                fs->rx_queue = rxq + j;
                fs->tx_port = fwd_ports_ids[txp];
                fs->tx_queue = txq + j % nb_tx_queue;
                fs->peer_addr = fs->tx_port;
                fs->retry_enabled = retry_enabled;
            }
            fwd_lcores[lc_id]->stream_nb +=
                rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
        }
        sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

        tc++;
        if (tc < rxp_dcb_info.nb_tcs)
            continue;
        /* Restart from TC 0 on the next RX port */
        tc = 0;
        if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
            rxp = (portid_t)
                (rxp + ((nb_ports >> 1) / nb_fwd_ports));
        else
            rxp++;
        if (rxp >= nb_fwd_ports)
            return;
        /* get the DCB information on the next RX and TX ports */
        if ((rxp & 0x1) == 0)
            txp = (portid_t) (rxp + 1);
        else
            txp = (portid_t) (rxp - 1);
        rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
        rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
    }
}

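/*
 * Worked example for dcb_fwd_config_setup() above (assuming two paired
 * ports with identical DCB config, 4 TCs and 2 RX/TX queues per TC):
 * lcore 0 takes TC 0 and gets one stream per (pool, queue) pair of that
 * TC, lcore 1 takes TC 1, and so on; once TC 3 has been assigned, the
 * setup moves to the next RX port and restarts from TC 0.
 */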
static void
icmp_echo_config_setup(void)
{
    portid_t rxp;
    queueid_t rxq;
    lcoreid_t lc_id;
    uint16_t sm_id;

    if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
        cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
            (nb_txq * nb_fwd_ports);
    else
        cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
    cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
    cur_fwd_config.nb_fwd_streams =
        (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
    if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
        cur_fwd_config.nb_fwd_lcores =
            (lcoreid_t)cur_fwd_config.nb_fwd_streams;
    if (verbose_level > 0) {
        printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
               __func__,
               cur_fwd_config.nb_fwd_lcores,
               cur_fwd_config.nb_fwd_ports,
               cur_fwd_config.nb_fwd_streams);
    }

    /* reinitialize forwarding streams */
    init_fwd_streams();
    setup_fwd_config_of_each_lcore(&cur_fwd_config);
    rxp = 0; rxq = 0;
    for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
        if (verbose_level > 0)
            printf(" core=%d:\n", lc_id);
        for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
            struct fwd_stream *fs;

            fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
            fs->rx_port = fwd_ports_ids[rxp];
            fs->rx_queue = rxq;
            fs->tx_port = fs->rx_port;
            fs->tx_queue = rxq;
            fs->peer_addr = fs->tx_port;
            fs->retry_enabled = retry_enabled;
            if (verbose_level > 0)
                printf(" stream=%d port=%d rxq=%d txq=%d\n",
                       sm_id, fs->rx_port, fs->rx_queue,
                       fs->tx_queue);
            rxq = (queueid_t) (rxq + 1);
            if (rxq == nb_rxq) {
                rxq = 0;
                rxp = (portid_t) (rxp + 1);
            }
        }
    }
}

void
fwd_config_setup(void)
{
    cur_fwd_config.fwd_eng = cur_fwd_eng;
    if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
        icmp_echo_config_setup();
        return;
    }
    if ((nb_rxq > 1) && (nb_txq > 1)) {
        if (dcb_config)
            dcb_fwd_config_setup();
        else
            rss_fwd_config_setup();
    } else {
        simple_fwd_config_setup();
    }
}

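/*
 * Summary of the dispatch performed by fwd_config_setup() above:
 *
 *     forwarding mode   nb_rxq/nb_txq   dcb_config   setup routine
 *     icmpecho          any             any          icmp_echo_config_setup
 *     any other         both > 1        set          dcb_fwd_config_setup
 *     any other         both > 1        unset        rss_fwd_config_setup
 *     any other         otherwise       -            simple_fwd_config_setup
 */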
"enabled" : "disabled"); 2337 2338 if (retry_enabled) 2339 printf("TX retry num: %u, delay between TX retries: %uus\n", 2340 burst_tx_retry_num, burst_tx_delay_time); 2341 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2342 printf("Logical Core %u (socket %u) forwards packets on " 2343 "%d streams:", 2344 fwd_lcores_cpuids[lc_id], 2345 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2346 fwd_lcores[lc_id]->stream_nb); 2347 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2348 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2349 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2350 "P=%d/Q=%d (socket %u) ", 2351 fs->rx_port, fs->rx_queue, 2352 ports[fs->rx_port].socket_id, 2353 fs->tx_port, fs->tx_queue, 2354 ports[fs->tx_port].socket_id); 2355 print_ethaddr("peer=", 2356 &peer_eth_addrs[fs->peer_addr]); 2357 } 2358 printf("\n"); 2359 } 2360 printf("\n"); 2361 } 2362 2363 void 2364 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2365 { 2366 uint8_t c, new_peer_addr[6]; 2367 if (!rte_eth_dev_is_valid_port(port_id)) { 2368 printf("Error: Invalid port number %i\n", port_id); 2369 return; 2370 } 2371 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2372 sizeof(new_peer_addr)) < 0) { 2373 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2374 return; 2375 } 2376 for (c = 0; c < 6; c++) 2377 peer_eth_addrs[port_id].addr_bytes[c] = 2378 new_peer_addr[c]; 2379 } 2380 2381 int 2382 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2383 { 2384 unsigned int i; 2385 unsigned int lcore_cpuid; 2386 int record_now; 2387 2388 record_now = 0; 2389 again: 2390 for (i = 0; i < nb_lc; i++) { 2391 lcore_cpuid = lcorelist[i]; 2392 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2393 printf("lcore %u not enabled\n", lcore_cpuid); 2394 return -1; 2395 } 2396 if (lcore_cpuid == rte_get_master_lcore()) { 2397 printf("lcore %u cannot be masked on for running " 2398 "packet forwarding, which is the master lcore " 2399 "and reserved for command line parsing only\n", 2400 lcore_cpuid); 2401 return -1; 2402 } 2403 if (record_now) 2404 fwd_lcores_cpuids[i] = lcore_cpuid; 2405 } 2406 if (record_now == 0) { 2407 record_now = 1; 2408 goto again; 2409 } 2410 nb_cfg_lcores = (lcoreid_t) nb_lc; 2411 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2412 printf("previous number of forwarding cores %u - changed to " 2413 "number of configured cores %u\n", 2414 (unsigned int) nb_fwd_lcores, nb_lc); 2415 nb_fwd_lcores = (lcoreid_t) nb_lc; 2416 } 2417 2418 return 0; 2419 } 2420 2421 int 2422 set_fwd_lcores_mask(uint64_t lcoremask) 2423 { 2424 unsigned int lcorelist[64]; 2425 unsigned int nb_lc; 2426 unsigned int i; 2427 2428 if (lcoremask == 0) { 2429 printf("Invalid NULL mask of cores\n"); 2430 return -1; 2431 } 2432 nb_lc = 0; 2433 for (i = 0; i < 64; i++) { 2434 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2435 continue; 2436 lcorelist[nb_lc++] = i; 2437 } 2438 return set_fwd_lcores_list(lcorelist, nb_lc); 2439 } 2440 2441 void 2442 set_fwd_lcores_number(uint16_t nb_lc) 2443 { 2444 if (nb_lc > nb_cfg_lcores) { 2445 printf("nb fwd cores %u > %u (max. 
void
set_fwd_lcores_number(uint16_t nb_lc)
{
    if (nb_lc > nb_cfg_lcores) {
        printf("nb fwd cores %u > %u (max. number of configured "
               "lcores) - ignored\n",
               (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
        return;
    }
    nb_fwd_lcores = (lcoreid_t) nb_lc;
    printf("Number of forwarding cores set to %u\n",
           (unsigned int) nb_fwd_lcores);
}

void
set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
{
    unsigned int i;
    portid_t port_id;
    int record_now;

    record_now = 0;
again:
    for (i = 0; i < nb_pt; i++) {
        port_id = (portid_t) portlist[i];
        if (port_id_is_invalid(port_id, ENABLED_WARN))
            return;
        if (record_now)
            fwd_ports_ids[i] = port_id;
    }
    if (record_now == 0) {
        record_now = 1;
        goto again;
    }
    nb_cfg_ports = (portid_t) nb_pt;
    if (nb_fwd_ports != (portid_t) nb_pt) {
        printf("previous number of forwarding ports %u - changed to "
               "number of configured ports %u\n",
               (unsigned int) nb_fwd_ports, nb_pt);
        nb_fwd_ports = (portid_t) nb_pt;
    }
}

void
set_fwd_ports_mask(uint64_t portmask)
{
    unsigned int portlist[64];
    unsigned int nb_pt;
    unsigned int i;

    if (portmask == 0) {
        printf("Invalid NULL mask of ports\n");
        return;
    }
    nb_pt = 0;
    RTE_ETH_FOREACH_DEV(i) {
        if (!((uint64_t)(1ULL << i) & portmask))
            continue;
        portlist[nb_pt++] = i;
    }
    set_fwd_ports_list(portlist, nb_pt);
}

void
set_fwd_ports_number(uint16_t nb_pt)
{
    if (nb_pt > nb_cfg_ports) {
        printf("nb fwd ports %u > %u (number of configured "
               "ports) - ignored\n",
               (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
        return;
    }
    nb_fwd_ports = (portid_t) nb_pt;
    printf("Number of forwarding ports set to %u\n",
           (unsigned int) nb_fwd_ports);
}

int
port_is_forwarding(portid_t port_id)
{
    unsigned int i;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return -1;

    for (i = 0; i < nb_fwd_ports; i++) {
        if (fwd_ports_ids[i] == port_id)
            return 1;
    }

    return 0;
}

void
set_nb_pkt_per_burst(uint16_t nb)
{
    if (nb > MAX_PKT_BURST) {
        printf("nb pkt per burst: %u > %u (maximum number of "
               "packets per burst) - ignored\n",
               (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
        return;
    }
    nb_pkt_per_burst = nb;
    printf("Number of packets per burst set to %u\n",
           (unsigned int) nb_pkt_per_burst);
}

static const char *
tx_split_get_name(enum tx_pkt_split split)
{
    uint32_t i;

    for (i = 0; i != RTE_DIM(tx_split_name); i++) {
        if (tx_split_name[i].split == split)
            return tx_split_name[i].name;
    }
    return NULL;
}

void
set_tx_pkt_split(const char *name)
{
    uint32_t i;

    for (i = 0; i != RTE_DIM(tx_split_name); i++) {
        if (strcmp(tx_split_name[i].name, name) == 0) {
            tx_pkt_split = tx_split_name[i].split;
            return;
        }
    }
    printf("unknown value: \"%s\"\n", name);
}

void
show_tx_pkt_segments(void)
{
    uint32_t i, n;
    const char *split;

    n = tx_pkt_nb_segs;
    split = tx_split_get_name(tx_pkt_split);

    printf("Number of segments: %u\n", n);
    printf("Segment sizes: ");
    for (i = 0; i != n - 1; i++)
        printf("%hu,", tx_pkt_seg_lengths[i]);
    printf("%hu\n", tx_pkt_seg_lengths[i]);
    printf("Split packet: %s\n", split);
}

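/*
 * Usage sketch for the TX segment configuration: in this testpmd version
 * the CLI command "set txpkts 64,128" reaches set_tx_pkt_segments()
 * below with seg_lengths = {64, 128} and nb_segs = 2, giving a 192-byte
 * TX packet split over two mbufs; "show config txpkts" then prints it
 * via show_tx_pkt_segments() above.
 */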
void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
    uint16_t tx_pkt_len;
    unsigned i;

    if (nb_segs >= (unsigned) nb_txd) {
        printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
               nb_segs, (unsigned int) nb_txd);
        return;
    }

    /*
     * Check that each segment length is less than or equal to the
     * mbuf data size.
     * Check also that the total packet length is greater than or equal
     * to the size of an empty UDP/IP packet
     * (sizeof(struct ether_hdr) + 20 + 8).
     */
    tx_pkt_len = 0;
    for (i = 0; i < nb_segs; i++) {
        if (seg_lengths[i] > (unsigned) mbuf_data_size) {
            printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
                   i, seg_lengths[i], (unsigned) mbuf_data_size);
            return;
        }
        tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
    }
    if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
        printf("total packet length=%u < %d - give up\n",
               (unsigned) tx_pkt_len,
               (int)(sizeof(struct ether_hdr) + 20 + 8));
        return;
    }

    for (i = 0; i < nb_segs; i++)
        tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

    tx_pkt_length = tx_pkt_len;
    tx_pkt_nb_segs = (uint8_t) nb_segs;
}

void
setup_gro(const char *onoff, portid_t port_id)
{
    if (!rte_eth_dev_is_valid_port(port_id)) {
        printf("invalid port id %u\n", port_id);
        return;
    }
    if (test_done == 0) {
        printf("Before enabling/disabling GRO,"
               " please stop forwarding first\n");
        return;
    }
    if (strcmp(onoff, "on") == 0) {
        if (gro_ports[port_id].enable != 0) {
            printf("GRO is already enabled on port %u. Please"
                   " disable it first\n", port_id);
            return;
        }
        if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
            gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
            gro_ports[port_id].param.max_flow_num =
                GRO_DEFAULT_FLOW_NUM;
            gro_ports[port_id].param.max_item_per_flow =
                GRO_DEFAULT_ITEM_NUM_PER_FLOW;
        }
        gro_ports[port_id].enable = 1;
    } else {
        if (gro_ports[port_id].enable == 0) {
            printf("GRO is already disabled on port %u\n", port_id);
            return;
        }
        gro_ports[port_id].enable = 0;
    }
}

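/*
 * Usage sketch for the GRO controls in this file (CLI syntax as of this
 * testpmd version; run "help ports" on your build to confirm):
 *
 *     testpmd> stop
 *     testpmd> set port 0 gro on
 *     testpmd> set gro flush 2
 *     testpmd> show port 0 gro
 *     testpmd> start
 */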
void
setup_gro_flush_cycles(uint8_t cycles)
{
    if (test_done == 0) {
        printf("Before changing the flush interval for GRO,"
               " please stop forwarding first.\n");
        return;
    }

    if (cycles > GRO_MAX_FLUSH_CYCLES ||
        cycles < GRO_DEFAULT_FLUSH_CYCLES) {
        printf("The flushing cycle must be in the range"
               " of 1 to %u. Reverting to the default"
               " value %u.\n",
               GRO_MAX_FLUSH_CYCLES,
               GRO_DEFAULT_FLUSH_CYCLES);
        cycles = GRO_DEFAULT_FLUSH_CYCLES;
    }

    gro_flush_cycles = cycles;
}

void
show_gro(portid_t port_id)
{
    struct rte_gro_param *param;
    uint32_t max_pkts_num;

    if (!rte_eth_dev_is_valid_port(port_id)) {
        printf("Invalid port id %u.\n", port_id);
        return;
    }

    param = &gro_ports[port_id].param;

    if (gro_ports[port_id].enable) {
        printf("GRO type: TCP/IPv4\n");
        if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
            max_pkts_num = param->max_flow_num *
                param->max_item_per_flow;
        } else {
            max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
        }
        printf("Max number of packets to perform GRO: %u\n",
               max_pkts_num);
        printf("Flushing cycles: %u\n", gro_flush_cycles);
    } else {
        printf("GRO is not enabled on port %u.\n", port_id);
    }
}

void
setup_gso(const char *mode, portid_t port_id)
{
    if (!rte_eth_dev_is_valid_port(port_id)) {
        printf("invalid port id %u\n", port_id);
        return;
    }
    if (strcmp(mode, "on") == 0) {
        if (test_done == 0) {
            printf("before enabling GSO,"
                   " please stop forwarding first\n");
            return;
        }
        gso_ports[port_id].enable = 1;
    } else if (strcmp(mode, "off") == 0) {
        if (test_done == 0) {
            printf("before disabling GSO,"
                   " please stop forwarding first\n");
            return;
        }
        gso_ports[port_id].enable = 0;
    }
}

char *
list_pkt_forwarding_modes(void)
{
    static char fwd_modes[128] = "";
    const char *separator = "|";
    struct fwd_engine *fwd_eng;
    unsigned i = 0;

    if (strlen(fwd_modes) == 0) {
        while ((fwd_eng = fwd_engines[i++]) != NULL) {
            strncat(fwd_modes, fwd_eng->fwd_mode_name,
                    sizeof(fwd_modes) - strlen(fwd_modes) - 1);
            strncat(fwd_modes, separator,
                    sizeof(fwd_modes) - strlen(fwd_modes) - 1);
        }
        fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
    }

    return fwd_modes;
}

char *
list_pkt_forwarding_retry_modes(void)
{
    static char fwd_modes[128] = "";
    const char *separator = "|";
    struct fwd_engine *fwd_eng;
    unsigned i = 0;

    if (strlen(fwd_modes) == 0) {
        while ((fwd_eng = fwd_engines[i++]) != NULL) {
            if (fwd_eng == &rx_only_engine)
                continue;
            strncat(fwd_modes, fwd_eng->fwd_mode_name,
                    sizeof(fwd_modes) - strlen(fwd_modes) - 1);
            strncat(fwd_modes, separator,
                    sizeof(fwd_modes) - strlen(fwd_modes) - 1);
        }
        fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
    }

    return fwd_modes;
}

void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
    struct fwd_engine *fwd_eng;
    unsigned i;

    i = 0;
    while ((fwd_eng = fwd_engines[i]) != NULL) {
        if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
            printf("Set %s packet forwarding mode%s\n",
                   fwd_mode_name,
                   retry_enabled == 0 ? "" : " with retry");
            cur_fwd_eng = fwd_eng;
            return;
        }
        i++;
    }
    printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
}

void
set_verbose_level(uint16_t vb_level)
{
    printf("Change verbose level from %u to %u\n",
           (unsigned int) verbose_level, (unsigned int) vb_level);
    verbose_level = vb_level;
}

void
vlan_extend_set(portid_t port_id, int on)
{
    int diag;
    int vlan_offload;
    uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

    if (on) {
        vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
        port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
    } else {
        vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
        port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
    }

    diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
    if (diag < 0)
        printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
               "diag=%d\n", port_id, on, diag);
    ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
    int diag;
    int vlan_offload;
    uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

    if (on) {
        vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
        port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
    } else {
        vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
        port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
    }

    diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
    if (diag < 0)
        printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
               "diag=%d\n", port_id, on, diag);
    ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
    int diag;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
    if (diag < 0)
        printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
               "diag=%d\n", port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
    int diag;
    int vlan_offload;
    uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

    if (on) {
        vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
        port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
    } else {
        vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
        port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
    }

    diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
    if (diag < 0)
        printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
               "diag=%d\n", port_id, on, diag);
    ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
}

int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
    int diag;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return 1;
    if (vlan_id_is_invalid(vlan_id))
        return 1;
    diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
    if (diag == 0)
        return 0;
    printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
           "diag=%d\n",
           port_id, vlan_id, on, diag);
    return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
    uint16_t vlan_id;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
        if (rx_vft_set(port_id, vlan_id, on))
            break;
    }
}

void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
    int diag;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
    if (diag == 0)
        return;

    printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
           "diag=%d\n",
           port_id, vlan_type, tp_id, diag);
}

void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
    int vlan_offload;
    struct rte_eth_dev_info dev_info;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (vlan_id_is_invalid(vlan_id))
        return;

    vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
    if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
        printf("Error: cannot set a single VLAN insert while QinQ is enabled\n");
        return;
    }
    rte_eth_dev_info_get(port_id, &dev_info);
    if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
        printf("Error: vlan insert is not supported by port %d\n",
               port_id);
        return;
    }

    tx_vlan_reset(port_id);
    ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
    ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
    int vlan_offload;
    struct rte_eth_dev_info dev_info;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (vlan_id_is_invalid(vlan_id))
        return;
    if (vlan_id_is_invalid(vlan_id_outer))
        return;

    vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
    if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
        printf("Error: QinQ insert requires the QinQ (extend) offload to be enabled first\n");
        return;
    }
    rte_eth_dev_info_get(port_id, &dev_info);
    if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
        printf("Error: qinq insert not supported by port %d\n",
               port_id);
        return;
    }

    tx_vlan_reset(port_id);
    ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT;
    ports[port_id].tx_vlan_id = vlan_id;
    ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}

void
tx_vlan_reset(portid_t port_id)
{
    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    ports[port_id].dev_conf.txmode.offloads &=
        ~(DEV_TX_OFFLOAD_VLAN_INSERT |
          DEV_TX_OFFLOAD_QINQ_INSERT);
    ports[port_id].tx_vlan_id = 0;
    ports[port_id].tx_vlan_id_outer = 0;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}

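/*
 * Usage sketch for the VLAN insertion setters above (direct calls,
 * assuming port 0 is valid and its PMD supports the offloads):
 *
 *     vlan_extend_set(0, 1);   // enable the QinQ (extend) offload
 *     tx_qinq_set(0, 5, 100);  // inner VID 5, outer VID 100
 *     ...
 *     tx_vlan_reset(0);        // clear both insert offloads
 *
 * Note the ordering constraint: tx_qinq_set() refuses to run unless the
 * extend offload is already on, while tx_vlan_set() refuses to run
 * while it is on.
 */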
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
    uint16_t i;
    uint8_t existing_mapping_found = 0;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    if (is_rx ? (rx_queue_id_is_invalid(queue_id)) :
                (tx_queue_id_is_invalid(queue_id)))
        return;

    if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
        printf("map_value not in required range 0..%d\n",
               RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
        return;
    }

    if (!is_rx) { /* then TX */
        for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
            if ((tx_queue_stats_mappings[i].port_id == port_id) &&
                (tx_queue_stats_mappings[i].queue_id == queue_id)) {
                tx_queue_stats_mappings[i].stats_counter_id = map_value;
                existing_mapping_found = 1;
                break;
            }
        }
        if (!existing_mapping_found) { /* A new additional mapping... */
            tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
            tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
            tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
            nb_tx_queue_stats_mappings++;
        }
    } else { /* RX */
        for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
            if ((rx_queue_stats_mappings[i].port_id == port_id) &&
                (rx_queue_stats_mappings[i].queue_id == queue_id)) {
                rx_queue_stats_mappings[i].stats_counter_id = map_value;
                existing_mapping_found = 1;
                break;
            }
        }
        if (!existing_mapping_found) { /* A new additional mapping... */
            rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
            rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
            rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
            nb_rx_queue_stats_mappings++;
        }
    }
}

void
set_xstats_hide_zero(uint8_t on_off)
{
    xstats_hide_zero = on_off;
}

static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
    printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

    if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
        printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
               " tunnel_id: 0x%08x",
               mask->mac_addr_byte_mask, mask->tunnel_type_mask,
               rte_be_to_cpu_32(mask->tunnel_id_mask));
    else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
        printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
               rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
               rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

        printf("\n src_port: 0x%04x, dst_port: 0x%04x",
               rte_be_to_cpu_16(mask->src_port_mask),
               rte_be_to_cpu_16(mask->dst_port_mask));

        printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
               rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
               rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
               rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
               rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

        printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
               rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
               rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
               rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
               rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
    }

    printf("\n");
}

static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
    struct rte_eth_flex_payload_cfg *cfg;
    uint32_t i, j;

    for (i = 0; i < flex_conf->nb_payloads; i++) {
        cfg = &flex_conf->flex_set[i];
        if (cfg->type == RTE_ETH_RAW_PAYLOAD)
            printf("\n RAW: ");
        else if (cfg->type == RTE_ETH_L2_PAYLOAD)
            printf("\n L2_PAYLOAD: ");
        else if (cfg->type == RTE_ETH_L3_PAYLOAD)
            printf("\n L3_PAYLOAD: ");
        else if (cfg->type == RTE_ETH_L4_PAYLOAD)
            printf("\n L4_PAYLOAD: ");
        else
            printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
        for (j = 0; j < num; j++)
            printf(" %-5u", cfg->src_offset[j]);
    }
    printf("\n");
}

static char *
flowtype_to_str(uint16_t flow_type)
{
    struct flow_type_info {
        char str[32];
        uint16_t ftype;
    };

    uint8_t i;
    static struct flow_type_info flowtype_str_table[] = {
        {"raw", RTE_ETH_FLOW_RAW},
        {"ipv4", RTE_ETH_FLOW_IPV4},
        {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
        {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
        {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
        {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
        {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
        {"ipv6", RTE_ETH_FLOW_IPV6},
        {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
        {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
        {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
        {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
        {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
        {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
        {"port", RTE_ETH_FLOW_PORT},
        {"vxlan", RTE_ETH_FLOW_VXLAN},
        {"geneve", RTE_ETH_FLOW_GENEVE},
        {"nvgre", RTE_ETH_FLOW_NVGRE},
        {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
    };

    for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
        if (flowtype_str_table[i].ftype == flow_type)
            return flowtype_str_table[i].str;
    }

    return NULL;
}

static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
    struct rte_eth_fdir_flex_mask *mask;
    uint32_t i, j;
    char *p;

    for (i = 0; i < flex_conf->nb_flexmasks; i++) {
        mask = &flex_conf->flex_mask[i];
        p = flowtype_to_str(mask->flow_type);
        printf("\n %s:\t", p ? p : "unknown");
        for (j = 0; j < num; j++)
            printf(" %02x", mask->mask[j]);
    }
    printf("\n");
}

static inline void
print_fdir_flow_type(uint32_t flow_types_mask)
{
    int i;
    char *p;

    for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
        if (!(flow_types_mask & (1 << i)))
            continue;
        p = flowtype_to_str(i);
        if (p)
            printf(" %s", p);
        else
            printf(" unknown");
    }
    printf("\n");
}

void
fdir_get_infos(portid_t port_id)
{
    struct rte_eth_fdir_stats fdir_stat;
    struct rte_eth_fdir_info fdir_info;
    int ret;

    static const char *fdir_stats_border = "########################";

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
    if (ret < 0) {
        printf("\n FDIR is not supported on port %-2d\n",
               port_id);
        return;
    }

    memset(&fdir_info, 0, sizeof(fdir_info));
    rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                            RTE_ETH_FILTER_INFO, &fdir_info);
    memset(&fdir_stat, 0, sizeof(fdir_stat));
    rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                            RTE_ETH_FILTER_STATS, &fdir_stat);
    printf("\n %s FDIR infos for port %-2d %s\n",
           fdir_stats_border, port_id, fdir_stats_border);
    printf(" MODE: ");
    if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
        printf(" PERFECT\n");
    else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
        printf(" PERFECT-MAC-VLAN\n");
    else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
        printf(" PERFECT-TUNNEL\n");
    else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
        printf(" SIGNATURE\n");
    else
        printf(" DISABLE\n");
    if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
        && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
        printf(" SUPPORTED FLOW TYPE: ");
        print_fdir_flow_type(fdir_info.flow_types_mask[0]);
    }
    printf(" FLEX PAYLOAD INFO:\n");
    printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
           " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
           " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
           fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
           fdir_info.flex_payload_unit,
           fdir_info.max_flex_payload_segment_num,
           fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
    printf(" MASK: ");
    print_fdir_mask(&fdir_info.mask);
    if (fdir_info.flex_conf.nb_payloads > 0) {
        printf(" FLEX PAYLOAD SRC OFFSET:");
        print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
    }
    if (fdir_info.flex_conf.nb_flexmasks > 0) {
        printf(" FLEX MASK CFG:");
        print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
    }
    printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
           fdir_stat.guarant_cnt, fdir_stat.best_cnt);
    printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
           fdir_info.guarant_spc, fdir_info.best_spc);
    printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
           " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
           " add: %-10"PRIu64" remove: %"PRIu64"\n"
           " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
           fdir_stat.collision, fdir_stat.free,
           fdir_stat.maxhash, fdir_stat.maxlen,
           fdir_stat.add, fdir_stat.remove,
           fdir_stat.f_add, fdir_stat.f_remove);
    printf(" %s############################%s\n",
           fdir_stats_border, fdir_stats_border);
}

void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
    struct rte_port *port;
    struct rte_eth_fdir_flex_conf *flex_conf;
    int i, idx = 0;

    port = &ports[port_id];
    flex_conf = &port->dev_conf.fdir_conf.flex_conf;
    for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
        if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
            idx = i;
            break;
        }
    }
    if (i >= RTE_ETH_FLOW_MAX) {
        if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
            idx = flex_conf->nb_flexmasks;
            flex_conf->nb_flexmasks++;
        } else {
            printf("The flex mask table is full. Cannot set flex"
                   " mask for flow_type(%u).\n", cfg->flow_type);
            return;
        }
    }
    rte_memcpy(&flex_conf->flex_mask[idx],
               cfg,
               sizeof(struct rte_eth_fdir_flex_mask));
}

void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
    struct rte_port *port;
    struct rte_eth_fdir_flex_conf *flex_conf;
    int i, idx = 0;

    port = &ports[port_id];
    flex_conf = &port->dev_conf.fdir_conf.flex_conf;
    for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
        if (cfg->type == flex_conf->flex_set[i].type) {
            idx = i;
            break;
        }
    }
    if (i >= RTE_ETH_PAYLOAD_MAX) {
        if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
            idx = flex_conf->nb_payloads;
            flex_conf->nb_payloads++;
        } else {
            printf("The flex payload table is full. Cannot set"
                   " flex payload for type(%u).\n", cfg->type);
            return;
        }
    }
    rte_memcpy(&flex_conf->flex_set[idx],
               cfg,
               sizeof(struct rte_eth_flex_payload_cfg));
}

"Rx" : "Tx", port_id); 3383 RTE_SET_USED(vf); 3384 RTE_SET_USED(on); 3385 } 3386 3387 int 3388 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3389 { 3390 int diag; 3391 struct rte_eth_link link; 3392 3393 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3394 return 1; 3395 rte_eth_link_get_nowait(port_id, &link); 3396 if (rate > link.link_speed) { 3397 printf("Invalid rate value:%u bigger than link speed: %u\n", 3398 rate, link.link_speed); 3399 return 1; 3400 } 3401 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3402 if (diag == 0) 3403 return diag; 3404 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3405 port_id, diag); 3406 return diag; 3407 } 3408 3409 int 3410 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3411 { 3412 int diag = -ENOTSUP; 3413 3414 RTE_SET_USED(vf); 3415 RTE_SET_USED(rate); 3416 RTE_SET_USED(q_msk); 3417 3418 #ifdef RTE_LIBRTE_IXGBE_PMD 3419 if (diag == -ENOTSUP) 3420 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3421 q_msk); 3422 #endif 3423 #ifdef RTE_LIBRTE_BNXT_PMD 3424 if (diag == -ENOTSUP) 3425 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3426 #endif 3427 if (diag == 0) 3428 return diag; 3429 3430 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3431 port_id, diag); 3432 return diag; 3433 } 3434 3435 /* 3436 * Functions to manage the set of filtered Multicast MAC addresses. 3437 * 3438 * A pool of filtered multicast MAC addresses is associated with each port. 3439 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3440 * The address of the pool and the number of valid multicast MAC addresses 3441 * recorded in the pool are stored in the fields "mc_addr_pool" and 3442 * "mc_addr_nb" of the "rte_port" data structure. 3443 * 3444 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3445 * to be supplied a contiguous array of multicast MAC addresses. 3446 * To comply with this constraint, the set of multicast addresses recorded 3447 * into the pool are systematically compacted at the beginning of the pool. 3448 * Hence, when a multicast address is removed from the pool, all following 3449 * addresses, if any, are copied back to keep the set contiguous. 3450 */ 3451 #define MCAST_POOL_INC 32 3452 3453 static int 3454 mcast_addr_pool_extend(struct rte_port *port) 3455 { 3456 struct ether_addr *mc_pool; 3457 size_t mc_pool_size; 3458 3459 /* 3460 * If a free entry is available at the end of the pool, just 3461 * increment the number of recorded multicast addresses. 3462 */ 3463 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3464 port->mc_addr_nb++; 3465 return 0; 3466 } 3467 3468 /* 3469 * [re]allocate a pool with MCAST_POOL_INC more entries. 3470 * The previous test guarantees that port->mc_addr_nb is a multiple 3471 * of MCAST_POOL_INC. 
static int
mcast_addr_pool_extend(struct rte_port *port)
{
    struct ether_addr *mc_pool;
    size_t mc_pool_size;

    /*
     * If a free entry is available at the end of the pool, just
     * increment the number of recorded multicast addresses.
     */
    if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
        port->mc_addr_nb++;
        return 0;
    }

    /*
     * [re]allocate a pool with MCAST_POOL_INC more entries.
     * The previous test guarantees that port->mc_addr_nb is a multiple
     * of MCAST_POOL_INC.
     */
    mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
                                                MCAST_POOL_INC);
    mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
                                            mc_pool_size);
    if (mc_pool == NULL) {
        printf("allocation of pool of %u multicast addresses failed\n",
               port->mc_addr_nb + MCAST_POOL_INC);
        return -ENOMEM;
    }

    port->mc_addr_pool = mc_pool;
    port->mc_addr_nb++;
    return 0;
}

static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
    port->mc_addr_nb--;
    if (addr_idx == port->mc_addr_nb) {
        /* No need to recompact the set of multicast addresses. */
        if (port->mc_addr_nb == 0) {
            /* free the pool of multicast addresses. */
            free(port->mc_addr_pool);
            port->mc_addr_pool = NULL;
        }
        return;
    }
    memmove(&port->mc_addr_pool[addr_idx],
            &port->mc_addr_pool[addr_idx + 1],
            sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
}

static void
eth_port_multicast_addr_list_set(portid_t port_id)
{
    struct rte_port *port;
    int diag;

    port = &ports[port_id];
    diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
                                        port->mc_addr_nb);
    if (diag == 0)
        return;
    printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
           port_id, port->mc_addr_nb, -diag);
}

void
mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr)
{
    struct rte_port *port;
    uint32_t i;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    port = &ports[port_id];

    /*
     * Check that the added multicast MAC address is not already recorded
     * in the pool of multicast addresses.
     */
    for (i = 0; i < port->mc_addr_nb; i++) {
        if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
            printf("multicast address already filtered by port\n");
            return;
        }
    }

    if (mcast_addr_pool_extend(port) != 0)
        return;
    ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
    eth_port_multicast_addr_list_set(port_id);
}

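/*
 * Usage sketch (testpmd CLI, syntax as of this version):
 *
 *     testpmd> mcast_addr add 0 01:00:5E:00:00:01
 *     testpmd> mcast_addr remove 0 01:00:5E:00:00:01
 *
 * Both commands funnel into mcast_addr_add() above and
 * mcast_addr_remove() below, which re-program the whole compacted list
 * through rte_eth_dev_set_mc_addr_list().
 */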
void
mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr)
{
    struct rte_port *port;
    uint32_t i;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    port = &ports[port_id];

    /*
     * Search the pool of multicast MAC addresses for the removed address.
     */
    for (i = 0; i < port->mc_addr_nb; i++) {
        if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
            break;
    }
    if (i == port->mc_addr_nb) {
        printf("multicast address not filtered by port %d\n", port_id);
        return;
    }

    mcast_addr_pool_remove(port, i);
    eth_port_multicast_addr_list_set(port_id);
}

void
port_dcb_info_display(portid_t port_id)
{
    struct rte_eth_dcb_info dcb_info;
    uint16_t i;
    int ret;
    static const char *border = "================";

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
    if (ret) {
        printf("\n Failed to get DCB info on port %-2d\n",
               port_id);
        return;
    }
    printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border);
    printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
    printf("\n TC : ");
    for (i = 0; i < dcb_info.nb_tcs; i++)
        printf("\t%4d", i);
    printf("\n Priority : ");
    for (i = 0; i < dcb_info.nb_tcs; i++)
        printf("\t%4d", dcb_info.prio_tc[i]);
    printf("\n BW percent :");
    for (i = 0; i < dcb_info.nb_tcs; i++)
        printf("\t%4d%%", dcb_info.tc_bws[i]);
    printf("\n RXQ base : ");
    for (i = 0; i < dcb_info.nb_tcs; i++)
        printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
    printf("\n RXQ number :");
    for (i = 0; i < dcb_info.nb_tcs; i++)
        printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
    printf("\n TXQ base : ");
    for (i = 0; i < dcb_info.nb_tcs; i++)
        printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
    printf("\n TXQ number :");
    for (i = 0; i < dcb_info.nb_tcs; i++)
        printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
    printf("\n");
}

uint8_t *
open_file(const char *file_path, uint32_t *size)
{
    int fd = open(file_path, O_RDONLY);
    off_t pkg_size;
    uint8_t *buf = NULL;
    int ret = 0;
    struct stat st_buf;

    if (size)
        *size = 0;

    if (fd == -1) {
        printf("%s: Failed to open %s\n", __func__, file_path);
        return buf;
    }

    if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
        close(fd);
        printf("%s: Failed to stat %s or it is not a regular file\n",
               __func__, file_path);
        return buf;
    }

    pkg_size = st_buf.st_size;
    if (pkg_size < 0) {
        close(fd);
        printf("%s: Invalid size of file %s\n", __func__, file_path);
        return buf;
    }

    buf = (uint8_t *)malloc(pkg_size);
    if (!buf) {
        close(fd);
        printf("%s: Failed to malloc memory\n", __func__);
        return buf;
    }

    ret = read(fd, buf, pkg_size);
    if (ret < 0) {
        close(fd);
        printf("%s: File read operation failed\n", __func__);
        close_file(buf);
        return NULL;
    }

    if (size)
        *size = pkg_size;

    close(fd);

    return buf;
}

int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
    FILE *fh = fopen(file_path, "wb");

    if (fh == NULL) {
        printf("%s: Failed to open %s\n", __func__, file_path);
        return -1;
    }

    if (fwrite(buf, 1, size, fh) != size) {
        fclose(fh);
        printf("%s: File write operation failed\n", __func__);
        return -1;
    }

    fclose(fh);

    return 0;
}

int
close_file(uint8_t *buf)
{
    if (buf) {
        free((void *)buf);
        return 0;
    }

    return -1;
}

void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_LIBRTE_I40E_PMD
    uint16_t i, j;
    struct rte_pmd_i40e_queue_regions *info =
        (struct rte_pmd_i40e_queue_regions *)buf;
    static const char *queue_region_info_stats_border = "-------";

    if (!info->queue_region_number)
        printf("no queue region has been set on this port before\n");

    printf("\n %s All queue region info for port=%2d %s",
           queue_region_info_stats_border, port_id,
           queue_region_info_stats_border);
    printf("\n queue_region_number: %-14u\n",
           info->queue_region_number);

    for (i = 0; i < info->queue_region_number; i++) {
        printf("\n region_id: %-14u queue_number: %-14u "
               "queue_start_index: %-14u\n",
               info->region[i].region_id,
               info->region[i].queue_num,
               info->region[i].queue_start_index);

        printf(" user_priority_num is %-14u :",
               info->region[i].user_priority_num);
        for (j = 0; j < info->region[i].user_priority_num; j++)
            printf(" %-14u ", info->region[i].user_priority[j]);

        printf("\n flowtype_num is %-14u :",
               info->region[i].flowtype_num);
        for (j = 0; j < info->region[i].flowtype_num; j++)
            printf(" %-14u ", info->region[i].hw_flowtype[j]);
    }
#else
    RTE_SET_USED(port_id);
    RTE_SET_USED(buf);
#endif

    printf("\n\n");
}
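/*
 * Usage sketch for the file helpers above (illustrative only; "ddp.pkg"
 * is a made-up path, not one testpmd uses):
 *
 *     uint32_t size;
 *     uint8_t *pkg = open_file("ddp.pkg", &size);
 *
 *     if (pkg != NULL) {
 *         save_file("ddp.pkg.bak", pkg, size); // write a backup copy
 *         close_file(pkg);                     // free the buffer
 *     }
 */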