1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_atomic.h> 31 #include <rte_branch_prediction.h> 32 #include <rte_mempool.h> 33 #include <rte_mbuf.h> 34 #include <rte_interrupts.h> 35 #include <rte_pci.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_string_fns.h> 39 #include <rte_cycles.h> 40 #include <rte_flow.h> 41 #include <rte_errno.h> 42 #ifdef RTE_LIBRTE_IXGBE_PMD 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_LIBRTE_I40E_PMD 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_LIBRTE_BNXT_PMD 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #include <rte_gro.h> 52 #include <cmdline_parse_etheraddr.h> 53 54 #include "testpmd.h" 55 56 static char *flowtype_to_str(uint16_t flow_type); 57 58 static const struct { 59 enum tx_pkt_split split; 60 const char *name; 61 } tx_split_name[] = { 62 { 63 .split = TX_PKT_SPLIT_OFF, 64 .name = "off", 65 }, 66 { 67 .split = TX_PKT_SPLIT_ON, 68 .name = "on", 69 }, 70 { 71 .split = TX_PKT_SPLIT_RND, 72 .name = "rand", 73 }, 74 }; 75 76 const struct rss_type_info rss_type_table[] = { 77 { "ipv4", ETH_RSS_IPV4 }, 78 { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, 79 { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, 80 { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, 81 { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, 82 { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, 83 { "ipv6", ETH_RSS_IPV6 }, 84 { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, 85 { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, 86 { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, 87 { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, 88 { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, 89 { "l2-payload", ETH_RSS_L2_PAYLOAD }, 90 { "ipv6-ex", ETH_RSS_IPV6_EX }, 91 { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, 92 { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, 93 { "port", ETH_RSS_PORT }, 94 { "vxlan", ETH_RSS_VXLAN }, 95 { "geneve", ETH_RSS_GENEVE }, 96 { "nvgre", ETH_RSS_NVGRE }, 97 { "ip", ETH_RSS_IP }, 98 { "udp", ETH_RSS_UDP }, 99 { "tcp", ETH_RSS_TCP }, 100 { "sctp", ETH_RSS_SCTP }, 101 { "tunnel", ETH_RSS_TUNNEL }, 102 { NULL, 0 }, 103 }; 104 105 static void 106 print_ethaddr(const char *name, struct ether_addr *eth_addr) 107 { 108 char buf[ETHER_ADDR_FMT_SIZE]; 109 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 110 printf("%s%s", name, buf); 111 } 112 113 void 114 nic_stats_display(portid_t port_id) 115 { 116 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 117 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 118 static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; 119 uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles; 120 uint64_t mpps_rx, mpps_tx; 121 struct rte_eth_stats stats; 122 struct rte_port *port = &ports[port_id]; 123 uint8_t i; 124 125 static const char *nic_stats_border = "########################"; 126 127 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 128 print_valid_ports(); 129 return; 130 } 131 rte_eth_stats_get(port_id, &stats); 132 printf("\n %s NIC statistics for port %-2d %s\n", 133 nic_stats_border, port_id, nic_stats_border); 134 135 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 136 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 137 "%-"PRIu64"\n", 138 stats.ipackets, stats.imissed, stats.ibytes); 139 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 140 printf(" RX-nombuf: %-10"PRIu64"\n", 141 stats.rx_nombuf); 142 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 143 "%-"PRIu64"\n", 144 stats.opackets, stats.oerrors, stats.obytes); 145 } 146 else { 147 printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 148 " RX-bytes: %10"PRIu64"\n", 149 stats.ipackets, stats.ierrors, stats.ibytes); 150 printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); 151 printf(" RX-nombuf: %10"PRIu64"\n", 152 stats.rx_nombuf); 153 printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 154 " TX-bytes: %10"PRIu64"\n", 155 stats.opackets, stats.oerrors, stats.obytes); 156 } 157 158 if (port->rx_queue_stats_mapping_enabled) { 159 printf("\n"); 160 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 161 printf(" Stats reg %2d RX-packets: %10"PRIu64 162 " RX-errors: %10"PRIu64 163 " RX-bytes: %10"PRIu64"\n", 164 i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); 165 } 166 } 167 if (port->tx_queue_stats_mapping_enabled) { 168 printf("\n"); 169 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 170 printf(" Stats reg %2d TX-packets: %10"PRIu64 171 " TX-bytes: %10"PRIu64"\n", 172 i, stats.q_opackets[i], stats.q_obytes[i]); 173 } 174 } 175 176 diff_cycles = prev_cycles[port_id]; 177 prev_cycles[port_id] = rte_rdtsc(); 178 if (diff_cycles > 0) 179 diff_cycles = prev_cycles[port_id] - diff_cycles; 180 181 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 182 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 183 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 184 (stats.opackets - prev_pkts_tx[port_id]) : 0; 185 prev_pkts_rx[port_id] = stats.ipackets; 186 prev_pkts_tx[port_id] = stats.opackets; 187 mpps_rx = diff_cycles > 0 ? 188 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; 189 mpps_tx = diff_cycles > 0 ? 190 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; 191 printf("\n Throughput (since last show)\n"); 192 printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n", 193 mpps_rx, mpps_tx); 194 195 printf(" %s############################%s\n", 196 nic_stats_border, nic_stats_border); 197 } 198 199 void 200 nic_stats_clear(portid_t port_id) 201 { 202 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 203 print_valid_ports(); 204 return; 205 } 206 rte_eth_stats_reset(port_id); 207 printf("\n NIC statistics for port %d cleared\n", port_id); 208 } 209 210 void 211 nic_xstats_display(portid_t port_id) 212 { 213 struct rte_eth_xstat *xstats; 214 int cnt_xstats, idx_xstat; 215 struct rte_eth_xstat_name *xstats_names; 216 217 printf("###### NIC extended statistics for port %-2d\n", port_id); 218 if (!rte_eth_dev_is_valid_port(port_id)) { 219 printf("Error: Invalid port number %i\n", port_id); 220 return; 221 } 222 223 /* Get count */ 224 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 225 if (cnt_xstats < 0) { 226 printf("Error: Cannot get count of xstats\n"); 227 return; 228 } 229 230 /* Get id-name lookup table */ 231 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 232 if (xstats_names == NULL) { 233 printf("Cannot allocate memory for xstats lookup\n"); 234 return; 235 } 236 if (cnt_xstats != rte_eth_xstats_get_names( 237 port_id, xstats_names, cnt_xstats)) { 238 printf("Error: Cannot get xstats lookup\n"); 239 free(xstats_names); 240 return; 241 } 242 243 /* Get stats themselves */ 244 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 245 if (xstats == NULL) { 246 printf("Cannot allocate memory for xstats\n"); 247 free(xstats_names); 248 return; 249 } 250 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 251 printf("Error: Unable to get xstats\n"); 252 free(xstats_names); 253 free(xstats); 254 return; 255 } 256 257 /* Display xstats */ 258 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 259 if (xstats_hide_zero && !xstats[idx_xstat].value) 260 continue; 261 printf("%s: %"PRIu64"\n", 262 xstats_names[idx_xstat].name, 263 xstats[idx_xstat].value); 264 } 265 free(xstats_names); 266 free(xstats); 267 } 268 269 void 270 nic_xstats_clear(portid_t port_id) 271 { 272 rte_eth_xstats_reset(port_id); 273 } 274 275 void 276 nic_stats_mapping_display(portid_t port_id) 277 { 278 struct rte_port *port = &ports[port_id]; 279 uint16_t i; 280 281 static const char *nic_stats_mapping_border = "########################"; 282 283 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 284 print_valid_ports(); 285 return; 286 } 287 288 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 289 printf("Port id %d - either does not support queue statistic mapping or" 290 " no queue statistic mapping set\n", port_id); 291 return; 292 } 293 294 printf("\n %s NIC statistics mapping for port %-2d %s\n", 295 nic_stats_mapping_border, port_id, nic_stats_mapping_border); 296 297 if (port->rx_queue_stats_mapping_enabled) { 298 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 299 if (rx_queue_stats_mappings[i].port_id == port_id) { 300 printf(" RX-queue %2d mapped to Stats Reg %2d\n", 301 rx_queue_stats_mappings[i].queue_id, 302 rx_queue_stats_mappings[i].stats_counter_id); 303 } 304 } 305 printf("\n"); 306 } 307 308 309 if (port->tx_queue_stats_mapping_enabled) { 310 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 311 if (tx_queue_stats_mappings[i].port_id == port_id) { 312 printf(" TX-queue %2d mapped to Stats Reg %2d\n", 313 tx_queue_stats_mappings[i].queue_id, 314 tx_queue_stats_mappings[i].stats_counter_id); 315 } 316 } 317 } 318 319 printf(" %s####################################%s\n", 320 nic_stats_mapping_border, nic_stats_mapping_border); 321 } 322 323 void 324 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 325 { 326 struct rte_eth_rxq_info qinfo; 327 int32_t rc; 328 static const char *info_border = "*********************"; 329 330 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 331 if (rc != 0) { 332 printf("Failed to retrieve information for port: %u, " 333 "RX queue: %hu\nerror desc: %s(%d)\n", 334 port_id, queue_id, strerror(-rc), rc); 335 return; 336 } 337 338 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 339 info_border, port_id, queue_id, info_border); 340 341 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 342 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 343 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 344 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 345 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 346 printf("\nRX drop packets: %s", 347 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 348 printf("\nRX deferred start: %s", 349 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 350 printf("\nRX scattered packets: %s", 351 (qinfo.scattered_rx != 0) ? "on" : "off"); 352 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 353 printf("\n"); 354 } 355 356 void 357 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 358 { 359 struct rte_eth_txq_info qinfo; 360 int32_t rc; 361 static const char *info_border = "*********************"; 362 363 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 364 if (rc != 0) { 365 printf("Failed to retrieve information for port: %u, " 366 "TX queue: %hu\nerror desc: %s(%d)\n", 367 port_id, queue_id, strerror(-rc), rc); 368 return; 369 } 370 371 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 372 info_border, port_id, queue_id, info_border); 373 374 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 375 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 376 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 377 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 378 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 379 printf("\nTX deferred start: %s", 380 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 381 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 382 printf("\n"); 383 } 384 385 void 386 port_infos_display(portid_t port_id) 387 { 388 struct rte_port *port; 389 struct ether_addr mac_addr; 390 struct rte_eth_link link; 391 struct rte_eth_dev_info dev_info; 392 int vlan_offload; 393 struct rte_mempool * mp; 394 static const char *info_border = "*********************"; 395 uint16_t mtu; 396 char name[RTE_ETH_NAME_MAX_LEN]; 397 398 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 399 print_valid_ports(); 400 return; 401 } 402 port = &ports[port_id]; 403 rte_eth_link_get_nowait(port_id, &link); 404 memset(&dev_info, 0, sizeof(dev_info)); 405 rte_eth_dev_info_get(port_id, &dev_info); 406 printf("\n%s Infos for port %-2d %s\n", 407 info_border, port_id, info_border); 408 rte_eth_macaddr_get(port_id, &mac_addr); 409 print_ethaddr("MAC address: ", &mac_addr); 410 rte_eth_dev_get_name_by_port(port_id, name); 411 printf("\nDevice name: %s", name); 412 printf("\nDriver name: %s", dev_info.driver_name); 413 printf("\nConnect to socket: %u", port->socket_id); 414 415 if (port_numa[port_id] != NUMA_NO_CONFIG) { 416 mp = mbuf_pool_find(port_numa[port_id]); 417 if (mp) 418 printf("\nmemory allocation on the socket: %d", 419 port_numa[port_id]); 420 } else 421 printf("\nmemory allocation on the socket: %u",port->socket_id); 422 423 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 424 printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); 425 printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 426 ("full-duplex") : ("half-duplex")); 427 428 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 429 printf("MTU: %u\n", mtu); 430 431 printf("Promiscuous mode: %s\n", 432 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 433 printf("Allmulticast mode: %s\n", 434 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 435 printf("Maximum number of MAC addresses: %u\n", 436 (unsigned int)(port->dev_info.max_mac_addrs)); 437 printf("Maximum number of MAC addresses of hash filtering: %u\n", 438 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 439 440 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 441 if (vlan_offload >= 0){ 442 printf("VLAN offload: \n"); 443 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) 444 printf(" strip on \n"); 445 else 446 printf(" strip off \n"); 447 448 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) 449 printf(" filter on \n"); 450 else 451 printf(" filter off \n"); 452 453 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) 454 printf(" qinq(extend) on \n"); 455 else 456 printf(" qinq(extend) off \n"); 457 } 458 459 if (dev_info.hash_key_size > 0) 460 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 461 if (dev_info.reta_size > 0) 462 printf("Redirection table size: %u\n", dev_info.reta_size); 463 if (!dev_info.flow_type_rss_offloads) 464 printf("No flow type is supported.\n"); 465 else { 466 uint16_t i; 467 char *p; 468 469 printf("Supported flow types:\n"); 470 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 471 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 472 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 473 continue; 474 p = flowtype_to_str(i); 475 if (p) 476 printf(" %s\n", p); 477 else 478 printf(" user defined %d\n", i); 479 } 480 } 481 482 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 483 printf("Maximum configurable length of RX packet: %u\n", 484 dev_info.max_rx_pktlen); 485 if (dev_info.max_vfs) 486 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 487 if (dev_info.max_vmdq_pools) 488 printf("Maximum number of VMDq pools: %u\n", 489 dev_info.max_vmdq_pools); 490 491 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 492 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 493 printf("Max possible number of RXDs per queue: %hu\n", 494 dev_info.rx_desc_lim.nb_max); 495 printf("Min possible number of RXDs per queue: %hu\n", 496 dev_info.rx_desc_lim.nb_min); 497 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 498 499 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 500 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 501 printf("Max possible number of TXDs per queue: %hu\n", 502 dev_info.tx_desc_lim.nb_max); 503 printf("Min possible number of TXDs per queue: %hu\n", 504 dev_info.tx_desc_lim.nb_min); 505 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 506 507 /* Show switch info only if valid switch domain and port id is set */ 508 if (dev_info.switch_info.domain_id != 509 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 510 if (dev_info.switch_info.name) 511 printf("Switch name: %s\n", dev_info.switch_info.name); 512 513 printf("Switch domain Id: %u\n", 514 dev_info.switch_info.domain_id); 515 printf("Switch Port Id: %u\n", 516 dev_info.switch_info.port_id); 517 } 518 } 519 520 void 521 port_offload_cap_display(portid_t port_id) 522 { 523 struct rte_eth_dev_info dev_info; 524 static const char *info_border = "************"; 525 526 if (port_id_is_invalid(port_id, ENABLED_WARN)) 527 return; 528 529 rte_eth_dev_info_get(port_id, &dev_info); 530 531 printf("\n%s Port %d supported offload features: %s\n", 532 info_border, port_id, info_border); 533 534 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { 535 printf("VLAN stripped: "); 536 if (ports[port_id].dev_conf.rxmode.offloads & 537 DEV_RX_OFFLOAD_VLAN_STRIP) 538 printf("on\n"); 539 else 540 printf("off\n"); 541 } 542 543 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { 544 printf("Double VLANs stripped: "); 545 if (ports[port_id].dev_conf.rxmode.offloads & 546 DEV_RX_OFFLOAD_VLAN_EXTEND) 547 printf("on\n"); 548 else 549 printf("off\n"); 550 } 551 552 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { 553 printf("RX IPv4 checksum: "); 554 if (ports[port_id].dev_conf.rxmode.offloads & 555 DEV_RX_OFFLOAD_IPV4_CKSUM) 556 printf("on\n"); 557 else 558 printf("off\n"); 559 } 560 561 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { 562 printf("RX UDP checksum: "); 563 if (ports[port_id].dev_conf.rxmode.offloads & 564 DEV_RX_OFFLOAD_UDP_CKSUM) 565 printf("on\n"); 566 else 567 printf("off\n"); 568 } 569 570 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { 571 printf("RX TCP checksum: "); 572 if (ports[port_id].dev_conf.rxmode.offloads & 573 DEV_RX_OFFLOAD_TCP_CKSUM) 574 printf("on\n"); 575 else 576 printf("off\n"); 577 } 578 579 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { 580 printf("RX Outer IPv4 checksum: "); 581 if (ports[port_id].dev_conf.rxmode.offloads & 582 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) 583 printf("on\n"); 584 else 585 printf("off\n"); 586 } 587 588 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { 589 printf("Large receive offload: "); 590 if (ports[port_id].dev_conf.rxmode.offloads & 591 DEV_RX_OFFLOAD_TCP_LRO) 592 printf("on\n"); 593 else 594 printf("off\n"); 595 } 596 597 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { 598 printf("VLAN insert: "); 599 if (ports[port_id].dev_conf.txmode.offloads & 600 DEV_TX_OFFLOAD_VLAN_INSERT) 601 printf("on\n"); 602 else 603 printf("off\n"); 604 } 605 606 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { 607 printf("HW timestamp: "); 608 if (ports[port_id].dev_conf.rxmode.offloads & 609 DEV_RX_OFFLOAD_TIMESTAMP) 610 printf("on\n"); 611 else 612 printf("off\n"); 613 } 614 615 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { 616 printf("Double VLANs insert: "); 617 if (ports[port_id].dev_conf.txmode.offloads & 618 DEV_TX_OFFLOAD_QINQ_INSERT) 619 printf("on\n"); 620 else 621 printf("off\n"); 622 } 623 624 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { 625 printf("TX IPv4 checksum: "); 626 if (ports[port_id].dev_conf.txmode.offloads & 627 DEV_TX_OFFLOAD_IPV4_CKSUM) 628 printf("on\n"); 629 else 630 printf("off\n"); 631 } 632 633 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { 634 printf("TX UDP checksum: "); 635 if (ports[port_id].dev_conf.txmode.offloads & 636 DEV_TX_OFFLOAD_UDP_CKSUM) 637 printf("on\n"); 638 else 639 printf("off\n"); 640 } 641 642 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { 643 printf("TX TCP checksum: "); 644 if (ports[port_id].dev_conf.txmode.offloads & 645 DEV_TX_OFFLOAD_TCP_CKSUM) 646 printf("on\n"); 647 else 648 printf("off\n"); 649 } 650 651 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { 652 printf("TX SCTP checksum: "); 653 if (ports[port_id].dev_conf.txmode.offloads & 654 DEV_TX_OFFLOAD_SCTP_CKSUM) 655 printf("on\n"); 656 else 657 printf("off\n"); 658 } 659 660 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { 661 printf("TX Outer IPv4 checksum: "); 662 if (ports[port_id].dev_conf.txmode.offloads & 663 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) 664 printf("on\n"); 665 else 666 printf("off\n"); 667 } 668 669 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { 670 printf("TX TCP segmentation: "); 671 if (ports[port_id].dev_conf.txmode.offloads & 672 DEV_TX_OFFLOAD_TCP_TSO) 673 printf("on\n"); 674 else 675 printf("off\n"); 676 } 677 678 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { 679 printf("TX UDP segmentation: "); 680 if (ports[port_id].dev_conf.txmode.offloads & 681 DEV_TX_OFFLOAD_UDP_TSO) 682 printf("on\n"); 683 else 684 printf("off\n"); 685 } 686 687 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { 688 printf("TSO for VXLAN tunnel packet: "); 689 if (ports[port_id].dev_conf.txmode.offloads & 690 DEV_TX_OFFLOAD_VXLAN_TNL_TSO) 691 printf("on\n"); 692 else 693 printf("off\n"); 694 } 695 696 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { 697 printf("TSO for GRE tunnel packet: "); 698 if (ports[port_id].dev_conf.txmode.offloads & 699 DEV_TX_OFFLOAD_GRE_TNL_TSO) 700 printf("on\n"); 701 else 702 printf("off\n"); 703 } 704 705 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { 706 printf("TSO for IPIP tunnel packet: "); 707 if (ports[port_id].dev_conf.txmode.offloads & 708 DEV_TX_OFFLOAD_IPIP_TNL_TSO) 709 printf("on\n"); 710 else 711 printf("off\n"); 712 } 713 714 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { 715 printf("TSO for GENEVE tunnel packet: "); 716 if (ports[port_id].dev_conf.txmode.offloads & 717 DEV_TX_OFFLOAD_GENEVE_TNL_TSO) 718 printf("on\n"); 719 else 720 printf("off\n"); 721 } 722 723 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { 724 printf("IP tunnel TSO: "); 725 if (ports[port_id].dev_conf.txmode.offloads & 726 DEV_TX_OFFLOAD_IP_TNL_TSO) 727 printf("on\n"); 728 else 729 printf("off\n"); 730 } 731 732 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { 733 printf("UDP tunnel TSO: "); 734 if (ports[port_id].dev_conf.txmode.offloads & 735 DEV_TX_OFFLOAD_UDP_TNL_TSO) 736 printf("on\n"); 737 else 738 printf("off\n"); 739 } 740 } 741 742 int 743 port_id_is_invalid(portid_t port_id, enum print_warning warning) 744 { 745 uint16_t pid; 746 747 if (port_id == (portid_t)RTE_PORT_ALL) 748 return 0; 749 750 RTE_ETH_FOREACH_DEV(pid) 751 if (port_id == pid) 752 return 0; 753 754 if (warning == ENABLED_WARN) 755 printf("Invalid port %d\n", port_id); 756 757 return 1; 758 } 759 760 void print_valid_ports(void) 761 { 762 portid_t pid; 763 764 printf("The valid ports array is ["); 765 RTE_ETH_FOREACH_DEV(pid) { 766 printf(" %d", pid); 767 } 768 printf(" ]\n"); 769 } 770 771 static int 772 vlan_id_is_invalid(uint16_t vlan_id) 773 { 774 if (vlan_id < 4096) 775 return 0; 776 printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); 777 return 1; 778 } 779 780 static int 781 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 782 { 783 const struct rte_pci_device *pci_dev; 784 const struct rte_bus *bus; 785 uint64_t pci_len; 786 787 if (reg_off & 0x3) { 788 printf("Port register offset 0x%X not aligned on a 4-byte " 789 "boundary\n", 790 (unsigned)reg_off); 791 return 1; 792 } 793 794 if (!ports[port_id].dev_info.device) { 795 printf("Invalid device\n"); 796 return 0; 797 } 798 799 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 800 if (bus && !strcmp(bus->name, "pci")) { 801 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 802 } else { 803 printf("Not a PCI device\n"); 804 return 1; 805 } 806 807 pci_len = pci_dev->mem_resource[0].len; 808 if (reg_off >= pci_len) { 809 printf("Port %d: register offset %u (0x%X) out of port PCI " 810 "resource (length=%"PRIu64")\n", 811 port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); 812 return 1; 813 } 814 return 0; 815 } 816 817 static int 818 reg_bit_pos_is_invalid(uint8_t bit_pos) 819 { 820 if (bit_pos <= 31) 821 return 0; 822 printf("Invalid bit position %d (must be <= 31)\n", bit_pos); 823 return 1; 824 } 825 826 #define display_port_and_reg_off(port_id, reg_off) \ 827 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 828 829 static inline void 830 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 831 { 832 display_port_and_reg_off(port_id, (unsigned)reg_off); 833 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 834 } 835 836 void 837 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 838 { 839 uint32_t reg_v; 840 841 842 if (port_id_is_invalid(port_id, ENABLED_WARN)) 843 return; 844 if (port_reg_off_is_invalid(port_id, reg_off)) 845 return; 846 if (reg_bit_pos_is_invalid(bit_x)) 847 return; 848 reg_v = port_id_pci_reg_read(port_id, reg_off); 849 display_port_and_reg_off(port_id, (unsigned)reg_off); 850 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 851 } 852 853 void 854 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 855 uint8_t bit1_pos, uint8_t bit2_pos) 856 { 857 uint32_t reg_v; 858 uint8_t l_bit; 859 uint8_t h_bit; 860 861 if (port_id_is_invalid(port_id, ENABLED_WARN)) 862 return; 863 if (port_reg_off_is_invalid(port_id, reg_off)) 864 return; 865 if (reg_bit_pos_is_invalid(bit1_pos)) 866 return; 867 if (reg_bit_pos_is_invalid(bit2_pos)) 868 return; 869 if (bit1_pos > bit2_pos) 870 l_bit = bit2_pos, h_bit = bit1_pos; 871 else 872 l_bit = bit1_pos, h_bit = bit2_pos; 873 874 reg_v = port_id_pci_reg_read(port_id, reg_off); 875 reg_v >>= l_bit; 876 if (h_bit < 31) 877 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 878 display_port_and_reg_off(port_id, (unsigned)reg_off); 879 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 880 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 881 } 882 883 void 884 port_reg_display(portid_t port_id, uint32_t reg_off) 885 { 886 uint32_t reg_v; 887 888 if (port_id_is_invalid(port_id, ENABLED_WARN)) 889 return; 890 if (port_reg_off_is_invalid(port_id, reg_off)) 891 return; 892 reg_v = port_id_pci_reg_read(port_id, reg_off); 893 display_port_reg_value(port_id, reg_off, reg_v); 894 } 895 896 void 897 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 898 uint8_t bit_v) 899 { 900 uint32_t reg_v; 901 902 if (port_id_is_invalid(port_id, ENABLED_WARN)) 903 return; 904 if (port_reg_off_is_invalid(port_id, reg_off)) 905 return; 906 if (reg_bit_pos_is_invalid(bit_pos)) 907 return; 908 if (bit_v > 1) { 909 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); 910 return; 911 } 912 reg_v = port_id_pci_reg_read(port_id, reg_off); 913 if (bit_v == 0) 914 reg_v &= ~(1 << bit_pos); 915 else 916 reg_v |= (1 << bit_pos); 917 port_id_pci_reg_write(port_id, reg_off, reg_v); 918 display_port_reg_value(port_id, reg_off, reg_v); 919 } 920 921 void 922 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 923 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 924 { 925 uint32_t max_v; 926 uint32_t reg_v; 927 uint8_t l_bit; 928 uint8_t h_bit; 929 930 if (port_id_is_invalid(port_id, ENABLED_WARN)) 931 return; 932 if (port_reg_off_is_invalid(port_id, reg_off)) 933 return; 934 if (reg_bit_pos_is_invalid(bit1_pos)) 935 return; 936 if (reg_bit_pos_is_invalid(bit2_pos)) 937 return; 938 if (bit1_pos > bit2_pos) 939 l_bit = bit2_pos, h_bit = bit1_pos; 940 else 941 l_bit = bit1_pos, h_bit = bit2_pos; 942 943 if ((h_bit - l_bit) < 31) 944 max_v = (1 << (h_bit - l_bit + 1)) - 1; 945 else 946 max_v = 0xFFFFFFFF; 947 948 if (value > max_v) { 949 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", 950 (unsigned)value, (unsigned)value, 951 (unsigned)max_v, (unsigned)max_v); 952 return; 953 } 954 reg_v = port_id_pci_reg_read(port_id, reg_off); 955 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 956 reg_v |= (value << l_bit); /* Set changed bits */ 957 port_id_pci_reg_write(port_id, reg_off, reg_v); 958 display_port_reg_value(port_id, reg_off, reg_v); 959 } 960 961 void 962 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 963 { 964 if (port_id_is_invalid(port_id, ENABLED_WARN)) 965 return; 966 if (port_reg_off_is_invalid(port_id, reg_off)) 967 return; 968 port_id_pci_reg_write(port_id, reg_off, reg_v); 969 display_port_reg_value(port_id, reg_off, reg_v); 970 } 971 972 void 973 port_mtu_set(portid_t port_id, uint16_t mtu) 974 { 975 int diag; 976 977 if (port_id_is_invalid(port_id, ENABLED_WARN)) 978 return; 979 diag = rte_eth_dev_set_mtu(port_id, mtu); 980 if (diag == 0) 981 return; 982 printf("Set MTU failed. diag=%d\n", diag); 983 } 984 985 /* Generic flow management functions. */ 986 987 /** Generate flow_item[] entry. */ 988 #define MK_FLOW_ITEM(t, s) \ 989 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 990 .name = # t, \ 991 .size = s, \ 992 } 993 994 /** Information about known flow pattern items. */ 995 static const struct { 996 const char *name; 997 size_t size; 998 } flow_item[] = { 999 MK_FLOW_ITEM(END, 0), 1000 MK_FLOW_ITEM(VOID, 0), 1001 MK_FLOW_ITEM(INVERT, 0), 1002 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 1003 MK_FLOW_ITEM(PF, 0), 1004 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 1005 MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)), 1006 MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)), 1007 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), 1008 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 1009 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 1010 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 1011 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 1012 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 1013 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 1014 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 1015 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 1016 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 1017 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), 1018 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), 1019 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 1020 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 1021 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)), 1022 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), 1023 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), 1024 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), 1025 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), 1026 MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)), 1027 MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)), 1028 MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)), 1029 MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)), 1030 MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)), 1031 MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)), 1032 MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)), 1033 MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH, 1034 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)), 1035 MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH, 1036 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)), 1037 }; 1038 1039 /** Pattern item specification types. */ 1040 enum item_spec_type { 1041 ITEM_SPEC, 1042 ITEM_LAST, 1043 ITEM_MASK, 1044 }; 1045 1046 /** Compute storage space needed by item specification and copy it. */ 1047 static size_t 1048 flow_item_spec_copy(void *buf, const struct rte_flow_item *item, 1049 enum item_spec_type type) 1050 { 1051 size_t size = 0; 1052 const void *data = 1053 type == ITEM_SPEC ? item->spec : 1054 type == ITEM_LAST ? item->last : 1055 type == ITEM_MASK ? item->mask : 1056 NULL; 1057 1058 if (!item->spec || !data) 1059 goto empty; 1060 switch (item->type) { 1061 union { 1062 const struct rte_flow_item_raw *raw; 1063 } spec; 1064 union { 1065 const struct rte_flow_item_raw *raw; 1066 } last; 1067 union { 1068 const struct rte_flow_item_raw *raw; 1069 } mask; 1070 union { 1071 const struct rte_flow_item_raw *raw; 1072 } src; 1073 union { 1074 struct rte_flow_item_raw *raw; 1075 } dst; 1076 size_t off; 1077 1078 case RTE_FLOW_ITEM_TYPE_RAW: 1079 spec.raw = item->spec; 1080 last.raw = item->last ? item->last : item->spec; 1081 mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask; 1082 src.raw = data; 1083 dst.raw = buf; 1084 off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw), 1085 sizeof(*src.raw->pattern)); 1086 if (type == ITEM_SPEC || 1087 (type == ITEM_MASK && 1088 ((spec.raw->length & mask.raw->length) >= 1089 (last.raw->length & mask.raw->length)))) 1090 size = spec.raw->length & mask.raw->length; 1091 else 1092 size = last.raw->length & mask.raw->length; 1093 size = off + size * sizeof(*src.raw->pattern); 1094 if (dst.raw) { 1095 memcpy(dst.raw, src.raw, sizeof(*src.raw)); 1096 dst.raw->pattern = memcpy((uint8_t *)dst.raw + off, 1097 src.raw->pattern, 1098 size - off); 1099 } 1100 break; 1101 default: 1102 size = flow_item[item->type].size; 1103 if (buf) 1104 memcpy(buf, data, size); 1105 break; 1106 } 1107 empty: 1108 return RTE_ALIGN_CEIL(size, sizeof(double)); 1109 } 1110 1111 /** Generate flow_action[] entry. */ 1112 #define MK_FLOW_ACTION(t, s) \ 1113 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 1114 .name = # t, \ 1115 .size = s, \ 1116 } 1117 1118 /** Information about known flow actions. */ 1119 static const struct { 1120 const char *name; 1121 size_t size; 1122 } flow_action[] = { 1123 MK_FLOW_ACTION(END, 0), 1124 MK_FLOW_ACTION(VOID, 0), 1125 MK_FLOW_ACTION(PASSTHRU, 0), 1126 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 1127 MK_FLOW_ACTION(FLAG, 0), 1128 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 1129 MK_FLOW_ACTION(DROP, 0), 1130 MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)), 1131 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), 1132 MK_FLOW_ACTION(PF, 0), 1133 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 1134 MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)), 1135 MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)), 1136 MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)), 1137 MK_FLOW_ACTION(OF_SET_MPLS_TTL, 1138 sizeof(struct rte_flow_action_of_set_mpls_ttl)), 1139 MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0), 1140 MK_FLOW_ACTION(OF_SET_NW_TTL, 1141 sizeof(struct rte_flow_action_of_set_nw_ttl)), 1142 MK_FLOW_ACTION(OF_DEC_NW_TTL, 0), 1143 MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0), 1144 MK_FLOW_ACTION(OF_COPY_TTL_IN, 0), 1145 MK_FLOW_ACTION(OF_POP_VLAN, 0), 1146 MK_FLOW_ACTION(OF_PUSH_VLAN, 1147 sizeof(struct rte_flow_action_of_push_vlan)), 1148 MK_FLOW_ACTION(OF_SET_VLAN_VID, 1149 sizeof(struct rte_flow_action_of_set_vlan_vid)), 1150 MK_FLOW_ACTION(OF_SET_VLAN_PCP, 1151 sizeof(struct rte_flow_action_of_set_vlan_pcp)), 1152 MK_FLOW_ACTION(OF_POP_MPLS, 1153 sizeof(struct rte_flow_action_of_pop_mpls)), 1154 MK_FLOW_ACTION(OF_PUSH_MPLS, 1155 sizeof(struct rte_flow_action_of_push_mpls)), 1156 }; 1157 1158 /** Compute storage space needed by action configuration and copy it. */ 1159 static size_t 1160 flow_action_conf_copy(void *buf, const struct rte_flow_action *action) 1161 { 1162 size_t size = 0; 1163 1164 if (!action->conf) 1165 goto empty; 1166 switch (action->type) { 1167 union { 1168 const struct rte_flow_action_rss *rss; 1169 } src; 1170 union { 1171 struct rte_flow_action_rss *rss; 1172 } dst; 1173 size_t off; 1174 1175 case RTE_FLOW_ACTION_TYPE_RSS: 1176 src.rss = action->conf; 1177 dst.rss = buf; 1178 off = 0; 1179 if (dst.rss) 1180 *dst.rss = (struct rte_flow_action_rss){ 1181 .func = src.rss->func, 1182 .level = src.rss->level, 1183 .types = src.rss->types, 1184 .key_len = src.rss->key_len, 1185 .queue_num = src.rss->queue_num, 1186 }; 1187 off += sizeof(*src.rss); 1188 if (src.rss->key_len) { 1189 off = RTE_ALIGN_CEIL(off, sizeof(double)); 1190 size = sizeof(*src.rss->key) * src.rss->key_len; 1191 if (dst.rss) 1192 dst.rss->key = memcpy 1193 ((void *)((uintptr_t)dst.rss + off), 1194 src.rss->key, size); 1195 off += size; 1196 } 1197 if (src.rss->queue_num) { 1198 off = RTE_ALIGN_CEIL(off, sizeof(double)); 1199 size = sizeof(*src.rss->queue) * src.rss->queue_num; 1200 if (dst.rss) 1201 dst.rss->queue = memcpy 1202 ((void *)((uintptr_t)dst.rss + off), 1203 src.rss->queue, size); 1204 off += size; 1205 } 1206 size = off; 1207 break; 1208 default: 1209 size = flow_action[action->type].size; 1210 if (buf) 1211 memcpy(buf, action->conf, size); 1212 break; 1213 } 1214 empty: 1215 return RTE_ALIGN_CEIL(size, sizeof(double)); 1216 } 1217 1218 /** Generate a port_flow entry from attributes/pattern/actions. */ 1219 static struct port_flow * 1220 port_flow_new(const struct rte_flow_attr *attr, 1221 const struct rte_flow_item *pattern, 1222 const struct rte_flow_action *actions) 1223 { 1224 const struct rte_flow_item *item; 1225 const struct rte_flow_action *action; 1226 struct port_flow *pf = NULL; 1227 size_t tmp; 1228 size_t off1 = 0; 1229 size_t off2 = 0; 1230 int err = ENOTSUP; 1231 1232 store: 1233 item = pattern; 1234 if (pf) 1235 pf->pattern = (void *)&pf->data[off1]; 1236 do { 1237 struct rte_flow_item *dst = NULL; 1238 1239 if ((unsigned int)item->type >= RTE_DIM(flow_item) || 1240 !flow_item[item->type].name) 1241 goto notsup; 1242 if (pf) 1243 dst = memcpy(pf->data + off1, item, sizeof(*item)); 1244 off1 += sizeof(*item); 1245 if (item->spec) { 1246 if (pf) 1247 dst->spec = pf->data + off2; 1248 off2 += flow_item_spec_copy 1249 (pf ? pf->data + off2 : NULL, item, ITEM_SPEC); 1250 } 1251 if (item->last) { 1252 if (pf) 1253 dst->last = pf->data + off2; 1254 off2 += flow_item_spec_copy 1255 (pf ? pf->data + off2 : NULL, item, ITEM_LAST); 1256 } 1257 if (item->mask) { 1258 if (pf) 1259 dst->mask = pf->data + off2; 1260 off2 += flow_item_spec_copy 1261 (pf ? pf->data + off2 : NULL, item, ITEM_MASK); 1262 } 1263 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1264 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); 1265 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1266 action = actions; 1267 if (pf) 1268 pf->actions = (void *)&pf->data[off1]; 1269 do { 1270 struct rte_flow_action *dst = NULL; 1271 1272 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1273 !flow_action[action->type].name) 1274 goto notsup; 1275 if (pf) 1276 dst = memcpy(pf->data + off1, action, sizeof(*action)); 1277 off1 += sizeof(*action); 1278 if (action->conf) { 1279 if (pf) 1280 dst->conf = pf->data + off2; 1281 off2 += flow_action_conf_copy 1282 (pf ? pf->data + off2 : NULL, action); 1283 } 1284 off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); 1285 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); 1286 if (pf != NULL) 1287 return pf; 1288 off1 = RTE_ALIGN_CEIL(off1, sizeof(double)); 1289 tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double)); 1290 pf = calloc(1, tmp + off1 + off2); 1291 if (pf == NULL) 1292 err = errno; 1293 else { 1294 *pf = (const struct port_flow){ 1295 .size = tmp + off1 + off2, 1296 .attr = *attr, 1297 }; 1298 tmp -= offsetof(struct port_flow, data); 1299 off2 = tmp + off1; 1300 off1 = tmp; 1301 goto store; 1302 } 1303 notsup: 1304 rte_errno = err; 1305 return NULL; 1306 } 1307 1308 /** Print a message out of a flow error. */ 1309 static int 1310 port_flow_complain(struct rte_flow_error *error) 1311 { 1312 static const char *const errstrlist[] = { 1313 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1314 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1315 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1316 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1317 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1318 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1319 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1320 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1321 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1322 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1323 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1324 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1325 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1326 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1327 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1328 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1329 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1330 }; 1331 const char *errstr; 1332 char buf[32]; 1333 int err = rte_errno; 1334 1335 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1336 !errstrlist[error->type]) 1337 errstr = "unknown type"; 1338 else 1339 errstr = errstrlist[error->type]; 1340 printf("Caught error type %d (%s): %s%s\n", 1341 error->type, errstr, 1342 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1343 error->cause), buf) : "", 1344 error->message ? error->message : "(no stated reason)"); 1345 return -err; 1346 } 1347 1348 /** Validate flow rule. */ 1349 int 1350 port_flow_validate(portid_t port_id, 1351 const struct rte_flow_attr *attr, 1352 const struct rte_flow_item *pattern, 1353 const struct rte_flow_action *actions) 1354 { 1355 struct rte_flow_error error; 1356 1357 /* Poisoning to make sure PMDs update it in case of error. */ 1358 memset(&error, 0x11, sizeof(error)); 1359 if (rte_flow_validate(port_id, attr, pattern, actions, &error)) 1360 return port_flow_complain(&error); 1361 printf("Flow rule validated\n"); 1362 return 0; 1363 } 1364 1365 /** Create flow rule. */ 1366 int 1367 port_flow_create(portid_t port_id, 1368 const struct rte_flow_attr *attr, 1369 const struct rte_flow_item *pattern, 1370 const struct rte_flow_action *actions) 1371 { 1372 struct rte_flow *flow; 1373 struct rte_port *port; 1374 struct port_flow *pf; 1375 uint32_t id; 1376 struct rte_flow_error error; 1377 1378 /* Poisoning to make sure PMDs update it in case of error. */ 1379 memset(&error, 0x22, sizeof(error)); 1380 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 1381 if (!flow) 1382 return port_flow_complain(&error); 1383 port = &ports[port_id]; 1384 if (port->flow_list) { 1385 if (port->flow_list->id == UINT32_MAX) { 1386 printf("Highest rule ID is already assigned, delete" 1387 " it first"); 1388 rte_flow_destroy(port_id, flow, NULL); 1389 return -ENOMEM; 1390 } 1391 id = port->flow_list->id + 1; 1392 } else 1393 id = 0; 1394 pf = port_flow_new(attr, pattern, actions); 1395 if (!pf) { 1396 int err = rte_errno; 1397 1398 printf("Cannot allocate flow: %s\n", rte_strerror(err)); 1399 rte_flow_destroy(port_id, flow, NULL); 1400 return -err; 1401 } 1402 pf->next = port->flow_list; 1403 pf->id = id; 1404 pf->flow = flow; 1405 port->flow_list = pf; 1406 printf("Flow rule #%u created\n", pf->id); 1407 return 0; 1408 } 1409 1410 /** Destroy a number of flow rules. */ 1411 int 1412 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 1413 { 1414 struct rte_port *port; 1415 struct port_flow **tmp; 1416 uint32_t c = 0; 1417 int ret = 0; 1418 1419 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1420 port_id == (portid_t)RTE_PORT_ALL) 1421 return -EINVAL; 1422 port = &ports[port_id]; 1423 tmp = &port->flow_list; 1424 while (*tmp) { 1425 uint32_t i; 1426 1427 for (i = 0; i != n; ++i) { 1428 struct rte_flow_error error; 1429 struct port_flow *pf = *tmp; 1430 1431 if (rule[i] != pf->id) 1432 continue; 1433 /* 1434 * Poisoning to make sure PMDs update it in case 1435 * of error. 1436 */ 1437 memset(&error, 0x33, sizeof(error)); 1438 if (rte_flow_destroy(port_id, pf->flow, &error)) { 1439 ret = port_flow_complain(&error); 1440 continue; 1441 } 1442 printf("Flow rule #%u destroyed\n", pf->id); 1443 *tmp = pf->next; 1444 free(pf); 1445 break; 1446 } 1447 if (i == n) 1448 tmp = &(*tmp)->next; 1449 ++c; 1450 } 1451 return ret; 1452 } 1453 1454 /** Remove all flow rules. */ 1455 int 1456 port_flow_flush(portid_t port_id) 1457 { 1458 struct rte_flow_error error; 1459 struct rte_port *port; 1460 int ret = 0; 1461 1462 /* Poisoning to make sure PMDs update it in case of error. */ 1463 memset(&error, 0x44, sizeof(error)); 1464 if (rte_flow_flush(port_id, &error)) { 1465 ret = port_flow_complain(&error); 1466 if (port_id_is_invalid(port_id, DISABLED_WARN) || 1467 port_id == (portid_t)RTE_PORT_ALL) 1468 return ret; 1469 } 1470 port = &ports[port_id]; 1471 while (port->flow_list) { 1472 struct port_flow *pf = port->flow_list->next; 1473 1474 free(port->flow_list); 1475 port->flow_list = pf; 1476 } 1477 return ret; 1478 } 1479 1480 /** Query a flow rule. */ 1481 int 1482 port_flow_query(portid_t port_id, uint32_t rule, 1483 const struct rte_flow_action *action) 1484 { 1485 struct rte_flow_error error; 1486 struct rte_port *port; 1487 struct port_flow *pf; 1488 const char *name; 1489 union { 1490 struct rte_flow_query_count count; 1491 } query; 1492 1493 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1494 port_id == (portid_t)RTE_PORT_ALL) 1495 return -EINVAL; 1496 port = &ports[port_id]; 1497 for (pf = port->flow_list; pf; pf = pf->next) 1498 if (pf->id == rule) 1499 break; 1500 if (!pf) { 1501 printf("Flow rule #%u not found\n", rule); 1502 return -ENOENT; 1503 } 1504 if ((unsigned int)action->type >= RTE_DIM(flow_action) || 1505 !flow_action[action->type].name) 1506 name = "unknown"; 1507 else 1508 name = flow_action[action->type].name; 1509 switch (action->type) { 1510 case RTE_FLOW_ACTION_TYPE_COUNT: 1511 break; 1512 default: 1513 printf("Cannot query action type %d (%s)\n", 1514 action->type, name); 1515 return -ENOTSUP; 1516 } 1517 /* Poisoning to make sure PMDs update it in case of error. */ 1518 memset(&error, 0x55, sizeof(error)); 1519 memset(&query, 0, sizeof(query)); 1520 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 1521 return port_flow_complain(&error); 1522 switch (action->type) { 1523 case RTE_FLOW_ACTION_TYPE_COUNT: 1524 printf("%s:\n" 1525 " hits_set: %u\n" 1526 " bytes_set: %u\n" 1527 " hits: %" PRIu64 "\n" 1528 " bytes: %" PRIu64 "\n", 1529 name, 1530 query.count.hits_set, 1531 query.count.bytes_set, 1532 query.count.hits, 1533 query.count.bytes); 1534 break; 1535 default: 1536 printf("Cannot display result for action type %d (%s)\n", 1537 action->type, name); 1538 break; 1539 } 1540 return 0; 1541 } 1542 1543 /** List flow rules. */ 1544 void 1545 port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) 1546 { 1547 struct rte_port *port; 1548 struct port_flow *pf; 1549 struct port_flow *list = NULL; 1550 uint32_t i; 1551 1552 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1553 port_id == (portid_t)RTE_PORT_ALL) 1554 return; 1555 port = &ports[port_id]; 1556 if (!port->flow_list) 1557 return; 1558 /* Sort flows by group, priority and ID. */ 1559 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 1560 struct port_flow **tmp; 1561 1562 if (n) { 1563 /* Filter out unwanted groups. */ 1564 for (i = 0; i != n; ++i) 1565 if (pf->attr.group == group[i]) 1566 break; 1567 if (i == n) 1568 continue; 1569 } 1570 tmp = &list; 1571 while (*tmp && 1572 (pf->attr.group > (*tmp)->attr.group || 1573 (pf->attr.group == (*tmp)->attr.group && 1574 pf->attr.priority > (*tmp)->attr.priority) || 1575 (pf->attr.group == (*tmp)->attr.group && 1576 pf->attr.priority == (*tmp)->attr.priority && 1577 pf->id > (*tmp)->id))) 1578 tmp = &(*tmp)->tmp; 1579 pf->tmp = *tmp; 1580 *tmp = pf; 1581 } 1582 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 1583 for (pf = list; pf != NULL; pf = pf->tmp) { 1584 const struct rte_flow_item *item = pf->pattern; 1585 const struct rte_flow_action *action = pf->actions; 1586 1587 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 1588 pf->id, 1589 pf->attr.group, 1590 pf->attr.priority, 1591 pf->attr.ingress ? 'i' : '-', 1592 pf->attr.egress ? 'e' : '-', 1593 pf->attr.transfer ? 't' : '-'); 1594 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 1595 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 1596 printf("%s ", flow_item[item->type].name); 1597 ++item; 1598 } 1599 printf("=>"); 1600 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 1601 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 1602 printf(" %s", flow_action[action->type].name); 1603 ++action; 1604 } 1605 printf("\n"); 1606 } 1607 } 1608 1609 /** Restrict ingress traffic to the defined flow rules. */ 1610 int 1611 port_flow_isolate(portid_t port_id, int set) 1612 { 1613 struct rte_flow_error error; 1614 1615 /* Poisoning to make sure PMDs update it in case of error. */ 1616 memset(&error, 0x66, sizeof(error)); 1617 if (rte_flow_isolate(port_id, set, &error)) 1618 return port_flow_complain(&error); 1619 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 1620 port_id, 1621 set ? "now restricted" : "not restricted anymore"); 1622 return 0; 1623 } 1624 1625 /* 1626 * RX/TX ring descriptors display functions. 1627 */ 1628 int 1629 rx_queue_id_is_invalid(queueid_t rxq_id) 1630 { 1631 if (rxq_id < nb_rxq) 1632 return 0; 1633 printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); 1634 return 1; 1635 } 1636 1637 int 1638 tx_queue_id_is_invalid(queueid_t txq_id) 1639 { 1640 if (txq_id < nb_txq) 1641 return 0; 1642 printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); 1643 return 1; 1644 } 1645 1646 static int 1647 rx_desc_id_is_invalid(uint16_t rxdesc_id) 1648 { 1649 if (rxdesc_id < nb_rxd) 1650 return 0; 1651 printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", 1652 rxdesc_id, nb_rxd); 1653 return 1; 1654 } 1655 1656 static int 1657 tx_desc_id_is_invalid(uint16_t txdesc_id) 1658 { 1659 if (txdesc_id < nb_txd) 1660 return 0; 1661 printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", 1662 txdesc_id, nb_txd); 1663 return 1; 1664 } 1665 1666 static const struct rte_memzone * 1667 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 1668 { 1669 char mz_name[RTE_MEMZONE_NAMESIZE]; 1670 const struct rte_memzone *mz; 1671 1672 snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", 1673 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); 1674 mz = rte_memzone_lookup(mz_name); 1675 if (mz == NULL) 1676 printf("%s ring memory zoneof (port %d, queue %d) not" 1677 "found (zone name = %s\n", 1678 ring_name, port_id, q_id, mz_name); 1679 return mz; 1680 } 1681 1682 union igb_ring_dword { 1683 uint64_t dword; 1684 struct { 1685 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 1686 uint32_t lo; 1687 uint32_t hi; 1688 #else 1689 uint32_t hi; 1690 uint32_t lo; 1691 #endif 1692 } words; 1693 }; 1694 1695 struct igb_ring_desc_32_bytes { 1696 union igb_ring_dword lo_dword; 1697 union igb_ring_dword hi_dword; 1698 union igb_ring_dword resv1; 1699 union igb_ring_dword resv2; 1700 }; 1701 1702 struct igb_ring_desc_16_bytes { 1703 union igb_ring_dword lo_dword; 1704 union igb_ring_dword hi_dword; 1705 }; 1706 1707 static void 1708 ring_rxd_display_dword(union igb_ring_dword dword) 1709 { 1710 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 1711 (unsigned)dword.words.hi); 1712 } 1713 1714 static void 1715 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 1716 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1717 portid_t port_id, 1718 #else 1719 __rte_unused portid_t port_id, 1720 #endif 1721 uint16_t desc_id) 1722 { 1723 struct igb_ring_desc_16_bytes *ring = 1724 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1725 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 1726 struct rte_eth_dev_info dev_info; 1727 1728 memset(&dev_info, 0, sizeof(dev_info)); 1729 rte_eth_dev_info_get(port_id, &dev_info); 1730 if (strstr(dev_info.driver_name, "i40e") != NULL) { 1731 /* 32 bytes RX descriptor, i40e only */ 1732 struct igb_ring_desc_32_bytes *ring = 1733 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 1734 ring[desc_id].lo_dword.dword = 1735 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1736 ring_rxd_display_dword(ring[desc_id].lo_dword); 1737 ring[desc_id].hi_dword.dword = 1738 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1739 ring_rxd_display_dword(ring[desc_id].hi_dword); 1740 ring[desc_id].resv1.dword = 1741 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 1742 ring_rxd_display_dword(ring[desc_id].resv1); 1743 ring[desc_id].resv2.dword = 1744 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 1745 ring_rxd_display_dword(ring[desc_id].resv2); 1746 1747 return; 1748 } 1749 #endif 1750 /* 16 bytes RX descriptor */ 1751 ring[desc_id].lo_dword.dword = 1752 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1753 ring_rxd_display_dword(ring[desc_id].lo_dword); 1754 ring[desc_id].hi_dword.dword = 1755 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1756 ring_rxd_display_dword(ring[desc_id].hi_dword); 1757 } 1758 1759 static void 1760 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 1761 { 1762 struct igb_ring_desc_16_bytes *ring; 1763 struct igb_ring_desc_16_bytes txd; 1764 1765 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 1766 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 1767 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 1768 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 1769 (unsigned)txd.lo_dword.words.lo, 1770 (unsigned)txd.lo_dword.words.hi, 1771 (unsigned)txd.hi_dword.words.lo, 1772 (unsigned)txd.hi_dword.words.hi); 1773 } 1774 1775 void 1776 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 1777 { 1778 const struct rte_memzone *rx_mz; 1779 1780 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1781 return; 1782 if (rx_queue_id_is_invalid(rxq_id)) 1783 return; 1784 if (rx_desc_id_is_invalid(rxd_id)) 1785 return; 1786 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 1787 if (rx_mz == NULL) 1788 return; 1789 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 1790 } 1791 1792 void 1793 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 1794 { 1795 const struct rte_memzone *tx_mz; 1796 1797 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1798 return; 1799 if (tx_queue_id_is_invalid(txq_id)) 1800 return; 1801 if (tx_desc_id_is_invalid(txd_id)) 1802 return; 1803 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 1804 if (tx_mz == NULL) 1805 return; 1806 ring_tx_descriptor_display(tx_mz, txd_id); 1807 } 1808 1809 void 1810 fwd_lcores_config_display(void) 1811 { 1812 lcoreid_t lc_id; 1813 1814 printf("List of forwarding lcores:"); 1815 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 1816 printf(" %2u", fwd_lcores_cpuids[lc_id]); 1817 printf("\n"); 1818 } 1819 void 1820 rxtx_config_display(void) 1821 { 1822 portid_t pid; 1823 queueid_t qid; 1824 1825 printf(" %s packet forwarding%s packets/burst=%d\n", 1826 cur_fwd_eng->fwd_mode_name, 1827 retry_enabled == 0 ? "" : " with retry", 1828 nb_pkt_per_burst); 1829 1830 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 1831 printf(" packet len=%u - nb packet segments=%d\n", 1832 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 1833 1834 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 1835 nb_fwd_lcores, nb_fwd_ports); 1836 1837 RTE_ETH_FOREACH_DEV(pid) { 1838 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 1839 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 1840 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 1841 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 1842 uint16_t nb_rx_desc_tmp; 1843 uint16_t nb_tx_desc_tmp; 1844 struct rte_eth_rxq_info rx_qinfo; 1845 struct rte_eth_txq_info tx_qinfo; 1846 int32_t rc; 1847 1848 /* per port config */ 1849 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 1850 (unsigned int)pid, nb_rxq, nb_txq); 1851 1852 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 1853 ports[pid].dev_conf.rxmode.offloads, 1854 ports[pid].dev_conf.txmode.offloads); 1855 1856 /* per rx queue config only for first queue to be less verbose */ 1857 for (qid = 0; qid < 1; qid++) { 1858 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 1859 if (rc) 1860 nb_rx_desc_tmp = nb_rx_desc[qid]; 1861 else 1862 nb_rx_desc_tmp = rx_qinfo.nb_desc; 1863 1864 printf(" RX queue: %d\n", qid); 1865 printf(" RX desc=%d - RX free threshold=%d\n", 1866 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); 1867 printf(" RX threshold registers: pthresh=%d hthresh=%d " 1868 " wthresh=%d\n", 1869 rx_conf[qid].rx_thresh.pthresh, 1870 rx_conf[qid].rx_thresh.hthresh, 1871 rx_conf[qid].rx_thresh.wthresh); 1872 printf(" RX Offloads=0x%"PRIx64"\n", 1873 rx_conf[qid].offloads); 1874 } 1875 1876 /* per tx queue config only for first queue to be less verbose */ 1877 for (qid = 0; qid < 1; qid++) { 1878 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 1879 if (rc) 1880 nb_tx_desc_tmp = nb_tx_desc[qid]; 1881 else 1882 nb_tx_desc_tmp = tx_qinfo.nb_desc; 1883 1884 printf(" TX queue: %d\n", qid); 1885 printf(" TX desc=%d - TX free threshold=%d\n", 1886 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); 1887 printf(" TX threshold registers: pthresh=%d hthresh=%d " 1888 " wthresh=%d\n", 1889 tx_conf[qid].tx_thresh.pthresh, 1890 tx_conf[qid].tx_thresh.hthresh, 1891 tx_conf[qid].tx_thresh.wthresh); 1892 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 1893 tx_conf[qid].offloads, tx_conf->tx_rs_thresh); 1894 } 1895 } 1896 } 1897 1898 void 1899 port_rss_reta_info(portid_t port_id, 1900 struct rte_eth_rss_reta_entry64 *reta_conf, 1901 uint16_t nb_entries) 1902 { 1903 uint16_t i, idx, shift; 1904 int ret; 1905 1906 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1907 return; 1908 1909 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 1910 if (ret != 0) { 1911 printf("Failed to get RSS RETA info, return code = %d\n", ret); 1912 return; 1913 } 1914 1915 for (i = 0; i < nb_entries; i++) { 1916 idx = i / RTE_RETA_GROUP_SIZE; 1917 shift = i % RTE_RETA_GROUP_SIZE; 1918 if (!(reta_conf[idx].mask & (1ULL << shift))) 1919 continue; 1920 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 1921 i, reta_conf[idx].reta[shift]); 1922 } 1923 } 1924 1925 /* 1926 * Displays the RSS hash functions of a port, and, optionaly, the RSS hash 1927 * key of the port. 1928 */ 1929 void 1930 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) 1931 { 1932 struct rte_eth_rss_conf rss_conf; 1933 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 1934 uint64_t rss_hf; 1935 uint8_t i; 1936 int diag; 1937 struct rte_eth_dev_info dev_info; 1938 uint8_t hash_key_size; 1939 1940 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1941 return; 1942 1943 memset(&dev_info, 0, sizeof(dev_info)); 1944 rte_eth_dev_info_get(port_id, &dev_info); 1945 if (dev_info.hash_key_size > 0 && 1946 dev_info.hash_key_size <= sizeof(rss_key)) 1947 hash_key_size = dev_info.hash_key_size; 1948 else { 1949 printf("dev_info did not provide a valid hash key size\n"); 1950 return; 1951 } 1952 1953 rss_conf.rss_hf = 0; 1954 for (i = 0; rss_type_table[i].str; i++) { 1955 if (!strcmp(rss_info, rss_type_table[i].str)) 1956 rss_conf.rss_hf = rss_type_table[i].rss_type; 1957 } 1958 1959 /* Get RSS hash key if asked to display it */ 1960 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 1961 rss_conf.rss_key_len = hash_key_size; 1962 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 1963 if (diag != 0) { 1964 switch (diag) { 1965 case -ENODEV: 1966 printf("port index %d invalid\n", port_id); 1967 break; 1968 case -ENOTSUP: 1969 printf("operation not supported by device\n"); 1970 break; 1971 default: 1972 printf("operation failed - diag=%d\n", diag); 1973 break; 1974 } 1975 return; 1976 } 1977 rss_hf = rss_conf.rss_hf; 1978 if (rss_hf == 0) { 1979 printf("RSS disabled\n"); 1980 return; 1981 } 1982 printf("RSS functions:\n "); 1983 for (i = 0; rss_type_table[i].str; i++) { 1984 if (rss_hf & rss_type_table[i].rss_type) 1985 printf("%s ", rss_type_table[i].str); 1986 } 1987 printf("\n"); 1988 if (!show_rss_key) 1989 return; 1990 printf("RSS key:\n"); 1991 for (i = 0; i < hash_key_size; i++) 1992 printf("%02X", rss_key[i]); 1993 printf("\n"); 1994 } 1995 1996 void 1997 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 1998 uint hash_key_len) 1999 { 2000 struct rte_eth_rss_conf rss_conf; 2001 int diag; 2002 unsigned int i; 2003 2004 rss_conf.rss_key = NULL; 2005 rss_conf.rss_key_len = hash_key_len; 2006 rss_conf.rss_hf = 0; 2007 for (i = 0; rss_type_table[i].str; i++) { 2008 if (!strcmp(rss_type_table[i].str, rss_type)) 2009 rss_conf.rss_hf = rss_type_table[i].rss_type; 2010 } 2011 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 2012 if (diag == 0) { 2013 rss_conf.rss_key = hash_key; 2014 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 2015 } 2016 if (diag == 0) 2017 return; 2018 2019 switch (diag) { 2020 case -ENODEV: 2021 printf("port index %d invalid\n", port_id); 2022 break; 2023 case -ENOTSUP: 2024 printf("operation not supported by device\n"); 2025 break; 2026 default: 2027 printf("operation failed - diag=%d\n", diag); 2028 break; 2029 } 2030 } 2031 2032 /* 2033 * Setup forwarding configuration for each logical core. 2034 */ 2035 static void 2036 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 2037 { 2038 streamid_t nb_fs_per_lcore; 2039 streamid_t nb_fs; 2040 streamid_t sm_id; 2041 lcoreid_t nb_extra; 2042 lcoreid_t nb_fc; 2043 lcoreid_t nb_lc; 2044 lcoreid_t lc_id; 2045 2046 nb_fs = cfg->nb_fwd_streams; 2047 nb_fc = cfg->nb_fwd_lcores; 2048 if (nb_fs <= nb_fc) { 2049 nb_fs_per_lcore = 1; 2050 nb_extra = 0; 2051 } else { 2052 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 2053 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 2054 } 2055 2056 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 2057 sm_id = 0; 2058 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 2059 fwd_lcores[lc_id]->stream_idx = sm_id; 2060 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 2061 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2062 } 2063 2064 /* 2065 * Assign extra remaining streams, if any. 2066 */ 2067 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 2068 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 2069 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 2070 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 2071 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 2072 } 2073 } 2074 2075 static portid_t 2076 fwd_topology_tx_port_get(portid_t rxp) 2077 { 2078 static int warning_once = 1; 2079 2080 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 2081 2082 switch (port_topology) { 2083 default: 2084 case PORT_TOPOLOGY_PAIRED: 2085 if ((rxp & 0x1) == 0) { 2086 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 2087 return rxp + 1; 2088 if (warning_once) { 2089 printf("\nWarning! port-topology=paired" 2090 " and odd forward ports number," 2091 " the last port will pair with" 2092 " itself.\n\n"); 2093 warning_once = 0; 2094 } 2095 return rxp; 2096 } 2097 return rxp - 1; 2098 case PORT_TOPOLOGY_CHAINED: 2099 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 2100 case PORT_TOPOLOGY_LOOP: 2101 return rxp; 2102 } 2103 } 2104 2105 static void 2106 simple_fwd_config_setup(void) 2107 { 2108 portid_t i; 2109 2110 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 2111 cur_fwd_config.nb_fwd_streams = 2112 (streamid_t) cur_fwd_config.nb_fwd_ports; 2113 2114 /* reinitialize forwarding streams */ 2115 init_fwd_streams(); 2116 2117 /* 2118 * In the simple forwarding test, the number of forwarding cores 2119 * must be lower or equal to the number of forwarding ports. 2120 */ 2121 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2122 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 2123 cur_fwd_config.nb_fwd_lcores = 2124 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 2125 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2126 2127 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2128 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 2129 fwd_streams[i]->rx_queue = 0; 2130 fwd_streams[i]->tx_port = 2131 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 2132 fwd_streams[i]->tx_queue = 0; 2133 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2134 fwd_streams[i]->retry_enabled = retry_enabled; 2135 } 2136 } 2137 2138 /** 2139 * For the RSS forwarding test all streams distributed over lcores. Each stream 2140 * being composed of a RX queue to poll on a RX port for input messages, 2141 * associated with a TX queue of a TX port where to send forwarded packets. 2142 */ 2143 static void 2144 rss_fwd_config_setup(void) 2145 { 2146 portid_t rxp; 2147 portid_t txp; 2148 queueid_t rxq; 2149 queueid_t nb_q; 2150 streamid_t sm_id; 2151 2152 nb_q = nb_rxq; 2153 if (nb_q > nb_txq) 2154 nb_q = nb_txq; 2155 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2156 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2157 cur_fwd_config.nb_fwd_streams = 2158 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 2159 2160 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2161 cur_fwd_config.nb_fwd_lcores = 2162 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2163 2164 /* reinitialize forwarding streams */ 2165 init_fwd_streams(); 2166 2167 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2168 rxp = 0; rxq = 0; 2169 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 2170 struct fwd_stream *fs; 2171 2172 fs = fwd_streams[sm_id]; 2173 txp = fwd_topology_tx_port_get(rxp); 2174 fs->rx_port = fwd_ports_ids[rxp]; 2175 fs->rx_queue = rxq; 2176 fs->tx_port = fwd_ports_ids[txp]; 2177 fs->tx_queue = rxq; 2178 fs->peer_addr = fs->tx_port; 2179 fs->retry_enabled = retry_enabled; 2180 rxp++; 2181 if (rxp < nb_fwd_ports) 2182 continue; 2183 rxp = 0; 2184 rxq++; 2185 } 2186 } 2187 2188 /** 2189 * For the DCB forwarding test, each core is assigned on each traffic class. 2190 * 2191 * Each core is assigned a multi-stream, each stream being composed of 2192 * a RX queue to poll on a RX port for input messages, associated with 2193 * a TX queue of a TX port where to send forwarded packets. All RX and 2194 * TX queues are mapping to the same traffic class. 2195 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 2196 * the same core 2197 */ 2198 static void 2199 dcb_fwd_config_setup(void) 2200 { 2201 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 2202 portid_t txp, rxp = 0; 2203 queueid_t txq, rxq = 0; 2204 lcoreid_t lc_id; 2205 uint16_t nb_rx_queue, nb_tx_queue; 2206 uint16_t i, j, k, sm_id = 0; 2207 uint8_t tc = 0; 2208 2209 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2210 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2211 cur_fwd_config.nb_fwd_streams = 2212 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2213 2214 /* reinitialize forwarding streams */ 2215 init_fwd_streams(); 2216 sm_id = 0; 2217 txp = 1; 2218 /* get the dcb info on the first RX and TX ports */ 2219 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2220 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2221 2222 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2223 fwd_lcores[lc_id]->stream_nb = 0; 2224 fwd_lcores[lc_id]->stream_idx = sm_id; 2225 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { 2226 /* if the nb_queue is zero, means this tc is 2227 * not enabled on the POOL 2228 */ 2229 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 2230 break; 2231 k = fwd_lcores[lc_id]->stream_nb + 2232 fwd_lcores[lc_id]->stream_idx; 2233 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 2234 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 2235 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2236 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 2237 for (j = 0; j < nb_rx_queue; j++) { 2238 struct fwd_stream *fs; 2239 2240 fs = fwd_streams[k + j]; 2241 fs->rx_port = fwd_ports_ids[rxp]; 2242 fs->rx_queue = rxq + j; 2243 fs->tx_port = fwd_ports_ids[txp]; 2244 fs->tx_queue = txq + j % nb_tx_queue; 2245 fs->peer_addr = fs->tx_port; 2246 fs->retry_enabled = retry_enabled; 2247 } 2248 fwd_lcores[lc_id]->stream_nb += 2249 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 2250 } 2251 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 2252 2253 tc++; 2254 if (tc < rxp_dcb_info.nb_tcs) 2255 continue; 2256 /* Restart from TC 0 on next RX port */ 2257 tc = 0; 2258 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 2259 rxp = (portid_t) 2260 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 2261 else 2262 rxp++; 2263 if (rxp >= nb_fwd_ports) 2264 return; 2265 /* get the dcb information on next RX and TX ports */ 2266 if ((rxp & 0x1) == 0) 2267 txp = (portid_t) (rxp + 1); 2268 else 2269 txp = (portid_t) (rxp - 1); 2270 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 2271 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 2272 } 2273 } 2274 2275 static void 2276 icmp_echo_config_setup(void) 2277 { 2278 portid_t rxp; 2279 queueid_t rxq; 2280 lcoreid_t lc_id; 2281 uint16_t sm_id; 2282 2283 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 2284 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 2285 (nb_txq * nb_fwd_ports); 2286 else 2287 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 2288 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 2289 cur_fwd_config.nb_fwd_streams = 2290 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 2291 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 2292 cur_fwd_config.nb_fwd_lcores = 2293 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 2294 if (verbose_level > 0) { 2295 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 2296 __FUNCTION__, 2297 cur_fwd_config.nb_fwd_lcores, 2298 cur_fwd_config.nb_fwd_ports, 2299 cur_fwd_config.nb_fwd_streams); 2300 } 2301 2302 /* reinitialize forwarding streams */ 2303 init_fwd_streams(); 2304 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2305 rxp = 0; rxq = 0; 2306 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 2307 if (verbose_level > 0) 2308 printf(" core=%d: \n", lc_id); 2309 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2310 struct fwd_stream *fs; 2311 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2312 fs->rx_port = fwd_ports_ids[rxp]; 2313 fs->rx_queue = rxq; 2314 fs->tx_port = fs->rx_port; 2315 fs->tx_queue = rxq; 2316 fs->peer_addr = fs->tx_port; 2317 fs->retry_enabled = retry_enabled; 2318 if (verbose_level > 0) 2319 printf(" stream=%d port=%d rxq=%d txq=%d\n", 2320 sm_id, fs->rx_port, fs->rx_queue, 2321 fs->tx_queue); 2322 rxq = (queueid_t) (rxq + 1); 2323 if (rxq == nb_rxq) { 2324 rxq = 0; 2325 rxp = (portid_t) (rxp + 1); 2326 } 2327 } 2328 } 2329 } 2330 2331 #if defined RTE_LIBRTE_PMD_SOFTNIC 2332 static void 2333 softnic_fwd_config_setup(void) 2334 { 2335 struct rte_port *port; 2336 portid_t pid, softnic_portid; 2337 queueid_t i; 2338 uint8_t softnic_enable = 0; 2339 2340 RTE_ETH_FOREACH_DEV(pid) { 2341 port = &ports[pid]; 2342 const char *driver = port->dev_info.driver_name; 2343 2344 if (strcmp(driver, "net_softnic") == 0) { 2345 softnic_portid = pid; 2346 softnic_enable = 1; 2347 break; 2348 } 2349 } 2350 2351 if (softnic_enable == 0) { 2352 printf("Softnic mode not configured(%s)!\n", __func__); 2353 return; 2354 } 2355 2356 cur_fwd_config.nb_fwd_ports = 1; 2357 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; 2358 2359 /* Re-initialize forwarding streams */ 2360 init_fwd_streams(); 2361 2362 /* 2363 * In the softnic forwarding test, the number of forwarding cores 2364 * is set to one and remaining are used for softnic packet processing. 2365 */ 2366 cur_fwd_config.nb_fwd_lcores = 1; 2367 setup_fwd_config_of_each_lcore(&cur_fwd_config); 2368 2369 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { 2370 fwd_streams[i]->rx_port = softnic_portid; 2371 fwd_streams[i]->rx_queue = i; 2372 fwd_streams[i]->tx_port = softnic_portid; 2373 fwd_streams[i]->tx_queue = i; 2374 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 2375 fwd_streams[i]->retry_enabled = retry_enabled; 2376 } 2377 } 2378 #endif 2379 2380 void 2381 fwd_config_setup(void) 2382 { 2383 cur_fwd_config.fwd_eng = cur_fwd_eng; 2384 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 2385 icmp_echo_config_setup(); 2386 return; 2387 } 2388 2389 #if defined RTE_LIBRTE_PMD_SOFTNIC 2390 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 2391 softnic_fwd_config_setup(); 2392 return; 2393 } 2394 #endif 2395 2396 if ((nb_rxq > 1) && (nb_txq > 1)){ 2397 if (dcb_config) 2398 dcb_fwd_config_setup(); 2399 else 2400 rss_fwd_config_setup(); 2401 } 2402 else 2403 simple_fwd_config_setup(); 2404 } 2405 2406 void 2407 pkt_fwd_config_display(struct fwd_config *cfg) 2408 { 2409 struct fwd_stream *fs; 2410 lcoreid_t lc_id; 2411 streamid_t sm_id; 2412 2413 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 2414 "NUMA support %s, MP over anonymous pages %s\n", 2415 cfg->fwd_eng->fwd_mode_name, 2416 retry_enabled == 0 ? "" : " with retry", 2417 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 2418 numa_support == 1 ? "enabled" : "disabled", 2419 mp_anon != 0 ? "enabled" : "disabled"); 2420 2421 if (retry_enabled) 2422 printf("TX retry num: %u, delay between TX retries: %uus\n", 2423 burst_tx_retry_num, burst_tx_delay_time); 2424 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 2425 printf("Logical Core %u (socket %u) forwards packets on " 2426 "%d streams:", 2427 fwd_lcores_cpuids[lc_id], 2428 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 2429 fwd_lcores[lc_id]->stream_nb); 2430 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 2431 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 2432 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 2433 "P=%d/Q=%d (socket %u) ", 2434 fs->rx_port, fs->rx_queue, 2435 ports[fs->rx_port].socket_id, 2436 fs->tx_port, fs->tx_queue, 2437 ports[fs->tx_port].socket_id); 2438 print_ethaddr("peer=", 2439 &peer_eth_addrs[fs->peer_addr]); 2440 } 2441 printf("\n"); 2442 } 2443 printf("\n"); 2444 } 2445 2446 void 2447 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 2448 { 2449 uint8_t c, new_peer_addr[6]; 2450 if (!rte_eth_dev_is_valid_port(port_id)) { 2451 printf("Error: Invalid port number %i\n", port_id); 2452 return; 2453 } 2454 if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, 2455 sizeof(new_peer_addr)) < 0) { 2456 printf("Error: Invalid ethernet address: %s\n", peer_addr); 2457 return; 2458 } 2459 for (c = 0; c < 6; c++) 2460 peer_eth_addrs[port_id].addr_bytes[c] = 2461 new_peer_addr[c]; 2462 } 2463 2464 int 2465 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 2466 { 2467 unsigned int i; 2468 unsigned int lcore_cpuid; 2469 int record_now; 2470 2471 record_now = 0; 2472 again: 2473 for (i = 0; i < nb_lc; i++) { 2474 lcore_cpuid = lcorelist[i]; 2475 if (! rte_lcore_is_enabled(lcore_cpuid)) { 2476 printf("lcore %u not enabled\n", lcore_cpuid); 2477 return -1; 2478 } 2479 if (lcore_cpuid == rte_get_master_lcore()) { 2480 printf("lcore %u cannot be masked on for running " 2481 "packet forwarding, which is the master lcore " 2482 "and reserved for command line parsing only\n", 2483 lcore_cpuid); 2484 return -1; 2485 } 2486 if (record_now) 2487 fwd_lcores_cpuids[i] = lcore_cpuid; 2488 } 2489 if (record_now == 0) { 2490 record_now = 1; 2491 goto again; 2492 } 2493 nb_cfg_lcores = (lcoreid_t) nb_lc; 2494 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 2495 printf("previous number of forwarding cores %u - changed to " 2496 "number of configured cores %u\n", 2497 (unsigned int) nb_fwd_lcores, nb_lc); 2498 nb_fwd_lcores = (lcoreid_t) nb_lc; 2499 } 2500 2501 return 0; 2502 } 2503 2504 int 2505 set_fwd_lcores_mask(uint64_t lcoremask) 2506 { 2507 unsigned int lcorelist[64]; 2508 unsigned int nb_lc; 2509 unsigned int i; 2510 2511 if (lcoremask == 0) { 2512 printf("Invalid NULL mask of cores\n"); 2513 return -1; 2514 } 2515 nb_lc = 0; 2516 for (i = 0; i < 64; i++) { 2517 if (! ((uint64_t)(1ULL << i) & lcoremask)) 2518 continue; 2519 lcorelist[nb_lc++] = i; 2520 } 2521 return set_fwd_lcores_list(lcorelist, nb_lc); 2522 } 2523 2524 void 2525 set_fwd_lcores_number(uint16_t nb_lc) 2526 { 2527 if (nb_lc > nb_cfg_lcores) { 2528 printf("nb fwd cores %u > %u (max. number of configured " 2529 "lcores) - ignored\n", 2530 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 2531 return; 2532 } 2533 nb_fwd_lcores = (lcoreid_t) nb_lc; 2534 printf("Number of forwarding cores set to %u\n", 2535 (unsigned int) nb_fwd_lcores); 2536 } 2537 2538 void 2539 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 2540 { 2541 unsigned int i; 2542 portid_t port_id; 2543 int record_now; 2544 2545 record_now = 0; 2546 again: 2547 for (i = 0; i < nb_pt; i++) { 2548 port_id = (portid_t) portlist[i]; 2549 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2550 return; 2551 if (record_now) 2552 fwd_ports_ids[i] = port_id; 2553 } 2554 if (record_now == 0) { 2555 record_now = 1; 2556 goto again; 2557 } 2558 nb_cfg_ports = (portid_t) nb_pt; 2559 if (nb_fwd_ports != (portid_t) nb_pt) { 2560 printf("previous number of forwarding ports %u - changed to " 2561 "number of configured ports %u\n", 2562 (unsigned int) nb_fwd_ports, nb_pt); 2563 nb_fwd_ports = (portid_t) nb_pt; 2564 } 2565 } 2566 2567 void 2568 set_fwd_ports_mask(uint64_t portmask) 2569 { 2570 unsigned int portlist[64]; 2571 unsigned int nb_pt; 2572 unsigned int i; 2573 2574 if (portmask == 0) { 2575 printf("Invalid NULL mask of ports\n"); 2576 return; 2577 } 2578 nb_pt = 0; 2579 RTE_ETH_FOREACH_DEV(i) { 2580 if (! ((uint64_t)(1ULL << i) & portmask)) 2581 continue; 2582 portlist[nb_pt++] = i; 2583 } 2584 set_fwd_ports_list(portlist, nb_pt); 2585 } 2586 2587 void 2588 set_fwd_ports_number(uint16_t nb_pt) 2589 { 2590 if (nb_pt > nb_cfg_ports) { 2591 printf("nb fwd ports %u > %u (number of configured " 2592 "ports) - ignored\n", 2593 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 2594 return; 2595 } 2596 nb_fwd_ports = (portid_t) nb_pt; 2597 printf("Number of forwarding ports set to %u\n", 2598 (unsigned int) nb_fwd_ports); 2599 } 2600 2601 int 2602 port_is_forwarding(portid_t port_id) 2603 { 2604 unsigned int i; 2605 2606 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2607 return -1; 2608 2609 for (i = 0; i < nb_fwd_ports; i++) { 2610 if (fwd_ports_ids[i] == port_id) 2611 return 1; 2612 } 2613 2614 return 0; 2615 } 2616 2617 void 2618 set_nb_pkt_per_burst(uint16_t nb) 2619 { 2620 if (nb > MAX_PKT_BURST) { 2621 printf("nb pkt per burst: %u > %u (maximum packet per burst) " 2622 " ignored\n", 2623 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 2624 return; 2625 } 2626 nb_pkt_per_burst = nb; 2627 printf("Number of packets per burst set to %u\n", 2628 (unsigned int) nb_pkt_per_burst); 2629 } 2630 2631 static const char * 2632 tx_split_get_name(enum tx_pkt_split split) 2633 { 2634 uint32_t i; 2635 2636 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2637 if (tx_split_name[i].split == split) 2638 return tx_split_name[i].name; 2639 } 2640 return NULL; 2641 } 2642 2643 void 2644 set_tx_pkt_split(const char *name) 2645 { 2646 uint32_t i; 2647 2648 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 2649 if (strcmp(tx_split_name[i].name, name) == 0) { 2650 tx_pkt_split = tx_split_name[i].split; 2651 return; 2652 } 2653 } 2654 printf("unknown value: \"%s\"\n", name); 2655 } 2656 2657 void 2658 show_tx_pkt_segments(void) 2659 { 2660 uint32_t i, n; 2661 const char *split; 2662 2663 n = tx_pkt_nb_segs; 2664 split = tx_split_get_name(tx_pkt_split); 2665 2666 printf("Number of segments: %u\n", n); 2667 printf("Segment sizes: "); 2668 for (i = 0; i != n - 1; i++) 2669 printf("%hu,", tx_pkt_seg_lengths[i]); 2670 printf("%hu\n", tx_pkt_seg_lengths[i]); 2671 printf("Split packet: %s\n", split); 2672 } 2673 2674 void 2675 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) 2676 { 2677 uint16_t tx_pkt_len; 2678 unsigned i; 2679 2680 if (nb_segs >= (unsigned) nb_txd) { 2681 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", 2682 nb_segs, (unsigned int) nb_txd); 2683 return; 2684 } 2685 2686 /* 2687 * Check that each segment length is greater or equal than 2688 * the mbuf data sise. 2689 * Check also that the total packet length is greater or equal than the 2690 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). 2691 */ 2692 tx_pkt_len = 0; 2693 for (i = 0; i < nb_segs; i++) { 2694 if (seg_lengths[i] > (unsigned) mbuf_data_size) { 2695 printf("length[%u]=%u > mbuf_data_size=%u - give up\n", 2696 i, seg_lengths[i], (unsigned) mbuf_data_size); 2697 return; 2698 } 2699 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 2700 } 2701 if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { 2702 printf("total packet length=%u < %d - give up\n", 2703 (unsigned) tx_pkt_len, 2704 (int)(sizeof(struct ether_hdr) + 20 + 8)); 2705 return; 2706 } 2707 2708 for (i = 0; i < nb_segs; i++) 2709 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 2710 2711 tx_pkt_length = tx_pkt_len; 2712 tx_pkt_nb_segs = (uint8_t) nb_segs; 2713 } 2714 2715 void 2716 setup_gro(const char *onoff, portid_t port_id) 2717 { 2718 if (!rte_eth_dev_is_valid_port(port_id)) { 2719 printf("invalid port id %u\n", port_id); 2720 return; 2721 } 2722 if (test_done == 0) { 2723 printf("Before enable/disable GRO," 2724 " please stop forwarding first\n"); 2725 return; 2726 } 2727 if (strcmp(onoff, "on") == 0) { 2728 if (gro_ports[port_id].enable != 0) { 2729 printf("Port %u has enabled GRO. Please" 2730 " disable GRO first\n", port_id); 2731 return; 2732 } 2733 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2734 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 2735 gro_ports[port_id].param.max_flow_num = 2736 GRO_DEFAULT_FLOW_NUM; 2737 gro_ports[port_id].param.max_item_per_flow = 2738 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 2739 } 2740 gro_ports[port_id].enable = 1; 2741 } else { 2742 if (gro_ports[port_id].enable == 0) { 2743 printf("Port %u has disabled GRO\n", port_id); 2744 return; 2745 } 2746 gro_ports[port_id].enable = 0; 2747 } 2748 } 2749 2750 void 2751 setup_gro_flush_cycles(uint8_t cycles) 2752 { 2753 if (test_done == 0) { 2754 printf("Before change flush interval for GRO," 2755 " please stop forwarding first.\n"); 2756 return; 2757 } 2758 2759 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 2760 GRO_DEFAULT_FLUSH_CYCLES) { 2761 printf("The flushing cycle be in the range" 2762 " of 1 to %u. Revert to the default" 2763 " value %u.\n", 2764 GRO_MAX_FLUSH_CYCLES, 2765 GRO_DEFAULT_FLUSH_CYCLES); 2766 cycles = GRO_DEFAULT_FLUSH_CYCLES; 2767 } 2768 2769 gro_flush_cycles = cycles; 2770 } 2771 2772 void 2773 show_gro(portid_t port_id) 2774 { 2775 struct rte_gro_param *param; 2776 uint32_t max_pkts_num; 2777 2778 param = &gro_ports[port_id].param; 2779 2780 if (!rte_eth_dev_is_valid_port(port_id)) { 2781 printf("Invalid port id %u.\n", port_id); 2782 return; 2783 } 2784 if (gro_ports[port_id].enable) { 2785 printf("GRO type: TCP/IPv4\n"); 2786 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 2787 max_pkts_num = param->max_flow_num * 2788 param->max_item_per_flow; 2789 } else 2790 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 2791 printf("Max number of packets to perform GRO: %u\n", 2792 max_pkts_num); 2793 printf("Flushing cycles: %u\n", gro_flush_cycles); 2794 } else 2795 printf("Port %u doesn't enable GRO.\n", port_id); 2796 } 2797 2798 void 2799 setup_gso(const char *mode, portid_t port_id) 2800 { 2801 if (!rte_eth_dev_is_valid_port(port_id)) { 2802 printf("invalid port id %u\n", port_id); 2803 return; 2804 } 2805 if (strcmp(mode, "on") == 0) { 2806 if (test_done == 0) { 2807 printf("before enabling GSO," 2808 " please stop forwarding first\n"); 2809 return; 2810 } 2811 gso_ports[port_id].enable = 1; 2812 } else if (strcmp(mode, "off") == 0) { 2813 if (test_done == 0) { 2814 printf("before disabling GSO," 2815 " please stop forwarding first\n"); 2816 return; 2817 } 2818 gso_ports[port_id].enable = 0; 2819 } 2820 } 2821 2822 char* 2823 list_pkt_forwarding_modes(void) 2824 { 2825 static char fwd_modes[128] = ""; 2826 const char *separator = "|"; 2827 struct fwd_engine *fwd_eng; 2828 unsigned i = 0; 2829 2830 if (strlen (fwd_modes) == 0) { 2831 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2832 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2833 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2834 strncat(fwd_modes, separator, 2835 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 2836 } 2837 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2838 } 2839 2840 return fwd_modes; 2841 } 2842 2843 char* 2844 list_pkt_forwarding_retry_modes(void) 2845 { 2846 static char fwd_modes[128] = ""; 2847 const char *separator = "|"; 2848 struct fwd_engine *fwd_eng; 2849 unsigned i = 0; 2850 2851 if (strlen(fwd_modes) == 0) { 2852 while ((fwd_eng = fwd_engines[i++]) != NULL) { 2853 if (fwd_eng == &rx_only_engine) 2854 continue; 2855 strncat(fwd_modes, fwd_eng->fwd_mode_name, 2856 sizeof(fwd_modes) - 2857 strlen(fwd_modes) - 1); 2858 strncat(fwd_modes, separator, 2859 sizeof(fwd_modes) - 2860 strlen(fwd_modes) - 1); 2861 } 2862 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 2863 } 2864 2865 return fwd_modes; 2866 } 2867 2868 void 2869 set_pkt_forwarding_mode(const char *fwd_mode_name) 2870 { 2871 struct fwd_engine *fwd_eng; 2872 unsigned i; 2873 2874 i = 0; 2875 while ((fwd_eng = fwd_engines[i]) != NULL) { 2876 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 2877 printf("Set %s packet forwarding mode%s\n", 2878 fwd_mode_name, 2879 retry_enabled == 0 ? "" : " with retry"); 2880 cur_fwd_eng = fwd_eng; 2881 return; 2882 } 2883 i++; 2884 } 2885 printf("Invalid %s packet forwarding mode\n", fwd_mode_name); 2886 } 2887 2888 void 2889 set_verbose_level(uint16_t vb_level) 2890 { 2891 printf("Change verbose level from %u to %u\n", 2892 (unsigned int) verbose_level, (unsigned int) vb_level); 2893 verbose_level = vb_level; 2894 } 2895 2896 void 2897 vlan_extend_set(portid_t port_id, int on) 2898 { 2899 int diag; 2900 int vlan_offload; 2901 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2902 2903 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2904 return; 2905 2906 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2907 2908 if (on) { 2909 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; 2910 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 2911 } else { 2912 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; 2913 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2914 } 2915 2916 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2917 if (diag < 0) 2918 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " 2919 "diag=%d\n", port_id, on, diag); 2920 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2921 } 2922 2923 void 2924 rx_vlan_strip_set(portid_t port_id, int on) 2925 { 2926 int diag; 2927 int vlan_offload; 2928 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2929 2930 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2931 return; 2932 2933 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2934 2935 if (on) { 2936 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; 2937 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2938 } else { 2939 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; 2940 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2941 } 2942 2943 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2944 if (diag < 0) 2945 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " 2946 "diag=%d\n", port_id, on, diag); 2947 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2948 } 2949 2950 void 2951 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 2952 { 2953 int diag; 2954 2955 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2956 return; 2957 2958 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 2959 if (diag < 0) 2960 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " 2961 "diag=%d\n", port_id, queue_id, on, diag); 2962 } 2963 2964 void 2965 rx_vlan_filter_set(portid_t port_id, int on) 2966 { 2967 int diag; 2968 int vlan_offload; 2969 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 2970 2971 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2972 return; 2973 2974 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 2975 2976 if (on) { 2977 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; 2978 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2979 } else { 2980 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; 2981 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 2982 } 2983 2984 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 2985 if (diag < 0) 2986 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " 2987 "diag=%d\n", port_id, on, diag); 2988 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 2989 } 2990 2991 int 2992 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 2993 { 2994 int diag; 2995 2996 if (port_id_is_invalid(port_id, ENABLED_WARN)) 2997 return 1; 2998 if (vlan_id_is_invalid(vlan_id)) 2999 return 1; 3000 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 3001 if (diag == 0) 3002 return 0; 3003 printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " 3004 "diag=%d\n", 3005 port_id, vlan_id, on, diag); 3006 return -1; 3007 } 3008 3009 void 3010 rx_vlan_all_filter_set(portid_t port_id, int on) 3011 { 3012 uint16_t vlan_id; 3013 3014 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3015 return; 3016 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 3017 if (rx_vft_set(port_id, vlan_id, on)) 3018 break; 3019 } 3020 } 3021 3022 void 3023 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 3024 { 3025 int diag; 3026 3027 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3028 return; 3029 3030 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 3031 if (diag == 0) 3032 return; 3033 3034 printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " 3035 "diag=%d\n", 3036 port_id, vlan_type, tp_id, diag); 3037 } 3038 3039 void 3040 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 3041 { 3042 int vlan_offload; 3043 struct rte_eth_dev_info dev_info; 3044 3045 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3046 return; 3047 if (vlan_id_is_invalid(vlan_id)) 3048 return; 3049 3050 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3051 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) { 3052 printf("Error, as QinQ has been enabled.\n"); 3053 return; 3054 } 3055 rte_eth_dev_info_get(port_id, &dev_info); 3056 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { 3057 printf("Error: vlan insert is not supported by port %d\n", 3058 port_id); 3059 return; 3060 } 3061 3062 tx_vlan_reset(port_id); 3063 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; 3064 ports[port_id].tx_vlan_id = vlan_id; 3065 } 3066 3067 void 3068 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 3069 { 3070 int vlan_offload; 3071 struct rte_eth_dev_info dev_info; 3072 3073 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3074 return; 3075 if (vlan_id_is_invalid(vlan_id)) 3076 return; 3077 if (vlan_id_is_invalid(vlan_id_outer)) 3078 return; 3079 3080 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 3081 if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) { 3082 printf("Error, as QinQ hasn't been enabled.\n"); 3083 return; 3084 } 3085 rte_eth_dev_info_get(port_id, &dev_info); 3086 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { 3087 printf("Error: qinq insert not supported by port %d\n", 3088 port_id); 3089 return; 3090 } 3091 3092 tx_vlan_reset(port_id); 3093 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; 3094 ports[port_id].tx_vlan_id = vlan_id; 3095 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 3096 } 3097 3098 void 3099 tx_vlan_reset(portid_t port_id) 3100 { 3101 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3102 return; 3103 ports[port_id].dev_conf.txmode.offloads &= 3104 ~(DEV_TX_OFFLOAD_VLAN_INSERT | 3105 DEV_TX_OFFLOAD_QINQ_INSERT); 3106 ports[port_id].tx_vlan_id = 0; 3107 ports[port_id].tx_vlan_id_outer = 0; 3108 } 3109 3110 void 3111 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 3112 { 3113 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3114 return; 3115 3116 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 3117 } 3118 3119 void 3120 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 3121 { 3122 uint16_t i; 3123 uint8_t existing_mapping_found = 0; 3124 3125 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3126 return; 3127 3128 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 3129 return; 3130 3131 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 3132 printf("map_value not in required range 0..%d\n", 3133 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 3134 return; 3135 } 3136 3137 if (!is_rx) { /*then tx*/ 3138 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3139 if ((tx_queue_stats_mappings[i].port_id == port_id) && 3140 (tx_queue_stats_mappings[i].queue_id == queue_id)) { 3141 tx_queue_stats_mappings[i].stats_counter_id = map_value; 3142 existing_mapping_found = 1; 3143 break; 3144 } 3145 } 3146 if (!existing_mapping_found) { /* A new additional mapping... */ 3147 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; 3148 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; 3149 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; 3150 nb_tx_queue_stats_mappings++; 3151 } 3152 } 3153 else { /*rx*/ 3154 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3155 if ((rx_queue_stats_mappings[i].port_id == port_id) && 3156 (rx_queue_stats_mappings[i].queue_id == queue_id)) { 3157 rx_queue_stats_mappings[i].stats_counter_id = map_value; 3158 existing_mapping_found = 1; 3159 break; 3160 } 3161 } 3162 if (!existing_mapping_found) { /* A new additional mapping... */ 3163 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; 3164 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; 3165 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; 3166 nb_rx_queue_stats_mappings++; 3167 } 3168 } 3169 } 3170 3171 void 3172 set_xstats_hide_zero(uint8_t on_off) 3173 { 3174 xstats_hide_zero = on_off; 3175 } 3176 3177 static inline void 3178 print_fdir_mask(struct rte_eth_fdir_masks *mask) 3179 { 3180 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 3181 3182 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3183 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 3184 " tunnel_id: 0x%08x", 3185 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 3186 rte_be_to_cpu_32(mask->tunnel_id_mask)); 3187 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3188 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 3189 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 3190 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 3191 3192 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 3193 rte_be_to_cpu_16(mask->src_port_mask), 3194 rte_be_to_cpu_16(mask->dst_port_mask)); 3195 3196 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3197 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 3198 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 3199 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 3200 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 3201 3202 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 3203 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 3204 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 3205 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 3206 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 3207 } 3208 3209 printf("\n"); 3210 } 3211 3212 static inline void 3213 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3214 { 3215 struct rte_eth_flex_payload_cfg *cfg; 3216 uint32_t i, j; 3217 3218 for (i = 0; i < flex_conf->nb_payloads; i++) { 3219 cfg = &flex_conf->flex_set[i]; 3220 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 3221 printf("\n RAW: "); 3222 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 3223 printf("\n L2_PAYLOAD: "); 3224 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 3225 printf("\n L3_PAYLOAD: "); 3226 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 3227 printf("\n L4_PAYLOAD: "); 3228 else 3229 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 3230 for (j = 0; j < num; j++) 3231 printf(" %-5u", cfg->src_offset[j]); 3232 } 3233 printf("\n"); 3234 } 3235 3236 static char * 3237 flowtype_to_str(uint16_t flow_type) 3238 { 3239 struct flow_type_info { 3240 char str[32]; 3241 uint16_t ftype; 3242 }; 3243 3244 uint8_t i; 3245 static struct flow_type_info flowtype_str_table[] = { 3246 {"raw", RTE_ETH_FLOW_RAW}, 3247 {"ipv4", RTE_ETH_FLOW_IPV4}, 3248 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 3249 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 3250 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 3251 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 3252 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 3253 {"ipv6", RTE_ETH_FLOW_IPV6}, 3254 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 3255 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 3256 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 3257 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 3258 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 3259 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 3260 {"port", RTE_ETH_FLOW_PORT}, 3261 {"vxlan", RTE_ETH_FLOW_VXLAN}, 3262 {"geneve", RTE_ETH_FLOW_GENEVE}, 3263 {"nvgre", RTE_ETH_FLOW_NVGRE}, 3264 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 3265 }; 3266 3267 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 3268 if (flowtype_str_table[i].ftype == flow_type) 3269 return flowtype_str_table[i].str; 3270 } 3271 3272 return NULL; 3273 } 3274 3275 static inline void 3276 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 3277 { 3278 struct rte_eth_fdir_flex_mask *mask; 3279 uint32_t i, j; 3280 char *p; 3281 3282 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 3283 mask = &flex_conf->flex_mask[i]; 3284 p = flowtype_to_str(mask->flow_type); 3285 printf("\n %s:\t", p ? p : "unknown"); 3286 for (j = 0; j < num; j++) 3287 printf(" %02x", mask->mask[j]); 3288 } 3289 printf("\n"); 3290 } 3291 3292 static inline void 3293 print_fdir_flow_type(uint32_t flow_types_mask) 3294 { 3295 int i; 3296 char *p; 3297 3298 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 3299 if (!(flow_types_mask & (1 << i))) 3300 continue; 3301 p = flowtype_to_str(i); 3302 if (p) 3303 printf(" %s", p); 3304 else 3305 printf(" unknown"); 3306 } 3307 printf("\n"); 3308 } 3309 3310 void 3311 fdir_get_infos(portid_t port_id) 3312 { 3313 struct rte_eth_fdir_stats fdir_stat; 3314 struct rte_eth_fdir_info fdir_info; 3315 int ret; 3316 3317 static const char *fdir_stats_border = "########################"; 3318 3319 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3320 return; 3321 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); 3322 if (ret < 0) { 3323 printf("\n FDIR is not supported on port %-2d\n", 3324 port_id); 3325 return; 3326 } 3327 3328 memset(&fdir_info, 0, sizeof(fdir_info)); 3329 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3330 RTE_ETH_FILTER_INFO, &fdir_info); 3331 memset(&fdir_stat, 0, sizeof(fdir_stat)); 3332 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, 3333 RTE_ETH_FILTER_STATS, &fdir_stat); 3334 printf("\n %s FDIR infos for port %-2d %s\n", 3335 fdir_stats_border, port_id, fdir_stats_border); 3336 printf(" MODE: "); 3337 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 3338 printf(" PERFECT\n"); 3339 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 3340 printf(" PERFECT-MAC-VLAN\n"); 3341 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3342 printf(" PERFECT-TUNNEL\n"); 3343 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 3344 printf(" SIGNATURE\n"); 3345 else 3346 printf(" DISABLE\n"); 3347 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 3348 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 3349 printf(" SUPPORTED FLOW TYPE: "); 3350 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 3351 } 3352 printf(" FLEX PAYLOAD INFO:\n"); 3353 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 3354 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 3355 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 3356 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 3357 fdir_info.flex_payload_unit, 3358 fdir_info.max_flex_payload_segment_num, 3359 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 3360 printf(" MASK: "); 3361 print_fdir_mask(&fdir_info.mask); 3362 if (fdir_info.flex_conf.nb_payloads > 0) { 3363 printf(" FLEX PAYLOAD SRC OFFSET:"); 3364 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3365 } 3366 if (fdir_info.flex_conf.nb_flexmasks > 0) { 3367 printf(" FLEX MASK CFG:"); 3368 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 3369 } 3370 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 3371 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 3372 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 3373 fdir_info.guarant_spc, fdir_info.best_spc); 3374 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 3375 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 3376 " add: %-10"PRIu64" remove: %"PRIu64"\n" 3377 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 3378 fdir_stat.collision, fdir_stat.free, 3379 fdir_stat.maxhash, fdir_stat.maxlen, 3380 fdir_stat.add, fdir_stat.remove, 3381 fdir_stat.f_add, fdir_stat.f_remove); 3382 printf(" %s############################%s\n", 3383 fdir_stats_border, fdir_stats_border); 3384 } 3385 3386 void 3387 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 3388 { 3389 struct rte_port *port; 3390 struct rte_eth_fdir_flex_conf *flex_conf; 3391 int i, idx = 0; 3392 3393 port = &ports[port_id]; 3394 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3395 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 3396 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 3397 idx = i; 3398 break; 3399 } 3400 } 3401 if (i >= RTE_ETH_FLOW_MAX) { 3402 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 3403 idx = flex_conf->nb_flexmasks; 3404 flex_conf->nb_flexmasks++; 3405 } else { 3406 printf("The flex mask table is full. Can not set flex" 3407 " mask for flow_type(%u).", cfg->flow_type); 3408 return; 3409 } 3410 } 3411 rte_memcpy(&flex_conf->flex_mask[idx], 3412 cfg, 3413 sizeof(struct rte_eth_fdir_flex_mask)); 3414 } 3415 3416 void 3417 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 3418 { 3419 struct rte_port *port; 3420 struct rte_eth_fdir_flex_conf *flex_conf; 3421 int i, idx = 0; 3422 3423 port = &ports[port_id]; 3424 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 3425 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 3426 if (cfg->type == flex_conf->flex_set[i].type) { 3427 idx = i; 3428 break; 3429 } 3430 } 3431 if (i >= RTE_ETH_PAYLOAD_MAX) { 3432 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 3433 idx = flex_conf->nb_payloads; 3434 flex_conf->nb_payloads++; 3435 } else { 3436 printf("The flex payload table is full. Can not set" 3437 " flex payload for type(%u).", cfg->type); 3438 return; 3439 } 3440 } 3441 rte_memcpy(&flex_conf->flex_set[idx], 3442 cfg, 3443 sizeof(struct rte_eth_flex_payload_cfg)); 3444 3445 } 3446 3447 void 3448 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 3449 { 3450 #ifdef RTE_LIBRTE_IXGBE_PMD 3451 int diag; 3452 3453 if (is_rx) 3454 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 3455 else 3456 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 3457 3458 if (diag == 0) 3459 return; 3460 printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 3461 is_rx ? "rx" : "tx", port_id, diag); 3462 return; 3463 #endif 3464 printf("VF %s setting not supported for port %d\n", 3465 is_rx ? "Rx" : "Tx", port_id); 3466 RTE_SET_USED(vf); 3467 RTE_SET_USED(on); 3468 } 3469 3470 int 3471 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 3472 { 3473 int diag; 3474 struct rte_eth_link link; 3475 3476 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3477 return 1; 3478 rte_eth_link_get_nowait(port_id, &link); 3479 if (rate > link.link_speed) { 3480 printf("Invalid rate value:%u bigger than link speed: %u\n", 3481 rate, link.link_speed); 3482 return 1; 3483 } 3484 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 3485 if (diag == 0) 3486 return diag; 3487 printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 3488 port_id, diag); 3489 return diag; 3490 } 3491 3492 int 3493 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 3494 { 3495 int diag = -ENOTSUP; 3496 3497 RTE_SET_USED(vf); 3498 RTE_SET_USED(rate); 3499 RTE_SET_USED(q_msk); 3500 3501 #ifdef RTE_LIBRTE_IXGBE_PMD 3502 if (diag == -ENOTSUP) 3503 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 3504 q_msk); 3505 #endif 3506 #ifdef RTE_LIBRTE_BNXT_PMD 3507 if (diag == -ENOTSUP) 3508 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 3509 #endif 3510 if (diag == 0) 3511 return diag; 3512 3513 printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", 3514 port_id, diag); 3515 return diag; 3516 } 3517 3518 /* 3519 * Functions to manage the set of filtered Multicast MAC addresses. 3520 * 3521 * A pool of filtered multicast MAC addresses is associated with each port. 3522 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 3523 * The address of the pool and the number of valid multicast MAC addresses 3524 * recorded in the pool are stored in the fields "mc_addr_pool" and 3525 * "mc_addr_nb" of the "rte_port" data structure. 3526 * 3527 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 3528 * to be supplied a contiguous array of multicast MAC addresses. 3529 * To comply with this constraint, the set of multicast addresses recorded 3530 * into the pool are systematically compacted at the beginning of the pool. 3531 * Hence, when a multicast address is removed from the pool, all following 3532 * addresses, if any, are copied back to keep the set contiguous. 3533 */ 3534 #define MCAST_POOL_INC 32 3535 3536 static int 3537 mcast_addr_pool_extend(struct rte_port *port) 3538 { 3539 struct ether_addr *mc_pool; 3540 size_t mc_pool_size; 3541 3542 /* 3543 * If a free entry is available at the end of the pool, just 3544 * increment the number of recorded multicast addresses. 3545 */ 3546 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 3547 port->mc_addr_nb++; 3548 return 0; 3549 } 3550 3551 /* 3552 * [re]allocate a pool with MCAST_POOL_INC more entries. 3553 * The previous test guarantees that port->mc_addr_nb is a multiple 3554 * of MCAST_POOL_INC. 3555 */ 3556 mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb + 3557 MCAST_POOL_INC); 3558 mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool, 3559 mc_pool_size); 3560 if (mc_pool == NULL) { 3561 printf("allocation of pool of %u multicast addresses failed\n", 3562 port->mc_addr_nb + MCAST_POOL_INC); 3563 return -ENOMEM; 3564 } 3565 3566 port->mc_addr_pool = mc_pool; 3567 port->mc_addr_nb++; 3568 return 0; 3569 3570 } 3571 3572 static void 3573 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 3574 { 3575 port->mc_addr_nb--; 3576 if (addr_idx == port->mc_addr_nb) { 3577 /* No need to recompact the set of multicast addressses. */ 3578 if (port->mc_addr_nb == 0) { 3579 /* free the pool of multicast addresses. */ 3580 free(port->mc_addr_pool); 3581 port->mc_addr_pool = NULL; 3582 } 3583 return; 3584 } 3585 memmove(&port->mc_addr_pool[addr_idx], 3586 &port->mc_addr_pool[addr_idx + 1], 3587 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx)); 3588 } 3589 3590 static void 3591 eth_port_multicast_addr_list_set(portid_t port_id) 3592 { 3593 struct rte_port *port; 3594 int diag; 3595 3596 port = &ports[port_id]; 3597 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 3598 port->mc_addr_nb); 3599 if (diag == 0) 3600 return; 3601 printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 3602 port->mc_addr_nb, port_id, -diag); 3603 } 3604 3605 void 3606 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr) 3607 { 3608 struct rte_port *port; 3609 uint32_t i; 3610 3611 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3612 return; 3613 3614 port = &ports[port_id]; 3615 3616 /* 3617 * Check that the added multicast MAC address is not already recorded 3618 * in the pool of multicast addresses. 3619 */ 3620 for (i = 0; i < port->mc_addr_nb; i++) { 3621 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 3622 printf("multicast address already filtered by port\n"); 3623 return; 3624 } 3625 } 3626 3627 if (mcast_addr_pool_extend(port) != 0) 3628 return; 3629 ether_addr_copy(mc_addr, &port->mc_addr_pool[i]); 3630 eth_port_multicast_addr_list_set(port_id); 3631 } 3632 3633 void 3634 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr) 3635 { 3636 struct rte_port *port; 3637 uint32_t i; 3638 3639 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3640 return; 3641 3642 port = &ports[port_id]; 3643 3644 /* 3645 * Search the pool of multicast MAC addresses for the removed address. 3646 */ 3647 for (i = 0; i < port->mc_addr_nb; i++) { 3648 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 3649 break; 3650 } 3651 if (i == port->mc_addr_nb) { 3652 printf("multicast address not filtered by port %d\n", port_id); 3653 return; 3654 } 3655 3656 mcast_addr_pool_remove(port, i); 3657 eth_port_multicast_addr_list_set(port_id); 3658 } 3659 3660 void 3661 port_dcb_info_display(portid_t port_id) 3662 { 3663 struct rte_eth_dcb_info dcb_info; 3664 uint16_t i; 3665 int ret; 3666 static const char *border = "================"; 3667 3668 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3669 return; 3670 3671 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 3672 if (ret) { 3673 printf("\n Failed to get dcb infos on port %-2d\n", 3674 port_id); 3675 return; 3676 } 3677 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 3678 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 3679 printf("\n TC : "); 3680 for (i = 0; i < dcb_info.nb_tcs; i++) 3681 printf("\t%4d", i); 3682 printf("\n Priority : "); 3683 for (i = 0; i < dcb_info.nb_tcs; i++) 3684 printf("\t%4d", dcb_info.prio_tc[i]); 3685 printf("\n BW percent :"); 3686 for (i = 0; i < dcb_info.nb_tcs; i++) 3687 printf("\t%4d%%", dcb_info.tc_bws[i]); 3688 printf("\n RXQ base : "); 3689 for (i = 0; i < dcb_info.nb_tcs; i++) 3690 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 3691 printf("\n RXQ number :"); 3692 for (i = 0; i < dcb_info.nb_tcs; i++) 3693 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 3694 printf("\n TXQ base : "); 3695 for (i = 0; i < dcb_info.nb_tcs; i++) 3696 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 3697 printf("\n TXQ number :"); 3698 for (i = 0; i < dcb_info.nb_tcs; i++) 3699 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 3700 printf("\n"); 3701 } 3702 3703 uint8_t * 3704 open_file(const char *file_path, uint32_t *size) 3705 { 3706 int fd = open(file_path, O_RDONLY); 3707 off_t pkg_size; 3708 uint8_t *buf = NULL; 3709 int ret = 0; 3710 struct stat st_buf; 3711 3712 if (size) 3713 *size = 0; 3714 3715 if (fd == -1) { 3716 printf("%s: Failed to open %s\n", __func__, file_path); 3717 return buf; 3718 } 3719 3720 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 3721 close(fd); 3722 printf("%s: File operations failed\n", __func__); 3723 return buf; 3724 } 3725 3726 pkg_size = st_buf.st_size; 3727 if (pkg_size < 0) { 3728 close(fd); 3729 printf("%s: File operations failed\n", __func__); 3730 return buf; 3731 } 3732 3733 buf = (uint8_t *)malloc(pkg_size); 3734 if (!buf) { 3735 close(fd); 3736 printf("%s: Failed to malloc memory\n", __func__); 3737 return buf; 3738 } 3739 3740 ret = read(fd, buf, pkg_size); 3741 if (ret < 0) { 3742 close(fd); 3743 printf("%s: File read operation failed\n", __func__); 3744 close_file(buf); 3745 return NULL; 3746 } 3747 3748 if (size) 3749 *size = pkg_size; 3750 3751 close(fd); 3752 3753 return buf; 3754 } 3755 3756 int 3757 save_file(const char *file_path, uint8_t *buf, uint32_t size) 3758 { 3759 FILE *fh = fopen(file_path, "wb"); 3760 3761 if (fh == NULL) { 3762 printf("%s: Failed to open %s\n", __func__, file_path); 3763 return -1; 3764 } 3765 3766 if (fwrite(buf, 1, size, fh) != size) { 3767 fclose(fh); 3768 printf("%s: File write operation failed\n", __func__); 3769 return -1; 3770 } 3771 3772 fclose(fh); 3773 3774 return 0; 3775 } 3776 3777 int 3778 close_file(uint8_t *buf) 3779 { 3780 if (buf) { 3781 free((void *)buf); 3782 return 0; 3783 } 3784 3785 return -1; 3786 } 3787 3788 void 3789 port_queue_region_info_display(portid_t port_id, void *buf) 3790 { 3791 #ifdef RTE_LIBRTE_I40E_PMD 3792 uint16_t i, j; 3793 struct rte_pmd_i40e_queue_regions *info = 3794 (struct rte_pmd_i40e_queue_regions *)buf; 3795 static const char *queue_region_info_stats_border = "-------"; 3796 3797 if (!info->queue_region_number) 3798 printf("there is no region has been set before"); 3799 3800 printf("\n %s All queue region info for port=%2d %s", 3801 queue_region_info_stats_border, port_id, 3802 queue_region_info_stats_border); 3803 printf("\n queue_region_number: %-14u \n", 3804 info->queue_region_number); 3805 3806 for (i = 0; i < info->queue_region_number; i++) { 3807 printf("\n region_id: %-14u queue_number: %-14u " 3808 "queue_start_index: %-14u \n", 3809 info->region[i].region_id, 3810 info->region[i].queue_num, 3811 info->region[i].queue_start_index); 3812 3813 printf(" user_priority_num is %-14u :", 3814 info->region[i].user_priority_num); 3815 for (j = 0; j < info->region[i].user_priority_num; j++) 3816 printf(" %-14u ", info->region[i].user_priority[j]); 3817 3818 printf("\n flowtype_num is %-14u :", 3819 info->region[i].flowtype_num); 3820 for (j = 0; j < info->region[i].flowtype_num; j++) 3821 printf(" %-14u ", info->region[i].hw_flowtype[j]); 3822 } 3823 #else 3824 RTE_SET_USED(port_id); 3825 RTE_SET_USED(buf); 3826 #endif 3827 3828 printf("\n\n"); 3829 } 3830