/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_tailq.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_fbk_hash.h>
#include <rte_ip.h>

#include "main.h"

#define RTE_LOGTYPE_IPv4_MULTICAST RTE_LOGTYPE_USER1

#define MAX_PORTS 16

#define MCAST_CLONE_PORTS 2
#define MCAST_CLONE_SEGS 2

#define PKT_MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_PKT_MBUF 8192

#define HDR_MBUF_SIZE (sizeof(struct rte_mbuf) + 2 * RTE_PKTMBUF_HEADROOM)
#define NB_HDR_MBUF (NB_PKT_MBUF * MAX_PORTS)

#define CLONE_MBUF_SIZE (sizeof(struct rte_mbuf))
#define NB_CLONE_MBUF (NB_PKT_MBUF * MCAST_CLONE_PORTS * MCAST_CLONE_SEGS * 2)
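/*
 * The three mbuf sizes above are sized to match how the pools are used
 * below: packet_pool mbufs (PKT_MBUF_SIZE) hold the received packet data
 * itself, header_pool mbufs (HDR_MBUF_SIZE) only ever carry a freshly
 * built header prepended to an existing packet, so they need no payload
 * room beyond the headroom, and clone_pool mbufs (CLONE_MBUF_SIZE) are
 * bare rte_mbuf structures that merely reference data owned by
 * packet_pool mbufs.
 */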
/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE 0x2600

/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3

/*
 * Construct Ethernet multicast address from IPv4 multicast address.
 * Citing RFC 1112, section 6.4:
 * "An IP host group address is mapped to an Ethernet multicast address
 * by placing the low-order 23-bits of the IP address into the low-order
 * 23 bits of the Ethernet multicast address 01-00-5E-00-00-00 (hex)."
 */
#define ETHER_ADDR_FOR_IPV4_MCAST(x)	\
	(rte_cpu_to_be_64(0x01005e000000ULL | ((x) & 0x7fffff)) >> 16)
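/*
 * For example: 224.0.0.101 is 0xE0000065 and its low-order 23 bits are
 * 0x000065, so the macro above yields the Ethernet multicast address
 * 01:00:5e:00:00:65. On a little-endian host the first six bytes of the
 * resulting 64-bit value hold exactly that address, so mcast_forward()
 * can copy it into an ether_addr through a union.
 */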
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[MAX_PORTS];

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

static uint8_t nb_ports = 0;

static int rx_queue_per_lcore = 1;

struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	uint64_t tx_tsc;
	uint16_t n_rx_queue;
	uint8_t rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[MAX_PORTS];
	struct mbuf_table tx_mbufs[MAX_PORTS];
} __rte_cache_aligned;
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
		.split_hdr_size = 0,
		.header_split = 0,   /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame = 1,    /**< Jumbo Frame Support enabled */
		.hw_strip_crc = 0,   /**< CRC stripping by hardware disabled */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
};

static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0,   /* Use PMD default values */
};

static struct rte_mempool *packet_pool, *header_pool, *clone_pool;


/* Multicast */
static struct rte_fbk_hash_params mcast_hash_params = {
	.name = "MCAST_HASH",
	.entries = 1024,
	.entries_per_bucket = 4,
	.socket_id = 0,
	.hash_func = NULL,
	.init_val = 0,
};

struct rte_fbk_hash_table *mcast_hash = NULL;

struct mcast_group_params {
	uint32_t ip;
	uint16_t port_mask;
};

static struct mcast_group_params mcast_group_table[] = {
	{IPv4(224,0,0,101), 0x1},
	{IPv4(224,0,0,102), 0x2},
	{IPv4(224,0,0,103), 0x3},
	{IPv4(224,0,0,104), 0x4},
	{IPv4(224,0,0,105), 0x5},
	{IPv4(224,0,0,106), 0x6},
	{IPv4(224,0,0,107), 0x7},
	{IPv4(224,0,0,108), 0x8},
	{IPv4(224,0,0,109), 0x9},
	{IPv4(224,0,0,110), 0xA},
	{IPv4(224,0,0,111), 0xB},
	{IPv4(224,0,0,112), 0xC},
	{IPv4(224,0,0,113), 0xD},
	{IPv4(224,0,0,114), 0xE},
	{IPv4(224,0,0,115), 0xF},
};

#define N_MCAST_GROUPS \
	(sizeof (mcast_group_table) / sizeof (mcast_group_table[0]))


/* Send burst of packets on an output interface */
static void
send_burst(struct lcore_queue_conf *qconf, uint8_t port)
{
	struct rte_mbuf **m_table;
	uint16_t n, queueid;
	int ret;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
	n = qconf->tx_mbufs[port].len;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	while (unlikely (ret < n)) {
		rte_pktmbuf_free(m_table[ret]);
		ret++;
	}

	qconf->tx_mbufs[port].len = 0;
}

/* Get number of bits set. */
static inline uint32_t
bitcnt(uint32_t v)
{
	uint32_t n;

	for (n = 0; v != 0; v &= v - 1, n++)
		;

	return (n);
}
/**
 * Create the output multicast packet based on the given input packet.
 * There are two approaches for creating the outgoing packet; both are
 * based on the same data zero-copy idea, but they differ in a few details:
 * The first one creates a clone of the input packet, i.e. it walks through
 * all segments of the input packet, and for each of them creates a new
 * packet mbuf and attaches that new mbuf to the segment (refer to
 * rte_pktmbuf_clone() for more details). Then a new mbuf is allocated for
 * the packet header and is prepended to the 'clone' mbuf.
 * The second approach doesn't make a clone, it just increments the refcnt
 * for all input packet segments. Then it allocates a new mbuf for the
 * packet header and prepends it to the input packet.
 * Basically the first approach reuses only the input packet's data, but
 * creates its own copy of the packet's metadata. The second approach
 * reuses both the input packet's data and metadata.
 * The advantage of the first approach is that each outgoing packet has its
 * own copy of the metadata, so we can safely modify the data pointer of
 * the input packet. That allows us to skip creation of the output packet
 * for the last destination port and instead modify the input packet's
 * header in place, i.e. for N destination ports we need to invoke
 * mcast_out_pkt() only (N-1) times.
 * The advantage of the second approach is less work for each outgoing
 * packet: we skip the "clone" operation completely. Though it comes with a
 * price - the input packet's metadata has to stay intact, so for N
 * destination ports we need to invoke mcast_out_pkt() N times.
 * So for a small number of outgoing ports (and segments in the input
 * packet) the first approach will be faster.
 * As the number of outgoing ports (and/or input segments) grows,
 * the second way becomes preferable.
 *
 * @param pkt
 *  Input packet mbuf.
 * @param use_clone
 *  Control which of the two approaches described above should be used:
 *  - 0 - use second approach:
 *    Don't "clone" input packet.
 *    Prepend new header directly to the input packet.
 *  - 1 - use first approach:
 *    Make a "clone" of input packet first.
 *    Prepend new header to the clone of the input packet.
 * @return
 *  - The pointer to the new outgoing packet.
 *  - NULL if operation failed.
 */
static inline struct rte_mbuf *
mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
{
	struct rte_mbuf *hdr;

	/* Create new mbuf for the header. */
	if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
		return (NULL);

	/* If requested, then make a new clone packet. */
	if (use_clone != 0 &&
	    unlikely ((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
		rte_pktmbuf_free(hdr);
		return (NULL);
	}

	/* prepend new header */
	hdr->next = pkt;


	/* update header's fields */
	hdr->pkt_len = (uint16_t)(hdr->data_len + pkt->pkt_len);
	hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);

	/* copy metadata from source packet */
	hdr->port = pkt->port;
	hdr->vlan_tci = pkt->vlan_tci;
	hdr->l2_l3_len = pkt->l2_l3_len;
	hdr->hash = pkt->hash;

	hdr->ol_flags = pkt->ol_flags;

	__rte_mbuf_sanity_check(hdr, 1);
	return (hdr);
}

/*
 * Write new Ethernet header to the outgoing packet,
 * and put it into the outgoing queue for the given port.
 */
static inline void
mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
		struct lcore_queue_conf *qconf, uint8_t port)
{
	struct ether_hdr *ethdr;
	uint16_t len;

	/* Construct Ethernet header. */
	ethdr = (struct ether_hdr *)
		rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr));
	RTE_MBUF_ASSERT(ethdr != NULL);

	ether_addr_copy(dest_addr, &ethdr->d_addr);
	ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
	ethdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);

	/* Put new packet into the output queue */
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = pkt;
	qconf->tx_mbufs[port].len = ++len;

	/* Transmit packets */
	if (unlikely(MAX_PKT_BURST == len))
		send_burst(qconf, port);
}

/* Multicast forward of the input packet */
static inline void
mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
{
	struct rte_mbuf *mc;
	struct ipv4_hdr *iphdr;
	uint32_t dest_addr, port_mask, port_num, use_clone;
	int32_t hash;
	uint8_t port;
	union {
		uint64_t as_int;
		struct ether_addr as_addr;
	} dst_eth_addr;

	/* Remove the Ethernet header from the input packet */
	iphdr = (struct ipv4_hdr *)
		rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
	RTE_MBUF_ASSERT(iphdr != NULL);

	dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);

	/*
	 * Check that it is a valid multicast address and
	 * we have some active ports assigned to it.
	 */
	if (!IS_IPV4_MCAST(dest_addr) ||
	    (hash = rte_fbk_hash_lookup(mcast_hash, dest_addr)) <= 0 ||
	    (port_mask = hash & enabled_port_mask) == 0) {
		rte_pktmbuf_free(m);
		return;
	}

	/* Calculate number of destination ports. */
	port_num = bitcnt(port_mask);
	/* Should we use rte_pktmbuf_clone() or not. */
	use_clone = (port_num <= MCAST_CLONE_PORTS &&
	    m->nb_segs <= MCAST_CLONE_SEGS);

	/* Mark all packet's segments as referenced port_num times */
	if (use_clone == 0)
		rte_pktmbuf_refcnt_update(m, (uint16_t)port_num);

	/* construct destination ethernet address */
	dst_eth_addr.as_int = ETHER_ADDR_FOR_IPV4_MCAST(dest_addr);

	/*
	 * The loop terminates when the remaining port_mask equals use_clone:
	 * with cloning (use_clone == 1) the last enabled port is left for
	 * the in-place path after the loop, without cloning (use_clone == 0)
	 * all enabled ports are handled inside the loop.
	 */
	for (port = 0; use_clone != port_mask; port_mask >>= 1, port++) {

		/* Prepare output packet and send it out. */
		if ((port_mask & 1) != 0) {
			if (likely ((mc = mcast_out_pkt(m, use_clone)) != NULL))
				mcast_send_pkt(mc, &dst_eth_addr.as_addr,
						qconf, port);
			else if (use_clone == 0)
				rte_pktmbuf_free(m);
		}
	}

	/*
	 * If we are making clone packets, then, for the last destination port,
	 * we can overwrite the input packet's metadata.
	 */
	if (use_clone != 0)
		mcast_send_pkt(m, &dst_eth_addr.as_addr, qconf, port);
	else
		rte_pktmbuf_free(m);
}

/* Send burst of outgoing packets, if the timeout expires. */
static inline void
send_timeout_burst(struct lcore_queue_conf *qconf)
{
	uint64_t cur_tsc;
	uint8_t portid;
	const uint64_t drain_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	cur_tsc = rte_rdtsc();
	if (likely (cur_tsc < qconf->tx_tsc + drain_tsc))
		return;

	for (portid = 0; portid < MAX_PORTS; portid++) {
		if (qconf->tx_mbufs[portid].len != 0)
			send_burst(qconf, portid);
	}
	qconf->tx_tsc = cur_tsc;
}

/* main processing loop */
static int
main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	int i, j, nb_rx;
	uint8_t portid;
	struct lcore_queue_conf *qconf;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];


	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IPv4_MULTICAST, "lcore %u has nothing to do\n",
		    lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IPv4_MULTICAST, "entering main loop on lcore %u\n",
	    lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i];
		RTE_LOG(INFO, IPv4_MULTICAST, " -- lcoreid=%u portid=%d\n",
		    lcore_id, (int) portid);
	}

	while (1) {

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; i++) {

			portid = qconf->rx_queue_list[i];
			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
						 MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				mcast_forward(pkts_burst[j], qconf);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				mcast_forward(pkts_burst[j], qconf);
			}
		}

		/* Send out packets from TX queues */
		send_timeout_burst(qconf);
	}
}

/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
	    "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	    "  -q NQ: number of queues (=ports) per lcore (default is 1)\n",
	    prgname);
}
static uint32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return ((uint32_t)pm);
}

static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse numerical string */
	errno = 0;
	n = strtoul(q_arg, &end, 0);
	if (errno != 0 || end == NULL || *end != '\0' ||
	    n == 0 || n >= MAX_RX_QUEUE_PER_LCORE)
		return (-1);

	return (n);
}

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:",
				  lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}

static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
	    eth_addr->addr_bytes[0],
	    eth_addr->addr_bytes[1],
	    eth_addr->addr_bytes[2],
	    eth_addr->addr_bytes[3],
	    eth_addr->addr_bytes[4],
	    eth_addr->addr_bytes[5]);
}

static int
init_mcast_hash(void)
{
	uint32_t i;

	mcast_hash_params.socket_id = rte_socket_id();
	mcast_hash = rte_fbk_hash_create(&mcast_hash_params);
	if (mcast_hash == NULL) {
		return -1;
	}

	for (i = 0; i < N_MCAST_GROUPS; i++) {
		if (rte_fbk_hash_add_key(mcast_hash,
				mcast_group_table[i].ip,
				mcast_group_table[i].port_mask) < 0) {
			return -1;
		}
	}

	return 0;
}
682 ("full-duplex") : ("half-duplex\n")); 683 else 684 printf("Port %d Link Down\n", 685 (uint8_t)portid); 686 continue; 687 } 688 /* clear all_ports_up flag if any link down */ 689 if (link.link_status == 0) { 690 all_ports_up = 0; 691 break; 692 } 693 } 694 /* after finally printing all link status, get out */ 695 if (print_flag == 1) 696 break; 697 698 if (all_ports_up == 0) { 699 printf("."); 700 fflush(stdout); 701 rte_delay_ms(CHECK_INTERVAL); 702 } 703 704 /* set the print_flag if all ports up or timeout */ 705 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 706 print_flag = 1; 707 printf("done\n"); 708 } 709 } 710 } 711 712 int 713 MAIN(int argc, char **argv) 714 { 715 struct lcore_queue_conf *qconf; 716 int ret; 717 uint16_t queueid; 718 unsigned lcore_id = 0, rx_lcore_id = 0; 719 uint32_t n_tx_queue, nb_lcores; 720 uint8_t portid; 721 722 /* init EAL */ 723 ret = rte_eal_init(argc, argv); 724 if (ret < 0) 725 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); 726 argc -= ret; 727 argv += ret; 728 729 /* parse application arguments (after the EAL ones) */ 730 ret = parse_args(argc, argv); 731 if (ret < 0) 732 rte_exit(EXIT_FAILURE, "Invalid IPV4_MULTICAST parameters\n"); 733 734 /* create the mbuf pools */ 735 packet_pool = rte_mempool_create("packet_pool", NB_PKT_MBUF, 736 PKT_MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private), 737 rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, 738 rte_socket_id(), 0); 739 740 if (packet_pool == NULL) 741 rte_exit(EXIT_FAILURE, "Cannot init packet mbuf pool\n"); 742 743 header_pool = rte_mempool_create("header_pool", NB_HDR_MBUF, 744 HDR_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL, 745 rte_socket_id(), 0); 746 747 if (header_pool == NULL) 748 rte_exit(EXIT_FAILURE, "Cannot init header mbuf pool\n"); 749 750 clone_pool = rte_mempool_create("clone_pool", NB_CLONE_MBUF, 751 CLONE_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL, 752 rte_socket_id(), 0); 753 754 if (clone_pool == NULL) 755 rte_exit(EXIT_FAILURE, "Cannot init clone mbuf pool\n"); 756 757 nb_ports = rte_eth_dev_count(); 758 if (nb_ports == 0) 759 rte_exit(EXIT_FAILURE, "No physical ports!\n"); 760 if (nb_ports > MAX_PORTS) 761 nb_ports = MAX_PORTS; 762 763 nb_lcores = rte_lcore_count(); 764 765 /* initialize all ports */ 766 for (portid = 0; portid < nb_ports; portid++) { 767 /* skip ports that are not enabled */ 768 if ((enabled_port_mask & (1 << portid)) == 0) { 769 printf("Skipping disabled port %d\n", portid); 770 continue; 771 } 772 773 qconf = &lcore_queue_conf[rx_lcore_id]; 774 775 /* get the lcore_id for this port */ 776 while (rte_lcore_is_enabled(rx_lcore_id) == 0 || 777 qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) { 778 779 rx_lcore_id ++; 780 qconf = &lcore_queue_conf[rx_lcore_id]; 781 782 if (rx_lcore_id >= RTE_MAX_LCORE) 783 rte_exit(EXIT_FAILURE, "Not enough cores\n"); 784 } 785 qconf->rx_queue_list[qconf->n_rx_queue] = portid; 786 qconf->n_rx_queue++; 787 788 /* init port */ 789 printf("Initializing port %d on lcore %u... 
", portid, 790 rx_lcore_id); 791 fflush(stdout); 792 793 n_tx_queue = nb_lcores; 794 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) 795 n_tx_queue = MAX_TX_QUEUE_PER_PORT; 796 ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue, 797 &port_conf); 798 if (ret < 0) 799 rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", 800 ret, portid); 801 802 rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); 803 print_ethaddr(" Address:", &ports_eth_addr[portid]); 804 printf(", "); 805 806 /* init one RX queue */ 807 queueid = 0; 808 printf("rxq=%hu ", queueid); 809 fflush(stdout); 810 ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, 811 rte_eth_dev_socket_id(portid), &rx_conf, 812 packet_pool); 813 if (ret < 0) 814 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%d\n", 815 ret, portid); 816 817 /* init one TX queue per couple (lcore,port) */ 818 queueid = 0; 819 820 RTE_LCORE_FOREACH(lcore_id) { 821 if (rte_lcore_is_enabled(lcore_id) == 0) 822 continue; 823 printf("txq=%u,%hu ", lcore_id, queueid); 824 fflush(stdout); 825 ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, 826 rte_lcore_to_socket_id(lcore_id), &tx_conf); 827 if (ret < 0) 828 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " 829 "port=%d\n", ret, portid); 830 831 qconf = &lcore_queue_conf[lcore_id]; 832 qconf->tx_queue_id[portid] = queueid; 833 queueid++; 834 } 835 836 /* Start device */ 837 ret = rte_eth_dev_start(portid); 838 if (ret < 0) 839 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n", 840 ret, portid); 841 842 printf("done:\n"); 843 } 844 845 check_all_ports_link_status(nb_ports, enabled_port_mask); 846 847 /* initialize the multicast hash */ 848 int retval = init_mcast_hash(); 849 if (retval != 0) 850 rte_exit(EXIT_FAILURE, "Cannot build the multicast hash\n"); 851 852 /* launch per-lcore init on every lcore */ 853 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); 854 RTE_LCORE_FOREACH_SLAVE(lcore_id) { 855 if (rte_eal_wait_lcore(lcore_id) < 0) 856 return -1; 857 } 858 859 return 0; 860 } 861