/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_common_vect.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>

#include "main.h"

#define APP_LOOKUP_EXACT_MATCH	0
#define APP_LOOKUP_LPM		1
#define DO_RFC_1812_CHECKS

#ifndef APP_LOOKUP_METHOD
#define APP_LOOKUP_METHOD	APP_LOOKUP_LPM
#endif

/*
 * When set to zero, simple forwarding path is enabled.
 * When set to one, optimized forwarding path is enabled.
 * Note that LPM optimisation path uses SSE4.1 instructions.
 */
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && !defined(__SSE4_1__))
#define ENABLE_MULTI_BUFFER_OPTIMIZE	0
#else
#define ENABLE_MULTI_BUFFER_OPTIMIZE	1
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
#include <rte_hash.h>
#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
#include <rte_lpm.h>
#include <rte_lpm6.h>
#else
#error "APP_LOOKUP_METHOD set to incorrect value"
#endif

#ifndef IPv6_BYTES
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
                       "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0],  addr[1],  addr[2],  addr[3], \
	addr[4],  addr[5],  addr[6],  addr[7], \
	addr[8],  addr[9],  addr[10], addr[11],\
	addr[12], addr[13], addr[14], addr[15]
#endif


#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN  9600

#define IPV6_ADDR_LEN 16

#define MEMPOOL_CACHE_SIZE 256

#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

/*
 * This expression is used to calculate the number of mbufs needed depending
 * on user input, taking into account memory for rx and tx hardware rings,
 * cache per lcore and mtable per port per lcore.
 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value
 * of 8192.
 */

#define NB_MBUF RTE_MAX	(						\
		(nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT +	\
		nb_ports*nb_lcores*MAX_PKT_BURST +			\
		nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT +		\
		nb_lcores*MEMPOOL_CACHE_SIZE),				\
		(unsigned)8192)
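/*
 * Example sizing (hypothetical values, using the default descriptor counts
 * defined below): 2 ports, 2 RX queues per port, 2 TX queues per port and
 * 4 lcores give 2*2*128 + 2*4*32 + 2*2*512 + 4*256 = 3840 mbufs, so the
 * RTE_MAX() floor of 8192 is what actually gets allocated.
 */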

#define MAX_PKT_BURST     32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/*
 * Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
 */
#define	MAX_TX_BURST	(MAX_PKT_BURST / 2)

#define NB_SOCKETS 8

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET	3

/* Used to mark destination port as 'invalid'. */
#define	BAD_PORT	((uint16_t)-1)

#define FWDSTEP	4

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

static __m128i val_eth[RTE_MAX_ETHPORTS];

/* replace first 12B of the ethernet header. */
#define	MASK_ETH	0x3f

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
static int numa_on = 1; /**< NUMA is enabled by default. */

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static int ipv6 = 0; /**< ipv6 is false by default. */
#endif

struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

static struct lcore_params * lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC	rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC	rte_jhash
#endif

struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv4_5tuple_host {
	struct {
		uint8_t  pad0;
		uint8_t  proto;
		uint16_t pad1;
		uint32_t ip_src;
		uint32_t ip_dst;
		uint16_t port_src;
		uint16_t port_dst;
	};
	__m128i xmm;
};

#define XMM_NUM_IN_IPV6_5TUPLE 3

struct ipv6_5tuple {
	uint8_t  ip_dst[IPV6_ADDR_LEN];
	uint8_t  ip_src[IPV6_ADDR_LEN];
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv6_5tuple_host {
	struct {
		uint16_t pad0;
		uint8_t  proto;
		uint8_t  pad1;
		uint8_t  ip_src[IPV6_ADDR_LEN];
		uint8_t  ip_dst[IPV6_ADDR_LEN];
		uint16_t port_src;
		uint16_t port_dst;
		uint64_t reserve;
	};
	__m128i xmm[XMM_NUM_IN_IPV6_5TUPLE];
};

struct ipv4_l3fwd_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

struct ipv6_l3fwd_route {
	struct ipv6_5tuple key;
	uint8_t if_out;
};

static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{{IPv4(101,0,0,0), IPv4(100,10,0,1),  101, 11, IPPROTO_TCP}, 0},
	{{IPv4(201,0,0,0), IPv4(200,20,0,1),  102, 12, IPPROTO_TCP}, 1},
	{{IPv4(111,0,0,0), IPv4(100,30,0,1),  101, 11, IPPROTO_TCP}, 2},
	{{IPv4(211,0,0,0), IPv4(200,40,0,1),  102, 12, IPPROTO_TCP}, 3},
};

static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
	{{
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 0},

	{{
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 1},

	{{
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 2},

	{{
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 3},
};

typedef struct rte_hash lookup_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];

#ifdef RTE_ARCH_X86_64
/* default to 4 million hash entries (approx) */
#define L3FWD_HASH_ENTRIES		1024*1024*4
#else
/* 32-bit has less address-space for hugepage memory, limit to 1M entries */
#define L3FWD_HASH_ENTRIES		1024*1024*1
#endif
#define HASH_ENTRY_NUMBER_DEFAULT	4

static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;

static inline uint32_t
ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
	uint32_t init_val)
{
	const union ipv4_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(k->ip_src, init_val);
	init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash_1word(k->ip_src, init_val);
	init_val = rte_jhash_1word(k->ip_dst, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	return (init_val);
}
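/*
 * Note for ipv4_hash_crc() above and ipv6_hash_crc() below: 'p' points at
 * port_src, so reading it as a single 32-bit word folds port_src and
 * port_dst into the hash in one step. When SSE4.2 is available the CRC32
 * instruction is used per 4-byte chunk via rte_hash_crc_4byte(); otherwise
 * the code falls back to rte_jhash().
 */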
static inline uint32_t
ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val)
{
	const union ipv6_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
	const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	ip_src0 = (const uint32_t *) k->ip_src;
	ip_src1 = (const uint32_t *)(k->ip_src+4);
	ip_src2 = (const uint32_t *)(k->ip_src+8);
	ip_src3 = (const uint32_t *)(k->ip_src+12);
	ip_dst0 = (const uint32_t *) k->ip_dst;
	ip_dst1 = (const uint32_t *)(k->ip_dst+4);
	ip_dst2 = (const uint32_t *)(k->ip_dst+8);
	ip_dst3 = (const uint32_t *)(k->ip_dst+12);
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(*ip_src0, init_val);
	init_val = rte_hash_crc_4byte(*ip_src1, init_val);
	init_val = rte_hash_crc_4byte(*ip_src2, init_val);
	init_val = rte_hash_crc_4byte(*ip_src3, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	return (init_val);
}

#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))

#define IPV6_L3FWD_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;

#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
struct ipv4_l3fwd_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};

struct ipv6_l3fwd_route {
	uint8_t ip[16];
	uint8_t depth;
	uint8_t if_out;
};

static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{IPv4(1,1,1,0), 24, 0},
	{IPv4(2,1,1,0), 24, 1},
	{IPv4(3,1,1,0), 24, 2},
	{IPv4(4,1,1,0), 24, 3},
	{IPv4(5,1,1,0), 24, 4},
	{IPv4(6,1,1,0), 24, 5},
	{IPv4(7,1,1,0), 24, 6},
	{IPv4(8,1,1,0), 24, 7},
};

static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};

#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
#define IPV6_L3FWD_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

#define IPV4_L3FWD_LPM_MAX_RULES         1024
#define IPV6_L3FWD_LPM_MAX_RULES         1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16)

typedef struct rte_lpm lookup_struct_t;
typedef struct rte_lpm6 lookup6_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup6_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];
#endif

struct lcore_conf {
	uint16_t n_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
	lookup_struct_t * ipv4_lookup_struct;
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
	lookup6_struct_t * ipv6_lookup_struct;
#else
	lookup_struct_t * ipv6_lookup_struct;
#endif
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];

/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	int ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}

/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
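/*
 * send_packetsx4() below batches up to 'num' packets for one port: if the
 * per-port TX buffer is empty and at least MAX_TX_BURST packets are on hand,
 * it bypasses the buffer and transmits immediately; otherwise it copies the
 * packets into the buffer using a 4-way unrolled loop entered Duff's-device
 * style through switch (n % FWDSTEP), so 'n' does not have to be a multiple
 * of FWDSTEP.
 */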
static inline __attribute__((always_inline)) void
send_packetsx4(struct lcore_conf *qconf, uint8_t port,
	struct rte_mbuf *m[], uint32_t num)
{
	uint32_t len, j, n;

	len = qconf->tx_mbufs[port].len;

	/*
	 * If TX buffer for that queue is empty, and we have enough packets,
	 * then send them straightway.
	 */
	if (num >= MAX_TX_BURST && len == 0) {
		n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
		if (unlikely(n < num)) {
			do {
				rte_pktmbuf_free(m[n]);
			} while (++n < num);
		}
		return;
	}

	/*
	 * Put packets into TX buffer for that queue.
	 */

	n = len + num;
	n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;

	j = 0;
	switch (n % FWDSTEP) {
	while (j < n) {
	case 0:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
	case 3:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
	case 2:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
	case 1:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
	}
	}

	len += n;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {

		send_burst(qconf, MAX_PKT_BURST, port);

		/* copy rest of the packets into the TX buffer. */
		len = num - n;
		j = 0;
		switch (len % FWDSTEP) {
		while (j < len) {
		case 0:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
		case 3:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
		case 2:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
		case 1:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
		}
		}
	}

	qconf->tx_mbufs[port].len = len;
}

#ifdef DO_RFC_1812_CHECKS
static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
{
	/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
	/*
	 * 1. The packet length reported by the Link Layer must be large
	 * enough to hold the minimum length legal IP datagram (20 bytes).
	 */
	if (link_len < sizeof(struct ipv4_hdr))
		return -1;

	/* 2. The IP checksum must be correct. */
	/* this is checked in H/W */

	/*
	 * 3. The IP version number must be 4. If the version number is not 4
	 * then the packet may be another version of IP, such as IPng or
	 * ST-II.
	 */
	if (((pkt->version_ihl) >> 4) != 4)
		return -3;
	/*
	 * 4. The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return -4;

	/*
	 * 5. The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
		return -5;

	return 0;
}
#endif
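/*
 * Key extraction note for the exact-match lookups below: a single unaligned
 * 16-byte load starting at the time_to_live field covers TTL, protocol,
 * header checksum, source/destination addresses and both L4 ports of an
 * IPv4 header. mask0 (and mask1/mask2 for the three IPv6 loads) is
 * initialized elsewhere in this application to clear the bytes that are not
 * part of the 5-tuple, so the masked load lines up with
 * union ipv4_5tuple_host (respectively union ipv6_5tuple_host).
 */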
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

static __m128i mask0;
static __m128i mask1;
static __m128i mask2;
static inline uint8_t
get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_l3fwd_lookup_struct)
{
	int ret = 0;
	union ipv4_5tuple_host key;

	ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
	__m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr));
	/* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
	key.xmm = _mm_and_si128(data, mask0);
	/* Find destination port */
	ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0)? portid : ipv4_l3fwd_out_if[ret]);
}

static inline uint8_t
get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t * ipv6_l3fwd_lookup_struct)
{
	int ret = 0;
	union ipv6_5tuple_host key;

	ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
	__m128i data0 = _mm_loadu_si128((__m128i*)(ipv6_hdr));
	__m128i data1 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)));
	__m128i data2 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)+sizeof(__m128i)));
	/* Get part of 5 tuple: src IP address lower 96 bits and protocol */
	key.xmm[0] = _mm_and_si128(data0, mask1);
	/* Get part of 5 tuple: dst IP address lower 96 bits and src IP address higher 32 bits */
	key.xmm[1] = data1;
	/* Get part of 5 tuple: dst port and src port and dst IP address higher 32 bits */
	key.xmm[2] = _mm_and_si128(data2, mask2);

	/* Find destination port */
	ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0)? portid : ipv6_l3fwd_out_if[ret]);
}
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)

static inline uint8_t
get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_l3fwd_lookup_struct)
{
	uint8_t next_hop;

	return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
		rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
		&next_hop) == 0) ? next_hop : portid);
}

static inline uint8_t
get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t * ipv6_l3fwd_lookup_struct)
{
	uint8_t next_hop;
	return (uint8_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
			((struct ipv6_hdr*)ipv6_hdr)->dst_addr, &next_hop) == 0)?
		next_hop : portid);
}
#endif

#if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
	(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf);

#define MASK_ALL_PKTS    0xf
#define EXECLUDE_1ST_PKT 0xe
#define EXECLUDE_2ND_PKT 0xd
#define EXECLUDE_3RD_PKT 0xb
#define EXECLUDE_4TH_PKT 0x7

static inline void
simple_ipv4_fwd_4pkts(struct rte_mbuf* m[4], uint8_t portid, struct lcore_conf *qconf)
{
	struct ether_hdr *eth_hdr[4];
	struct ipv4_hdr *ipv4_hdr[4];
	void *d_addr_bytes[4];
	uint8_t dst_port[4];
	int32_t ret[4];
	union ipv4_5tuple_host key[4];
	__m128i data[4];

	eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
	eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
	eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
	eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);

	/* Handle IPv4 headers.*/
	ipv4_hdr[0] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[0], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv4_hdr[1] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[1], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv4_hdr[2] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[2], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv4_hdr[3] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[3], unsigned char *) +
		sizeof(struct ether_hdr));

#ifdef DO_RFC_1812_CHECKS
	/* Check to make sure the packet is valid (RFC1812) */
	uint8_t valid_mask = MASK_ALL_PKTS;
	if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) {
		rte_pktmbuf_free(m[0]);
		valid_mask &= EXECLUDE_1ST_PKT;
	}
	if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) {
		rte_pktmbuf_free(m[1]);
		valid_mask &= EXECLUDE_2ND_PKT;
	}
	if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) {
		rte_pktmbuf_free(m[2]);
		valid_mask &= EXECLUDE_3RD_PKT;
	}
	if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) {
		rte_pktmbuf_free(m[3]);
		valid_mask &= EXECLUDE_4TH_PKT;
	}
	if (unlikely(valid_mask != MASK_ALL_PKTS)) {
		if (valid_mask == 0){
			return;
		} else {
			uint8_t i = 0;
			for (i = 0; i < 4; i++) {
				if ((0x1 << i) & valid_mask) {
					l3fwd_simple_forward(m[i], portid, qconf);
				}
			}
			return;
		}
	}
#endif // End of #ifdef DO_RFC_1812_CHECKS

	data[0] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[0], unsigned char *) +
		sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
	data[1] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[1], unsigned char *) +
		sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
	data[2] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[2], unsigned char *) +
		sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
	data[3] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[3], unsigned char *) +
		sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));

	key[0].xmm = _mm_and_si128(data[0], mask0);
	key[1].xmm = _mm_and_si128(data[1], mask0);
	key[2].xmm = _mm_and_si128(data[2], mask0);
	key[3].xmm = _mm_and_si128(data[3], mask0);

	const void *key_array[4] = {&key[0], &key[1], &key[2],&key[3]};
	rte_hash_lookup_multi(qconf->ipv4_lookup_struct, &key_array[0], 4, ret);
	dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
	dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
	dst_port[2] = (uint8_t) ((ret[2] < 0) ? portid : ipv4_l3fwd_out_if[ret[2]]);
	dst_port[3] = (uint8_t) ((ret[3] < 0) ? portid : ipv4_l3fwd_out_if[ret[3]]);

	if (dst_port[0] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[0]) == 0)
		dst_port[0] = portid;
	if (dst_port[1] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[1]) == 0)
		dst_port[1] = portid;
	if (dst_port[2] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[2]) == 0)
		dst_port[2] = portid;
	if (dst_port[3] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[3]) == 0)
		dst_port[3] = portid;

	/* 02:00:00:00:00:xx */
	d_addr_bytes[0] = &eth_hdr[0]->d_addr.addr_bytes[0];
	d_addr_bytes[1] = &eth_hdr[1]->d_addr.addr_bytes[0];
	d_addr_bytes[2] = &eth_hdr[2]->d_addr.addr_bytes[0];
	d_addr_bytes[3] = &eth_hdr[3]->d_addr.addr_bytes[0];
	*((uint64_t *)d_addr_bytes[0]) = 0x000000000002 + ((uint64_t)dst_port[0] << 40);
	*((uint64_t *)d_addr_bytes[1]) = 0x000000000002 + ((uint64_t)dst_port[1] << 40);
	*((uint64_t *)d_addr_bytes[2]) = 0x000000000002 + ((uint64_t)dst_port[2] << 40);
	*((uint64_t *)d_addr_bytes[3]) = 0x000000000002 + ((uint64_t)dst_port[3] << 40);

#ifdef DO_RFC_1812_CHECKS
	/* Update time to live and header checksum */
	--(ipv4_hdr[0]->time_to_live);
	--(ipv4_hdr[1]->time_to_live);
	--(ipv4_hdr[2]->time_to_live);
	--(ipv4_hdr[3]->time_to_live);
	++(ipv4_hdr[0]->hdr_checksum);
	++(ipv4_hdr[1]->hdr_checksum);
	++(ipv4_hdr[2]->hdr_checksum);
	++(ipv4_hdr[3]->hdr_checksum);
#endif

	/* src addr */
	ether_addr_copy(&ports_eth_addr[dst_port[0]], &eth_hdr[0]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[1]], &eth_hdr[1]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[2]], &eth_hdr[2]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[3]], &eth_hdr[3]->s_addr);

	send_single_packet(m[0], (uint8_t)dst_port[0]);
	send_single_packet(m[1], (uint8_t)dst_port[1]);
	send_single_packet(m[2], (uint8_t)dst_port[2]);
	send_single_packet(m[3], (uint8_t)dst_port[3]);

}
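/*
 * get_ipv6_5tuple() below packs the IPv6 5-tuple with three unaligned
 * 16-byte loads that start at the payload_len field; the two mask arguments
 * (the callers pass the global mask1 and mask2) clear the fields that are
 * not part of the 5-tuple so the result lines up with
 * union ipv6_5tuple_host.
 */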
static inline void get_ipv6_5tuple(struct rte_mbuf* m0, __m128i mask0, __m128i mask1,
	union ipv6_5tuple_host * key)
{
	__m128i tmpdata0 = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m0, unsigned char *)
		+ sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
	__m128i tmpdata1 = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m0, unsigned char *)
		+ sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)
		+ sizeof(__m128i)));
	__m128i tmpdata2 = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m0, unsigned char *)
		+ sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)
		+ sizeof(__m128i) + sizeof(__m128i)));
	key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
	key->xmm[1] = tmpdata1;
	key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
	return;
}

static inline void
simple_ipv6_fwd_4pkts(struct rte_mbuf* m[4], uint8_t portid, struct lcore_conf *qconf)
{
	struct ether_hdr *eth_hdr[4];
	__attribute__((unused)) struct ipv6_hdr *ipv6_hdr[4];
	void *d_addr_bytes[4];
	uint8_t dst_port[4];
	int32_t ret[4];
	union ipv6_5tuple_host key[4];

	eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
	eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
	eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
	eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);

	/* Handle IPv6 headers.*/
	ipv6_hdr[0] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[0], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv6_hdr[1] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[1], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv6_hdr[2] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[2], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv6_hdr[3] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[3], unsigned char *) +
		sizeof(struct ether_hdr));

	get_ipv6_5tuple(m[0], mask1, mask2, &key[0]);
	get_ipv6_5tuple(m[1], mask1, mask2, &key[1]);
	get_ipv6_5tuple(m[2], mask1, mask2, &key[2]);
	get_ipv6_5tuple(m[3], mask1, mask2, &key[3]);

	const void *key_array[4] = {&key[0], &key[1], &key[2],&key[3]};
	rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 4, ret);
	dst_port[0] = (uint8_t) ((ret[0] < 0)? portid:ipv6_l3fwd_out_if[ret[0]]);
	dst_port[1] = (uint8_t) ((ret[1] < 0)? portid:ipv6_l3fwd_out_if[ret[1]]);
	dst_port[2] = (uint8_t) ((ret[2] < 0)? portid:ipv6_l3fwd_out_if[ret[2]]);
	dst_port[3] = (uint8_t) ((ret[3] < 0)? portid:ipv6_l3fwd_out_if[ret[3]]);

	if (dst_port[0] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[0]) == 0)
		dst_port[0] = portid;
	if (dst_port[1] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[1]) == 0)
		dst_port[1] = portid;
	if (dst_port[2] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[2]) == 0)
		dst_port[2] = portid;
	if (dst_port[3] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[3]) == 0)
		dst_port[3] = portid;

	/* 02:00:00:00:00:xx */
	d_addr_bytes[0] = &eth_hdr[0]->d_addr.addr_bytes[0];
	d_addr_bytes[1] = &eth_hdr[1]->d_addr.addr_bytes[0];
	d_addr_bytes[2] = &eth_hdr[2]->d_addr.addr_bytes[0];
	d_addr_bytes[3] = &eth_hdr[3]->d_addr.addr_bytes[0];
	*((uint64_t *)d_addr_bytes[0]) = 0x000000000002 + ((uint64_t)dst_port[0] << 40);
	*((uint64_t *)d_addr_bytes[1]) = 0x000000000002 + ((uint64_t)dst_port[1] << 40);
	*((uint64_t *)d_addr_bytes[2]) = 0x000000000002 + ((uint64_t)dst_port[2] << 40);
	*((uint64_t *)d_addr_bytes[3]) = 0x000000000002 + ((uint64_t)dst_port[3] << 40);

	/* src addr */
	ether_addr_copy(&ports_eth_addr[dst_port[0]], &eth_hdr[0]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[1]], &eth_hdr[1]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[2]], &eth_hdr[2]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[3]], &eth_hdr[3]->s_addr);

	send_single_packet(m[0], (uint8_t)dst_port[0]);
	send_single_packet(m[1], (uint8_t)dst_port[1]);
	send_single_packet(m[2], (uint8_t)dst_port[2]);
	send_single_packet(m[3], (uint8_t)dst_port[3]);

}
#endif /* APP_LOOKUP_METHOD */

static inline __attribute__((always_inline)) void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	void *d_addr_bytes;
	uint8_t dst_port;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (m->ol_flags & PKT_RX_IPV4_HDR) {
		/* Handle IPv4 headers.*/
		ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
			sizeof(struct ether_hdr));

#ifdef DO_RFC_1812_CHECKS
		/* Check to make sure the packet is valid (RFC1812) */
		if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
			rte_pktmbuf_free(m);
			return;
		}
#endif

		dst_port = get_ipv4_dst_port(ipv4_hdr, portid,
			qconf->ipv4_lookup_struct);
		if (dst_port >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
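		/*
		 * The 8-byte store below (also used in the 4-packet paths
		 * above) writes the destination MAC as the locally
		 * administered address 02:00:00:00:00:XX, where XX is the
		 * output port. On a little-endian host it also zeroes the
		 * first two bytes of the source MAC, which ether_addr_copy()
		 * then overwrites with the real port address.
		 */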
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = ETHER_LOCAL_ADMIN_ADDR +
			((uint64_t)dst_port << 40);

#ifdef DO_RFC_1812_CHECKS
		/* Update time to live and header checksum */
		--(ipv4_hdr->time_to_live);
		++(ipv4_hdr->hdr_checksum);
#endif

		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);

	} else {
		/* Handle IPv6 headers.*/
		struct ipv6_hdr *ipv6_hdr;

		ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
			sizeof(struct ether_hdr));

		dst_port = get_ipv6_dst_port(ipv6_hdr, portid, qconf->ipv6_lookup_struct);

		if (dst_port >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = ETHER_LOCAL_ADMIN_ADDR +
			((uint64_t)dst_port << 40);

		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);
	}

}

#ifdef DO_RFC_1812_CHECKS

#define	IPV4_MIN_VER_IHL	0x45
#define	IPV4_MAX_VER_IHL	0x4f
#define	IPV4_MAX_VER_IHL_DIFF	(IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)

/* Minimum value of IPV4 total length (20B) in network byte order. */
#define	IPV4_MIN_LEN_BE	(sizeof(struct ipv4_hdr) << 8)

/*
 * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
 * - The IP version number must be 4.
 * - The IP header length field must be large enough to hold the
 *   minimum length legal IP datagram (20 bytes = 5 words).
 * - The IP total length field must be large enough to hold the IP
 *   datagram header, whose length is specified in the IP header length
 *   field.
 * If we encounter invalid IPV4 packet, then set destination port for it
 * to BAD_PORT value.
 */
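/*
 * Note on the TTL/checksum update used throughout this file: decrementing
 * time_to_live lowers the 16-bit TTL/protocol word of the header by 0x0100,
 * and on a little-endian host incrementing hdr_checksum (stored big-endian)
 * raises the checksum's high-order byte by one, which compensates for that
 * change. The ones'-complement carry handling of a full RFC 1624
 * incremental update is skipped here.
 */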
static inline __attribute__((always_inline)) void
rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t flags)
{
	uint8_t ihl;

	if ((flags & PKT_RX_IPV4_HDR) != 0) {

		ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;

		ipv4_hdr->time_to_live--;
		ipv4_hdr->hdr_checksum++;

		if (ihl > IPV4_MAX_VER_IHL_DIFF ||
				((uint8_t)ipv4_hdr->total_length == 0 &&
				ipv4_hdr->total_length < IPV4_MIN_LEN_BE)) {
			dp[0] = BAD_PORT;
		}
	}
}

#else
/* Callers pass three arguments, so the stub must accept them as well. */
#define	rfc1812_process(mb, dp, flags)	do { } while (0)
#endif /* DO_RFC_1812_CHECKS */


#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
	(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))

static inline __attribute__((always_inline)) uint16_t
get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
	uint32_t dst_ipv4, uint8_t portid)
{
	uint8_t next_hop;
	struct ipv6_hdr *ipv6_hdr;
	struct ether_hdr *eth_hdr;

	if (pkt->ol_flags & PKT_RX_IPV4_HDR) {
		if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
				&next_hop) != 0)
			next_hop = portid;
	} else if (pkt->ol_flags & PKT_RX_IPV6_HDR) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
		ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
		if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
				ipv6_hdr->dst_addr, &next_hop) != 0)
			next_hop = portid;
	} else {
		next_hop = portid;
	}

	return next_hop;
}
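/*
 * process_packet() below rewrites the first 12 bytes of the ethernet header
 * in one shot: val_eth[] (filled in elsewhere in this application) holds the
 * precomputed destination/source MAC pair for each output port, and
 * _mm_blend_epi16() with MASK_ETH (0x3f) takes those first six 16-bit words
 * from val_eth while keeping the EtherType and the rest of the loaded
 * 16 bytes from the original packet.
 */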
static inline void
process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
	uint16_t *dst_port, uint8_t portid)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	uint32_t dst_ipv4;
	uint16_t dp;
	__m128i te, ve;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);

	dst_ipv4 = ipv4_hdr->dst_addr;
	dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
	dp = get_dst_port(qconf, pkt, dst_ipv4, portid);

	te = _mm_load_si128((__m128i *)eth_hdr);
	ve = val_eth[dp];

	dst_port[0] = dp;
	rfc1812_process(ipv4_hdr, dst_port, pkt->ol_flags);

	te = _mm_blend_epi16(te, ve, MASK_ETH);
	_mm_store_si128((__m128i *)eth_hdr, te);
}

/*
 * Read ol_flags and destination IPV4 addresses from 4 mbufs.
 */
static inline void
processx4_step1(struct rte_mbuf *pkt[FWDSTEP], __m128i *dip, uint32_t *flag)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ether_hdr *eth_hdr;
	uint32_t x0, x1, x2, x3;

	eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	x0 = ipv4_hdr->dst_addr;
	flag[0] = pkt[0]->ol_flags & PKT_RX_IPV4_HDR;

	eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	x1 = ipv4_hdr->dst_addr;
	flag[0] &= pkt[1]->ol_flags;

	eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	x2 = ipv4_hdr->dst_addr;
	flag[0] &= pkt[2]->ol_flags;

	eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	x3 = ipv4_hdr->dst_addr;
	flag[0] &= pkt[3]->ol_flags;

	dip[0] = _mm_set_epi32(x3, x2, x1, x0);
}
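/*
 * After processx4_step1() the PKT_RX_IPV4_HDR bit survives in 'flag' only if
 * all four mbufs carry it, so processx4_step2() can take the
 * rte_lpm_lookupx4() fast path when the whole group is IPv4 and fall back to
 * per-packet get_dst_port() otherwise.
 */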
115796ff4453SKonstantin Ananyev */ 115896ff4453SKonstantin Ananyev static inline void 115996ff4453SKonstantin Ananyev processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t flag, 116096ff4453SKonstantin Ananyev uint8_t portid, struct rte_mbuf *pkt[FWDSTEP], uint16_t dprt[FWDSTEP]) 116196ff4453SKonstantin Ananyev { 116296ff4453SKonstantin Ananyev rte_xmm_t dst; 116396ff4453SKonstantin Ananyev const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 116496ff4453SKonstantin Ananyev 4, 5, 6, 7, 0, 1, 2, 3); 116596ff4453SKonstantin Ananyev 116696ff4453SKonstantin Ananyev /* Byte swap 4 IPV4 addresses. */ 116796ff4453SKonstantin Ananyev dip = _mm_shuffle_epi8(dip, bswap_mask); 116896ff4453SKonstantin Ananyev 116996ff4453SKonstantin Ananyev /* if all 4 packets are IPV4. */ 117096ff4453SKonstantin Ananyev if (likely(flag != 0)) { 117196ff4453SKonstantin Ananyev rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid); 117296ff4453SKonstantin Ananyev } else { 117396ff4453SKonstantin Ananyev dst.m = dip; 117496ff4453SKonstantin Ananyev dprt[0] = get_dst_port(qconf, pkt[0], dst.u32[0], portid); 117596ff4453SKonstantin Ananyev dprt[1] = get_dst_port(qconf, pkt[1], dst.u32[1], portid); 117696ff4453SKonstantin Ananyev dprt[2] = get_dst_port(qconf, pkt[2], dst.u32[2], portid); 117796ff4453SKonstantin Ananyev dprt[3] = get_dst_port(qconf, pkt[3], dst.u32[3], portid); 117896ff4453SKonstantin Ananyev } 117996ff4453SKonstantin Ananyev } 118096ff4453SKonstantin Ananyev 118196ff4453SKonstantin Ananyev /* 118296ff4453SKonstantin Ananyev * Update source and destination MAC addresses in the ethernet header. 118396ff4453SKonstantin Ananyev * Perform RFC1812 checks and updates for IPV4 packets. 118496ff4453SKonstantin Ananyev */ 118596ff4453SKonstantin Ananyev static inline void 118696ff4453SKonstantin Ananyev processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP]) 118796ff4453SKonstantin Ananyev { 118896ff4453SKonstantin Ananyev __m128i te[FWDSTEP]; 118996ff4453SKonstantin Ananyev __m128i ve[FWDSTEP]; 119096ff4453SKonstantin Ananyev __m128i *p[FWDSTEP]; 119196ff4453SKonstantin Ananyev 119296ff4453SKonstantin Ananyev p[0] = (rte_pktmbuf_mtod(pkt[0], __m128i *)); 119396ff4453SKonstantin Ananyev p[1] = (rte_pktmbuf_mtod(pkt[1], __m128i *)); 119496ff4453SKonstantin Ananyev p[2] = (rte_pktmbuf_mtod(pkt[2], __m128i *)); 119596ff4453SKonstantin Ananyev p[3] = (rte_pktmbuf_mtod(pkt[3], __m128i *)); 119696ff4453SKonstantin Ananyev 119796ff4453SKonstantin Ananyev ve[0] = val_eth[dst_port[0]]; 119896ff4453SKonstantin Ananyev te[0] = _mm_load_si128(p[0]); 119996ff4453SKonstantin Ananyev 120096ff4453SKonstantin Ananyev ve[1] = val_eth[dst_port[1]]; 120196ff4453SKonstantin Ananyev te[1] = _mm_load_si128(p[1]); 120296ff4453SKonstantin Ananyev 120396ff4453SKonstantin Ananyev ve[2] = val_eth[dst_port[2]]; 120496ff4453SKonstantin Ananyev te[2] = _mm_load_si128(p[2]); 120596ff4453SKonstantin Ananyev 120696ff4453SKonstantin Ananyev ve[3] = val_eth[dst_port[3]]; 120796ff4453SKonstantin Ananyev te[3] = _mm_load_si128(p[3]); 120896ff4453SKonstantin Ananyev 120996ff4453SKonstantin Ananyev /* Update first 12 bytes, keep rest bytes intact. 
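	 * The first 12 bytes are the destination and source MAC addresses;
	 * MASK_ETH selects those 16-bit lanes for _mm_blend_epi16, so the
	 * EtherType and the rest of the frame are taken from the original
	 * packet.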
	 */
	te[0] = _mm_blend_epi16(te[0], ve[0], MASK_ETH);
	te[1] = _mm_blend_epi16(te[1], ve[1], MASK_ETH);
	te[2] = _mm_blend_epi16(te[2], ve[2], MASK_ETH);
	te[3] = _mm_blend_epi16(te[3], ve[3], MASK_ETH);

	_mm_store_si128(p[0], te[0]);
	_mm_store_si128(p[1], te[1]);
	_mm_store_si128(p[2], te[2]);
	_mm_store_si128(p[3], te[3]);

	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
		&dst_port[0], pkt[0]->ol_flags);
	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
		&dst_port[1], pkt[1]->ol_flags);
	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
		&dst_port[2], pkt[2]->ol_flags);
	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
		&dst_port[3], pkt[3]->ol_flags);
}

/*
 * We group consecutive packets with the same destination port into one burst.
 * To avoid extra latency this is done together with some other packet
 * processing, but after we have made a final decision about the packet's
 * destination. To do this we maintain:
 * pnum - array of number of consecutive packets with the same dest port for
 * each packet in the input burst.
 * lp - pointer to the last updated element in the pnum.
 * dlp - dest port value lp corresponds to.
 */

#define GRPSZ	(1 << FWDSTEP)
#define GRPMSK	(GRPSZ - 1)

#define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx)	do { \
	if (likely((dlp) == (dcp)[(idx)])) {         \
		(lp)[0]++;                           \
	} else {                                     \
		(dlp) = (dcp)[idx];                  \
		(lp) = (pn) + (idx);                 \
		(lp)[0] = 1;                         \
	}                                            \
} while (0)

/*
 * Group consecutive packets with the same destination port in bursts of 4.
 * Suppose we have an array of destination ports:
 * dst_port[] = {a, b, c, d, e, ... }
 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
 * We do 4 comparisons at once and the result is a 4-bit mask.
 * This mask is used as an index into the prebuilt array of pnum values.
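 * For example, with dst_port[] = {1, 1, 2, 2, 3, ...}:
 * dp1 = <1, 1, 2, 2>, dp2 = <1, 2, 2, 3>, so the comparison mask is
 * 0b0101 = 5 (a == b and c == d). gptbl[5] gives lpv = 1 (the run that was
 * open when the group started gains one more packet), pnum[0..3] = {2, 1, 2, 1}
 * (a run of two packets to port 1, then a run of two to port 2) and idx = 4,
 * so lp ends up pointing at pnum[4], the run started by the fifth packet.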
12617e7ab814SKonstantin Ananyev */ 12627e7ab814SKonstantin Ananyev static inline uint16_t * 12637e7ab814SKonstantin Ananyev port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2) 12647e7ab814SKonstantin Ananyev { 12657e7ab814SKonstantin Ananyev static const struct { 12667e7ab814SKonstantin Ananyev uint64_t pnum; /* prebuild 4 values for pnum[]. */ 12677e7ab814SKonstantin Ananyev int32_t idx; /* index for new last updated elemnet. */ 12687e7ab814SKonstantin Ananyev uint16_t lpv; /* add value to the last updated element. */ 12697e7ab814SKonstantin Ananyev } gptbl[GRPSZ] = { 12707e7ab814SKonstantin Ananyev { 12717e7ab814SKonstantin Ananyev /* 0: a != b, b != c, c != d, d != e */ 12727e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0001000100010001), 12737e7ab814SKonstantin Ananyev .idx = 4, 12747e7ab814SKonstantin Ananyev .lpv = 0, 12757e7ab814SKonstantin Ananyev }, 12767e7ab814SKonstantin Ananyev { 12777e7ab814SKonstantin Ananyev /* 1: a == b, b != c, c != d, d != e */ 12787e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0001000100010002), 12797e7ab814SKonstantin Ananyev .idx = 4, 12807e7ab814SKonstantin Ananyev .lpv = 1, 12817e7ab814SKonstantin Ananyev }, 12827e7ab814SKonstantin Ananyev { 12837e7ab814SKonstantin Ananyev /* 2: a != b, b == c, c != d, d != e */ 12847e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0001000100020001), 12857e7ab814SKonstantin Ananyev .idx = 4, 12867e7ab814SKonstantin Ananyev .lpv = 0, 12877e7ab814SKonstantin Ananyev }, 12887e7ab814SKonstantin Ananyev { 12897e7ab814SKonstantin Ananyev /* 3: a == b, b == c, c != d, d != e */ 12907e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0001000100020003), 12917e7ab814SKonstantin Ananyev .idx = 4, 12927e7ab814SKonstantin Ananyev .lpv = 2, 12937e7ab814SKonstantin Ananyev }, 12947e7ab814SKonstantin Ananyev { 12957e7ab814SKonstantin Ananyev /* 4: a != b, b != c, c == d, d != e */ 12967e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0001000200010001), 12977e7ab814SKonstantin Ananyev .idx = 4, 12987e7ab814SKonstantin Ananyev .lpv = 0, 12997e7ab814SKonstantin Ananyev }, 13007e7ab814SKonstantin Ananyev { 13017e7ab814SKonstantin Ananyev /* 5: a == b, b != c, c == d, d != e */ 13027e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0001000200010002), 13037e7ab814SKonstantin Ananyev .idx = 4, 13047e7ab814SKonstantin Ananyev .lpv = 1, 13057e7ab814SKonstantin Ananyev }, 13067e7ab814SKonstantin Ananyev { 13077e7ab814SKonstantin Ananyev /* 6: a != b, b == c, c == d, d != e */ 13087e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0001000200030001), 13097e7ab814SKonstantin Ananyev .idx = 4, 13107e7ab814SKonstantin Ananyev .lpv = 0, 13117e7ab814SKonstantin Ananyev }, 13127e7ab814SKonstantin Ananyev { 13137e7ab814SKonstantin Ananyev /* 7: a == b, b == c, c == d, d != e */ 13147e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0001000200030004), 13157e7ab814SKonstantin Ananyev .idx = 4, 13167e7ab814SKonstantin Ananyev .lpv = 3, 13177e7ab814SKonstantin Ananyev }, 13187e7ab814SKonstantin Ananyev { 13197e7ab814SKonstantin Ananyev /* 8: a != b, b != c, c != d, d == e */ 13207e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0002000100010001), 13217e7ab814SKonstantin Ananyev .idx = 3, 13227e7ab814SKonstantin Ananyev .lpv = 0, 13237e7ab814SKonstantin Ananyev }, 13247e7ab814SKonstantin Ananyev { 13257e7ab814SKonstantin Ananyev /* 9: a == b, b != c, c != d, d == e */ 13267e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0002000100010002), 13277e7ab814SKonstantin Ananyev .idx = 3, 13287e7ab814SKonstantin Ananyev .lpv = 1, 13297e7ab814SKonstantin Ananyev }, 
13307e7ab814SKonstantin Ananyev { 13317e7ab814SKonstantin Ananyev /* 0xa: a != b, b == c, c != d, d == e */ 13327e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0002000100020001), 13337e7ab814SKonstantin Ananyev .idx = 3, 13347e7ab814SKonstantin Ananyev .lpv = 0, 13357e7ab814SKonstantin Ananyev }, 13367e7ab814SKonstantin Ananyev { 13377e7ab814SKonstantin Ananyev /* 0xb: a == b, b == c, c != d, d == e */ 13387e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0002000100020003), 13397e7ab814SKonstantin Ananyev .idx = 3, 13407e7ab814SKonstantin Ananyev .lpv = 2, 13417e7ab814SKonstantin Ananyev }, 13427e7ab814SKonstantin Ananyev { 13437e7ab814SKonstantin Ananyev /* 0xc: a != b, b != c, c == d, d == e */ 13447e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0002000300010001), 13457e7ab814SKonstantin Ananyev .idx = 2, 13467e7ab814SKonstantin Ananyev .lpv = 0, 13477e7ab814SKonstantin Ananyev }, 13487e7ab814SKonstantin Ananyev { 13497e7ab814SKonstantin Ananyev /* 0xd: a == b, b != c, c == d, d == e */ 13507e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0002000300010002), 13517e7ab814SKonstantin Ananyev .idx = 2, 13527e7ab814SKonstantin Ananyev .lpv = 1, 13537e7ab814SKonstantin Ananyev }, 13547e7ab814SKonstantin Ananyev { 13557e7ab814SKonstantin Ananyev /* 0xe: a != b, b == c, c == d, d == e */ 13567e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0002000300040001), 13577e7ab814SKonstantin Ananyev .idx = 1, 13587e7ab814SKonstantin Ananyev .lpv = 0, 13597e7ab814SKonstantin Ananyev }, 13607e7ab814SKonstantin Ananyev { 13617e7ab814SKonstantin Ananyev /* 0xf: a == b, b == c, c == d, d == e */ 13627e7ab814SKonstantin Ananyev .pnum = UINT64_C(0x0002000300040005), 13637e7ab814SKonstantin Ananyev .idx = 0, 13647e7ab814SKonstantin Ananyev .lpv = 4, 13657e7ab814SKonstantin Ananyev }, 13667e7ab814SKonstantin Ananyev }; 13677e7ab814SKonstantin Ananyev 13687e7ab814SKonstantin Ananyev union { 13697e7ab814SKonstantin Ananyev uint16_t u16[FWDSTEP + 1]; 13707e7ab814SKonstantin Ananyev uint64_t u64; 13717e7ab814SKonstantin Ananyev } *pnum = (void *)pn; 13727e7ab814SKonstantin Ananyev 13737e7ab814SKonstantin Ananyev int32_t v; 13747e7ab814SKonstantin Ananyev 13757e7ab814SKonstantin Ananyev dp1 = _mm_cmpeq_epi16(dp1, dp2); 13767e7ab814SKonstantin Ananyev dp1 = _mm_unpacklo_epi16(dp1, dp1); 13777e7ab814SKonstantin Ananyev v = _mm_movemask_ps((__m128)dp1); 13787e7ab814SKonstantin Ananyev 13797e7ab814SKonstantin Ananyev /* update last port counter. */ 13807e7ab814SKonstantin Ananyev lp[0] += gptbl[v].lpv; 13817e7ab814SKonstantin Ananyev 13827e7ab814SKonstantin Ananyev /* if dest port value has changed. 
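	 * (v != GRPMSK means at least one of the four comparisons failed,
	 * i.e. a run ends somewhere inside this window): reload pn[0..3]
	 * from the prebuilt table and point lp at element gptbl[v].idx,
	 * the counter of the newest, still-open run.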
*/ 13837e7ab814SKonstantin Ananyev if (v != GRPMSK) { 13847e7ab814SKonstantin Ananyev lp = pnum->u16 + gptbl[v].idx; 13857e7ab814SKonstantin Ananyev lp[0] = 1; 13867e7ab814SKonstantin Ananyev pnum->u64 = gptbl[v].pnum; 13877e7ab814SKonstantin Ananyev } 13887e7ab814SKonstantin Ananyev 13897e7ab814SKonstantin Ananyev return lp; 13907e7ab814SKonstantin Ananyev } 13917e7ab814SKonstantin Ananyev 139296ff4453SKonstantin Ananyev #endif /* APP_LOOKUP_METHOD */ 139396ff4453SKonstantin Ananyev 1394af75078fSIntel /* main processing loop */ 1395cdfd5dbbSIntel static int 1396af75078fSIntel main_loop(__attribute__((unused)) void *dummy) 1397af75078fSIntel { 1398af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 1399af75078fSIntel unsigned lcore_id; 14005c95261dSIntel uint64_t prev_tsc, diff_tsc, cur_tsc; 1401af75078fSIntel int i, j, nb_rx; 1402af75078fSIntel uint8_t portid, queueid; 1403af75078fSIntel struct lcore_conf *qconf; 140496ff4453SKonstantin Ananyev const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / 140596ff4453SKonstantin Ananyev US_PER_S * BURST_TX_DRAIN_US; 140696ff4453SKonstantin Ananyev 140796ff4453SKonstantin Ananyev #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \ 140896ff4453SKonstantin Ananyev (ENABLE_MULTI_BUFFER_OPTIMIZE == 1)) 140996ff4453SKonstantin Ananyev int32_t k; 14107e7ab814SKonstantin Ananyev uint16_t dlp; 14117e7ab814SKonstantin Ananyev uint16_t *lp; 141296ff4453SKonstantin Ananyev uint16_t dst_port[MAX_PKT_BURST]; 141396ff4453SKonstantin Ananyev __m128i dip[MAX_PKT_BURST / FWDSTEP]; 141496ff4453SKonstantin Ananyev uint32_t flag[MAX_PKT_BURST / FWDSTEP]; 14157e7ab814SKonstantin Ananyev uint16_t pnum[MAX_PKT_BURST + 1]; 141696ff4453SKonstantin Ananyev #endif 14175c95261dSIntel 14185c95261dSIntel prev_tsc = 0; 1419af75078fSIntel 1420af75078fSIntel lcore_id = rte_lcore_id(); 1421af75078fSIntel qconf = &lcore_conf[lcore_id]; 1422af75078fSIntel 1423af75078fSIntel if (qconf->n_rx_queue == 0) { 1424af75078fSIntel RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id); 1425cdfd5dbbSIntel return 0; 1426af75078fSIntel } 1427af75078fSIntel 1428af75078fSIntel RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id); 1429af75078fSIntel 1430af75078fSIntel for (i = 0; i < qconf->n_rx_queue; i++) { 1431af75078fSIntel 1432af75078fSIntel portid = qconf->rx_queue_list[i].port_id; 1433af75078fSIntel queueid = qconf->rx_queue_list[i].queue_id; 1434af75078fSIntel RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id, 1435af75078fSIntel portid, queueid); 1436af75078fSIntel } 1437af75078fSIntel 1438af75078fSIntel while (1) { 1439af75078fSIntel 1440af75078fSIntel cur_tsc = rte_rdtsc(); 1441af75078fSIntel 1442af75078fSIntel /* 1443af75078fSIntel * TX burst queue drain 1444af75078fSIntel */ 1445af75078fSIntel diff_tsc = cur_tsc - prev_tsc; 14465c95261dSIntel if (unlikely(diff_tsc > drain_tsc)) { 1447af75078fSIntel 1448af75078fSIntel /* 1449af75078fSIntel * This could be optimized (use queueid instead of 1450af75078fSIntel * portid), but it is not called so often 1451af75078fSIntel */ 14521c17baf4SIntel for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { 1453af75078fSIntel if (qconf->tx_mbufs[portid].len == 0) 1454af75078fSIntel continue; 145596ff4453SKonstantin Ananyev send_burst(qconf, 1456af75078fSIntel qconf->tx_mbufs[portid].len, 1457af75078fSIntel portid); 1458af75078fSIntel qconf->tx_mbufs[portid].len = 0; 1459af75078fSIntel } 1460af75078fSIntel 1461af75078fSIntel prev_tsc = cur_tsc; 1462af75078fSIntel } 1463af75078fSIntel 
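		/*
		 * drain_tsc, computed at the top of main_loop, is
		 * BURST_TX_DRAIN_US expressed in TSC cycles; with, say, a
		 * 100 us drain interval and a 2 GHz TSC that is
		 * ceil(2e9 / 1e6) * 100 = 200000 cycles between forced
		 * flushes of partially filled TX buffers.
		 */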
1464af75078fSIntel /* 1465af75078fSIntel * Read packet from RX queues 1466af75078fSIntel */ 1467af75078fSIntel for (i = 0; i < qconf->n_rx_queue; ++i) { 1468af75078fSIntel portid = qconf->rx_queue_list[i].port_id; 1469af75078fSIntel queueid = qconf->rx_queue_list[i].queue_id; 147096ff4453SKonstantin Ananyev nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, 147196ff4453SKonstantin Ananyev MAX_PKT_BURST); 147296ff4453SKonstantin Ananyev if (nb_rx == 0) 147396ff4453SKonstantin Ananyev continue; 147496ff4453SKonstantin Ananyev 147596ff4453SKonstantin Ananyev #if (ENABLE_MULTI_BUFFER_OPTIMIZE == 1) 147696ff4453SKonstantin Ananyev #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) 1477997ee890SIntel { 147896ff4453SKonstantin Ananyev /* 147996ff4453SKonstantin Ananyev * Send nb_rx - nb_rx%4 packets 148096ff4453SKonstantin Ananyev * in groups of 4. 148196ff4453SKonstantin Ananyev */ 1482997ee890SIntel int32_t n = RTE_ALIGN_FLOOR(nb_rx, 4); 1483997ee890SIntel for (j = 0; j < n ; j+=4) { 1484997ee890SIntel uint32_t ol_flag = pkts_burst[j]->ol_flags 1485997ee890SIntel & pkts_burst[j+1]->ol_flags 1486997ee890SIntel & pkts_burst[j+2]->ol_flags 1487997ee890SIntel & pkts_burst[j+3]->ol_flags; 1488997ee890SIntel if (ol_flag & PKT_RX_IPV4_HDR ) { 1489997ee890SIntel simple_ipv4_fwd_4pkts(&pkts_burst[j], 1490997ee890SIntel portid, qconf); 1491997ee890SIntel } else if (ol_flag & PKT_RX_IPV6_HDR) { 1492997ee890SIntel simple_ipv6_fwd_4pkts(&pkts_burst[j], 1493997ee890SIntel portid, qconf); 1494997ee890SIntel } else { 1495997ee890SIntel l3fwd_simple_forward(pkts_burst[j], 1496997ee890SIntel portid, qconf); 1497997ee890SIntel l3fwd_simple_forward(pkts_burst[j+1], 1498997ee890SIntel portid, qconf); 1499997ee890SIntel l3fwd_simple_forward(pkts_burst[j+2], 1500997ee890SIntel portid, qconf); 1501997ee890SIntel l3fwd_simple_forward(pkts_burst[j+3], 1502997ee890SIntel portid, qconf); 1503997ee890SIntel } 1504997ee890SIntel } 1505997ee890SIntel for (; j < nb_rx ; j++) { 1506997ee890SIntel l3fwd_simple_forward(pkts_burst[j], 1507997ee890SIntel portid, qconf); 1508997ee890SIntel } 1509997ee890SIntel } 151096ff4453SKonstantin Ananyev #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) 151196ff4453SKonstantin Ananyev 151296ff4453SKonstantin Ananyev k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP); 151396ff4453SKonstantin Ananyev for (j = 0; j != k; j += FWDSTEP) { 151496ff4453SKonstantin Ananyev processx4_step1(&pkts_burst[j], 151596ff4453SKonstantin Ananyev &dip[j / FWDSTEP], 151696ff4453SKonstantin Ananyev &flag[j / FWDSTEP]); 151796ff4453SKonstantin Ananyev } 151896ff4453SKonstantin Ananyev 151996ff4453SKonstantin Ananyev k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP); 152096ff4453SKonstantin Ananyev for (j = 0; j != k; j += FWDSTEP) { 152196ff4453SKonstantin Ananyev processx4_step2(qconf, dip[j / FWDSTEP], 152296ff4453SKonstantin Ananyev flag[j / FWDSTEP], portid, 152396ff4453SKonstantin Ananyev &pkts_burst[j], &dst_port[j]); 152496ff4453SKonstantin Ananyev } 152596ff4453SKonstantin Ananyev 15267e7ab814SKonstantin Ananyev /* 15277e7ab814SKonstantin Ananyev * Finish packet processing and group consecutive 15287e7ab814SKonstantin Ananyev * packets with the same destination port. 
15297e7ab814SKonstantin Ananyev */ 153096ff4453SKonstantin Ananyev k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP); 15317e7ab814SKonstantin Ananyev if (k != 0) { 15327e7ab814SKonstantin Ananyev __m128i dp1, dp2; 15337e7ab814SKonstantin Ananyev 15347e7ab814SKonstantin Ananyev lp = pnum; 15357e7ab814SKonstantin Ananyev lp[0] = 1; 15367e7ab814SKonstantin Ananyev 15377e7ab814SKonstantin Ananyev processx4_step3(pkts_burst, dst_port); 15387e7ab814SKonstantin Ananyev 15397e7ab814SKonstantin Ananyev /* dp1: <d[0], d[1], d[2], d[3], ... > */ 15407e7ab814SKonstantin Ananyev dp1 = _mm_loadu_si128((__m128i *)dst_port); 15417e7ab814SKonstantin Ananyev 15427e7ab814SKonstantin Ananyev for (j = FWDSTEP; j != k; j += FWDSTEP) { 15437e7ab814SKonstantin Ananyev processx4_step3(&pkts_burst[j], 15447e7ab814SKonstantin Ananyev &dst_port[j]); 15457e7ab814SKonstantin Ananyev 15467e7ab814SKonstantin Ananyev /* 15477e7ab814SKonstantin Ananyev * dp2: 15487e7ab814SKonstantin Ananyev * <d[j-3], d[j-2], d[j-1], d[j], ... > 15497e7ab814SKonstantin Ananyev */ 15507e7ab814SKonstantin Ananyev dp2 = _mm_loadu_si128((__m128i *) 15517e7ab814SKonstantin Ananyev &dst_port[j - FWDSTEP + 1]); 15527e7ab814SKonstantin Ananyev lp = port_groupx4(&pnum[j - FWDSTEP], 15537e7ab814SKonstantin Ananyev lp, dp1, dp2); 15547e7ab814SKonstantin Ananyev 15557e7ab814SKonstantin Ananyev /* 15567e7ab814SKonstantin Ananyev * dp1: 15577e7ab814SKonstantin Ananyev * <d[j], d[j+1], d[j+2], d[j+3], ... > 15587e7ab814SKonstantin Ananyev */ 15597e7ab814SKonstantin Ananyev dp1 = _mm_srli_si128(dp2, 15607e7ab814SKonstantin Ananyev (FWDSTEP - 1) * 15617e7ab814SKonstantin Ananyev sizeof(dst_port[0])); 15627e7ab814SKonstantin Ananyev } 15637e7ab814SKonstantin Ananyev 15647e7ab814SKonstantin Ananyev /* 15657e7ab814SKonstantin Ananyev * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... > 15667e7ab814SKonstantin Ananyev */ 15677e7ab814SKonstantin Ananyev dp2 = _mm_shufflelo_epi16(dp1, 0xf9); 15687e7ab814SKonstantin Ananyev lp = port_groupx4(&pnum[j - FWDSTEP], lp, 15697e7ab814SKonstantin Ananyev dp1, dp2); 15707e7ab814SKonstantin Ananyev 15717e7ab814SKonstantin Ananyev /* 15727e7ab814SKonstantin Ananyev * remove values added by the last repeated 15737e7ab814SKonstantin Ananyev * dst port. 15747e7ab814SKonstantin Ananyev */ 15757e7ab814SKonstantin Ananyev lp[0]--; 15767e7ab814SKonstantin Ananyev dlp = dst_port[j - 1]; 15777e7ab814SKonstantin Ananyev } else { 15787e7ab814SKonstantin Ananyev /* set dlp and lp to the never used values. */ 15797e7ab814SKonstantin Ananyev dlp = BAD_PORT - 1; 15807e7ab814SKonstantin Ananyev lp = pnum + MAX_PKT_BURST; 158196ff4453SKonstantin Ananyev } 158296ff4453SKonstantin Ananyev 158396ff4453SKonstantin Ananyev /* Process up to last 3 packets one by one. 
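			 * The switch below relies on intentional fall-through:
			 * the nb_rx % FWDSTEP leftover packets each take the
			 * scalar process_packet() path and are appended to the
			 * port grouping via GROUP_PORT_STEP.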
			 */
			switch (nb_rx % FWDSTEP) {
			case 3:
				process_packet(qconf, pkts_burst[j],
					dst_port + j, portid);
				GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
				j++;
			case 2:
				process_packet(qconf, pkts_burst[j],
					dst_port + j, portid);
				GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
				j++;
			case 1:
				process_packet(qconf, pkts_burst[j],
					dst_port + j, portid);
				GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
				j++;
			}

			/*
			 * Send packets out through their destination ports.
			 * Consecutive packets with the same destination port
			 * are already grouped together.
			 * If the destination port for a packet equals BAD_PORT,
			 * then free the packet without sending it out.
			 */
			for (j = 0; j < nb_rx; j += k) {

				int32_t m;
				uint16_t pn;

				pn = dst_port[j];
				k = pnum[j];

				if (likely(pn != BAD_PORT)) {
					send_packetsx4(qconf, pn,
						pkts_burst + j, k);
				} else {
					for (m = j; m != j + k; m++)
						rte_pktmbuf_free(pkts_burst[m]);
				}
			}

#endif /* APP_LOOKUP_METHOD */
#else /* ENABLE_MULTI_BUFFER_OPTIMIZE == 0 */

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], portid,
					qconf);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				l3fwd_simple_forward(pkts_burst[j], portid,
					qconf);
			}
#endif /* ENABLE_MULTI_BUFFER_OPTIMIZE */

		}
	}
}

static int
check_lcore_params(void)
1656af75078fSIntel { 1657af75078fSIntel uint8_t queue, lcore; 1658af75078fSIntel uint16_t i; 1659af75078fSIntel int socketid; 1660af75078fSIntel 1661af75078fSIntel for (i = 0; i < nb_lcore_params; ++i) { 1662af75078fSIntel queue = lcore_params[i].queue_id; 1663af75078fSIntel if (queue >= MAX_RX_QUEUE_PER_PORT) { 1664af75078fSIntel printf("invalid queue number: %hhu\n", queue); 1665af75078fSIntel return -1; 1666af75078fSIntel } 1667af75078fSIntel lcore = lcore_params[i].lcore_id; 1668af75078fSIntel if (!rte_lcore_is_enabled(lcore)) { 1669af75078fSIntel printf("error: lcore %hhu is not enabled in lcore mask\n", lcore); 1670af75078fSIntel return -1; 1671af75078fSIntel } 1672af75078fSIntel if ((socketid = rte_lcore_to_socket_id(lcore) != 0) && 1673af75078fSIntel (numa_on == 0)) { 1674af75078fSIntel printf("warning: lcore %hhu is on socket %d with numa off \n", 1675af75078fSIntel lcore, socketid); 1676af75078fSIntel } 1677af75078fSIntel } 1678af75078fSIntel return 0; 1679af75078fSIntel } 1680af75078fSIntel 1681af75078fSIntel static int 1682af75078fSIntel check_port_config(const unsigned nb_ports) 1683af75078fSIntel { 1684af75078fSIntel unsigned portid; 1685af75078fSIntel uint16_t i; 1686af75078fSIntel 1687af75078fSIntel for (i = 0; i < nb_lcore_params; ++i) { 1688af75078fSIntel portid = lcore_params[i].port_id; 1689af75078fSIntel if ((enabled_port_mask & (1 << portid)) == 0) { 1690af75078fSIntel printf("port %u is not enabled in port mask\n", portid); 1691af75078fSIntel return -1; 1692af75078fSIntel } 1693af75078fSIntel if (portid >= nb_ports) { 1694af75078fSIntel printf("port %u is not present on the board\n", portid); 1695af75078fSIntel return -1; 1696af75078fSIntel } 1697af75078fSIntel } 1698af75078fSIntel return 0; 1699af75078fSIntel } 1700af75078fSIntel 1701af75078fSIntel static uint8_t 1702af75078fSIntel get_port_n_rx_queues(const uint8_t port) 1703af75078fSIntel { 1704af75078fSIntel int queue = -1; 1705af75078fSIntel uint16_t i; 1706af75078fSIntel 1707af75078fSIntel for (i = 0; i < nb_lcore_params; ++i) { 1708af75078fSIntel if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue) 1709af75078fSIntel queue = lcore_params[i].queue_id; 1710af75078fSIntel } 1711af75078fSIntel return (uint8_t)(++queue); 1712af75078fSIntel } 1713af75078fSIntel 1714af75078fSIntel static int 1715af75078fSIntel init_lcore_rx_queues(void) 1716af75078fSIntel { 1717af75078fSIntel uint16_t i, nb_rx_queue; 1718af75078fSIntel uint8_t lcore; 1719af75078fSIntel 1720af75078fSIntel for (i = 0; i < nb_lcore_params; ++i) { 1721af75078fSIntel lcore = lcore_params[i].lcore_id; 1722af75078fSIntel nb_rx_queue = lcore_conf[lcore].n_rx_queue; 1723af75078fSIntel if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { 1724af75078fSIntel printf("error: too many queues (%u) for lcore: %u\n", 1725af75078fSIntel (unsigned)nb_rx_queue + 1, (unsigned)lcore); 1726af75078fSIntel return -1; 1727af75078fSIntel } else { 1728af75078fSIntel lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = 1729af75078fSIntel lcore_params[i].port_id; 1730af75078fSIntel lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id = 1731af75078fSIntel lcore_params[i].queue_id; 1732af75078fSIntel lcore_conf[lcore].n_rx_queue++; 1733af75078fSIntel } 1734af75078fSIntel } 1735af75078fSIntel return 0; 1736af75078fSIntel } 1737af75078fSIntel 1738af75078fSIntel /* display usage */ 1739af75078fSIntel static void 1740af75078fSIntel print_usage(const char *prgname) 1741af75078fSIntel { 1742af75078fSIntel printf ("%s [EAL options] -- -p PORTMASK -P" 1743f68aad79SIntel " 
[--config (port,queue,lcore)[,(port,queue,lcore]]" 1744f68aad79SIntel " [--enable-jumbo [--max-pkt-len PKTLEN]]\n" 1745af75078fSIntel " -p PORTMASK: hexadecimal bitmask of ports to configure\n" 1746f68aad79SIntel " -P : enable promiscuous mode\n" 1747af75078fSIntel " --config (port,queue,lcore): rx queues configuration\n" 1748f68aad79SIntel " --no-numa: optional, disable numa awareness\n" 1749997ee890SIntel " --ipv6: optional, specify it if running ipv6 packets\n" 1750f68aad79SIntel " --enable-jumbo: enable jumbo frame" 1751997ee890SIntel " which max packet len is PKTLEN in decimal (64-9600)\n" 1752997ee890SIntel " --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n", 1753af75078fSIntel prgname); 1754af75078fSIntel } 1755af75078fSIntel 1756f68aad79SIntel static int parse_max_pkt_len(const char *pktlen) 1757f68aad79SIntel { 1758f68aad79SIntel char *end = NULL; 1759f68aad79SIntel unsigned long len; 1760f68aad79SIntel 1761f68aad79SIntel /* parse decimal string */ 1762f68aad79SIntel len = strtoul(pktlen, &end, 10); 1763f68aad79SIntel if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0')) 1764f68aad79SIntel return -1; 1765f68aad79SIntel 1766f68aad79SIntel if (len == 0) 1767f68aad79SIntel return -1; 1768f68aad79SIntel 1769f68aad79SIntel return len; 1770f68aad79SIntel } 1771f68aad79SIntel 1772af75078fSIntel static int 1773af75078fSIntel parse_portmask(const char *portmask) 1774af75078fSIntel { 1775af75078fSIntel char *end = NULL; 1776af75078fSIntel unsigned long pm; 1777af75078fSIntel 1778af75078fSIntel /* parse hexadecimal string */ 1779af75078fSIntel pm = strtoul(portmask, &end, 16); 1780af75078fSIntel if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) 1781af75078fSIntel return -1; 1782af75078fSIntel 1783af75078fSIntel if (pm == 0) 1784af75078fSIntel return -1; 1785af75078fSIntel 1786af75078fSIntel return pm; 1787af75078fSIntel } 1788af75078fSIntel 1789997ee890SIntel #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) 1790997ee890SIntel static int 1791997ee890SIntel parse_hash_entry_number(const char *hash_entry_num) 1792997ee890SIntel { 1793997ee890SIntel char *end = NULL; 1794997ee890SIntel unsigned long hash_en; 1795997ee890SIntel /* parse hexadecimal string */ 1796997ee890SIntel hash_en = strtoul(hash_entry_num, &end, 16); 1797997ee890SIntel if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0')) 1798997ee890SIntel return -1; 1799997ee890SIntel 1800997ee890SIntel if (hash_en == 0) 1801997ee890SIntel return -1; 1802997ee890SIntel 1803997ee890SIntel return hash_en; 1804997ee890SIntel } 1805997ee890SIntel #endif 1806997ee890SIntel 1807af75078fSIntel static int 1808af75078fSIntel parse_config(const char *q_arg) 1809af75078fSIntel { 1810af75078fSIntel char s[256]; 1811af75078fSIntel const char *p, *p0 = q_arg; 1812af75078fSIntel char *end; 1813af75078fSIntel enum fieldnames { 1814af75078fSIntel FLD_PORT = 0, 1815af75078fSIntel FLD_QUEUE, 1816af75078fSIntel FLD_LCORE, 1817af75078fSIntel _NUM_FLD 1818af75078fSIntel }; 1819af75078fSIntel unsigned long int_fld[_NUM_FLD]; 1820af75078fSIntel char *str_fld[_NUM_FLD]; 1821af75078fSIntel int i; 1822af75078fSIntel unsigned size; 1823af75078fSIntel 1824af75078fSIntel nb_lcore_params = 0; 1825af75078fSIntel 1826af75078fSIntel while ((p = strchr(p0,'(')) != NULL) { 1827af75078fSIntel ++p; 1828af75078fSIntel if((p0 = strchr(p,')')) == NULL) 1829af75078fSIntel return -1; 1830af75078fSIntel 1831af75078fSIntel size = p0 - p; 1832af75078fSIntel if(size >= sizeof(s)) 1833af75078fSIntel return -1; 
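		/*
		 * Copy the text between '(' and ')' into a bounded buffer and
		 * split it on ',' into the port, queue and lcore fields, each
		 * parsed as an unsigned number no larger than 255.
		 */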
1834af75078fSIntel 18356f41fe75SStephen Hemminger snprintf(s, sizeof(s), "%.*s", size, p); 1836af75078fSIntel if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD) 1837af75078fSIntel return -1; 1838af75078fSIntel for (i = 0; i < _NUM_FLD; i++){ 1839af75078fSIntel errno = 0; 1840af75078fSIntel int_fld[i] = strtoul(str_fld[i], &end, 0); 1841af75078fSIntel if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) 1842af75078fSIntel return -1; 1843af75078fSIntel } 1844af75078fSIntel if (nb_lcore_params >= MAX_LCORE_PARAMS) { 1845af75078fSIntel printf("exceeded max number of lcore params: %hu\n", 1846af75078fSIntel nb_lcore_params); 1847af75078fSIntel return -1; 1848af75078fSIntel } 1849af75078fSIntel lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT]; 1850af75078fSIntel lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE]; 1851af75078fSIntel lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE]; 1852af75078fSIntel ++nb_lcore_params; 1853af75078fSIntel } 1854af75078fSIntel lcore_params = lcore_params_array; 1855af75078fSIntel return 0; 1856af75078fSIntel } 1857af75078fSIntel 1858997ee890SIntel #define CMD_LINE_OPT_CONFIG "config" 1859997ee890SIntel #define CMD_LINE_OPT_NO_NUMA "no-numa" 1860997ee890SIntel #define CMD_LINE_OPT_IPV6 "ipv6" 1861997ee890SIntel #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo" 1862997ee890SIntel #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num" 1863997ee890SIntel 1864af75078fSIntel /* Parse the argument given in the command line of the application */ 1865af75078fSIntel static int 1866af75078fSIntel parse_args(int argc, char **argv) 1867af75078fSIntel { 1868af75078fSIntel int opt, ret; 1869af75078fSIntel char **argvopt; 1870af75078fSIntel int option_index; 1871af75078fSIntel char *prgname = argv[0]; 1872af75078fSIntel static struct option lgopts[] = { 1873997ee890SIntel {CMD_LINE_OPT_CONFIG, 1, 0, 0}, 1874997ee890SIntel {CMD_LINE_OPT_NO_NUMA, 0, 0, 0}, 1875997ee890SIntel {CMD_LINE_OPT_IPV6, 0, 0, 0}, 1876997ee890SIntel {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0}, 1877997ee890SIntel {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0}, 1878af75078fSIntel {NULL, 0, 0, 0} 1879af75078fSIntel }; 1880af75078fSIntel 1881af75078fSIntel argvopt = argv; 1882af75078fSIntel 1883af75078fSIntel while ((opt = getopt_long(argc, argvopt, "p:P", 1884af75078fSIntel lgopts, &option_index)) != EOF) { 1885af75078fSIntel 1886af75078fSIntel switch (opt) { 1887af75078fSIntel /* portmask */ 1888af75078fSIntel case 'p': 1889af75078fSIntel enabled_port_mask = parse_portmask(optarg); 1890af75078fSIntel if (enabled_port_mask == 0) { 1891af75078fSIntel printf("invalid portmask\n"); 1892af75078fSIntel print_usage(prgname); 1893af75078fSIntel return -1; 1894af75078fSIntel } 1895af75078fSIntel break; 1896af75078fSIntel case 'P': 1897af75078fSIntel printf("Promiscuous mode selected\n"); 1898af75078fSIntel promiscuous_on = 1; 1899af75078fSIntel break; 1900af75078fSIntel 1901af75078fSIntel /* long options */ 1902af75078fSIntel case 0: 1903997ee890SIntel if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_CONFIG, 1904997ee890SIntel sizeof (CMD_LINE_OPT_CONFIG))) { 1905af75078fSIntel ret = parse_config(optarg); 1906af75078fSIntel if (ret) { 1907af75078fSIntel printf("invalid config\n"); 1908af75078fSIntel print_usage(prgname); 1909af75078fSIntel return -1; 1910af75078fSIntel } 1911af75078fSIntel } 1912af75078fSIntel 1913997ee890SIntel if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_NUMA, 1914997ee890SIntel sizeof(CMD_LINE_OPT_NO_NUMA))) { 
1915af75078fSIntel printf("numa is disabled \n"); 1916af75078fSIntel numa_on = 0; 1917af75078fSIntel } 1918f68aad79SIntel 1919997ee890SIntel #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) 1920997ee890SIntel if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_IPV6, 1921997ee890SIntel sizeof(CMD_LINE_OPT_IPV6))) { 1922997ee890SIntel printf("ipv6 is specified \n"); 1923997ee890SIntel ipv6 = 1; 1924997ee890SIntel } 1925997ee890SIntel #endif 1926997ee890SIntel 1927997ee890SIntel if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO, 1928997ee890SIntel sizeof (CMD_LINE_OPT_ENABLE_JUMBO))) { 1929f68aad79SIntel struct option lenopts = {"max-pkt-len", required_argument, 0, 0}; 1930f68aad79SIntel 1931997ee890SIntel printf("jumbo frame is enabled - disabling simple TX path\n"); 1932f68aad79SIntel port_conf.rxmode.jumbo_frame = 1; 1933f68aad79SIntel 1934f68aad79SIntel /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */ 1935f68aad79SIntel if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) { 1936f68aad79SIntel ret = parse_max_pkt_len(optarg); 1937f68aad79SIntel if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)){ 1938f68aad79SIntel printf("invalid packet length\n"); 1939f68aad79SIntel print_usage(prgname); 1940f68aad79SIntel return -1; 1941f68aad79SIntel } 1942f68aad79SIntel port_conf.rxmode.max_rx_pkt_len = ret; 1943f68aad79SIntel } 1944f68aad79SIntel printf("set jumbo frame max packet length to %u\n", 1945f68aad79SIntel (unsigned int)port_conf.rxmode.max_rx_pkt_len); 1946f68aad79SIntel } 1947997ee890SIntel #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) 1948997ee890SIntel if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_HASH_ENTRY_NUM, 1949997ee890SIntel sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) { 1950997ee890SIntel ret = parse_hash_entry_number(optarg); 1951997ee890SIntel if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) { 1952997ee890SIntel hash_entry_number = ret; 1953997ee890SIntel } else { 1954997ee890SIntel printf("invalid hash entry number\n"); 1955997ee890SIntel print_usage(prgname); 1956997ee890SIntel return -1; 1957997ee890SIntel } 1958997ee890SIntel } 1959997ee890SIntel #endif 1960af75078fSIntel break; 1961af75078fSIntel 1962af75078fSIntel default: 1963af75078fSIntel print_usage(prgname); 1964af75078fSIntel return -1; 1965af75078fSIntel } 1966af75078fSIntel } 1967af75078fSIntel 1968af75078fSIntel if (optind >= 0) 1969af75078fSIntel argv[optind-1] = prgname; 1970af75078fSIntel 1971af75078fSIntel ret = optind-1; 1972af75078fSIntel optind = 0; /* reset getopt lib */ 1973af75078fSIntel return ret; 1974af75078fSIntel } 1975af75078fSIntel 1976af75078fSIntel static void 1977af75078fSIntel print_ethaddr(const char *name, const struct ether_addr *eth_addr) 1978af75078fSIntel { 1979*ec3d82dbSCunming Liang char buf[ETHER_ADDR_FMT_SIZE]; 1980*ec3d82dbSCunming Liang ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); 1981*ec3d82dbSCunming Liang printf("%s%s", name, buf); 1982af75078fSIntel } 1983af75078fSIntel 1984af75078fSIntel #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) 1985997ee890SIntel 1986997ee890SIntel static void convert_ipv4_5tuple(struct ipv4_5tuple* key1, 1987997ee890SIntel union ipv4_5tuple_host* key2) 1988997ee890SIntel { 1989997ee890SIntel key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst); 1990997ee890SIntel key2->ip_src = rte_cpu_to_be_32(key1->ip_src); 1991997ee890SIntel key2->port_dst = rte_cpu_to_be_16(key1->port_dst); 1992997ee890SIntel key2->port_src = rte_cpu_to_be_16(key1->port_src); 1993997ee890SIntel key2->proto = key1->proto; 
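	/*
	 * Zero the padding bytes: lookup keys built from packet headers are
	 * masked before the hash lookup, so the stored key must hold
	 * deterministic zeros in the unused bytes to compare equal.
	 */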
1994997ee890SIntel key2->pad0 = 0; 1995997ee890SIntel key2->pad1 = 0; 1996997ee890SIntel return; 1997997ee890SIntel } 1998997ee890SIntel 1999997ee890SIntel static void convert_ipv6_5tuple(struct ipv6_5tuple* key1, 2000997ee890SIntel union ipv6_5tuple_host* key2) 2001997ee890SIntel { 2002997ee890SIntel uint32_t i; 2003997ee890SIntel for (i = 0; i < 16; i++) 2004997ee890SIntel { 2005997ee890SIntel key2->ip_dst[i] = key1->ip_dst[i]; 2006997ee890SIntel key2->ip_src[i] = key1->ip_src[i]; 2007997ee890SIntel } 2008997ee890SIntel key2->port_dst = rte_cpu_to_be_16(key1->port_dst); 2009997ee890SIntel key2->port_src = rte_cpu_to_be_16(key1->port_src); 2010997ee890SIntel key2->proto = key1->proto; 2011997ee890SIntel key2->pad0 = 0; 2012997ee890SIntel key2->pad1 = 0; 2013997ee890SIntel key2->reserve = 0; 2014997ee890SIntel return; 2015997ee890SIntel } 2016997ee890SIntel 2017997ee890SIntel #define BYTE_VALUE_MAX 256 2018997ee890SIntel #define ALL_32_BITS 0xffffffff 2019997ee890SIntel #define BIT_8_TO_15 0x0000ff00 2020997ee890SIntel static inline void 2021997ee890SIntel populate_ipv4_few_flow_into_table(const struct rte_hash* h) 2022997ee890SIntel { 2023997ee890SIntel uint32_t i; 2024997ee890SIntel int32_t ret; 2025997ee890SIntel uint32_t array_len = sizeof(ipv4_l3fwd_route_array)/sizeof(ipv4_l3fwd_route_array[0]); 2026997ee890SIntel 2027997ee890SIntel mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15); 2028997ee890SIntel for (i = 0; i < array_len; i++) { 2029997ee890SIntel struct ipv4_l3fwd_route entry; 2030997ee890SIntel union ipv4_5tuple_host newkey; 2031997ee890SIntel entry = ipv4_l3fwd_route_array[i]; 2032997ee890SIntel convert_ipv4_5tuple(&entry.key, &newkey); 2033997ee890SIntel ret = rte_hash_add_key (h,(void *) &newkey); 2034997ee890SIntel if (ret < 0) { 203596ff4453SKonstantin Ananyev rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32 203696ff4453SKonstantin Ananyev " to the l3fwd hash.\n", i); 2037997ee890SIntel } 2038997ee890SIntel ipv4_l3fwd_out_if[ret] = entry.if_out; 2039997ee890SIntel } 204096ff4453SKonstantin Ananyev printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len); 2041997ee890SIntel } 2042997ee890SIntel 2043997ee890SIntel #define BIT_16_TO_23 0x00ff0000 2044997ee890SIntel static inline void 2045997ee890SIntel populate_ipv6_few_flow_into_table(const struct rte_hash* h) 2046997ee890SIntel { 2047997ee890SIntel uint32_t i; 2048997ee890SIntel int32_t ret; 2049997ee890SIntel uint32_t array_len = sizeof(ipv6_l3fwd_route_array)/sizeof(ipv6_l3fwd_route_array[0]); 2050997ee890SIntel 2051997ee890SIntel mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23); 2052997ee890SIntel mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS); 2053997ee890SIntel for (i = 0; i < array_len; i++) { 2054997ee890SIntel struct ipv6_l3fwd_route entry; 2055997ee890SIntel union ipv6_5tuple_host newkey; 2056997ee890SIntel entry = ipv6_l3fwd_route_array[i]; 2057997ee890SIntel convert_ipv6_5tuple(&entry.key, &newkey); 2058997ee890SIntel ret = rte_hash_add_key (h, (void *) &newkey); 2059997ee890SIntel if (ret < 0) { 206096ff4453SKonstantin Ananyev rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32 206196ff4453SKonstantin Ananyev " to the l3fwd hash.\n", i); 2062997ee890SIntel } 2063997ee890SIntel ipv6_l3fwd_out_if[ret] = entry.if_out; 2064997ee890SIntel } 206596ff4453SKonstantin Ananyev printf("Hash: Adding 0x%" PRIx32 "keys\n", array_len); 2066997ee890SIntel } 2067997ee890SIntel 2068997ee890SIntel #define NUMBER_PORT_USED 4 2069997ee890SIntel static inline void 
2070997ee890SIntel populate_ipv4_many_flow_into_table(const struct rte_hash* h, 2071997ee890SIntel unsigned int nr_flow) 2072997ee890SIntel { 2073997ee890SIntel unsigned i; 2074997ee890SIntel mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15); 2075997ee890SIntel for (i = 0; i < nr_flow; i++) { 2076997ee890SIntel struct ipv4_l3fwd_route entry; 2077997ee890SIntel union ipv4_5tuple_host newkey; 2078997ee890SIntel uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX); 2079997ee890SIntel uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX); 2080997ee890SIntel uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX)); 2081997ee890SIntel /* Create the ipv4 exact match flow */ 208213c4ebd6SBruce Richardson memset(&entry, 0, sizeof(entry)); 2083997ee890SIntel switch (i & (NUMBER_PORT_USED -1)) { 2084997ee890SIntel case 0: 2085997ee890SIntel entry = ipv4_l3fwd_route_array[0]; 2086997ee890SIntel entry.key.ip_dst = IPv4(101,c,b,a); 2087997ee890SIntel break; 2088997ee890SIntel case 1: 2089997ee890SIntel entry = ipv4_l3fwd_route_array[1]; 2090997ee890SIntel entry.key.ip_dst = IPv4(201,c,b,a); 2091997ee890SIntel break; 2092997ee890SIntel case 2: 2093997ee890SIntel entry = ipv4_l3fwd_route_array[2]; 2094997ee890SIntel entry.key.ip_dst = IPv4(111,c,b,a); 2095997ee890SIntel break; 2096997ee890SIntel case 3: 2097997ee890SIntel entry = ipv4_l3fwd_route_array[3]; 2098997ee890SIntel entry.key.ip_dst = IPv4(211,c,b,a); 2099997ee890SIntel break; 2100997ee890SIntel }; 2101997ee890SIntel convert_ipv4_5tuple(&entry.key, &newkey); 2102997ee890SIntel int32_t ret = rte_hash_add_key(h,(void *) &newkey); 2103997ee890SIntel if (ret < 0) { 2104997ee890SIntel rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i); 2105997ee890SIntel } 2106997ee890SIntel ipv4_l3fwd_out_if[ret] = (uint8_t) entry.if_out; 2107997ee890SIntel 2108997ee890SIntel } 2109997ee890SIntel printf("Hash: Adding 0x%x keys\n", nr_flow); 2110997ee890SIntel } 2111997ee890SIntel 2112997ee890SIntel static inline void 2113997ee890SIntel populate_ipv6_many_flow_into_table(const struct rte_hash* h, 2114997ee890SIntel unsigned int nr_flow) 2115997ee890SIntel { 2116997ee890SIntel unsigned i; 2117997ee890SIntel mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23); 2118997ee890SIntel mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS); 2119997ee890SIntel for (i = 0; i < nr_flow; i++) { 2120997ee890SIntel struct ipv6_l3fwd_route entry; 2121997ee890SIntel union ipv6_5tuple_host newkey; 2122997ee890SIntel uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX); 2123997ee890SIntel uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX); 2124997ee890SIntel uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX)); 2125997ee890SIntel /* Create the ipv6 exact match flow */ 212613c4ebd6SBruce Richardson memset(&entry, 0, sizeof(entry)); 2127997ee890SIntel switch (i & (NUMBER_PORT_USED - 1)) { 2128997ee890SIntel case 0: entry = ipv6_l3fwd_route_array[0]; break; 2129997ee890SIntel case 1: entry = ipv6_l3fwd_route_array[1]; break; 2130997ee890SIntel case 2: entry = ipv6_l3fwd_route_array[2]; break; 2131997ee890SIntel case 3: entry = ipv6_l3fwd_route_array[3]; break; 2132997ee890SIntel }; 2133997ee890SIntel entry.key.ip_dst[13] = c; 2134997ee890SIntel entry.key.ip_dst[14] = b; 2135997ee890SIntel entry.key.ip_dst[15] = a; 2136997ee890SIntel convert_ipv6_5tuple(&entry.key, &newkey); 2137997ee890SIntel int32_t ret = rte_hash_add_key(h,(void *) 
&newkey); 2138997ee890SIntel if (ret < 0) { 2139997ee890SIntel rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i); 2140997ee890SIntel } 2141997ee890SIntel ipv6_l3fwd_out_if[ret] = (uint8_t) entry.if_out; 2142997ee890SIntel 2143997ee890SIntel } 2144997ee890SIntel printf("Hash: Adding 0x%x keys\n", nr_flow); 2145997ee890SIntel } 2146997ee890SIntel 2147af75078fSIntel static void 2148af75078fSIntel setup_hash(int socketid) 2149af75078fSIntel { 2150997ee890SIntel struct rte_hash_parameters ipv4_l3fwd_hash_params = { 2151997ee890SIntel .name = NULL, 2152997ee890SIntel .entries = L3FWD_HASH_ENTRIES, 2153997ee890SIntel .bucket_entries = 4, 2154997ee890SIntel .key_len = sizeof(union ipv4_5tuple_host), 2155997ee890SIntel .hash_func = ipv4_hash_crc, 2156997ee890SIntel .hash_func_init_val = 0, 2157997ee890SIntel }; 2158997ee890SIntel 2159997ee890SIntel struct rte_hash_parameters ipv6_l3fwd_hash_params = { 2160997ee890SIntel .name = NULL, 2161997ee890SIntel .entries = L3FWD_HASH_ENTRIES, 2162997ee890SIntel .bucket_entries = 4, 2163997ee890SIntel .key_len = sizeof(union ipv6_5tuple_host), 2164997ee890SIntel .hash_func = ipv6_hash_crc, 2165997ee890SIntel .hash_func_init_val = 0, 2166997ee890SIntel }; 2167997ee890SIntel 2168af75078fSIntel char s[64]; 2169af75078fSIntel 2170f68aad79SIntel /* create ipv4 hash */ 21716f41fe75SStephen Hemminger snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid); 2172f68aad79SIntel ipv4_l3fwd_hash_params.name = s; 2173f68aad79SIntel ipv4_l3fwd_hash_params.socket_id = socketid; 2174f68aad79SIntel ipv4_l3fwd_lookup_struct[socketid] = rte_hash_create(&ipv4_l3fwd_hash_params); 2175f68aad79SIntel if (ipv4_l3fwd_lookup_struct[socketid] == NULL) 2176af75078fSIntel rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on " 2177af75078fSIntel "socket %d\n", socketid); 2178af75078fSIntel 2179f68aad79SIntel /* create ipv6 hash */ 21806f41fe75SStephen Hemminger snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid); 2181f68aad79SIntel ipv6_l3fwd_hash_params.name = s; 2182f68aad79SIntel ipv6_l3fwd_hash_params.socket_id = socketid; 2183f68aad79SIntel ipv6_l3fwd_lookup_struct[socketid] = rte_hash_create(&ipv6_l3fwd_hash_params); 2184f68aad79SIntel if (ipv6_l3fwd_lookup_struct[socketid] == NULL) 2185f68aad79SIntel rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on " 2186f68aad79SIntel "socket %d\n", socketid); 2187f68aad79SIntel 2188997ee890SIntel if (hash_entry_number != HASH_ENTRY_NUMBER_DEFAULT) { 2189997ee890SIntel /* For testing hash matching with a large number of flows we 2190997ee890SIntel * generate millions of IP 5-tuples with an incremented dst 2191997ee890SIntel * address to initialize the hash table. 
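		 * populate_ipv4_many_flow_into_table() above spreads the flow
		 * counter over the low three octets of the destination address
		 * across NUMBER_PORT_USED route templates, so hash_entry_number
		 * distinct keys end up in the table.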
*/ 2192997ee890SIntel if (ipv6 == 0) { 2193f68aad79SIntel /* populate the ipv4 hash */ 2194997ee890SIntel populate_ipv4_many_flow_into_table( 2195997ee890SIntel ipv4_l3fwd_lookup_struct[socketid], hash_entry_number); 2196997ee890SIntel } else { 2197f68aad79SIntel /* populate the ipv6 hash */ 2198997ee890SIntel populate_ipv6_many_flow_into_table( 2199997ee890SIntel ipv6_l3fwd_lookup_struct[socketid], hash_entry_number); 2200f68aad79SIntel } 2201997ee890SIntel } else { 2202997ee890SIntel /* Use data in ipv4/ipv6 l3fwd lookup table directly to initialize the hash table */ 2203997ee890SIntel if (ipv6 == 0) { 2204997ee890SIntel /* populate the ipv4 hash */ 2205997ee890SIntel populate_ipv4_few_flow_into_table(ipv4_l3fwd_lookup_struct[socketid]); 2206997ee890SIntel } else { 2207997ee890SIntel /* populate the ipv6 hash */ 2208997ee890SIntel populate_ipv6_few_flow_into_table(ipv6_l3fwd_lookup_struct[socketid]); 2209997ee890SIntel } 2210af75078fSIntel } 2211af75078fSIntel } 2212af75078fSIntel #endif 2213af75078fSIntel 2214af75078fSIntel #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) 2215af75078fSIntel static void 2216af75078fSIntel setup_lpm(int socketid) 2217af75078fSIntel { 2218997ee890SIntel struct rte_lpm6_config config; 2219af75078fSIntel unsigned i; 2220af75078fSIntel int ret; 2221af75078fSIntel char s[64]; 2222af75078fSIntel 2223af75078fSIntel /* create the LPM table */ 22246f41fe75SStephen Hemminger snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid); 2225f68aad79SIntel ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid, 2226f68aad79SIntel IPV4_L3FWD_LPM_MAX_RULES, 0); 2227f68aad79SIntel if (ipv4_l3fwd_lookup_struct[socketid] == NULL) 2228af75078fSIntel rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table" 2229af75078fSIntel " on socket %d\n", socketid); 2230af75078fSIntel 2231af75078fSIntel /* populate the LPM table */ 2232f68aad79SIntel for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) { 223396ff4453SKonstantin Ananyev 223496ff4453SKonstantin Ananyev /* skip unused ports */ 223596ff4453SKonstantin Ananyev if ((1 << ipv4_l3fwd_route_array[i].if_out & 223696ff4453SKonstantin Ananyev enabled_port_mask) == 0) 223796ff4453SKonstantin Ananyev continue; 223896ff4453SKonstantin Ananyev 2239f68aad79SIntel ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid], 2240f68aad79SIntel ipv4_l3fwd_route_array[i].ip, 2241f68aad79SIntel ipv4_l3fwd_route_array[i].depth, 2242f68aad79SIntel ipv4_l3fwd_route_array[i].if_out); 2243af75078fSIntel 2244af75078fSIntel if (ret < 0) { 2245af75078fSIntel rte_exit(EXIT_FAILURE, "Unable to add entry %u to the " 2246af75078fSIntel "l3fwd LPM table on socket %d\n", 2247af75078fSIntel i, socketid); 2248af75078fSIntel } 2249af75078fSIntel 2250af75078fSIntel printf("LPM: Adding route 0x%08x / %d (%d)\n", 2251f68aad79SIntel (unsigned)ipv4_l3fwd_route_array[i].ip, 2252f68aad79SIntel ipv4_l3fwd_route_array[i].depth, 2253f68aad79SIntel ipv4_l3fwd_route_array[i].if_out); 2254af75078fSIntel } 2255997ee890SIntel 2256997ee890SIntel /* create the LPM6 table */ 22576f41fe75SStephen Hemminger snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid); 2258997ee890SIntel 2259997ee890SIntel config.max_rules = IPV6_L3FWD_LPM_MAX_RULES; 2260997ee890SIntel config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S; 2261997ee890SIntel config.flags = 0; 2262997ee890SIntel ipv6_l3fwd_lookup_struct[socketid] = rte_lpm6_create(s, socketid, 2263997ee890SIntel &config); 2264997ee890SIntel if (ipv6_l3fwd_lookup_struct[socketid] == NULL) 2265997ee890SIntel rte_exit(EXIT_FAILURE, "Unable to create the 
l3fwd LPM table" 2266997ee890SIntel " on socket %d\n", socketid); 2267997ee890SIntel 2268997ee890SIntel /* populate the LPM table */ 2269997ee890SIntel for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) { 227096ff4453SKonstantin Ananyev 227196ff4453SKonstantin Ananyev /* skip unused ports */ 227296ff4453SKonstantin Ananyev if ((1 << ipv6_l3fwd_route_array[i].if_out & 227396ff4453SKonstantin Ananyev enabled_port_mask) == 0) 227496ff4453SKonstantin Ananyev continue; 227596ff4453SKonstantin Ananyev 2276997ee890SIntel ret = rte_lpm6_add(ipv6_l3fwd_lookup_struct[socketid], 2277997ee890SIntel ipv6_l3fwd_route_array[i].ip, 2278997ee890SIntel ipv6_l3fwd_route_array[i].depth, 2279997ee890SIntel ipv6_l3fwd_route_array[i].if_out); 2280997ee890SIntel 2281997ee890SIntel if (ret < 0) { 2282997ee890SIntel rte_exit(EXIT_FAILURE, "Unable to add entry %u to the " 2283997ee890SIntel "l3fwd LPM table on socket %d\n", 2284997ee890SIntel i, socketid); 2285997ee890SIntel } 2286997ee890SIntel 2287997ee890SIntel printf("LPM: Adding route %s / %d (%d)\n", 2288997ee890SIntel "IPV6", 2289997ee890SIntel ipv6_l3fwd_route_array[i].depth, 2290997ee890SIntel ipv6_l3fwd_route_array[i].if_out); 2291997ee890SIntel } 2292af75078fSIntel } 2293af75078fSIntel #endif 2294af75078fSIntel 2295af75078fSIntel static int 2296f68aad79SIntel init_mem(unsigned nb_mbuf) 2297af75078fSIntel { 2298af75078fSIntel struct lcore_conf *qconf; 2299af75078fSIntel int socketid; 2300af75078fSIntel unsigned lcore_id; 2301af75078fSIntel char s[64]; 2302af75078fSIntel 2303af75078fSIntel for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { 2304af75078fSIntel if (rte_lcore_is_enabled(lcore_id) == 0) 2305af75078fSIntel continue; 2306af75078fSIntel 2307af75078fSIntel if (numa_on) 2308af75078fSIntel socketid = rte_lcore_to_socket_id(lcore_id); 2309af75078fSIntel else 2310af75078fSIntel socketid = 0; 2311af75078fSIntel 2312af75078fSIntel if (socketid >= NB_SOCKETS) { 2313af75078fSIntel rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n", 2314af75078fSIntel socketid, lcore_id, NB_SOCKETS); 2315af75078fSIntel } 2316af75078fSIntel if (pktmbuf_pool[socketid] == NULL) { 23176f41fe75SStephen Hemminger snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); 2318af75078fSIntel pktmbuf_pool[socketid] = 2319f68aad79SIntel rte_mempool_create(s, nb_mbuf, MBUF_SIZE, MEMPOOL_CACHE_SIZE, 2320af75078fSIntel sizeof(struct rte_pktmbuf_pool_private), 2321af75078fSIntel rte_pktmbuf_pool_init, NULL, 2322af75078fSIntel rte_pktmbuf_init, NULL, 2323af75078fSIntel socketid, 0); 2324af75078fSIntel if (pktmbuf_pool[socketid] == NULL) 2325af75078fSIntel rte_exit(EXIT_FAILURE, 2326af75078fSIntel "Cannot init mbuf pool on socket %d\n", socketid); 2327af75078fSIntel else 2328af75078fSIntel printf("Allocated mbuf pool on socket %d\n", socketid); 2329af75078fSIntel 2330af75078fSIntel #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) 2331af75078fSIntel setup_lpm(socketid); 2332af75078fSIntel #else 2333af75078fSIntel setup_hash(socketid); 2334af75078fSIntel #endif 2335af75078fSIntel } 2336af75078fSIntel qconf = &lcore_conf[lcore_id]; 2337f68aad79SIntel qconf->ipv4_lookup_struct = ipv4_l3fwd_lookup_struct[socketid]; 2338f68aad79SIntel qconf->ipv6_lookup_struct = ipv6_l3fwd_lookup_struct[socketid]; 2339af75078fSIntel } 2340af75078fSIntel return 0; 2341af75078fSIntel } 2342af75078fSIntel 2343d3641ae8SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 2344d3641ae8SIntel static void 2345d3641ae8SIntel check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) 
2346d3641ae8SIntel { 2347d3641ae8SIntel #define CHECK_INTERVAL 100 /* 100ms */ 2348d3641ae8SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 2349d3641ae8SIntel uint8_t portid, count, all_ports_up, print_flag = 0; 2350d3641ae8SIntel struct rte_eth_link link; 2351d3641ae8SIntel 2352d3641ae8SIntel printf("\nChecking link status"); 2353d3641ae8SIntel fflush(stdout); 2354d3641ae8SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 2355d3641ae8SIntel all_ports_up = 1; 2356d3641ae8SIntel for (portid = 0; portid < port_num; portid++) { 2357d3641ae8SIntel if ((port_mask & (1 << portid)) == 0) 2358d3641ae8SIntel continue; 2359d3641ae8SIntel memset(&link, 0, sizeof(link)); 2360d3641ae8SIntel rte_eth_link_get_nowait(portid, &link); 2361d3641ae8SIntel /* print link status if flag set */ 2362d3641ae8SIntel if (print_flag == 1) { 2363d3641ae8SIntel if (link.link_status) 2364d3641ae8SIntel printf("Port %d Link Up - speed %u " 2365d3641ae8SIntel "Mbps - %s\n", (uint8_t)portid, 2366d3641ae8SIntel (unsigned)link.link_speed, 2367d3641ae8SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 2368d3641ae8SIntel ("full-duplex") : ("half-duplex\n")); 2369d3641ae8SIntel else 2370d3641ae8SIntel printf("Port %d Link Down\n", 2371d3641ae8SIntel (uint8_t)portid); 2372d3641ae8SIntel continue; 2373d3641ae8SIntel } 2374d3641ae8SIntel /* clear all_ports_up flag if any link down */ 2375d3641ae8SIntel if (link.link_status == 0) { 2376d3641ae8SIntel all_ports_up = 0; 2377d3641ae8SIntel break; 2378d3641ae8SIntel } 2379d3641ae8SIntel } 2380d3641ae8SIntel /* after finally printing all link status, get out */ 2381d3641ae8SIntel if (print_flag == 1) 2382d3641ae8SIntel break; 2383d3641ae8SIntel 2384d3641ae8SIntel if (all_ports_up == 0) { 2385d3641ae8SIntel printf("."); 2386d3641ae8SIntel fflush(stdout); 2387d3641ae8SIntel rte_delay_ms(CHECK_INTERVAL); 2388d3641ae8SIntel } 2389d3641ae8SIntel 2390d3641ae8SIntel /* set the print_flag if all ports up or timeout */ 2391d3641ae8SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 2392d3641ae8SIntel print_flag = 1; 2393d3641ae8SIntel printf("done\n"); 2394d3641ae8SIntel } 2395d3641ae8SIntel } 2396d3641ae8SIntel } 2397d3641ae8SIntel 2398af75078fSIntel int 2399af75078fSIntel MAIN(int argc, char **argv) 2400af75078fSIntel { 2401af75078fSIntel struct lcore_conf *qconf; 240281f7ecd9SPablo de Lara struct rte_eth_dev_info dev_info; 240381f7ecd9SPablo de Lara struct rte_eth_txconf *txconf; 2404af75078fSIntel int ret; 2405af75078fSIntel unsigned nb_ports; 2406af75078fSIntel uint16_t queueid; 2407af75078fSIntel unsigned lcore_id; 2408af75078fSIntel uint32_t n_tx_queue, nb_lcores; 2409af75078fSIntel uint8_t portid, nb_rx_queue, queue, socketid; 2410af75078fSIntel 2411af75078fSIntel /* init EAL */ 2412af75078fSIntel ret = rte_eal_init(argc, argv); 2413af75078fSIntel if (ret < 0) 2414af75078fSIntel rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); 2415af75078fSIntel argc -= ret; 2416af75078fSIntel argv += ret; 2417af75078fSIntel 2418af75078fSIntel /* parse application arguments (after the EAL ones) */ 2419af75078fSIntel ret = parse_args(argc, argv); 2420af75078fSIntel if (ret < 0) 2421af75078fSIntel rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n"); 2422af75078fSIntel 2423af75078fSIntel if (check_lcore_params() < 0) 2424af75078fSIntel rte_exit(EXIT_FAILURE, "check_lcore_params failed\n"); 2425af75078fSIntel 2426af75078fSIntel ret = init_lcore_rx_queues(); 2427af75078fSIntel if (ret < 0) 2428af75078fSIntel rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n"); 

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	if (check_port_config(nb_ports) < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");

	nb_lcores = rte_lcore_count();

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)n_tx_queue);
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)n_tx_queue, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		/*
		 * prepare dst and src MACs for each port.
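		 *
		 * The 8-byte store below places the destination MAC first:
		 * a locally administered address (ETHER_LOCAL_ADMIN_ADDR)
		 * with the port id in its last octet (portid << 40).  The
		 * port's own MAC address is then copied in right after it
		 * (bytes 6-11), so the 12-byte dst+src Ethernet header
		 * prefix for packets leaving this port is pre-built in
		 * val_eth[portid].  The optimized forwarding path presumably
		 * rewrites both addresses from this cached value in a single
		 * copy; see the forwarding code earlier in this file.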
		 */
		*(uint64_t *)(val_eth + portid) =
			ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
		ether_addr_copy(&ports_eth_addr[portid],
			(struct ether_addr *)(val_eth + portid) + 1);

		/* init memory */
		ret = init_mem(NB_MBUF);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "init_mem failed\n");

		/* init one TX queue per (lcore, port) pair */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
			fflush(stdout);

			rte_eth_dev_info_get(portid, &dev_info);
			txconf = &dev_info.default_txconf;
			if (port_conf.rxmode.jumbo_frame)
				txconf->txq_flags = 0;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						socketid, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}
		printf("\n");
	}

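	/*
	 * Note on the TX side: one TX queue was set up above for every
	 * (lcore, port) pair, and its id was recorded in
	 * lcore_conf[lcore_id].tx_queue_id[portid].  Each lcore therefore
	 * owns a private TX queue on every port and can transmit without
	 * locking; the forwarding loop (main_loop(), defined earlier in this
	 * file) is expected to look the queue id up there.
	 */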
", lcore_id ); 2516af75078fSIntel fflush(stdout); 2517af75078fSIntel /* init RX queues */ 2518af75078fSIntel for(queue = 0; queue < qconf->n_rx_queue; ++queue) { 2519af75078fSIntel portid = qconf->rx_queue_list[queue].port_id; 2520af75078fSIntel queueid = qconf->rx_queue_list[queue].queue_id; 2521af75078fSIntel 2522af75078fSIntel if (numa_on) 2523af75078fSIntel socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); 2524af75078fSIntel else 2525af75078fSIntel socketid = 0; 2526af75078fSIntel 2527af75078fSIntel printf("rxq=%d,%d,%d ", portid, queueid, socketid); 2528af75078fSIntel fflush(stdout); 2529af75078fSIntel 2530af75078fSIntel ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, 253181f7ecd9SPablo de Lara socketid, 253281f7ecd9SPablo de Lara NULL, 253381f7ecd9SPablo de Lara pktmbuf_pool[socketid]); 2534af75078fSIntel if (ret < 0) 2535af75078fSIntel rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," 2536af75078fSIntel "port=%d\n", ret, portid); 2537af75078fSIntel } 2538af75078fSIntel } 2539af75078fSIntel 2540af75078fSIntel printf("\n"); 2541af75078fSIntel 2542af75078fSIntel /* start ports */ 2543af75078fSIntel for (portid = 0; portid < nb_ports; portid++) { 2544af75078fSIntel if ((enabled_port_mask & (1 << portid)) == 0) { 2545af75078fSIntel continue; 2546af75078fSIntel } 2547af75078fSIntel /* Start device */ 2548af75078fSIntel ret = rte_eth_dev_start(portid); 2549af75078fSIntel if (ret < 0) 2550af75078fSIntel rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n", 2551af75078fSIntel ret, portid); 2552af75078fSIntel 2553af75078fSIntel /* 2554af75078fSIntel * If enabled, put device in promiscuous mode. 2555af75078fSIntel * This allows IO forwarding mode to forward packets 2556af75078fSIntel * to itself through 2 cross-connected ports of the 2557af75078fSIntel * target machine. 2558af75078fSIntel */ 2559af75078fSIntel if (promiscuous_on) 2560af75078fSIntel rte_eth_promiscuous_enable(portid); 2561af75078fSIntel } 2562af75078fSIntel 2563d3641ae8SIntel check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask); 2564d3641ae8SIntel 2565af75078fSIntel /* launch per-lcore init on every lcore */ 2566af75078fSIntel rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); 2567af75078fSIntel RTE_LCORE_FOREACH_SLAVE(lcore_id) { 2568af75078fSIntel if (rte_eal_wait_lcore(lcore_id) < 0) 2569af75078fSIntel return -1; 2570af75078fSIntel } 2571af75078fSIntel 2572af75078fSIntel return 0; 2573af75078fSIntel } 2574