xref: /dpdk/examples/l3fwd/l3fwd_em.h (revision 4b01cabfb09b3284826c0134a5f1356c59dea24b)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#ifndef __L3FWD_EM_H__
#define __L3FWD_EM_H__

#include <rte_common.h>

10 static __rte_always_inline uint16_t
l3fwd_em_handle_ipv4(struct rte_mbuf * m,uint16_t portid,struct rte_ether_hdr * eth_hdr,struct lcore_conf * qconf)11 l3fwd_em_handle_ipv4(struct rte_mbuf *m, uint16_t portid,
12 		     struct rte_ether_hdr *eth_hdr, struct lcore_conf *qconf)
13 {
14 	struct rte_ipv4_hdr *ipv4_hdr;
15 	uint16_t dst_port;
16 
17 	/* Handle IPv4 headers.*/
18 	ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
19 			sizeof(struct rte_ether_hdr));
20 
21 #ifdef DO_RFC_1812_CHECKS
22 	/* Check to make sure the packet is valid (RFC1812) */
23 	if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len, m->ol_flags) < 0) {
24 		rte_pktmbuf_free(m);
25 		return BAD_PORT;
26 	}
27 #endif
28 	dst_port = em_get_ipv4_dst_port(ipv4_hdr, portid,
29 			qconf->ipv4_lookup_struct);
30 
31 	if (dst_port >= RTE_MAX_ETHPORTS ||
32 			(enabled_port_mask & 1 << dst_port) == 0)
33 		dst_port = portid;
34 
35 #ifdef DO_RFC_1812_CHECKS
36 	/* Update time to live and header checksum */
37 	--(ipv4_hdr->time_to_live);
38 	++(ipv4_hdr->hdr_checksum);
39 #endif
40 	/* dst addr */
41 	*(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[dst_port];
42 
43 	/* src addr */
44 	rte_ether_addr_copy(&ports_eth_addr[dst_port],
45 			&eth_hdr->src_addr);
46 
47 	return dst_port;
48 }
49 
50 static __rte_always_inline uint16_t
l3fwd_em_handle_ipv6(struct rte_mbuf * m,uint16_t portid,struct rte_ether_hdr * eth_hdr,struct lcore_conf * qconf)51 l3fwd_em_handle_ipv6(struct rte_mbuf *m, uint16_t portid,
52 		struct rte_ether_hdr *eth_hdr, struct lcore_conf *qconf)
53 {
54 	/* Handle IPv6 headers.*/
55 	struct rte_ipv6_hdr *ipv6_hdr;
56 	uint16_t dst_port;
57 
58 	ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
59 			sizeof(struct rte_ether_hdr));
60 
61 	dst_port = em_get_ipv6_dst_port(ipv6_hdr, portid,
62 			qconf->ipv6_lookup_struct);
63 
64 	if (dst_port >= RTE_MAX_ETHPORTS ||
65 			(enabled_port_mask & 1 << dst_port) == 0)
66 		dst_port = portid;
67 
68 	/* dst addr */
69 	*(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[dst_port];
70 
71 	/* src addr */
72 	rte_ether_addr_copy(&ports_eth_addr[dst_port],
73 			&eth_hdr->src_addr);
74 
75 	return dst_port;
76 }
77 
78 static __rte_always_inline void
l3fwd_em_simple_forward(struct rte_mbuf * m,uint16_t portid,struct lcore_conf * qconf)79 l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
80 		struct lcore_conf *qconf)
81 {
82 	struct rte_ether_hdr *eth_hdr;
83 	uint16_t dst_port;
84 	uint32_t tcp_or_udp;
85 	uint32_t l3_ptypes;
86 
87 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
88 	tcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
89 	l3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;
90 
91 	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
92 		dst_port = l3fwd_em_handle_ipv4(m, portid, eth_hdr, qconf);
93 		send_single_packet(qconf, m, dst_port);
94 	} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
95 		dst_port = l3fwd_em_handle_ipv6(m, portid, eth_hdr, qconf);
96 		send_single_packet(qconf, m, dst_port);
97 	} else {
98 		/* Free the mbuf that contains non-IPV4/IPV6 packet */
99 		rte_pktmbuf_free(m);
100 	}
101 }
102 
103 static __rte_always_inline uint16_t
l3fwd_em_simple_process(struct rte_mbuf * m,struct lcore_conf * qconf)104 l3fwd_em_simple_process(struct rte_mbuf *m, struct lcore_conf *qconf)
105 {
106 	struct rte_ether_hdr *eth_hdr;
107 	uint32_t tcp_or_udp;
108 	uint32_t l3_ptypes;
109 
110 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
111 	tcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
112 	l3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;
113 
114 	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4))
115 		m->port = l3fwd_em_handle_ipv4(m, m->port, eth_hdr, qconf);
116 	else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6))
117 		m->port = l3fwd_em_handle_ipv6(m, m->port, eth_hdr, qconf);
118 	else
119 		m->port = BAD_PORT;
120 
121 	return m->port;
122 }
123 
124 /*
125  * Buffer non-optimized handling of packets, invoked
126  * from main_loop.
127  */
128 static inline void
l3fwd_em_no_opt_send_packets(int nb_rx,struct rte_mbuf ** pkts_burst,uint16_t portid,struct lcore_conf * qconf)129 l3fwd_em_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
130 			uint16_t portid, struct lcore_conf *qconf)
131 {
132 	int32_t j;
133 
134 	/* Prefetch first packets */
135 	for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++)
136 		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], void *));
137 
138 	/*
139 	 * Prefetch and forward already prefetched
140 	 * packets.
141 	 */
142 	for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
143 		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
144 				j + PREFETCH_OFFSET], void *));
145 		l3fwd_em_simple_forward(pkts_burst[j], portid, qconf);
146 	}
147 
148 	/* Forward remaining prefetched packets */
149 	for (; j < nb_rx; j++)
150 		l3fwd_em_simple_forward(pkts_burst[j], portid, qconf);
151 }
152 
153 /*
154  * Buffer non-optimized handling of events, invoked
155  * from main_loop.
156  */
157 static inline void
l3fwd_em_no_opt_process_events(int nb_rx,struct rte_event ** events,struct lcore_conf * qconf)158 l3fwd_em_no_opt_process_events(int nb_rx, struct rte_event **events,
159 			       struct lcore_conf *qconf)
160 {
161 	int32_t j;
162 
163 	/* Prefetch first packets */
164 	for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++)
165 		rte_prefetch0(rte_pktmbuf_mtod(events[j]->mbuf, void *));
166 
167 	/*
168 	 * Prefetch and forward already prefetched
169 	 * packets.
170 	 */
171 	for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
172 		rte_prefetch0(rte_pktmbuf_mtod(events[
173 				j + PREFETCH_OFFSET]->mbuf, void *));
174 		l3fwd_em_simple_process(events[j]->mbuf, qconf);
175 	}
176 
177 	/* Forward remaining prefetched packets */
178 	for (; j < nb_rx; j++)
179 		l3fwd_em_simple_process(events[j]->mbuf, qconf);
180 }
181 
182 static inline void
l3fwd_em_no_opt_process_event_vector(struct rte_event_vector * vec,struct lcore_conf * qconf,uint16_t * dst_ports)183 l3fwd_em_no_opt_process_event_vector(struct rte_event_vector *vec,
184 				     struct lcore_conf *qconf,
185 				     uint16_t *dst_ports)
186 {
187 	struct rte_mbuf **mbufs = vec->mbufs;
188 	int32_t i;
189 
190 	/* Prefetch first packets */
191 	for (i = 0; i < PREFETCH_OFFSET && i < vec->nb_elem; i++)
192 		rte_prefetch0(rte_pktmbuf_mtod(mbufs[i], void *));
193 
194 	/*
195 	 * Prefetch and forward already prefetched packets.
196 	 */
197 	for (i = 0; i < (vec->nb_elem - PREFETCH_OFFSET); i++) {
198 		rte_prefetch0(
199 			rte_pktmbuf_mtod(mbufs[i + PREFETCH_OFFSET], void *));
200 		dst_ports[i] = l3fwd_em_simple_process(mbufs[i], qconf);
201 	}
202 
203 	/* Forward remaining prefetched packets */
204 	for (; i < vec->nb_elem; i++)
205 		dst_ports[i] = l3fwd_em_simple_process(mbufs[i], qconf);
206 
207 	process_event_vector(vec, dst_ports);
208 }
209 
#endif /* __L3FWD_EM_H__ */