xref: /dpdk/examples/ipsec-secgw/ipsec-secgw.h (revision 7e06c0de1952d3109a5b0c4779d7e7d8059c9d78)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (C) 2020 Marvell International Ltd.
3  */
4 #ifndef _IPSEC_SECGW_H_
5 #define _IPSEC_SECGW_H_
6 
7 #include <stdbool.h>
8 #include <rte_ethdev.h>
9 
/* Upper bound on RX queues a single lcore may poll. */
#define MAX_RX_QUEUE_PER_LCORE 16

/* Number of NUMA sockets supported for per-socket resources. */
#define NB_SOCKETS 4

/* Burst sizes: scalar path vs. event-vector path. */
#define MAX_PKT_BURST 32
#define MAX_PKT_BURST_VEC 256

/*
 * Capacity of the per-class packet arrays in struct traffic_type:
 * twice the larger of the two burst sizes (presumably headroom for
 * accumulating more than one burst before draining — TODO confirm).
 */
#define MAX_PKTS                                  \
	((MAX_PKT_BURST_VEC > MAX_PKT_BURST ?     \
	  MAX_PKT_BURST_VEC : MAX_PKT_BURST) * 2)

/* Application-wide log type alias. */
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
22 
/*
 * Pack eight bytes a..h into a uint64_t so that, when the value is
 * stored, byte 'a' ends up at the lowest memory address regardless of
 * host endianness: on big-endian hosts 'a' is placed in the most
 * significant byte, on little-endian hosts in the least significant.
 */
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8)  | \
	((uint64_t)(h) & 0xff))
#else
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8) | \
	((uint64_t)(a) & 0xff))
#endif
44 
/*
 * Split a host-order 32-bit IPv4 address into four bytes, most
 * significant byte first; a..d are pointers to the destinations.
 */
#define uint32_t_to_char(ip, a, b, c, d) do {\
		*a = (uint8_t)(ip >> 24 & 0xff);\
		*b = (uint8_t)(ip >> 16 & 0xff);\
		*c = (uint8_t)(ip >> 8 & 0xff);\
		*d = (uint8_t)(ip & 0xff);\
	} while (0)

/* Build a uint64_t holding a 6-byte Ethernet address (last 2 bytes zero). */
#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))

/* UDP port for NAT-Traversal ESP-in-UDP encapsulation (RFC 3948). */
#define IPSEC_NAT_T_PORT 4500
/* mbuf packet type marking ESP tunneled inside UDP. */
#define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP)
56 
/*
 * One batch of packets of a single traffic class; per-packet metadata
 * is kept in parallel arrays indexed 0..num-1.
 */
struct __rte_cache_aligned traffic_type {
	uint32_t num;			/* number of valid entries below */
	struct rte_mbuf *pkts[MAX_PKTS];	/* packet buffers */
	const uint8_t *data[MAX_PKTS];	/* per-packet data pointer — presumably into the packet headers; TODO confirm */
	void *saptr[MAX_PKTS];		/* per-packet SA pointer, if resolved — TODO confirm */
	uint32_t res[MAX_PKTS];		/* per-packet lookup/processing result — TODO confirm semantics */
};
64 
/* Packets split by classification: IPsec-bound, plain IPv4, plain IPv6. */
struct ipsec_traffic {
	struct traffic_type ipsec;
	struct traffic_type ip4;
	struct traffic_type ip6;
};
70 
/* Fields optimized for devices without burst: single-packet variant
 * of struct traffic_type (one mbuf, one data pointer, one result).
 */
struct traffic_type_nb {
	const uint8_t *data;	/* data pointer for the packet */
	struct rte_mbuf *pkt;	/* the packet buffer */
	uint32_t res;		/* processing result — TODO confirm semantics */
	uint32_t num;		/* 0 or 1: whether the entry is populated — TODO confirm */
};
78 
/* Non-burst counterpart of struct ipsec_traffic: one slot per class. */
struct ipsec_traffic_nb {
	struct traffic_type_nb ipsec;
	struct traffic_type_nb ip4;
	struct traffic_type_nb ip6;
};
84 
/* port/source ethernet addr and destination ethernet addr */
struct ethaddr_info {
	struct rte_ether_addr src, dst;	/* source (port) and destination MAC */
};
89 
/* Security Policy Database outcome counters. */
struct ipsec_spd_stats {
	uint64_t protect;	/* packets matched a PROTECT policy */
	uint64_t bypass;	/* packets matched a BYPASS policy */
	uint64_t discard;	/* packets matched a DISCARD policy */
};
95 
/* Security Association lookup counters. */
struct ipsec_sa_stats {
	uint64_t hit;	/* SA found */
	uint64_t miss;	/* no matching SA */
};
100 
/*
 * Per-lcore statistics; cache-aligned so counters of different lcores
 * do not share a cache line.
 */
struct __rte_cache_aligned ipsec_core_statistics {
	uint64_t tx;		/* packets transmitted */
	uint64_t rx;		/* packets received */
	uint64_t rx_call;	/* RX poll invocations */
	uint64_t tx_call;	/* TX invocations */
	uint64_t dropped;	/* packets dropped */
	uint64_t frag_dropped;	/* fragments dropped on reassembly failure */
	uint64_t burst_rx;	/* packets received in full-size bursts */

	/* Outbound path: SPD decisions and SA lookups. */
	struct {
		struct ipsec_spd_stats spd4;
		struct ipsec_spd_stats spd6;
		struct ipsec_sa_stats sad;
	} outbound;

	/* Inbound path: SPD decisions and SA lookups. */
	struct {
		struct ipsec_spd_stats spd4;
		struct ipsec_spd_stats spd6;
		struct ipsec_sa_stats sad;
	} inbound;

	/* IPv4 route (LPM) lookup misses. */
	struct {
		uint64_t miss;
	} lpm4;

	/* IPv6 route (LPM) lookup misses. */
	struct {
		uint64_t miss;
	} lpm6;
};
130 
/* Per-lcore statistics, indexed by rte_lcore_id(). */
extern struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];

/* Per-port source/destination Ethernet addresses. */
extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];

/* Port mask to identify the unprotected ports */
extern uint32_t unprotected_port_mask;

/* Index of SA in single mode */
extern uint32_t single_sa_idx;
/* Non-zero when single-SA mode is enabled — TODO confirm against parser. */
extern uint32_t single_sa;

/* Set asynchronously to ask worker loops to exit — presumably from a
 * signal handler; TODO confirm.
 */
extern volatile bool force_quit;

/* Number of mbufs in the packet pool(s). */
extern uint32_t nb_bufs_in_pool;

extern bool per_port_pool;			/* one mbuf pool per port instead of per socket */
extern int ip_reassembly_dynfield_offset;	/* mbuf dynfield offset; < 0 when unregistered */
extern uint64_t ip_reassembly_dynflag;		/* mbuf dynflag bit; 0 when unregistered */
extern uint32_t mtu_size;			/* configured MTU */
extern uint32_t frag_tbl_sz;			/* fragmentation table size */
extern uint32_t qp_desc_nb;			/* crypto queue-pair descriptor count */

/* Worker mode/capability flags (bitmask stored in wrkr_flags). */
#define SS_F		(1U << 0)	/* Single SA mode */
#define INL_PR_F	(1U << 1)	/* Inline Protocol */
#define INL_CR_F	(1U << 2)	/* Inline Crypto */
#define LA_PR_F		(1U << 3)	/* Lookaside Protocol */
#define LA_ANY_F	(1U << 4)	/* Lookaside Any */
#define MAX_F		(LA_ANY_F << 1)	/* first unused bit / iteration bound */

extern uint16_t wrkr_flags;
161 
162 static inline uint8_t
is_unprotected_port(uint16_t port_id)163 is_unprotected_port(uint16_t port_id)
164 {
165 	return unprotected_port_mask & (1 << port_id);
166 }
167 
168 static inline void
core_stats_update_rx(int n)169 core_stats_update_rx(int n)
170 {
171 	int lcore_id = rte_lcore_id();
172 	core_statistics[lcore_id].rx += n;
173 	core_statistics[lcore_id].rx_call++;
174 	if (n == MAX_PKT_BURST)
175 		core_statistics[lcore_id].burst_rx += n;
176 }
177 
178 static inline void
core_stats_update_tx(int n)179 core_stats_update_tx(int n)
180 {
181 	int lcore_id = rte_lcore_id();
182 	core_statistics[lcore_id].tx += n;
183 	core_statistics[lcore_id].tx_call++;
184 }
185 
186 static inline void
core_stats_update_drop(int n)187 core_stats_update_drop(int n)
188 {
189 	int lcore_id = rte_lcore_id();
190 	core_statistics[lcore_id].dropped += n;
191 }
192 
193 static inline void
core_stats_update_frag_drop(int n)194 core_stats_update_frag_drop(int n)
195 {
196 	int lcore_id = rte_lcore_id();
197 	core_statistics[lcore_id].frag_dropped += n;
198 }
199 
200 static inline int
is_ip_reassembly_incomplete(struct rte_mbuf * mbuf)201 is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
202 {
203 	if (ip_reassembly_dynflag == 0)
204 		return -1;
205 	return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
206 }
207 
/*
 * Free a packet whose IP reassembly failed. When the reassembly
 * dynfield is registered, walk the fragment chain stored in the
 * dynfield and free every fragment, counting them as frag drops;
 * otherwise free the single mbuf and count one plain drop.
 */
static inline void
free_reassembly_fail_pkt(struct rte_mbuf *mb)
{
	if (ip_reassembly_dynfield_offset >= 0) {
		rte_eth_ip_reassembly_dynfield_t dynfield;
		uint32_t frag_cnt = 0;

		while (mb) {
			/* Copy the chain link out BEFORE freeing the mbuf
			 * it is stored in — order matters here.
			 */
			dynfield = *RTE_MBUF_DYNFIELD(mb,
					ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
			rte_pktmbuf_free(mb);
			mb = dynfield.next_frag;
			frag_cnt++;
		}

		core_stats_update_frag_drop(frag_cnt);
	} else {
		/* Dynfield not registered: no fragment chain to follow. */
		rte_pktmbuf_free(mb);
		core_stats_update_drop(1);
	}
}
230 
231 /* helper routine to free bulk of packets */
232 static __rte_always_inline void
free_pkts(struct rte_mbuf * mb[],const uint32_t n)233 free_pkts(struct rte_mbuf *mb[], const uint32_t n)
234 {
235 	n == 1 ? rte_pktmbuf_free(mb[0]) : rte_pktmbuf_free_bulk(mb, n);
236 	core_stats_update_drop(n);
237 }
238 
239 #endif /* _IPSEC_SECGW_H_ */
240