/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef __SAD_H__
#define __SAD_H__

#include <string.h>

#include <rte_byteorder.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_ipsec_sad.h>
#include <rte_mbuf.h>
#include <rte_per_lcore.h>

#include "ipsec.h"	/* struct ipsec_sa, struct ipsec_sad, SA type flags */

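/*
 * Default SAD cache size and the SPI-to-slot mapping.
 * SPI2IDX() expects mask to be "cache size - 1", so the cache size must
 * be a power of two; a mask of 0 effectively disables the cache.
 */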
#define SA_CACHE_SZ	128
#define SPI2IDX(spi, mask)	((spi) & (mask))

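/*
 * Per-lcore, direct-mapped cache of recently matched SAs, indexed by SPI.
 * v4/v6 hold one slot per cache entry for each address family;
 * mask is "number of entries - 1", and a mask of 0 means the cache
 * is disabled.
 */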
struct ipsec_sad_cache {
	struct ipsec_sa **v4;
	struct ipsec_sa **v6;
	uint32_t mask;
};

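/* each worker lcore owns a private instance of the SAD cache */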
RTE_DECLARE_PER_LCORE(struct ipsec_sad_cache, sad_cache);

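/*
 * Create the IPv4 and IPv6 SAD lookup tables on the given socket,
 * sized according to the SA counters collected from the configuration.
 * Returns 0 on success, a negative value otherwise.
 */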
int ipsec_sad_create(const char *name, struct ipsec_sad *sad,
	int socket_id, struct ipsec_sa_cnt *sa_cnt);

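/* add an SA entry to the appropriate (IPv4/IPv6) SAD table */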
int ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa);

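/*
 * Initialize the calling lcore's SAD cache with nb_cache_ent entries
 * per address family; intended to be called once on each worker lcore.
 */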
int ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent);
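
/*
 * Minimal usage sketch (illustrative only; MAX_PKT_BURST, "pkts" and
 * "nb_rx" are assumed to come from the caller's RX path, and "sa_cnt"
 * is assumed to be filled while parsing the SA configuration).
 * Create the SAD once at init time, initialize the cache on every
 * worker lcore, then resolve inbound ESP bursts:
 *
 *	struct ipsec_sad sad;
 *	struct ipsec_sa_cnt sa_cnt;
 *	void *sa_res[MAX_PKT_BURST];
 *
 *	if (ipsec_sad_create("sad", &sad, rte_socket_id(), &sa_cnt) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to create SAD\n");
 *	ipsec_sad_lcore_cache_init(SA_CACHE_SZ);
 *	...
 *	sad_lookup(&sad, pkts, sa_res, nb_rx);
 */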
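/*
 * Check that an SA matches the packet's outer IP header.
 * Transport mode SAs always match; tunnel mode SAs must carry the same
 * source/destination addresses as the packet. Only the header selected
 * by is_v4 is dereferenced, so the other pointer may be NULL.
 */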
static inline int
cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
	struct rte_ipv6_hdr *ipv6)
{
	int sa_type = WITHOUT_TRANSPORT_VERSION(sa->flags);

	if ((sa_type == TRANSPORT) ||
			/* IPv4 check */
			(is_v4 && (sa_type == IP4_TUNNEL) &&
			(sa->src.ip.ip4 == ipv4->src_addr) &&
			(sa->dst.ip.ip4 == ipv4->dst_addr)) ||
			/* IPv6 check */
			(!is_v4 && (sa_type == IP6_TUNNEL) &&
			(!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) &&
			(!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16))))
		return 1;

	return 0;
}

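/*
 * Remember an SA in its direct-mapped cache slot (derived from the SPI),
 * evicting any previous occupant of that slot.
 */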
static inline void
sa_cache_update(struct ipsec_sa **sa_cache, struct ipsec_sa *sa, uint32_t mask)
{
	uint32_t cache_idx;

	/* SAD cache is disabled */
	if (mask == 0)
		return;

	cache_idx = SPI2IDX(sa->spi, mask);
	sa_cache[cache_idx] = sa;
}

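/*
 * Resolve a burst of inbound ESP packets to their SAs.
 * Each packet is first checked against the per-lcore SAD cache; misses
 * are batched by address family and resolved via rte_ipsec_sad_lookup().
 * On return, sa[i] points to the matching SA, or is NULL if none matched.
 */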
static inline void
sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
	void *sa[], uint16_t nb_pkts)
{
	uint32_t i;
	uint32_t nb_v4 = 0, nb_v6 = 0;
	struct rte_esp_hdr *esp;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_ipsec_sadv4_key	v4[nb_pkts];
	struct rte_ipsec_sadv6_key	v6[nb_pkts];
	int v4_idxes[nb_pkts];
	int v6_idxes[nb_pkts];
	const union rte_ipsec_sad_key	*keys_v4[nb_pkts];
	const union rte_ipsec_sad_key	*keys_v6[nb_pkts];
	void *v4_res[nb_pkts];
	void *v6_res[nb_pkts];
	uint32_t spi, cache_idx;
	struct ipsec_sad_cache *cache;
	struct ipsec_sa *cached_sa;
	int is_ipv4;

	cache = &RTE_PER_LCORE(sad_cache);

	/* split received packets by address family into two arrays */
	for (i = 0; i < nb_pkts; i++) {
		ipv4 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *);
		ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);
		/* the L2 header is expected to be stripped by this point,
		 * so the ESP header sits at offset l3_len
		 */
		esp = rte_pktmbuf_mtod_offset(pkts[i], struct rte_esp_hdr *,
				pkts[i]->l3_len);

		is_ipv4 = pkts[i]->packet_type & RTE_PTYPE_L3_IPV4;
		spi = rte_be_to_cpu_32(esp->spi);
		cache_idx = SPI2IDX(spi, cache->mask);

		if (is_ipv4) {
			cached_sa = (cache->mask != 0) ?
				cache->v4[cache_idx] : NULL;
			/* check SAD cache entry */
			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
				if (cmp_sa_key(cached_sa, 1, ipv4, ipv6)) {
					/* cache hit */
					sa[i] = cached_sa;
					continue;
				}
			}
			/*
			 * cache miss:
			 * prepare the SAD key to proceed with SAD lookup
			 */
			v4[nb_v4].spi = esp->spi;
			v4[nb_v4].dip = ipv4->dst_addr;
			v4[nb_v4].sip = ipv4->src_addr;
			keys_v4[nb_v4] = (const union rte_ipsec_sad_key *)
						&v4[nb_v4];
			v4_idxes[nb_v4++] = i;
		} else {
			cached_sa = (cache->mask != 0) ?
				cache->v6[cache_idx] : NULL;
			/* check SAD cache entry */
			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
				if (cmp_sa_key(cached_sa, 0, ipv4, ipv6)) {
					/* cache hit */
					sa[i] = cached_sa;
					continue;
				}
			}
			/* cache miss: prepare the IPv6 SAD key */
			v6[nb_v6].spi = esp->spi;
			memcpy(v6[nb_v6].dip, ipv6->dst_addr,
					sizeof(ipv6->dst_addr));
			memcpy(v6[nb_v6].sip, ipv6->src_addr,
					sizeof(ipv6->src_addr));
			keys_v6[nb_v6] = (const union rte_ipsec_sad_key *)
						&v6[nb_v6];
			v6_idxes[nb_v6++] = i;
		}
	}

	/* batched SAD lookups for all cache misses */
	if (nb_v4 != 0)
		rte_ipsec_sad_lookup(sad->sad_v4, keys_v4, v4_res, nb_v4);
	if (nb_v6 != 0)
		rte_ipsec_sad_lookup(sad->sad_v6, keys_v6, v6_res, nb_v6);

	/* map IPv4 results back to packet order, updating the cache */
	for (i = 0; i < nb_v4; i++) {
		ipv4 = rte_pktmbuf_mtod(pkts[v4_idxes[i]],
			struct rte_ipv4_hdr *);
		if ((v4_res[i] != NULL) &&
				(cmp_sa_key(v4_res[i], 1, ipv4, NULL))) {
			sa[v4_idxes[i]] = v4_res[i];
			sa_cache_update(cache->v4, (struct ipsec_sa *)v4_res[i],
				cache->mask);
		} else
			sa[v4_idxes[i]] = NULL;
	}
	/* map IPv6 results back to packet order, updating the cache */
	for (i = 0; i < nb_v6; i++) {
		ipv6 = rte_pktmbuf_mtod(pkts[v6_idxes[i]],
			struct rte_ipv6_hdr *);
		if ((v6_res[i] != NULL) &&
				(cmp_sa_key(v6_res[i], 0, NULL, ipv6))) {
			sa[v6_idxes[i]] = v6_res[i];
			sa_cache_update(cache->v6, (struct ipsec_sa *)v6_res[i],
				cache->mask);
		} else
			sa[v6_idxes[i]] = NULL;
	}
}

#endif /* __SAD_H__ */