/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef __SAD_H__
#define __SAD_H__

#include <rte_ip.h>
#include <rte_ipsec_sad.h>

#include "ipsec.h"

#define SA_CACHE_SZ	128
#define SPI2IDX(spi, mask)	((spi) & (mask))

/*
 * Per-lcore, direct-mapped cache of the most recently matched SA for each
 * SPI2IDX() slot. A mask of 0 means the cache is disabled.
 */
struct ipsec_sad_cache {
	struct ipsec_sa **v4;
	struct ipsec_sa **v6;
	uint32_t mask;
};

RTE_DECLARE_PER_LCORE(struct ipsec_sad_cache, sad_cache);

int ipsec_sad_create(const char *name, struct ipsec_sad *sad,
	int socket_id, struct ipsec_sa_cnt *sa_cnt);

int ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa);

int ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent);

/*
 * Check that the SA's tunnel endpoints (or transport mode) match the
 * packet's outer IP header. Only the pointer for the relevant address
 * family is dereferenced, so the other one may be NULL.
 */
static inline int
cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
	struct rte_ipv6_hdr *ipv6)
{
	int sa_type = WITHOUT_TRANSPORT_VERSION(sa->flags);

	if ((sa_type == TRANSPORT) ||
			/* IPv4 check */
			(is_v4 && (sa_type == IP4_TUNNEL) &&
			(sa->src.ip.ip4 == ipv4->src_addr) &&
			(sa->dst.ip.ip4 == ipv4->dst_addr)) ||
			/* IPv6 check */
			(!is_v4 && (sa_type == IP6_TUNNEL) &&
			(rte_ipv6_addr_eq(&sa->src.ip.ip6, &ipv6->src_addr)) &&
			(rte_ipv6_addr_eq(&sa->dst.ip.ip6, &ipv6->dst_addr))))
		return 1;

	return 0;
}

static inline void
sa_cache_update(struct ipsec_sa **sa_cache, struct ipsec_sa *sa, uint32_t mask)
{
	uint32_t cache_idx;

	/* SAD cache is disabled */
	if (mask == 0)
		return;

	cache_idx = SPI2IDX(sa->spi, mask);
	sa_cache[cache_idx] = sa;
}
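/*
 * Usage sketch (illustrative, not part of this header): each worker lcore
 * is assumed to size its cache once at startup, before the first call to
 * sad_lookup() below, e.g.:
 *
 *	if (ipsec_sad_lcore_cache_init(SA_CACHE_SZ) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to init SAD cache\n");
 *
 * If the cache is never initialized, cache->mask stays 0, sa_cache_update()
 * above becomes a no-op, and every packet takes the full
 * rte_ipsec_sad_lookup() path.
 */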
static inline void
sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
	void *sa[], uint16_t nb_pkts)
{
	uint32_t i;
	uint32_t nb_v4 = 0, nb_v6 = 0;
	struct rte_esp_hdr *esp;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_ipsec_sadv4_key v4[nb_pkts];
	struct rte_ipsec_sadv6_key v6[nb_pkts];
	int v4_idxes[nb_pkts];
	int v6_idxes[nb_pkts];
	const union rte_ipsec_sad_key *keys_v4[nb_pkts];
	const union rte_ipsec_sad_key *keys_v6[nb_pkts];
	void *v4_res[nb_pkts];
	void *v6_res[nb_pkts];
	uint32_t spi, cache_idx;
	struct ipsec_sad_cache *cache;
	struct ipsec_sa *cached_sa;
	uint16_t udp_hdr_len;
	int is_ipv4;

	cache = &RTE_PER_LCORE(sad_cache);

	/* split received packets by address family into two arrays */
	for (i = 0; i < nb_pkts; i++) {
		ipv4 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *);
		ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);

		/*
		 * Reset the UDP header length for every packet: only
		 * UDP-encapsulated ESP packets carry the extra header,
		 * and a stale value must not leak into the next packet.
		 */
		udp_hdr_len = 0;
		if ((pkts[i]->packet_type &
				(RTE_PTYPE_TUNNEL_MASK | RTE_PTYPE_L4_MASK)) ==
				MBUF_PTYPE_TUNNEL_ESP_IN_UDP)
			udp_hdr_len = sizeof(struct rte_udp_hdr);
		esp = rte_pktmbuf_mtod_offset(pkts[i], struct rte_esp_hdr *,
				pkts[i]->l3_len + udp_hdr_len);

		is_ipv4 = pkts[i]->packet_type & RTE_PTYPE_L3_IPV4;
		spi = rte_be_to_cpu_32(esp->spi);
		cache_idx = SPI2IDX(spi, cache->mask);

		if (is_ipv4) {
			cached_sa = (cache->mask != 0) ?
				cache->v4[cache_idx] : NULL;
			/* check SAD cache entry */
			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
				if (cmp_sa_key(cached_sa, 1, ipv4, ipv6)) {
					/* cache hit */
					sa[i] = cached_sa;
					continue;
				}
			}
			/*
			 * cache miss
			 * preparing sad key to proceed with sad lookup
			 */
			v4[nb_v4].spi = esp->spi;
			v4[nb_v4].dip = ipv4->dst_addr;
			v4[nb_v4].sip = ipv4->src_addr;
			keys_v4[nb_v4] = (const union rte_ipsec_sad_key *)
						&v4[nb_v4];
			v4_idxes[nb_v4++] = i;
		} else {
			cached_sa = (cache->mask != 0) ?
				cache->v6[cache_idx] : NULL;
			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
				if (cmp_sa_key(cached_sa, 0, ipv4, ipv6)) {
					sa[i] = cached_sa;
					continue;
				}
			}
			v6[nb_v6].spi = esp->spi;
			v6[nb_v6].dip = ipv6->dst_addr;
			v6[nb_v6].sip = ipv6->src_addr;
			keys_v6[nb_v6] = (const union rte_ipsec_sad_key *)
						&v6[nb_v6];
			v6_idxes[nb_v6++] = i;
		}
	}

	if (nb_v4 != 0)
		rte_ipsec_sad_lookup(sad->sad_v4, keys_v4, v4_res, nb_v4);
	if (nb_v6 != 0)
		rte_ipsec_sad_lookup(sad->sad_v6, keys_v6, v6_res, nb_v6);

	/*
	 * Verify each lookup result against the packet headers and refresh
	 * the per-lcore cache on every successful match.
	 */
	for (i = 0; i < nb_v4; i++) {
		ipv4 = rte_pktmbuf_mtod(pkts[v4_idxes[i]],
			struct rte_ipv4_hdr *);
		if ((v4_res[i] != NULL) &&
				(cmp_sa_key(v4_res[i], 1, ipv4, NULL))) {
			sa[v4_idxes[i]] = v4_res[i];
			sa_cache_update(cache->v4,
				(struct ipsec_sa *)v4_res[i], cache->mask);
		} else
			sa[v4_idxes[i]] = NULL;
	}
	for (i = 0; i < nb_v6; i++) {
		ipv6 = rte_pktmbuf_mtod(pkts[v6_idxes[i]],
			struct rte_ipv6_hdr *);
		if ((v6_res[i] != NULL) &&
				(cmp_sa_key(v6_res[i], 0, NULL, ipv6))) {
			sa[v6_idxes[i]] = v6_res[i];
			sa_cache_update(cache->v6,
				(struct ipsec_sa *)v6_res[i], cache->mask);
		} else
			sa[v6_idxes[i]] = NULL;
	}
}

#endif /* __SAD_H__ */
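/*
 * Datapath usage sketch (hypothetical caller; MAX_PKT_BURST, port_id and
 * queue_id are assumptions from the surrounding ipsec-secgw code, not
 * defined in this header):
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	void *sa[MAX_PKT_BURST];
 *	uint16_t nb_rx;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, MAX_PKT_BURST);
 *	sad_lookup(&sad, pkts, sa, nb_rx);
 *
 * On return, sa[i] == NULL means no inbound SA matched pkts[i].
 */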