/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_errno.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "sad.h"

RTE_DEFINE_PER_LCORE(struct ipsec_sad_cache, sad_cache) = {
	.v4 = NULL,
	.v6 = NULL,
	.mask = 0,
};

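/*
 * Add an SA to the SAD. Entries are keyed by SPI only.
 * Returns -EEXIST if an SA with the same SPI is already present
 * in the corresponding (IPv4/IPv6) SAD.
 */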
int
ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa)
{
	int ret;
	void *tmp = NULL;
	union rte_ipsec_sad_key key = { {0} };
	const union rte_ipsec_sad_key *lookup_key[1];

	/* spi field is common for ipv4 and ipv6 key types */
	key.v4.spi = rte_cpu_to_be_32(sa->spi);
	lookup_key[0] = &key;
	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		rte_ipsec_sad_lookup(sad->sad_v4, lookup_key, &tmp, 1);
		if (tmp != NULL)
			return -EEXIST;

		ret = rte_ipsec_sad_add(sad->sad_v4, &key,
			RTE_IPSEC_SAD_SPI_ONLY, sa);
		if (ret != 0)
			return ret;
		break;
	case IP6_TUNNEL:
		rte_ipsec_sad_lookup(sad->sad_v6, lookup_key, &tmp, 1);
		if (tmp != NULL)
			return -EEXIST;

		ret = rte_ipsec_sad_add(sad->sad_v6, &key,
			RTE_IPSEC_SAD_SPI_ONLY, sa);
		if (ret != 0)
			return ret;
		break;
	case TRANSPORT:
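		/*
		 * Transport mode: the address family is not encoded in the
		 * SA flags, so add the SA to the v4 and/or v6 SAD depending
		 * on which SP database contains a rule with this SPI.
		 */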
		if (sp4_spi_present(sa->spi, 1, NULL, NULL) >= 0) {
			rte_ipsec_sad_lookup(sad->sad_v4, lookup_key, &tmp, 1);
			if (tmp != NULL)
				return -EEXIST;

			ret = rte_ipsec_sad_add(sad->sad_v4, &key,
				RTE_IPSEC_SAD_SPI_ONLY, sa);
			if (ret != 0)
				return ret;
		}
		if (sp6_spi_present(sa->spi, 1, NULL, NULL) >= 0) {
			rte_ipsec_sad_lookup(sad->sad_v6, lookup_key, &tmp, 1);
			if (tmp != NULL)
				return -EEXIST;

			ret = rte_ipsec_sad_add(sad->sad_v6, &key,
				RTE_IPSEC_SAD_SPI_ONLY, sa);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

/*
 * Initialize the per-lcore SAD cache.
 * Must be called by every processing lcore.
 */
int
ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent)
{
	uint32_t cache_elem;
	size_t cache_mem_sz;
	struct ipsec_sad_cache *cache;

	cache = &RTE_PER_LCORE(sad_cache);

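	/*
	 * Round the cache size up to a power of two so that slots can
	 * be selected with a simple bit mask (cache->mask).
	 */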
	cache_elem = rte_align32pow2(nb_cache_ent);
	cache_mem_sz = sizeof(struct ipsec_sa *) * cache_elem;

	if (cache_mem_sz != 0) {
		cache->v4 = rte_zmalloc_socket(NULL, cache_mem_sz,
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (cache->v4 == NULL)
			return -rte_errno;

		cache->v6 = rte_zmalloc_socket(NULL, cache_mem_sz,
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (cache->v6 == NULL)
			return -rte_errno;

		cache->mask = cache_elem - 1;
	}

	return 0;
}

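/*
 * Create the IPv4 and IPv6 SAD tables for the given SA counts.
 * Each table is keyed by SPI only and sized with 25% headroom over
 * the configured number of SAs.
 */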
int
ipsec_sad_create(const char *name, struct ipsec_sad *sad,
	int socket_id, struct ipsec_sa_cnt *sa_cnt)
{
	int ret;
	struct rte_ipsec_sad_conf sad_conf;
	char sad_name[RTE_IPSEC_SAD_NAMESIZE];

	if ((name == NULL) || (sad == NULL) || (sa_cnt == NULL))
		return -EINVAL;

	ret = snprintf(sad_name, RTE_IPSEC_SAD_NAMESIZE, "%s_v4", name);
	if (ret < 0 || ret >= RTE_IPSEC_SAD_NAMESIZE)
		return -ENAMETOOLONG;

	sad_conf.socket_id = socket_id;
	sad_conf.flags = 0;
	/* Allocate an extra 25% on top of the required number of entries */
	sad_conf.max_sa[RTE_IPSEC_SAD_SPI_ONLY] = sa_cnt->nb_v4 * 5 / 4;
	sad_conf.max_sa[RTE_IPSEC_SAD_SPI_DIP] = 0;
	sad_conf.max_sa[RTE_IPSEC_SAD_SPI_DIP_SIP] = 0;

	if (sa_cnt->nb_v4 != 0) {
		sad->sad_v4 = rte_ipsec_sad_create(sad_name, &sad_conf);
		if (sad->sad_v4 == NULL)
			return -rte_errno;
	}

	ret = snprintf(sad_name, RTE_IPSEC_SAD_NAMESIZE, "%s_v6", name);
	if (ret < 0 || ret >= RTE_IPSEC_SAD_NAMESIZE)
		return -ENAMETOOLONG;
	sad_conf.flags = RTE_IPSEC_SAD_FLAG_IPV6;
	sad_conf.max_sa[RTE_IPSEC_SAD_SPI_ONLY] = sa_cnt->nb_v6 * 5 / 4;

	if (sa_cnt->nb_v6 != 0) {
		sad->sad_v6 = rte_ipsec_sad_create(sad_name, &sad_conf);
		if (sad->sad_v6 == NULL)
			return -rte_errno;
	}

	return 0;
}