xref: /dpdk/examples/ipsec-secgw/ipsec_process.c (revision 88948ff31f57618a74c8985c59e332676995b438)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

/* helper routine to free bulk of crypto-ops and related packets */
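/*
 * note: in this app each crypto-op lives in the private area of its
 * source mbuf (see ipsec_prepare_crypto_group() below), so freeing
 * the mbuf releases the crypto-op as well.
 */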
static inline void
free_cops(struct rte_crypto_op *cop[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		free_pkts(&cop[i]->sym->m_src, 1);
}

/* helper routine to enqueue bulk of crypto ops */
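/*
 * crypto-ops are staged in the per queue-pair buffer (cqp->buf) and
 * flushed to the PMD in full bursts; a sufficiently large burst that
 * arrives while the buffer is empty bypasses the staging step.
 * Ops that the PMD does not accept are dropped and their packets freed.
 */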
static inline void
enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, k, len, n;

	len = cqp->len;

	/*
	 * if cqp is empty and we have enough ops,
	 * then queue them to the PMD straight away.
	 */
	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
		cqp->in_flight += n;
		free_cops(cop + n, num - n);
		return;
	}

	k = 0;

	do {
		n = RTE_DIM(cqp->buf) - len;
		n = RTE_MIN(num - k, n);

		/* put packets into cqp */
		for (i = 0; i != n; i++)
			cqp->buf[len + i] = cop[k + i];

		len += n;
		k += n;

		/* if cqp is full, then enqueue crypto-ops to the PMD */
		if (len == RTE_DIM(cqp->buf)) {
			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
					cqp->buf, len);
			cqp->in_flight += n;
			free_cops(cqp->buf + n, len - n);
			len = 0;
		}

	} while (k != num);

	cqp->len = len;
}

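/*
 * check that the session required for the given action type
 * has been initialised (-ENOENT if it is missing)
 */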
static inline int
check_ipsec_session(const struct rte_ipsec_session *ss)
{
	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
			ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		if (ss->crypto.ses == NULL)
			return -ENOENT;
	} else if (ss->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
		if (ss->security.ses == NULL)
			return -ENOENT;
	} else
		RTE_ASSERT(0);
	return 0;
}

/*
 * group input packets by the SA they belong to.
 */
static uint32_t
sa_group(void *sa_ptr[], struct rte_mbuf *pkts[],
	struct rte_ipsec_group grp[], uint32_t num)
{
	uint32_t i, n, spi;
	void *sa;
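	/*
	 * nosa is the address of a local variable, so it can never be
	 * equal to a real SA pointer - use it as a "no group open yet"
	 * sentinel
	 */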
	void * const nosa = &spi;

	sa = nosa;
	grp[0].m = pkts;
	for (i = 0, n = 0; i != num; i++) {

		if (sa != sa_ptr[i]) {
			grp[n].cnt = pkts + i - grp[n].m;
			n += (sa != nosa);
			grp[n].id.ptr = sa_ptr[i];
			grp[n].m = pkts + i;
			sa = sa_ptr[i];
		}
	}

	/* terminate last group */
	if (sa != nosa) {
		grp[n].cnt = pkts + i - grp[n].m;
		n++;
	}

	return n;
}

/*
 * helper function, splits processed packets into IPv4/IPv6 traffic.
 */
static inline void
copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
	uint32_t num)
{
	uint32_t j, ofs, s;
	struct traffic_type *out;

	/*
	 * determine traffic type (IPv4/IPv6) and offset for ACL classify
	 * based on SA type
	 */
	if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
		if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
			out = &trf->ip4;
			ofs = offsetof(struct ip, ip_p);
		} else {
			out = &trf->ip6;
			ofs = offsetof(struct ip6_hdr, ip6_nxt);
		}
	} else if (SATP_OUT_IPV4(satp)) {
		out = &trf->ip4;
		ofs = offsetof(struct ip, ip_p);
	} else {
		out = &trf->ip6;
		ofs = offsetof(struct ip6_hdr, ip6_nxt);
	}

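	/*
	 * out->data[] keeps a pointer to the protocol/next-header field of
	 * each packet (at offset ofs) - this is what the ACL based SP
	 * classify uses later on
	 */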
	for (j = 0, s = out->num; j != num; j++) {
		out->data[s + j] = rte_pktmbuf_mtod_offset(mb[j],
				void *, ofs);
		out->pkts[s + j] = mb[j];
	}

	out->num += num;
}

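/*
 * create crypto-ops for a group of packets belonging to the same SA
 * and stage them for enqueue to the crypto PMD.
 * Returns the number of packets successfully prepared;
 * the caller drops the rest.
 */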
static uint32_t
ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips, struct rte_mbuf **m,
		unsigned int cnt)
{
	struct cdev_qp *cqp;
	struct rte_crypto_op *cop[cnt];
	uint32_t j, k;
	struct ipsec_mbuf_metadata *priv;

	cqp = sa->cqp[ctx->lcore_id];

	/* in this app each mbuf has its own crypto op */
	for (j = 0; j != cnt; j++) {
		priv = get_priv(m[j]);
		cop[j] = &priv->cop;
		/*
		 * this is just to satisfy inbound_sa_check(),
		 * should be removed in future.
		 */
		priv->sa = sa;
	}

	/* prepare and enqueue crypto ops */
	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
	if (k != 0)
		enqueue_cop_bulk(cqp, cop, k);

	return k;
}

/*
 * finish processing of packets successfully handled by an inline processor
 */
static uint32_t
ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

	k = rte_ipsec_pkt_process(ips, mb, cnt);
	copy_to_trf(trf, satp, mb, k);
	return k;
}

/*
 * process packets synchronously
 */
static uint32_t
ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

	k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
	k = rte_ipsec_pkt_process(ips, mb, k);
	copy_to_trf(trf, satp, mb, k);
	return k;
}

/*
 * Process ipsec packets.
 * If a packet belongs to an SA that is subject to inline-crypto,
 * then process it immediately.
 * Otherwise do the necessary preparations and queue it to the related
 * crypto-dev queue.
 */
void
ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint32_t i, k, n;
	struct ipsec_sa *sa;
	struct rte_ipsec_group *pg;
	struct rte_ipsec_session *ips;
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);

	for (i = 0; i != n; i++) {

		pg = grp + i;
		sa = ipsec_mask_saptr(pg->id.ptr);

		/* fall back to cryptodev for RX packets which the inline
		 * processor was unable to process
		 */
		if (sa != NULL)
			ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
				ipsec_get_fallback_session(sa) :
				ipsec_get_primary_session(sa);

		/* no valid HW session for that SA */
		if (sa == NULL || unlikely(check_ipsec_session(ips) != 0))
			k = 0;

		/* process packets according to the session type */
		else {
			switch (ips->type) {
			/* enqueue packets to crypto dev */
			case RTE_SECURITY_ACTION_TYPE_NONE:
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
				k = ipsec_prepare_crypto_group(ctx, sa, ips,
					pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
				k = ipsec_process_inline_group(ips, sa,
					trf, pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
				k = ipsec_process_cpu_group(ips, sa,
					trf, pg->m, pg->cnt);
				break;
			default:
				k = 0;
			}
		}

		/* drop packets that cannot be enqueued/processed */
		if (k != pg->cnt)
			free_pkts(pg->m + k, pg->cnt - k);
	}
}

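/*
 * dequeue up to num completed crypto-ops from the given crypto-dev
 * queue pair
 */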
static inline uint32_t
cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t n;

	if (cqp->in_flight == 0)
		return 0;

	n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
	RTE_ASSERT(cqp->in_flight >= n);
	cqp->in_flight -= n;

	return n;
}

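/*
 * dequeue completed crypto-ops from all queue-pairs owned by this
 * ipsec context; scanning starts from last_qp and wraps around, and
 * last_qp is updated so that subsequent calls do not always favour
 * the same queue-pairs
 */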
static inline uint32_t
ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, n;

	n = 0;

	for (i = ctx->last_qp; n != num && i != ctx->nb_qps; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	for (i = 0; n != num && i != ctx->last_qp; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	ctx->last_qp = i;
	return n;
}

/*
 * dequeue packets from crypto-queues and finalize processing.
 */
void
ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint64_t satp;
	uint32_t i, k, n, ng;
	struct rte_ipsec_session *ss;
	struct traffic_type *out;
	struct rte_ipsec_group *pg;
	const int nb_cops = RTE_DIM(trf->ipsec.pkts);
	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	trf->ip4.num = 0;
	trf->ip6.num = 0;

	out = &trf->ipsec;

	/* dequeue completed crypto-ops */
	n = ctx_dequeue(ctx, cop, RTE_MIN(MAX_PKT_BURST, nb_cops));
	if (n == 0)
		return;

	/*
	 * group them by ipsec session; this also extracts the source mbufs
	 * from the crypto-ops into out->pkts[]
	 */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)
		(uintptr_t)cop, out->pkts, grp, n);

	/* process each group of packets */
	for (i = 0; i != ng; i++) {

		pg = grp + i;
		ss = pg->id.ptr;
		satp = rte_ipsec_sa_type(ss->sa);

		k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt);
		copy_to_trf(trf, satp, pg->m, k);

		/* free bad packets, if any */
		free_pkts(pg->m + k, pg->cnt - k);

		n -= pg->cnt;
	}

	/* we should never have a packet with an unknown SA here */
	RTE_VERIFY(n == 0);
}