/* xref: /dpdk/examples/ipsec-secgw/ipsec_process.c (revision e11bdd37745229bf26b557305c07d118c3dbaad7) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#include "ipsec.h"

#define SATP_OUT_IPV4(t)	\
	((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
	((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)

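/*
 * Example (illustrative): SATP_OUT_IPV4() evaluates to true for a
 * transport-mode SA carrying IPv4 traffic and for any SA in IPv4 tunnel
 * mode (IPv4 outer header); it is false for e.g. an IPv6 tunnel SA.
 */
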
/* helper routine to free a bulk of packets */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		rte_pktmbuf_free(mb[i]);
}

/* helper routine to free a bulk of crypto-ops and related packets */
static inline void
free_cops(struct rte_crypto_op *cop[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		rte_pktmbuf_free(cop[i]->sym->m_src);
}

/* helper routine to enqueue a bulk of crypto ops */
static inline void
enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, k, len, n;

	len = cqp->len;

	/*
	 * if cqp is empty and we have enough ops,
	 * then queue them to the PMD straight away.
	 */
	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
		cqp->in_flight += n;
		free_cops(cop + n, num - n);
		return;
	}

	k = 0;

	do {
		n = RTE_DIM(cqp->buf) - len;
		n = RTE_MIN(num - k, n);

		/* put crypto ops into cqp */
		for (i = 0; i != n; i++)
			cqp->buf[len + i] = cop[k + i];

		len += n;
		k += n;

		/* if cqp is full, then enqueue crypto-ops to the PMD */
		if (len == RTE_DIM(cqp->buf)) {
			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
					cqp->buf, len);
			cqp->in_flight += n;
			free_cops(cqp->buf + n, len - n);
			len = 0;
		}

	} while (k != num);

	cqp->len = len;
}

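/*
 * A minimal sketch of how a caller could flush crypto-ops still buffered in
 * cqp->buf at the end of a poll cycle; the helper name drain_cqp_buf() is
 * hypothetical.
 */
static inline void
drain_cqp_buf(struct cdev_qp *cqp)
{
	uint32_t n;

	if (cqp->len == 0)
		return;

	/* push the buffered ops to the PMD and drop whatever did not fit */
	n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, cqp->len);
	cqp->in_flight += n;
	free_cops(cqp->buf + n, cqp->len - n);
	cqp->len = 0;
}
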
static inline int
fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
	struct ipsec_sa *sa)
{
	int32_t rc;

	/* setup crypto section */
	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
			ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		RTE_ASSERT(ss->crypto.ses == NULL);
		rc = create_lookaside_session(ctx, sa, ss);
		if (rc != 0)
			return rc;
	/* setup security section */
	} else if (ss->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
		RTE_ASSERT(ss->security.ses == NULL);
		rc = create_lookaside_session(ctx, sa, ss);
		if (rc != 0)
			return rc;
	} else
		RTE_ASSERT(0);

	rc = rte_ipsec_session_prepare(ss);
	if (rc != 0)
		memset(ss, 0, sizeof(*ss));

	return rc;
}

/*
 * group input packets by the SA they belong to.
 */
static uint32_t
sa_group(void *sa_ptr[], struct rte_mbuf *pkts[],
	struct rte_ipsec_group grp[], uint32_t num)
{
	uint32_t i, n, spi;
	void *sa;
	void * const nosa = &spi;

	sa = nosa;
	grp[0].m = pkts;
	for (i = 0, n = 0; i != num; i++) {

		if (sa != sa_ptr[i]) {
			grp[n].cnt = pkts + i - grp[n].m;
			n += (sa != nosa);
			grp[n].id.ptr = sa_ptr[i];
			grp[n].m = pkts + i;
			sa = sa_ptr[i];
		}
	}

	/* terminate last group */
	if (sa != nosa) {
		grp[n].cnt = pkts + i - grp[n].m;
		n++;
	}

	return n;
}

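/*
 * Example (illustrative): for sa_ptr[] = {A, A, B, B, NULL, A} and num == 6,
 * sa_group() fills four groups:
 *   grp[0] = {id.ptr = A,    m = pkts + 0, cnt = 2}
 *   grp[1] = {id.ptr = B,    m = pkts + 2, cnt = 2}
 *   grp[2] = {id.ptr = NULL, m = pkts + 4, cnt = 1}
 *   grp[3] = {id.ptr = A,    m = pkts + 5, cnt = 1}
 * and returns 4; only consecutive packets with the same SA pointer end up
 * in the same group.
 */
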
/*
 * helper function that splits processed packets into ipv4/ipv6 traffic.
 */
static inline void
copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
	uint32_t num)
{
	uint32_t j, ofs, s;
	struct traffic_type *out;

	/*
	 * determine traffic type (ipv4/ipv6) and offset for ACL classify
	 * based on SA type
	 */
	if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
		if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
			out = &trf->ip4;
			ofs = offsetof(struct ip, ip_p);
		} else {
			out = &trf->ip6;
			ofs = offsetof(struct ip6_hdr, ip6_nxt);
		}
	} else if (SATP_OUT_IPV4(satp)) {
		out = &trf->ip4;
		ofs = offsetof(struct ip, ip_p);
	} else {
		out = &trf->ip6;
		ofs = offsetof(struct ip6_hdr, ip6_nxt);
	}

	for (j = 0, s = out->num; j != num; j++) {
		out->data[s + j] = rte_pktmbuf_mtod_offset(mb[j],
				void *, ofs);
		out->pkts[s + j] = mb[j];
	}

	out->num += num;
}

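/*
 * Example (illustrative): for an inbound IPv4 SA the processed (already
 * decapsulated) packets are appended to trf->ip4, and each data[] entry
 * points at the protocol field of the inner IPv4 header, which the later
 * ACL classify step operates on.
 */
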
static uint32_t
ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips, struct rte_mbuf **m,
		unsigned int cnt)
{
	struct cdev_qp *cqp;
	struct rte_crypto_op *cop[cnt];
	uint32_t j, k;
	struct ipsec_mbuf_metadata *priv;

	cqp = &ctx->tbl[sa->cdev_id_qp];

	/* in this app each mbuf has its own crypto op */
	for (j = 0; j != cnt; j++) {
		priv = get_priv(m[j]);
		cop[j] = &priv->cop;
		/*
		 * this is just to satisfy inbound_sa_check();
		 * should be removed in the future.
		 */
		priv->sa = sa;
	}

	/* prepare and enqueue crypto ops */
	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
	if (k != 0)
		enqueue_cop_bulk(cqp, cop, k);

	return k;
}

/*
 * helper routine for inline and cpu (synchronous) processing;
 * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
 * Should be removed in the future.
 */
static inline void
prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint32_t j;
	struct ipsec_mbuf_metadata *priv;

	for (j = 0; j != cnt; j++) {
		priv = get_priv(mb[j]);
		priv->sa = sa;
	}
}

/*
 * finish processing of packets successfully decrypted by an inline processor
 */
static uint32_t
ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

	k = rte_ipsec_pkt_process(ips, mb, cnt);
	copy_to_trf(trf, satp, mb, k);
	return k;
}

/*
 * process packets synchronously
 */
static uint32_t
ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

	k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
	k = rte_ipsec_pkt_process(ips, mb, k);
	copy_to_trf(trf, satp, mb, k);
	return k;
}

/*
 * Process ipsec packets.
 * Expects trf->ipsec.saptr[] to be filled by the caller in lock-step with
 * trf->ipsec.pkts[].
 * If a packet belongs to an SA that is subject to inline-crypto,
 * then process it immediately.
 * Otherwise do the necessary preparations and queue it to the related
 * crypto-dev queue.
 */
void
ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint32_t i, k, n;
	struct ipsec_sa *sa;
	struct rte_ipsec_group *pg;
	struct rte_ipsec_session *ips;
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);

	for (i = 0; i != n; i++) {

		pg = grp + i;
		sa = ipsec_mask_saptr(pg->id.ptr);

		/* fall back to the cryptodev for RX packets which the inline
		 * processor was unable to process
		 */
		if (sa != NULL)
			ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
				ipsec_get_fallback_session(sa) :
				ipsec_get_primary_session(sa);

		/* no valid HW session for that SA, try to create one */
		if (sa == NULL || (ips->crypto.ses == NULL &&
				fill_ipsec_session(ips, ctx, sa) != 0))
			k = 0;

		/* otherwise process the group according to the session type */
		else {
			switch (ips->type) {
			/* enqueue packets to crypto dev */
			case RTE_SECURITY_ACTION_TYPE_NONE:
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
				k = ipsec_prepare_crypto_group(ctx, sa, ips,
					pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
				k = ipsec_process_inline_group(ips, sa,
					trf, pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
				k = ipsec_process_cpu_group(ips, sa,
					trf, pg->m, pg->cnt);
				break;
			default:
				k = 0;
			}
		}

		/* drop packets that cannot be enqueued/processed */
		if (k != pg->cnt)
			free_pkts(pg->m + k, pg->cnt - k);
	}
}

static inline uint32_t
cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t n;

	if (cqp->in_flight == 0)
		return 0;

	n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
	RTE_ASSERT(cqp->in_flight >= n);
	cqp->in_flight -= n;

	return n;
}

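/*
 * dequeue from the context's queue pairs in a round-robin fashion,
 * starting at last_qp, so that all queue pairs get polled fairly over
 * successive calls.
 */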
static inline uint32_t
ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, n;

	n = 0;

	for (i = ctx->last_qp; n != num && i != ctx->nb_qps; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	for (i = 0; n != num && i != ctx->last_qp; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	ctx->last_qp = i;
	return n;
}

/*
 * dequeue packets from crypto-queues and finalize processing.
 */
void
ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint64_t satp;
	uint32_t i, k, n, ng;
	struct rte_ipsec_session *ss;
	struct traffic_type *out;
	struct rte_ipsec_group *pg;
	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	trf->ip4.num = 0;
	trf->ip6.num = 0;

	out = &trf->ipsec;

	/* dequeue completed crypto-ops */
	n = ctx_dequeue(ctx, cop, RTE_DIM(cop));
	if (n == 0)
		return;

	/* group them by ipsec session */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)
		(uintptr_t)cop, out->pkts, grp, n);

	/* process each group of packets */
	for (i = 0; i != ng; i++) {

		pg = grp + i;
		ss = pg->id.ptr;
		satp = rte_ipsec_sa_type(ss->sa);

		k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt);
		copy_to_trf(trf, satp, pg->m, k);

		/* free bad packets, if any */
		free_pkts(pg->m + k, pg->cnt - k);

		n -= pg->cnt;
	}

	/* we should never have a packet with an unknown SA here */
	RTE_VERIFY(n == 0);
}

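/*
 * A minimal sketch of how the two halves of the lookaside path might be
 * driven from a poll loop: ipsec_process() enqueues crypto-ops, while
 * ipsec_cqp_process() collects the completed operations into a separate
 * ipsec_traffic structure (it resets the ip4/ip6 counters itself).
 * The wrapper name ipsec_poll_sketch() is hypothetical.
 */
static inline void
ipsec_poll_sketch(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	struct ipsec_traffic done;

	/* enqueue newly arrived ipsec packets (trf->ipsec) */
	ipsec_process(ctx, trf);

	/* pick up packets whose crypto-ops completed on earlier iterations */
	ipsec_cqp_process(ctx, &done);

	/* done.ip4 / done.ip6 now hold the finalized packets, ready to be
	 * routed or dropped by the caller */
}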