xref: /dpdk/examples/ipsec-secgw/ipsec_process.c (revision 089e5ed727a15da2729cfee9b63533dd120bd04c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 #include <sys/types.h>
5 #include <netinet/in.h>
6 #include <netinet/ip.h>
7 
8 #include <rte_branch_prediction.h>
9 #include <rte_log.h>
10 #include <rte_cryptodev.h>
11 #include <rte_ethdev.h>
12 #include <rte_mbuf.h>
13 
14 #include "ipsec.h"
15 
/*
 * Evaluates to non-zero when the SA described by type-flags @t produces
 * IPv4 packets on output: either transport mode over an IPv4 SA, or an
 * IPv4-tunnel mode SA (tunnel header is IPv4 regardless of inner IP).
 */
#define SATP_OUT_IPV4(t)	\
	((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
	((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
20 
21 
/* helper routine to free bulk of packets */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
	uint32_t idx;

	/* release every mbuf in the array back to its pool */
	for (idx = 0; idx < n; idx++)
		rte_pktmbuf_free(mb[idx]);
}
31 
32 /* helper routine to free bulk of crypto-ops and related packets */
33 static inline void
34 free_cops(struct rte_crypto_op *cop[], uint32_t n)
35 {
36 	uint32_t i;
37 
38 	for (i = 0; i != n; i++)
39 		rte_pktmbuf_free(cop[i]->sym->m_src);
40 }
41 
42 /* helper routine to enqueue bulk of crypto ops */
43 static inline void
44 enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
45 {
46 	uint32_t i, k, len, n;
47 
48 	len = cqp->len;
49 
50 	/*
51 	 * if cqp is empty and we have enough ops,
52 	 * then queue them to the PMD straightway.
53 	 */
54 	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
55 		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
56 		cqp->in_flight += n;
57 		free_cops(cop + n, num - n);
58 		return;
59 	}
60 
61 	k = 0;
62 
63 	do {
64 		n = RTE_DIM(cqp->buf) - len;
65 		n = RTE_MIN(num - k, n);
66 
67 		/* put packets into cqp */
68 		for (i = 0; i != n; i++)
69 			cqp->buf[len + i] = cop[k + i];
70 
71 		len += n;
72 		k += n;
73 
74 		/* if cqp is full then, enqueue crypto-ops to PMD */
75 		if (len == RTE_DIM(cqp->buf)) {
76 			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
77 					cqp->buf, len);
78 			cqp->in_flight += n;
79 			free_cops(cqp->buf + n, len - n);
80 			len = 0;
81 		}
82 
83 
84 	} while (k != num);
85 
86 	cqp->len = len;
87 }
88 
/*
 * Lazily create (if needed) and attach the crypto/security session for
 * the given SA, then run rte_ipsec_session_prepare() on it.
 * Returns 0 on success, negative error code otherwise; on failure the
 * session struct is zeroed so callers see it as "no valid session".
 */
static inline int
fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
	struct ipsec_sa *sa)
{
	int32_t rc;

	/* setup crypto section */
	/*
	 * NOTE(review): this branch tests ss->type while the next one tests
	 * sa->type - presumably the two are kept in sync when the SA is
	 * initialized; confirm against sa_add_rules()/SA setup code.
	 */
	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
		/* create the lookaside session on first use only */
		if (sa->crypto_session == NULL) {
			rc = create_lookaside_session(ctx, sa);
			if (rc != 0)
				return rc;
		}
		ss->crypto.ses = sa->crypto_session;
	/* setup session action type */
	} else if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
		/* create the rte_security session on first use only */
		if (sa->sec_session == NULL) {
			rc = create_lookaside_session(ctx, sa);
			if (rc != 0)
				return rc;
		}
		ss->security.ses = sa->sec_session;
		ss->security.ctx = sa->security_ctx;
		ss->security.ol_flags = sa->ol_flags;
	} else
		/* inline action types are not expected on this path */
		RTE_ASSERT(0);

	rc = rte_ipsec_session_prepare(ss);
	if (rc != 0)
		/* leave no half-initialized session behind */
		memset(ss, 0, sizeof(*ss));

	return rc;
}
122 
123 /*
124  * group input packets byt the SA they belong to.
125  */
126 static uint32_t
127 sa_group(struct ipsec_sa *sa_ptr[], struct rte_mbuf *pkts[],
128 	struct rte_ipsec_group grp[], uint32_t num)
129 {
130 	uint32_t i, n, spi;
131 	void *sa;
132 	void * const nosa = &spi;
133 
134 	sa = nosa;
135 	for (i = 0, n = 0; i != num; i++) {
136 
137 		if (sa != sa_ptr[i]) {
138 			grp[n].cnt = pkts + i - grp[n].m;
139 			n += (sa != nosa);
140 			grp[n].id.ptr = sa_ptr[i];
141 			grp[n].m = pkts + i;
142 			sa = sa_ptr[i];
143 		}
144 	}
145 
146 	/* terminate last group */
147 	if (sa != nosa) {
148 		grp[n].cnt = pkts + i - grp[n].m;
149 		n++;
150 	}
151 
152 	return n;
153 }
154 
155 /*
156  * helper function, splits processed packets into ipv4/ipv6 traffic.
157  */
158 static inline void
159 copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
160 	uint32_t num)
161 {
162 	uint32_t j, ofs, s;
163 	struct traffic_type *out;
164 
165 	/*
166 	 * determine traffic type(ipv4/ipv6) and offset for ACL classify
167 	 * based on SA type
168 	 */
169 	if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
170 		if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
171 			out = &trf->ip4;
172 			ofs = offsetof(struct ip, ip_p);
173 		} else {
174 			out = &trf->ip6;
175 			ofs = offsetof(struct ip6_hdr, ip6_nxt);
176 		}
177 	} else if (SATP_OUT_IPV4(satp)) {
178 		out = &trf->ip4;
179 		ofs = offsetof(struct ip, ip_p);
180 	} else {
181 		out = &trf->ip6;
182 		ofs = offsetof(struct ip6_hdr, ip6_nxt);
183 	}
184 
185 	for (j = 0, s = out->num; j != num; j++) {
186 		out->data[s + j] = rte_pktmbuf_mtod_offset(mb[j],
187 				void *, ofs);
188 		out->pkts[s + j] = mb[j];
189 	}
190 
191 	out->num += num;
192 }
193 
194 /*
195  * Process ipsec packets.
196  * If packet belong to SA that is subject of inline-crypto,
197  * then process it immediately.
198  * Otherwise do necessary preparations and queue it to related
199  * crypto-dev queue.
200  */
201 void
202 ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
203 {
204 	uint64_t satp;
205 	uint32_t i, j, k, n;
206 	struct ipsec_sa *sa;
207 	struct ipsec_mbuf_metadata *priv;
208 	struct rte_ipsec_group *pg;
209 	struct rte_ipsec_session *ips;
210 	struct cdev_qp *cqp;
211 	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
212 	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
213 
214 	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);
215 
216 	for (i = 0; i != n; i++) {
217 
218 		pg = grp + i;
219 		sa = pg->id.ptr;
220 
221 		ips = &sa->ips;
222 
223 		/* no valid HW session for that SA, try to create one */
224 		if (sa == NULL || (ips->crypto.ses == NULL &&
225 				fill_ipsec_session(ips, ctx, sa) != 0))
226 			k = 0;
227 
228 		/* process packets inline */
229 		else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
230 				sa->type ==
231 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
232 
233 			satp = rte_ipsec_sa_type(ips->sa);
234 
235 			/*
236 			 * This is just to satisfy inbound_sa_check()
237 			 * and get_hop_for_offload_pkt().
238 			 * Should be removed in future.
239 			 */
240 			for (j = 0; j != pg->cnt; j++) {
241 				priv = get_priv(pg->m[j]);
242 				priv->sa = sa;
243 			}
244 
245 			k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
246 			copy_to_trf(trf, satp, pg->m, k);
247 
248 		/* enqueue packets to crypto dev */
249 		} else {
250 
251 			cqp = &ctx->tbl[sa->cdev_id_qp];
252 
253 			/* for that app each mbuf has it's own crypto op */
254 			for (j = 0; j != pg->cnt; j++) {
255 				priv = get_priv(pg->m[j]);
256 				cop[j] = &priv->cop;
257 				/*
258 				 * this is just to satisfy inbound_sa_check()
259 				 * should be removed in future.
260 				 */
261 				priv->sa = sa;
262 			}
263 
264 			/* prepare and enqueue crypto ops */
265 			k = rte_ipsec_pkt_crypto_prepare(ips, pg->m, cop,
266 				pg->cnt);
267 			if (k != 0)
268 				enqueue_cop_bulk(cqp, cop, k);
269 		}
270 
271 		/* drop packets that cannot be enqueued/processed */
272 		if (k != pg->cnt)
273 			free_pkts(pg->m + k, pg->cnt - k);
274 	}
275 }
276 
277 static inline uint32_t
278 cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
279 {
280 	uint32_t n;
281 
282 	if (cqp->in_flight == 0)
283 		return 0;
284 
285 	n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
286 	RTE_ASSERT(cqp->in_flight >= n);
287 	cqp->in_flight -= n;
288 
289 	return n;
290 }
291 
/*
 * Collect up to num completed crypto-ops across all queue-pairs of the
 * context, scanning round-robin starting at last_qp so no qp is starved.
 * Returns the number of ops stored into cop[].
 */
static inline uint32_t
ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, n;

	n = 0;

	/* first pass: from last position to the end of the table */
	for (i = ctx->last_qp; n != num && i != ctx->nb_qps; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	/* wrap around: from the table start up to the previous position */
	for (i = 0; n != num && i != ctx->last_qp; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	/*
	 * NOTE(review): the second loop resets i to 0 even when it never
	 * iterates, so last_qp becomes 0 whenever the first pass alone
	 * satisfied num - confirm this restart behavior is intended.
	 */
	ctx->last_qp = i;
	return n;
}
308 
/*
 * dequeue packets from crypto-queues and finalize processing.
 */
void
ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint64_t satp;
	uint32_t i, k, n, ng;
	struct rte_ipsec_session *ss;
	struct traffic_type *out;
	struct rte_ipsec_group *pg;
	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	/* results land in trf->ip4/ip6; start both buckets empty */
	trf->ip4.num = 0;
	trf->ip6.num = 0;

	/* use the ipsec bucket as scratch space for dequeued packets */
	out = &trf->ipsec;

	/* dequeue completed crypto-ops */
	n = ctx_dequeue(ctx, cop, RTE_DIM(cop));
	if (n == 0)
		return;

	/* group them by ipsec session */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)
		(uintptr_t)cop, out->pkts, grp, n);

	/* process each group of packets */
	for (i = 0; i != ng; i++) {

		pg = grp + i;
		/* crypto_group() keyed the groups by session pointer */
		ss = pg->id.ptr;
		satp = rte_ipsec_sa_type(ss->sa);

		/* finalize: first k packets are good, rest failed */
		k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt);
		copy_to_trf(trf, satp, pg->m, k);

		/* free bad packets, if any */
		free_pkts(pg->m + k, pg->cnt - k);

		/* track how many dequeued ops the groups account for */
		n -= pg->cnt;
	}

	/* we should never have packet with unknown SA here */
	RTE_VERIFY(n == 0);
}
356