/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

typedef uint16_t (*esp_inb_process_t)(struct rte_ipsec_sa *sa,
	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
	uint8_t sqh_len);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by inb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
	sop->cipher.data.length = plen - sa->ctp.cipher.length;
	sop->auth.data.offset = pofs + sa->ctp.auth.offset;
	sop->auth.data.length = plen - sa->ctp.auth.length;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}

/*
 * helper function to fill crypto_sym op for aead algorithms
 * used by inb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
	sop->aead.data.length = plen - sa->ctp.cipher.length;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}

/*
 * setup crypto op and crypto sym op for ESP inbound packet.
 */
static inline void
inb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivc, *ivp;
	uint32_t algo;

	algo = sa->algo_type;
	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
		pofs + sizeof(struct rte_esp_hdr));

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_GCM:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
				struct aead_chacha20_poly1305_iv *,
				sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* copy iv from the input packet to the cop */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		copy_iv(ivc, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_GMAC:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_NULL:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
		break;
	}
}

static inline uint32_t
inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t *pofs, uint32_t plen, void *iv)
{
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivp;
	uint32_t clen;

	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
		*pofs + sizeof(struct rte_esp_hdr));
	clen = 0;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_GMAC:
		gcm = (struct aead_gcm_iv *)iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = (struct aead_ccm_iv *)iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = (struct aead_chacha20_poly1305_iv *)iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		copy_iv(iv, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = (struct aesctr_cnt_blk *)iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen - sa->ctp.auth.length;
	return clen;
}

/*
 * Helper function for prepare() to deal with the situation when
 * the ICV is spread across two segments. Tries to move the ICV
 * completely into the last segment.
 */
static struct rte_mbuf *
move_icv(struct rte_mbuf *ml, uint32_t ofs)
{
	uint32_t n;
	struct rte_mbuf *ms;
	const void *prev;
	void *new;

	ms = ml->next;
	n = ml->data_len - ofs;

	prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
	new = rte_pktmbuf_prepend(ms, n);
	if (new == NULL)
		return NULL;

	/* move n ICV bytes from ml into ms */
	rte_memcpy(new, prev, n);
	ml->data_len -= n;

	return ms;
}

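/*
 * For illustration, a sketch of the case handled above: with a 16B ICV
 * whose first n bytes ended up at the tail of segment ml and whose
 * remaining 16 - n bytes sit at the head of the next segment ms,
 * move_icv() shifts the n bytes from ml into the headroom of ms:
 *
 *	before:  ml: [... payload | ICV[0..n) ]   ms: [ ICV[n..16) ... ]
 *	after:   ml: [... payload ]               ms: [ ICV[0..16) ... ]
 *
 * This needs n bytes of headroom in ms (rte_pktmbuf_prepend() fails
 * otherwise, and the packet is dropped by the caller).
 */
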
/*
 * for pure cryptodev (lookaside none), depending on SA settings
 * we might have to write some extra data to the packet.
 */
static inline void
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	struct aead_gcm_aad *aad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);

	/*
	 * fill AAD fields, if any (AAD is placed right after the ICV);
	 * supported AEAD algorithms: AES-GCM, AES-CCM and
	 * CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha_aad = (struct aead_chacha20_poly1305_aad *)
			    (icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha_aad,
						sa->spi, sqc, IS_ESN(sa));
		}
		break;
	}
}

static inline int
inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
	struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
{
	int32_t rc;
	uint64_t sqn;
	struct rte_esp_hdr *esph;

	esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);

	/*
	 * retrieve and reconstruct the SQN, convert it back into
	 * network byte order, then check it against the replay window.
	 */
	sqn = rte_be_to_cpu_32(esph->seq);
	if (IS_ESN(sa))
		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
	*sqc = rte_cpu_to_be_64(sqn);

	/* check IPsec window */
	rc = esn_inb_check_sqn(rsn, sa, sqn);

	return rc;
}

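/*
 * A brief note on the ESN reconstruction above (a sketch; the exact rules
 * live in reconstruct_esn() in ipsec_sqn.h): only the low-order 32 bits of
 * the sequence number travel in the ESP header, so the receiver picks the
 * 64-bit value whose low 32 bits match and which lies closest to the
 * current anti-replay window. E.g. with the window top at
 * 0x00000000fffffff0, a received low-order value of 0x00000004 is
 * reconstructed as 0x0000000100000004 (wrap forward into the next epoch).
 */
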
/* prepare packet for upcoming processing */
static inline int32_t
inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t hlen, union sym_op_data *icv)
{
	uint32_t clen, icv_len, icv_ofs, plen;
	struct rte_mbuf *ml;

	/* start packet manipulation */
	plen = mb->pkt_len;
	plen = plen - hlen;

	/* check that packet has a valid length */
	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
		return -EBADMSG;

	/* find ICV location */
	icv_len = sa->icv_len;
	icv_ofs = mb->pkt_len - icv_len;

	ml = mbuf_get_seg_ofs(mb, &icv_ofs);

	/*
	 * if the ICV is spread across two segments, then try to
	 * move it completely into the last segment.
	 */
	if (ml->data_len < icv_ofs + icv_len) {

		ml = move_icv(ml, icv_ofs);
		if (ml == NULL)
			return -ENOSPC;

		/* new ICV location */
		icv_ofs = 0;
	}

	icv_ofs += sa->sqh_len;

	/*
	 * we have to allocate space for the AAD somewhere,
	 * right now - just use the free trailing space of the last segment.
	 * It would probably be more convenient to reserve space for the AAD
	 * inside rte_crypto_op itself
	 * (space for the IV is already reserved inside the cop).
	 */
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

	/*
	 * if ESN is used, then the high-order 32 bits are also used in the
	 * ICV calculation but are not transmitted; update the packet length
	 * to be consistent with the auth data length and offset, this will
	 * be subtracted from the packet length in post-crypto processing.
	 */
	mb->pkt_len += sa->sqh_len;
	ml->data_len += sa->sqh_len;

	return plen;
}

static inline int32_t
inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
	struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
{
	int rc;
	rte_be64_t sqn;

	rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
	if (rc != 0)
		return rc;

	rc = inb_prepare(sa, mb, hlen, icv);
	if (rc < 0)
		return rc;

	inb_pkt_xprepare(sa, sqn, icv);
	return rc;
}

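/*
 * After a successful inb_pkt_prepare() the last segment is laid out
 * roughly as follows (a sketch; SQN.hi and AAD are present only when
 * sa->sqh_len/sa->aad_len are non-zero):
 *
 *	... payload | padding | ESP tail | SQN.hi | ICV | AAD
 *
 * icv->va points at the (possibly moved) ICV, the AAD lives in the
 * tailroom right after it, and pkt_len/data_len were bumped by sqh_len
 * so that they match the auth data length handed to the crypto layer.
 */
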
/*
 * setup/update packets and crypto ops for ESP inbound case.
 */
uint16_t
esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, hl;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;
	rsn = rsn_acquire(sa);

	k = 0;
	for (i = 0; i != num; i++) {

		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
		if (rc >= 0) {
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
			k++;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	rsn_release(sa, rsn);

	/* move mbufs that were not prepared beyond the good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	return k;
}

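/*
 * A minimal usage sketch for the lookaside-none (cryptodev) path above,
 * assuming an inbound session of type RTE_SECURITY_ACTION_TYPE_NONE and
 * an already configured crypto device/queue pair (dev_id and qp_id are
 * placeholders). This function is normally reached through
 * rte_ipsec_pkt_crypto_prepare():
 *
 *	k = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, n);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, k);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, RTE_DIM(cop));
 *	(group completed cops back to their sessions/mbufs, e.g. with
 *	 rte_ipsec_pkt_crypto_group(), then)
 *	n = rte_ipsec_pkt_process(&ss, mb, n);
 */
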
/*
 * Start processing of the inbound packet.
 * This is the common part for both tunnel and transport mode.
 * Extract the information that will be needed later from mbuf metadata
 * and actual packet data:
 * - mbuf for packet's last segment
 * - length of the L2/L3 headers
 * - ESP tail structure
 */
static inline void
process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
	struct rte_esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
{
	const struct rte_esp_tail *pt;
	uint32_t ofs;

	ofs = mb->pkt_len - tlen;
	hlen[0] = mb->l2_len + mb->l3_len;
	ml[0] = mbuf_get_seg_ofs(mb, &ofs);
	pt = rte_pktmbuf_mtod_offset(ml[0], const struct rte_esp_tail *, ofs);
	tofs[0] = ofs;
	espt[0] = pt[0];
}

/*
 * Helper function to check pad bytes values.
 * Note that pad bytes can be spread across multiple segments.
 */
static inline int
check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
{
	const uint8_t *pd;
	uint32_t k, n;

	for (n = 0; n != len; n += k, mb = mb->next) {
		k = mb->data_len - ofs;
		k = RTE_MIN(k, len - n);
		pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
		if (memcmp(pd, esp_pad_bytes + n, k) != 0)
			break;
		ofs = 0;
	}

	return len - n;
}

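/*
 * Note on the check above: with the default (self-describing) ESP padding
 * from RFC 4303 the pad bytes hold the values 1, 2, 3, ..., which is the
 * pattern esp_pad_bytes (see pad.h) is compared against; a non-zero
 * return value means the padding did not match.
 */
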
/*
 * packet checks for transport mode:
 * - no reported IPsec related failures in ol_flags
 * - tail and header lengths are valid
 * - padding bytes are valid
 * apart from the checks, the function also updates the tail offset
 * (and segment) taking the pad length into account.
 */
static inline int32_t
trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
	uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen)
{
	if ((mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) != 0 ||
			tlen + hlen > mb->pkt_len)
		return -EBADMSG;

	/* padding bytes are spread over multiple segments */
	if (tofs[0] < espt.pad_len) {
		tofs[0] = mb->pkt_len - tlen;
		ml[0] = mbuf_get_seg_ofs(mb, tofs);
	} else
		tofs[0] -= espt.pad_len;

	return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
}

/*
 * packet checks for tunnel mode:
 * - same as for transport mode
 * - ESP tail next_proto contains the value expected for that SA
 */
static inline int32_t
tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
	uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen,
	uint8_t proto)
{
	return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
		espt.next_proto != proto);
}

/*
 * step two for tunnel mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV, also if needed - L2/L3 headers
 *   (controlled by *adj* value)
 */
static inline void *
tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
{
	const struct rte_esp_hdr *ph;

	/* read SQN value */
	ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
	sqn[0] = ph->seq;

	/* cut off ICV, ESP tail and padding bytes */
	mbuf_cut_seg_ofs(mb, ml, tofs, tlen);

	/* cut off L2/L3 headers, ESP header and IV */
	return rte_pktmbuf_adj(mb, adj);
}

/*
 * step two for transport mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV
 * - move L2/L3 header to fill the gap after ESP header removal
 */
static inline void *
trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
{
	char *np, *op;

	/* get start of the packet before modifications */
	op = rte_pktmbuf_mtod(mb, char *);

	/* cut off ESP header and IV */
	np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);

	/* move header bytes to fill the gap after ESP header removal */
	remove_esph(np, op, hlen);
	return np;
}

/*
 * step three for transport mode:
 * update mbuf metadata:
 * - packet_type
 * - ol_flags
 */
static inline void
trs_process_step3(struct rte_mbuf *mb)
{
	/* reset mbuf packet type */
	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

	/* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
}

/*
 * step three for tunnel mode:
 * update mbuf metadata:
 * - packet_type
 * - ol_flags
 * - tx_offload
 */
static inline void
tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
{
	/* reset mbuf metadata: L2/L3 len, packet type */
	mb->packet_type = RTE_PTYPE_UNKNOWN;
	mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;

	/* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
}

/*
 * *process* function for tunnel packets
 */
static inline uint16_t
tun_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	    uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
{
	uint32_t adj, i, k, tl, bytes;
	uint32_t hl[num], to[num];
	struct rte_esp_tail espt[num];
	struct rte_mbuf *ml[num];
	const void *outh;
	void *inh;

	/*
	 * remove icv, esp trailer and high-order
	 * 32 bits of esn from packet length
	 */
	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
	const uint32_t cofs = sa->ctp.cipher.offset;

	/*
	 * to minimize stalls due to load latency,
	 * read mbufs metadata and esp tail first.
	 */
	for (i = 0; i != num; i++)
		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);

	k = 0;
	bytes = 0;
	for (i = 0; i != num; i++) {

		adj = hl[i] + cofs;
		tl = tlen + espt[i].pad_len;

		/* check that packet is valid */
		if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
					sa->proto) == 0) {

			outh = rte_pktmbuf_mtod_offset(mb[i], uint8_t *,
					mb[i]->l2_len);

			/* modify packet's layout */
			inh = tun_process_step2(mb[i], ml[i], hl[i], adj,
					to[i], tl, sqn + k);

			/* update inner ip header */
			update_tun_inb_l3hdr(sa, outh, inh);

			/* update mbuf's metadata */
			tun_process_step3(mb[i], sa->tx_offload.msk,
				sa->tx_offload.val);
			k++;
			bytes += mb[i]->pkt_len;
		} else
			dr[i - k] = i;
	}

	sa->statistics.count += k;
	sa->statistics.bytes += bytes;
	return k;
}

/*
 * *process* function for transport packets
 */
static inline uint16_t
trs_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
{
	char *np;
	uint32_t i, k, l2, tl, bytes;
	uint32_t hl[num], to[num];
	struct rte_esp_tail espt[num];
	struct rte_mbuf *ml[num];

	/*
	 * remove icv, esp trailer and high-order
	 * 32 bits of esn from packet length
	 */
	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
	const uint32_t cofs = sa->ctp.cipher.offset;

	/*
	 * to minimize stalls due to load latency,
	 * read mbufs metadata and esp tail first.
	 */
	for (i = 0; i != num; i++)
		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);

	k = 0;
	bytes = 0;
	for (i = 0; i != num; i++) {

		tl = tlen + espt[i].pad_len;
		l2 = mb[i]->l2_len;

		/* check that packet is valid */
		if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
				hl[i] + cofs, tl) == 0) {

			/* modify packet's layout */
			np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
				to[i], tl, sqn + k);
			update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
				l2, hl[i] - l2, espt[i].next_proto);

			/* update mbuf's metadata */
			trs_process_step3(mb[i]);
			k++;
			bytes += mb[i]->pkt_len;
		} else
			dr[i - k] = i;
	}

	sa->statistics.count += k;
	sa->statistics.bytes += bytes;
	return k;
}

/*
 * for a group of ESP inbound packets perform SQN check and update.
 */
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
	uint32_t dr[], uint16_t num)
{
	uint32_t i, k;
	struct replay_sqn *rsn;

	/* replay not enabled */
	if (sa->replay.win_sz == 0)
		return num;

	rsn = rsn_update_start(sa);

	k = 0;
	for (i = 0; i != num; i++) {
		if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
			k++;
		else
			dr[i - k] = i;
	}

	rsn_update_finish(sa, rsn);
	return k;
}

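/*
 * The overall post-crypto flow implemented below is (a rough sketch):
 *
 *	process() -> drop malformed/failed -> esp_inb_rsn_update() ->
 *	drop out-of-window/replayed -> return the survivors
 *
 * Note that the replay window is advanced only in the second stage,
 * i.e. only for packets that passed the preceding checks (including the
 * RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED check), so packets that failed
 * verification do not move the window.
 */
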
/*
 * process group of ESP inbound packets.
 */
static inline uint16_t
esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	uint16_t num, uint8_t sqh_len, esp_inb_process_t process)
{
	uint32_t k, n;
	uint32_t sqn[num];
	uint32_t dr[num];

	/* process packets, extract seq numbers */
	k = process(sa, mb, sqn, dr, num, sqh_len);

	/* handle unprocessed mbufs */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	/* update SQN and replay window */
	n = esp_inb_rsn_update(sa, sqn, dr, k);

	/* handle mbufs with wrong SQN */
	if (n != k && n != 0)
		move_bad_mbufs(mb, dr, k, k - n);

	if (n != num)
		rte_errno = EBADMSG;

	return n;
}

/*
 * Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO
 * (synchronous mode).
 */
uint16_t
cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k;
	struct rte_ipsec_sa *sa;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	/* grab rsn lock */
	rsn = rsn_acquire(sa);

	/* do preparation for all packets */
	for (i = 0, k = 0; i != num; i++) {

		/* calculate ESP header offset */
		l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;

		/* prepare ESP packet for processing */
		rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
		if (rc >= 0) {
			/* get encrypted data offset and length */
			clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
				l4ofs + k, rc, ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* release rsn lock */
	rsn_release(sa, rsn);

	/* move mbufs that were not prepared beyond the good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}

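/*
 * A minimal usage sketch for the CPU-CRYPTO (synchronous) path above,
 * assuming a session of type RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO; this
 * function is normally reached through rte_ipsec_pkt_cpu_prepare(), and
 * no cryptodev enqueue/dequeue round-trip is needed:
 *
 *	k = rte_ipsec_pkt_cpu_prepare(&ss, mb, n);
 *	k = rte_ipsec_pkt_process(&ss, mb, k);
 */
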
/*
 * process group of ESP inbound tunnel packets.
 */
uint16_t
esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_ipsec_sa *sa = ss->sa;

	return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process);
}

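/*
 * The inline_inb_* variants below are intended for inline-processed
 * sessions: since no SQN.hi was inserted into the packet by a lookaside
 * prepare step, they run the same processing with an sqh_len of 0.
 */
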
uint16_t
inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process);
}

/*
 * process group of ESP inbound transport packets.
 */
uint16_t
esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_ipsec_sa *sa = ss->sa;

	return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process);
}

uint16_t
inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process);
}