xref: /dpdk/lib/ipsec/esp_inb.c (revision b53d106d34b5c638f5a2cbdfee0da5bd42d4383f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

typedef uint16_t (*esp_inb_process_t)(struct rte_ipsec_sa *sa,
	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
	uint8_t sqh_len);

/*
 * Helper function to fill crypto_sym op for cipher+auth algorithms;
 * used by inb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
	sop->cipher.data.length = plen - sa->ctp.cipher.length;
	sop->auth.data.offset = pofs + sa->ctp.auth.offset;
	sop->auth.data.length = plen - sa->ctp.auth.length;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}
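
/*
 * Illustrative note, not part of the original sources: for a typical
 * cipher+auth transform, the inbound ctp values used above are set up
 * by the SA init code roughly as follows (a sketch; esp_inb_init() in
 * sa.c is authoritative):
 *
 *	ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
 *	ctp.cipher.length = sa->icv_len + ctp.cipher.offset;
 *	ctp.auth.offset = 0;
 *	ctp.auth.length = sa->icv_len - sa->sqh_len;
 *
 * With plen being the ESP payload length, cipher.data then covers
 * payload + padding + ESP tail, while auth.data covers the ESP header,
 * IV and ciphertext, plus the appended SQN.hi bytes when ESN is used.
 */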

/*
 * Helper function to fill crypto_sym op for AEAD algorithms;
 * used by inb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
	sop->aead.data.length = plen - sa->ctp.cipher.length;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}

/*
 * setup crypto op and crypto sym op for ESP inbound packet.
 */
static inline void
inb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivc, *ivp;
	uint32_t algo;

	algo = sa->algo_type;
	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
		pofs + sizeof(struct rte_esp_hdr));

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_GCM:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
				struct aead_chacha20_poly1305_iv *,
				sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* copy iv from the input packet to the cop */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		copy_iv(ivc, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_GMAC:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_NULL:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
		break;
	}
}

static inline uint32_t
inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t *pofs, uint32_t plen, void *iv)
{
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivp;
	uint32_t clen;

	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
		*pofs + sizeof(struct rte_esp_hdr));
	clen = 0;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_GMAC:
		gcm = (struct aead_gcm_iv *)iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = (struct aead_ccm_iv *)iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = (struct aead_chacha20_poly1305_iv *)iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		copy_iv(iv, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = (struct aesctr_cnt_blk *)iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen - sa->ctp.auth.length;
	return clen;
}

/*
 * Helper function for prepare() to deal with the situation when
 * the ICV is spread across two segments. Tries to move the ICV
 * completely into the last segment.
 */
static struct rte_mbuf *
move_icv(struct rte_mbuf *ml, uint32_t ofs)
{
	uint32_t n;
	struct rte_mbuf *ms;
	const void *prev;
	void *new;

	ms = ml->next;
	n = ml->data_len - ofs;

	prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
	new = rte_pktmbuf_prepend(ms, n);
	if (new == NULL)
		return NULL;

	/* move n ICV bytes from ml into ms */
	rte_memcpy(new, prev, n);
	ml->data_len -= n;

	return ms;
}
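
/*
 * Illustration (not part of the original sources) of what move_icv()
 * does. Assume a 16-byte ICV whose first n = 6 bytes sit at the end of
 * segment ml, with the remaining 10 bytes in the next segment ms:
 *
 *	before:  ml: [ ... payload | icv[0..5] ]  ms: [ icv[6..15] ]
 *	after:   ml: [ ... payload ]              ms: [ icv[0..15] ]
 *
 * The n bytes are prepended to ms, which only works if ms has at least
 * n bytes of headroom; otherwise rte_pktmbuf_prepend() returns NULL
 * and the caller drops the packet with -ENOSPC.
 */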

/*
 * for pure cryptodev (lookaside none), depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	struct aead_gcm_aad *aad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);

	/*
	 * fill AAD fields, if any (AAD is placed right after the ICV);
	 * supported AEAD algorithms: AES-GCM, AES-CCM and
	 * CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha_aad = (struct aead_chacha20_poly1305_aad *)
			    (icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha_aad,
						sa->spi, sqc, IS_ESN(sa));
		}
		break;
	}
}

static inline int
inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
	struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
{
	int32_t rc;
	uint64_t sqn;
	struct rte_esp_hdr *esph;

	esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);

	/*
	 * retrieve and reconstruct the SQN, convert it back into
	 * network byte order, then check it against the replay window.
	 */
	sqn = rte_be_to_cpu_32(esph->seq);
	if (IS_ESN(sa))
		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
	*sqc = rte_cpu_to_be_64(sqn);

	/* check IPsec window */
	rc = esn_inb_check_sqn(rsn, sa, sqn);

	return rc;
}
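
/*
 * Worked example of ESN reconstruction as performed by
 * reconstruct_esn() above (illustrative note, not in the original
 * sources; the algorithm lives in ipsec_sqn.h and follows RFC 4303
 * Appendix A). Assume the last seen 64-bit SQN is
 * 0x00000001_00000100 (hi = 1, lo = 0x100) and the replay window size
 * is 64, so the bottom of the window is 0x100 - 64 + 1 = 0xC1. A
 * packet arrives carrying low-order bits 0x10; since 0x10 < 0xC1, the
 * low 32 bits are assumed to have wrapped and the reconstructed SQN
 * becomes 0x00000002_00000010.
 */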

/* prepare packet for upcoming processing */
static inline int32_t
inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t hlen, union sym_op_data *icv)
{
	uint32_t clen, icv_len, icv_ofs, plen;
	struct rte_mbuf *ml;

	/* start packet manipulation */
	plen = mb->pkt_len;
	plen = plen - hlen;

	/* check that packet has a valid length */
	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
		return -EBADMSG;

	/* find ICV location */
	icv_len = sa->icv_len;
	icv_ofs = mb->pkt_len - icv_len;

	ml = mbuf_get_seg_ofs(mb, &icv_ofs);

	/*
	 * if the ICV is spread across two segments, then try to
	 * move it completely into the last segment.
	 */
	if (ml->data_len < icv_ofs + icv_len) {

		ml = move_icv(ml, icv_ofs);
		if (ml == NULL)
			return -ENOSPC;

		/* new ICV location */
		icv_ofs = 0;
	}

	icv_ofs += sa->sqh_len;

	/*
	 * We have to allocate space for the AAD somewhere;
	 * right now just use the free trailing space of the last segment.
	 * It would probably be more convenient to reserve space for the
	 * AAD inside rte_crypto_op itself
	 * (as is already done for the IV).
	 */
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

	/*
	 * If ESN is used, then the high-order 32 bits are also used in the
	 * ICV calculation but are not transmitted. Update the packet length
	 * to be consistent with the auth data length and offset; this will
	 * be subtracted from the packet length in post-crypto processing.
	 */
	mb->pkt_len += sa->sqh_len;
	ml->data_len += sa->sqh_len;

	return plen;
}
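
/*
 * For reference (illustrative note, not in the original sources), the
 * packet layout inb_prepare() works on, as per RFC 4303, plus the
 * extra bytes this library stages in the tailroom:
 *
 *	|IP|ESP hdr|IV|payload|padding|pad_len|next_hdr|[SQN.hi]|ICV|[AAD]|
 *
 * SQN.hi (ESN, lookaside only; inserted between the ESP trailer and
 * the ICV by inb_pkt_xprepare()) and the AAD (placed right after the
 * ICV) are never part of the wire format: they exist only so the
 * crypto device can cover them in the ICV/AEAD computation, and are
 * accounted for by the pkt_len/data_len adjustment above.
 */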

static inline int32_t
inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
	struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
{
	int rc;
	rte_be64_t sqn;

	rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
	if (rc != 0)
		return rc;

	rc = inb_prepare(sa, mb, hlen, icv);
	if (rc < 0)
		return rc;

	inb_pkt_xprepare(sa, sqn, icv);
	return rc;
}

/*
 * setup/update packets and crypto ops for the ESP inbound case.
 */
uint16_t
esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, hl;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;
	rsn = rsn_acquire(sa);

	k = 0;
	for (i = 0; i != num; i++) {

		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
		if (rc >= 0) {
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
			k++;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	rsn_release(sa, rsn);

	/* move mbufs that were not prepared beyond the good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	return k;
}
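
/*
 * Illustrative usage sketch (not part of the original sources) for the
 * lookaside path that esp_inb_pkt_prepare() implements; it is reached
 * through the public rte_ipsec API. Error handling is omitted and
 * `dev_id`, `qp`, `n`, `BURST` and the session `ss` are assumed to be
 * set up by the application:
 *
 *	struct rte_crypto_op *cop[BURST];
 *	struct rte_mbuf *pkt[BURST];
 *	uint16_t k, m;
 *
 *	// dispatches to esp_inb_pkt_prepare() for an inbound ESP SA
 *	k = rte_ipsec_pkt_crypto_prepare(ss, pkt, cop, n);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qp, cop, k);
 *	...
 *	m = rte_cryptodev_dequeue_burst(dev_id, qp, cop, k);
 *	// regroup mbufs from the dequeued ops (e.g. with
 *	// rte_ipsec_pkt_crypto_group()), then finalize:
 *	m = rte_ipsec_pkt_process(ss, pkt, m);
 */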

/*
 * Start processing an inbound packet.
 * This is the common part for both tunnel and transport modes.
 * Extract information that will be needed later from mbuf metadata and
 * actual packet data:
 * - mbuf for packet's last segment
 * - length of the L2/L3 headers
 * - ESP tail structure
 */
static inline void
process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
	struct rte_esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
{
	const struct rte_esp_tail *pt;
	uint32_t ofs;

	ofs = mb->pkt_len - tlen;
	hlen[0] = mb->l2_len + mb->l3_len;
	ml[0] = mbuf_get_seg_ofs(mb, &ofs);
	pt = rte_pktmbuf_mtod_offset(ml[0], const struct rte_esp_tail *, ofs);
	tofs[0] = ofs;
	espt[0] = pt[0];
}

/*
 * Helper function to check pad bytes values.
 * Note that pad bytes can be spread across multiple segments.
 */
static inline int
check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
{
	const uint8_t *pd;
	uint32_t k, n;

	for (n = 0; n != len; n += k, mb = mb->next) {
		k = mb->data_len - ofs;
		k = RTE_MIN(k, len - n);
		pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
		if (memcmp(pd, esp_pad_bytes + n, k) != 0)
			break;
		ofs = 0;
	}

	return len - n;
}
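
/*
 * Note (illustrative, not in the original sources): esp_pad_bytes is
 * the fixed monotonic padding pattern {1, 2, 3, ...} defined in pad.h,
 * as recommended by RFC 4303 for default padding. E.g. a packet with
 * pad_len == 4 must carry the bytes 01 02 03 04 immediately before
 * the pad_len/next_hdr fields; check_pad_bytes() returns 0 when the
 * whole pattern matches, even if it straddles segment boundaries.
 */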

/*
 * packet checks for transport mode:
 * - no reported IPsec related failures in ol_flags
 * - tail and header lengths are valid
 * - padding bytes are valid
 * Apart from the checks, the function also updates the tail offset
 * (and segment) to account for the pad length.
 */
static inline int32_t
trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
	uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen)
{
	if ((mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) != 0 ||
			tlen + hlen > mb->pkt_len)
		return -EBADMSG;

	/* padding bytes are spread across multiple segments */
	if (tofs[0] < espt.pad_len) {
		tofs[0] = mb->pkt_len - tlen;
		ml[0] = mbuf_get_seg_ofs(mb, tofs);
	} else
		tofs[0] -= espt.pad_len;

	return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
}

/*
 * packet checks for tunnel mode:
 * - same as for transport mode
 * - ESP tail next_proto matches the value expected for that SA
 */
static inline int32_t
tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
	uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen,
	uint8_t proto)
{
	return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
		espt.next_proto != proto);
}

/*
 * step two for tunnel mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV, also if needed - L2/L3 headers
 *   (controlled by *adj* value)
 */
static inline void *
tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
{
	const struct rte_esp_hdr *ph;

	/* read SQN value */
	ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
	sqn[0] = ph->seq;

	/* cut off ICV, ESP tail and padding bytes */
	mbuf_cut_seg_ofs(mb, ml, tofs, tlen);

	/* cut off L2/L3 headers, ESP header and IV */
	return rte_pktmbuf_adj(mb, adj);
}

/*
 * step two for transport mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV
 * - move L2/L3 header to fill the gap after ESP header removal
 */
static inline void *
trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
{
	char *np, *op;

	/* get start of the packet before modifications */
	op = rte_pktmbuf_mtod(mb, char *);

	/* cut off ESP header and IV */
	np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);

	/* move header bytes to fill the gap after ESP header removal */
	remove_esph(np, op, hlen);
	return np;
}

/*
 * step three for transport mode:
 * update mbuf metadata:
 * - packet_type
 * - ol_flags
 */
static inline void
trs_process_step3(struct rte_mbuf *mb)
{
	/* reset mbuf packet type */
	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

	/* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
}

/*
 * step three for tunnel mode:
 * update mbuf metadata:
 * - packet_type
 * - ol_flags
 * - tx_offload
 */
static inline void
tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
{
	/* reset mbuf metadata: L2/L3 len, packet type */
	mb->packet_type = RTE_PTYPE_UNKNOWN;
	mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;

	/* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
}

/*
 * *process* function for tunnel packets
 */
static inline uint16_t
tun_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	    uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
{
	uint32_t adj, i, k, tl, bytes;
	uint32_t hl[num], to[num];
	struct rte_esp_tail espt[num];
	struct rte_mbuf *ml[num];
	const void *outh;
	void *inh;

	/*
	 * remove icv, esp trailer and high-order
	 * 32 bits of esn from packet length
	 */
	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
	const uint32_t cofs = sa->ctp.cipher.offset;

	/*
	 * to minimize stalls due to load latency,
	 * read mbufs metadata and esp tail first.
	 */
	for (i = 0; i != num; i++)
		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);

	k = 0;
	bytes = 0;
	for (i = 0; i != num; i++) {

		adj = hl[i] + cofs;
		tl = tlen + espt[i].pad_len;

		/* check that packet is valid */
		if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
					sa->proto) == 0) {

			outh = rte_pktmbuf_mtod_offset(mb[i], uint8_t *,
					mb[i]->l2_len);

			/* modify packet's layout */
			inh = tun_process_step2(mb[i], ml[i], hl[i], adj,
					to[i], tl, sqn + k);

			/* update inner ip header */
			update_tun_inb_l3hdr(sa, outh, inh);

			/* update mbuf's metadata */
			tun_process_step3(mb[i], sa->tx_offload.msk,
				sa->tx_offload.val);
			k++;
			bytes += mb[i]->pkt_len;
		} else
			dr[i - k] = i;
	}

	sa->statistics.count += k;
	sa->statistics.bytes += bytes;
	return k;
}

/*
 * *process* function for transport packets
 */
static inline uint16_t
trs_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
{
	char *np;
	uint32_t i, k, l2, tl, bytes;
	uint32_t hl[num], to[num];
	struct rte_esp_tail espt[num];
	struct rte_mbuf *ml[num];

	/*
	 * remove icv, esp trailer and high-order
	 * 32 bits of esn from packet length
	 */
	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
	const uint32_t cofs = sa->ctp.cipher.offset;

	/*
	 * to minimize stalls due to load latency,
	 * read mbufs metadata and esp tail first.
	 */
	for (i = 0; i != num; i++)
		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);

	k = 0;
	bytes = 0;
	for (i = 0; i != num; i++) {

		tl = tlen + espt[i].pad_len;
		l2 = mb[i]->l2_len;

		/* check that packet is valid */
		if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
				hl[i] + cofs, tl) == 0) {

			/* modify packet's layout */
			np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
				to[i], tl, sqn + k);
			update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
				l2, hl[i] - l2, espt[i].next_proto);

			/* update mbuf's metadata */
			trs_process_step3(mb[i]);
			k++;
			bytes += mb[i]->pkt_len;
		} else
			dr[i - k] = i;
	}

	sa->statistics.count += k;
	sa->statistics.bytes += bytes;
	return k;
}

/*
 * for a group of ESP inbound packets, perform SQN check and update.
 */
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
	uint32_t dr[], uint16_t num)
{
	uint32_t i, k;
	struct replay_sqn *rsn;

	/* replay not enabled */
	if (sa->replay.win_sz == 0)
		return num;

	rsn = rsn_update_start(sa);

	k = 0;
	for (i = 0; i != num; i++) {
		if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
			k++;
		else
			dr[i - k] = i;
	}

	rsn_update_finish(sa, rsn);
	return k;
}
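
/*
 * Design note (not in the original sources): replay window handling is
 * deliberately split into two phases. inb_get_sqn() only *checks* the
 * window at prepare time (under the reader lock for thread-safe SAs),
 * while the *update* above runs under the writer lock
 * (rsn_update_start()/rsn_update_finish()) and only for packets whose
 * ICV was successfully verified. Updating the window before
 * authentication would let an attacker advance it with forged packets.
 */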

/*
 * process group of ESP inbound packets.
 */
static inline uint16_t
esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	uint16_t num, uint8_t sqh_len, esp_inb_process_t process)
{
	uint32_t k, n;
	uint32_t sqn[num];
	uint32_t dr[num];

	/* process packets, extract seq numbers */
	k = process(sa, mb, sqn, dr, num, sqh_len);

	/* handle unprocessed mbufs */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	/* update SQN and replay window */
	n = esp_inb_rsn_update(sa, sqn, dr, k);

	/* handle mbufs with wrong SQN */
	if (n != k && n != 0)
		move_bad_mbufs(mb, dr, k, k - n);

	if (n != num)
		rte_errno = EBADMSG;

	return n;
}

/*
 * Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO
 * (synchronous mode).
 */
uint16_t
cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k;
	struct rte_ipsec_sa *sa;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	/* grab rsn lock */
	rsn = rsn_acquire(sa);

	/* do preparation for all packets */
	for (i = 0, k = 0; i != num; i++) {

		/* calculate ESP header offset */
		l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;

		/* prepare ESP packet for processing */
		rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
		if (rc >= 0) {
			/* get encrypted data offset and length */
			clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
				l4ofs + k, rc, ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* release rsn lock */
	rsn_release(sa, rsn);

	/* move mbufs that were not prepared beyond the good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}
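
/*
 * Illustrative usage sketch (not part of the original sources) for the
 * synchronous CPU-crypto path above; `n`, `BURST` and a session `ss`
 * created with RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO are assumed:
 *
 *	struct rte_mbuf *pkt[BURST];
 *	uint16_t k;
 *
 *	// dispatches to cpu_inb_pkt_prepare(): crypto/auth runs
 *	// synchronously on the calling lcore, no cryptodev queueing
 *	k = rte_ipsec_pkt_cpu_prepare(ss, pkt, n);
 *	// strip ESP headers/trailers, update the replay window
 *	k = rte_ipsec_pkt_process(ss, pkt, k);
 */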

/*
 * process group of ESP inbound tunnel packets.
 */
uint16_t
esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_ipsec_sa *sa = ss->sa;

	return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process);
}

uint16_t
inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process);
}

/*
 * process group of ESP inbound transport packets.
 */
uint16_t
esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_ipsec_sa *sa = ss->sa;

	return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process);
}

uint16_t
inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process);
}
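
/*
 * Note (not in the original sources): the inline_inb_* variants above
 * pass sqh_len == 0 because with inline crypto the hardware has
 * already verified the ICV, so no high-order ESN bytes were ever
 * staged in the packet and nothing extra has to be trimmed. For such
 * sessions the application skips the prepare stage entirely and calls
 * rte_ipsec_pkt_process() directly on packets received with
 * RTE_MBUF_F_RX_SEC_OFFLOAD set.
 */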
851