/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

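/*
 * Process callback implemented by tun_process() and trs_process()
 * below: returns the number of successfully processed packets and
 * records the indexes of failed ones in dr[].
 */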
typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
	uint8_t sqh_len);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by inb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
	sop->cipher.data.length = plen - sa->ctp.cipher.length;
	sop->auth.data.offset = pofs + sa->ctp.auth.offset;
	sop->auth.data.length = plen - sa->ctp.auth.length;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}
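
/*
 * Note: the sa->ctp offsets/lengths used above are precomputed at SA
 * initialization time (see sa.c), so per-packet setup here is reduced
 * to plain additions and subtractions.
 */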

/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by inb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
	sop->aead.data.length = plen - sa->ctp.cipher.length;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}
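
/*
 * For AEAD the digest and AAD share the area reserved past the ICV in
 * the last segment: inb_pkt_xprepare() below writes the AAD at
 * icv->va + sa->icv_len, which is exactly where sop->aead.aad.data
 * points.
 */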

/*
 * setup crypto op and crypto sym op for ESP inbound packet.
 */
static inline void
inb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivc, *ivp;
	uint32_t algo;

	algo = sa->algo_type;
	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
		pofs + sizeof(struct rte_esp_hdr));

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_GCM:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* copy iv from the input packet to the cop */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		copy_iv(ivc, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_CTR:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_NULL:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
		break;
	}
}
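
/*
 * Expected inbound ESP packet layout (RFC 4303), with pofs being the
 * offset of the ESP header from the start of the packet:
 *
 * |<--- pofs --->|
 * +--------------+---------+----+---------+-----+----------+-----+
 * | L2/L3 hdrs   | ESP hdr | IV | payload | pad | ESP tail | ICV |
 * +--------------+---------+----+---------+-----+----------+-----+
 */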

static inline uint32_t
inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t *pofs, uint32_t plen, void *iv)
{
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivp;
	uint32_t clen;

	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
		*pofs + sizeof(struct rte_esp_hdr));
	clen = 0;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = (struct aead_gcm_iv *)iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		copy_iv(iv, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = (struct aesctr_cnt_blk *)iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen - sa->ctp.auth.length;
	return clen;
}

/*
 * Helper function for prepare() to deal with the situation when
 * the ICV is spread across two segments. Tries to move the ICV
 * completely into the last segment.
 */
static struct rte_mbuf *
move_icv(struct rte_mbuf *ml, uint32_t ofs)
{
	uint32_t n;
	struct rte_mbuf *ms;
	const void *prev;
	void *new;

	ms = ml->next;
	n = ml->data_len - ofs;

	prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
	new = rte_pktmbuf_prepend(ms, n);
	if (new == NULL)
		return NULL;

	/* move n ICV bytes from ml into ms */
	rte_memcpy(new, prev, n);
	ml->data_len -= n;

	return ms;
}
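
/*
 * Illustration: with a 16B ICV split 6/10 between the last two
 * segments, move_icv() prepends 6 bytes to the following segment and
 * shrinks the previous one, making the whole ICV contiguous:
 *
 *   before: [ ... payload | ICV[0..5] ]  [ ICV[6..15] ]
 *   after:  [ ... payload ]              [ ICV[0..15] ]
 */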

/*
 * for pure cryptodev (lookaside none), depending on SA settings
 * we might have to write some extra data to the packet.
 */
static inline void
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);

	/*
	 * fill AAD fields, if any (aad fields are placed after icv),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
}

static inline int
inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
	struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
{
	int32_t rc;
	uint64_t sqn;
	struct rte_esp_hdr *esph;

	esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);

	/*
	 * retrieve and reconstruct the SQN, convert it back into
	 * network byte order, then check it against the replay window.
	 */
	sqn = rte_be_to_cpu_32(esph->seq);
	if (IS_ESN(sa))
		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
	*sqc = rte_cpu_to_be_64(sqn);

	/* check IPsec window */
	rc = esn_inb_check_sqn(rsn, sa, sqn);

	return rc;
}
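
/*
 * ESN reconstruction example (per RFC 4303 Appendix A): with the
 * window top at 0x1_00000100 and window size 64, an incoming
 * low-order seq of 0xF0 reconstructs to 0x1_000000F0, while 0x05 is
 * assumed to belong to the next wrap and becomes 0x2_00000005.
 */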

/* prepare packet for upcoming processing */
static inline int32_t
inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t hlen, union sym_op_data *icv)
{
	uint32_t clen, icv_len, icv_ofs, plen;
	struct rte_mbuf *ml;

	/* start packet manipulation */
	plen = mb->pkt_len;
	plen = plen - hlen;

	/* check that packet has a valid length */
	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
		return -EBADMSG;

	/* find ICV location */
	icv_len = sa->icv_len;
	icv_ofs = mb->pkt_len - icv_len;

	ml = mbuf_get_seg_ofs(mb, &icv_ofs);

	/*
	 * if the ICV is spread across two segments, then try to
	 * move it completely into the last segment.
	 */
	if (ml->data_len < icv_ofs + icv_len) {

		ml = move_icv(ml, icv_ofs);
		if (ml == NULL)
			return -ENOSPC;

		/* new ICV location */
		icv_ofs = 0;
	}

	icv_ofs += sa->sqh_len;

	/*
	 * we have to allocate space for AAD somewhere;
	 * right now - just use free trailing space at the last segment.
	 * It would probably be more convenient to reserve space for AAD
	 * inside rte_crypto_op itself
	 * (space for the IV is already reserved inside the cop).
	 */
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

	/*
	 * if ESN is used, then the high-order 32 bits are also used in
	 * ICV calculation but are not transmitted; update the packet
	 * length to be consistent with auth data length and offset.
	 * This will be subtracted from the packet length in post-crypto
	 * processing.
	 */
	mb->pkt_len += sa->sqh_len;
	ml->data_len += sa->sqh_len;

	return plen;
}

static inline int32_t
inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
	struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
{
	int rc;
	rte_be64_t sqn;

	rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
	if (rc != 0)
		return rc;

	rc = inb_prepare(sa, mb, hlen, icv);
	if (rc < 0)
		return rc;

	inb_pkt_xprepare(sa, sqn, icv);
	return rc;
}

/*
 * setup/update packets and crypto ops for ESP inbound case.
 */
uint16_t
esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, hl;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;
	rsn = rsn_acquire(sa);

	k = 0;
	for (i = 0; i != num; i++) {

		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
		if (rc >= 0) {
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
			k++;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	rsn_release(sa, rsn);

	/* move mbufs that were not prepared beyond the good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	return k;
}
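
/*
 * A minimal usage sketch (not part of this file): with a configured
 * lookaside-none session 'ss' the application calls
 * rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, n), which dispatches
 * here, and then enqueues the k returned crypto ops via
 * rte_cryptodev_enqueue_burst().
 */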

/*
 * Start of inbound packet processing.
 * This is the common part for both tunnel and transport modes.
 * Extract information that will be needed later from mbuf metadata and
 * actual packet data:
 * - mbuf for packet's last segment
 * - length of the L2/L3 headers
 * - esp tail structure
 */
static inline void
process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
	struct rte_esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
{
	const struct rte_esp_tail *pt;
	uint32_t ofs;

	ofs = mb->pkt_len - tlen;
	hlen[0] = mb->l2_len + mb->l3_len;
	ml[0] = mbuf_get_seg_ofs(mb, &ofs);
	pt = rte_pktmbuf_mtod_offset(ml[0], const struct rte_esp_tail *, ofs);
	tofs[0] = ofs;
	espt[0] = pt[0];
}

/*
 * Helper function to check pad bytes values.
 * Note that pad bytes can be spread across multiple segments.
 */
static inline int
check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
{
	const uint8_t *pd;
	uint32_t k, n;

	for (n = 0; n != len; n += k, mb = mb->next) {
		k = mb->data_len - ofs;
		k = RTE_MIN(k, len - n);
		pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
		if (memcmp(pd, esp_pad_bytes + n, k) != 0)
			break;
		ofs = 0;
	}

	return len - n;
}
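
/*
 * RFC 4303 mandates monotonically increasing pad byte values
 * (1, 2, 3, ...); esp_pad_bytes[] (see pad.h) holds that canonical
 * sequence, so a straight memcmp() against it validates the padding.
 */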

/*
 * packet checks for transport mode:
 * - no reported IPsec related failures in ol_flags
 * - tail and header lengths are valid
 * - padding bytes are valid
 * apart from the checks, the function also updates the tail offset
 * (and segment), taking the pad length into account.
 */
static inline int32_t
trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
	uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen)
{
	if ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
			tlen + hlen > mb->pkt_len)
		return -EBADMSG;

	/* padding bytes are spread over multiple segments */
	if (tofs[0] < espt.pad_len) {
		tofs[0] = mb->pkt_len - tlen;
		ml[0] = mbuf_get_seg_ofs(mb, tofs);
	} else
		tofs[0] -= espt.pad_len;

	return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
}

/*
 * packet checks for tunnel mode:
 * - same as for transport mode
 * - esp tail next_proto contains the value expected for that SA
 */
static inline int32_t
tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
	uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen,
	uint8_t proto)
{
	return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
		espt.next_proto != proto);
}

/*
 * step two for tunnel mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV, and, if needed, L2/L3 headers too
 *   (controlled by the *adj* value)
 */
static inline void *
tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
{
	const struct rte_esp_hdr *ph;

	/* read SQN value */
	ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
	sqn[0] = ph->seq;

	/* cut off ICV, ESP tail and padding bytes */
	mbuf_cut_seg_ofs(mb, ml, tofs, tlen);

	/* cut off L2/L3 headers, ESP header and IV */
	return rte_pktmbuf_adj(mb, adj);
}

/*
 * step two for transport mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV
 * - move L2/L3 header to fill the gap after ESP header removal
 */
static inline void *
trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
{
	char *np, *op;

	/* get start of the packet before modifications */
	op = rte_pktmbuf_mtod(mb, char *);

	/* cut off ESP header and IV */
	np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);

	/* move header bytes to fill the gap after ESP header removal */
	remove_esph(np, op, hlen);
	return np;
}

/*
 * step three for transport mode:
 * update mbuf metadata:
 * - packet_type
 * - ol_flags
 */
static inline void
trs_process_step3(struct rte_mbuf *mb)
{
	/* reset mbuf packet type */
	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
}

/*
 * step three for tunnel mode:
 * update mbuf metadata:
 * - packet_type
 * - ol_flags
 * - tx_offload
 */
static inline void
tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
{
	/* reset mbuf metadata: L2/L3 len, packet type */
	mb->packet_type = RTE_PTYPE_UNKNOWN;
	mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
}

/*
 * *process* function for tunnel packets
 */
static inline uint16_t
tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	    uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
{
	uint32_t adj, i, k, tl;
	uint32_t hl[num], to[num];
	struct rte_esp_tail espt[num];
	struct rte_mbuf *ml[num];
	const void *outh;
	void *inh;

	/*
	 * remove icv, esp trailer and high-order
	 * 32 bits of esn from packet length
	 */
	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
	const uint32_t cofs = sa->ctp.cipher.offset;

	/*
	 * to minimize stalls due to load latency,
	 * read mbufs metadata and esp tail first.
	 */
	for (i = 0; i != num; i++)
		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);

	k = 0;
	for (i = 0; i != num; i++) {

		adj = hl[i] + cofs;
		tl = tlen + espt[i].pad_len;

		/* check that packet is valid */
		if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
					sa->proto) == 0) {

			outh = rte_pktmbuf_mtod_offset(mb[i], uint8_t *,
					mb[i]->l2_len);

			/* modify packet's layout */
			inh = tun_process_step2(mb[i], ml[i], hl[i], adj,
					to[i], tl, sqn + k);

			/* update inner ip header */
			update_tun_inb_l3hdr(sa, outh, inh);

			/* update mbuf's metadata */
			tun_process_step3(mb[i], sa->tx_offload.msk,
				sa->tx_offload.val);
			k++;
		} else
			dr[i - k] = i;
	}

	return k;
}

/*
 * *process* function for transport packets
 */
static inline uint16_t
trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
{
	char *np;
	uint32_t i, k, l2, tl;
	uint32_t hl[num], to[num];
	struct rte_esp_tail espt[num];
	struct rte_mbuf *ml[num];

	/*
	 * remove icv, esp trailer and high-order
	 * 32 bits of esn from packet length
	 */
	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
	const uint32_t cofs = sa->ctp.cipher.offset;

	/*
	 * to minimize stalls due to load latency,
	 * read mbufs metadata and esp tail first.
	 */
	for (i = 0; i != num; i++)
		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);

	k = 0;
	for (i = 0; i != num; i++) {

		tl = tlen + espt[i].pad_len;
		l2 = mb[i]->l2_len;

		/* check that packet is valid */
		if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
				hl[i] + cofs, tl) == 0) {

			/* modify packet's layout */
			np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
				to[i], tl, sqn + k);
			update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
				l2, hl[i] - l2, espt[i].next_proto);

			/* update mbuf's metadata */
			trs_process_step3(mb[i]);
			k++;
		} else
			dr[i - k] = i;
	}

	return k;
}

/*
 * for a group of ESP inbound packets perform SQN check and update.
 */
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
	uint32_t dr[], uint16_t num)
{
	uint32_t i, k;
	struct replay_sqn *rsn;

	/* replay not enabled */
	if (sa->replay.win_sz == 0)
		return num;

	rsn = rsn_update_start(sa);

	k = 0;
	for (i = 0; i != num; i++) {
		if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
			k++;
		else
			dr[i - k] = i;
	}

	rsn_update_finish(sa, rsn);
	return k;
}
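
/*
 * Note the two-phase design: packets are first parsed and verified
 * without modifying the replay window, and only the SQNs of packets
 * that passed all checks are folded into the window above, bracketed
 * by rsn_update_start()/rsn_update_finish() (in MT-safe SA setups
 * these take the writer side of the RSN lock).
 */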

/*
 * process group of ESP inbound packets.
 */
static inline uint16_t
esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	uint16_t num, uint8_t sqh_len, esp_inb_process_t process)
{
	uint32_t k, n;
	uint32_t sqn[num];
	uint32_t dr[num];

	/* process packets, extract seq numbers */
	k = process(sa, mb, sqn, dr, num, sqh_len);

	/* handle unprocessed mbufs */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	/* update SQN and replay window */
	n = esp_inb_rsn_update(sa, sqn, dr, k);

	/* handle mbufs with wrong SQN */
	if (n != k && n != 0)
		move_bad_mbufs(mb, dr, k, k - n);

	if (n != num)
		rte_errno = EBADMSG;

	return n;
}

/*
 * Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO
 * (synchronous mode).
 */
uint16_t
cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k;
	struct rte_ipsec_sa *sa;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	/* grab rsn lock */
	rsn = rsn_acquire(sa);

	/* do preparation for all packets */
	for (i = 0, k = 0; i != num; i++) {

		/* calculate ESP header offset */
		l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;

		/* prepare ESP packet for processing */
		rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
		if (rc >= 0) {
			/* get encrypted data offset and length */
			clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
				l4ofs + k, rc, ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* release rsn lock */
	rsn_release(sa, rsn);

	/* move mbufs that were not prepared beyond the good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}
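
/*
 * Unlike esp_inb_pkt_prepare() above, this path completes the crypto
 * and auth work synchronously inside cpu_crypto_bulk(), so on return
 * the packets are ready for the *_pkt_process() routines below.
 */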

/*
 * process group of ESP inbound tunnel packets.
 */
uint16_t
esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_ipsec_sa *sa = ss->sa;

	return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process);
}

uint16_t
inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process);
}

/*
 * process group of ESP inbound transport packets.
 */
uint16_t
esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_ipsec_sa *sa = ss->sa;

	return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process);
}

uint16_t
inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process);
}