xref: /dpdk/lib/ipsec/esp_inb.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2018-2020 Intel Corporation
399a2dd95SBruce Richardson  */
499a2dd95SBruce Richardson 
599a2dd95SBruce Richardson #include <rte_ipsec.h>
699a2dd95SBruce Richardson #include <rte_esp.h>
799a2dd95SBruce Richardson #include <rte_ip.h>
899a2dd95SBruce Richardson #include <rte_errno.h>
999a2dd95SBruce Richardson #include <rte_cryptodev.h>
1099a2dd95SBruce Richardson 
1199a2dd95SBruce Richardson #include "sa.h"
1299a2dd95SBruce Richardson #include "ipsec_sqn.h"
1399a2dd95SBruce Richardson #include "crypto.h"
1499a2dd95SBruce Richardson #include "iph.h"
1599a2dd95SBruce Richardson #include "misc.h"
1699a2dd95SBruce Richardson #include "pad.h"
1799a2dd95SBruce Richardson 
1868977baaSRadu Nicolau typedef uint16_t (*esp_inb_process_t)(struct rte_ipsec_sa *sa,
1999a2dd95SBruce Richardson 	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
2099a2dd95SBruce Richardson 	uint8_t sqh_len);
2199a2dd95SBruce Richardson 
2299a2dd95SBruce Richardson /*
2399a2dd95SBruce Richardson  * helper function to fill crypto_sym op for cipher+auth algorithms.
2499a2dd95SBruce Richardson  * used by inb_cop_prepare(), see below.
2599a2dd95SBruce Richardson  */
2699a2dd95SBruce Richardson static inline void
2799a2dd95SBruce Richardson sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
2899a2dd95SBruce Richardson 	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
2999a2dd95SBruce Richardson 	uint32_t pofs, uint32_t plen)
3099a2dd95SBruce Richardson {
3199a2dd95SBruce Richardson 	sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
3299a2dd95SBruce Richardson 	sop->cipher.data.length = plen - sa->ctp.cipher.length;
3399a2dd95SBruce Richardson 	sop->auth.data.offset = pofs + sa->ctp.auth.offset;
3499a2dd95SBruce Richardson 	sop->auth.data.length = plen - sa->ctp.auth.length;
3599a2dd95SBruce Richardson 	sop->auth.digest.data = icv->va;
3699a2dd95SBruce Richardson 	sop->auth.digest.phys_addr = icv->pa;
3799a2dd95SBruce Richardson }
3899a2dd95SBruce Richardson 
3999a2dd95SBruce Richardson /*
4099a2dd95SBruce Richardson  * helper function to fill crypto_sym op for aead algorithms
4199a2dd95SBruce Richardson  * used by inb_cop_prepare(), see below.
4299a2dd95SBruce Richardson  */
4399a2dd95SBruce Richardson static inline void
4499a2dd95SBruce Richardson sop_aead_prepare(struct rte_crypto_sym_op *sop,
4599a2dd95SBruce Richardson 	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
4699a2dd95SBruce Richardson 	uint32_t pofs, uint32_t plen)
4799a2dd95SBruce Richardson {
4899a2dd95SBruce Richardson 	sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
4999a2dd95SBruce Richardson 	sop->aead.data.length = plen - sa->ctp.cipher.length;
5099a2dd95SBruce Richardson 	sop->aead.digest.data = icv->va;
5199a2dd95SBruce Richardson 	sop->aead.digest.phys_addr = icv->pa;
5299a2dd95SBruce Richardson 	sop->aead.aad.data = icv->va + sa->icv_len;
5399a2dd95SBruce Richardson 	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
5499a2dd95SBruce Richardson }
5599a2dd95SBruce Richardson 
/*
 * setup crypto op and crypto sym op for ESP inbound packet.
 * Per algorithm:
 *  - AEAD (GCM/CCM/CHACHA20-POLY1305): fill aead fields of the sym op and
 *    build the algorithm-specific IV (packet IV + SA salt) inside the cop;
 *  - cipher+auth (CBC/CTR/GMAC/NULL): fill cipher and auth fields; for CBC
 *    the packet IV is copied verbatim, for CTR/GMAC an IV/counter block is
 *    built from packet IV + SA salt.
 */
static inline void
inb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivc, *ivp;
	uint32_t algo;

	algo = sa->algo_type;
	/* IV as transmitted in the packet, right after the ESP header */
	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
		pofs + sizeof(struct rte_esp_hdr));

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_GCM:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		sop_aead_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
				struct aead_chacha20_poly1305_iv *,
				sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* copy iv from the input packet to the cop */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		copy_iv(ivc, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_GMAC:
		/* GMAC is auth-only but reuses the GCM IV layout */
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_NULL:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
		break;
	}
}
13599a2dd95SBruce Richardson 
13699a2dd95SBruce Richardson static inline uint32_t
13799a2dd95SBruce Richardson inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
13899a2dd95SBruce Richardson 	uint32_t *pofs, uint32_t plen, void *iv)
13999a2dd95SBruce Richardson {
14099a2dd95SBruce Richardson 	struct aead_gcm_iv *gcm;
141c99d2619SRadu Nicolau 	struct aead_ccm_iv *ccm;
142c99d2619SRadu Nicolau 	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
14399a2dd95SBruce Richardson 	struct aesctr_cnt_blk *ctr;
14499a2dd95SBruce Richardson 	uint64_t *ivp;
14599a2dd95SBruce Richardson 	uint32_t clen;
14699a2dd95SBruce Richardson 
14799a2dd95SBruce Richardson 	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
14899a2dd95SBruce Richardson 		*pofs + sizeof(struct rte_esp_hdr));
14999a2dd95SBruce Richardson 	clen = 0;
15099a2dd95SBruce Richardson 
15199a2dd95SBruce Richardson 	switch (sa->algo_type) {
15299a2dd95SBruce Richardson 	case ALGO_TYPE_AES_GCM:
153c99d2619SRadu Nicolau 	case ALGO_TYPE_AES_GMAC:
15499a2dd95SBruce Richardson 		gcm = (struct aead_gcm_iv *)iv;
15599a2dd95SBruce Richardson 		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
15699a2dd95SBruce Richardson 		break;
157c99d2619SRadu Nicolau 	case ALGO_TYPE_AES_CCM:
158c99d2619SRadu Nicolau 		ccm = (struct aead_ccm_iv *)iv;
159c99d2619SRadu Nicolau 		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
160c99d2619SRadu Nicolau 		break;
161c99d2619SRadu Nicolau 	case ALGO_TYPE_CHACHA20_POLY1305:
162c99d2619SRadu Nicolau 		chacha20_poly1305 = (struct aead_chacha20_poly1305_iv *)iv;
163c99d2619SRadu Nicolau 		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
164c99d2619SRadu Nicolau 					       ivp[0], sa->salt);
165c99d2619SRadu Nicolau 		break;
16699a2dd95SBruce Richardson 	case ALGO_TYPE_AES_CBC:
16799a2dd95SBruce Richardson 	case ALGO_TYPE_3DES_CBC:
16899a2dd95SBruce Richardson 		copy_iv(iv, ivp, sa->iv_len);
16999a2dd95SBruce Richardson 		break;
17099a2dd95SBruce Richardson 	case ALGO_TYPE_AES_CTR:
17199a2dd95SBruce Richardson 		ctr = (struct aesctr_cnt_blk *)iv;
17299a2dd95SBruce Richardson 		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
17399a2dd95SBruce Richardson 		break;
17499a2dd95SBruce Richardson 	}
17599a2dd95SBruce Richardson 
17699a2dd95SBruce Richardson 	*pofs += sa->ctp.auth.offset;
17799a2dd95SBruce Richardson 	clen = plen - sa->ctp.auth.length;
17899a2dd95SBruce Richardson 	return clen;
17999a2dd95SBruce Richardson }
18099a2dd95SBruce Richardson 
18199a2dd95SBruce Richardson /*
18299a2dd95SBruce Richardson  * Helper function for prepare() to deal with situation when
18399a2dd95SBruce Richardson  * ICV is spread by two segments. Tries to move ICV completely into the
18499a2dd95SBruce Richardson  * last segment.
18599a2dd95SBruce Richardson  */
18699a2dd95SBruce Richardson static struct rte_mbuf *
18799a2dd95SBruce Richardson move_icv(struct rte_mbuf *ml, uint32_t ofs)
18899a2dd95SBruce Richardson {
18999a2dd95SBruce Richardson 	uint32_t n;
19099a2dd95SBruce Richardson 	struct rte_mbuf *ms;
19199a2dd95SBruce Richardson 	const void *prev;
19299a2dd95SBruce Richardson 	void *new;
19399a2dd95SBruce Richardson 
19499a2dd95SBruce Richardson 	ms = ml->next;
19599a2dd95SBruce Richardson 	n = ml->data_len - ofs;
19699a2dd95SBruce Richardson 
19799a2dd95SBruce Richardson 	prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
19899a2dd95SBruce Richardson 	new = rte_pktmbuf_prepend(ms, n);
19999a2dd95SBruce Richardson 	if (new == NULL)
20099a2dd95SBruce Richardson 		return NULL;
20199a2dd95SBruce Richardson 
20299a2dd95SBruce Richardson 	/* move n ICV bytes from ml into ms */
20399a2dd95SBruce Richardson 	rte_memcpy(new, prev, n);
20499a2dd95SBruce Richardson 	ml->data_len -= n;
20599a2dd95SBruce Richardson 
20699a2dd95SBruce Richardson 	return ms;
20799a2dd95SBruce Richardson }
20899a2dd95SBruce Richardson 
/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	struct aead_gcm_aad *aad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);

	/*
	 * fill AAD fields, if any (aad fields are placed after icv).
	 * Supported AEAD algorithms: AES-GCM, AES-CCM and
	 * CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha_aad = (struct aead_chacha20_poly1305_aad *)
			    (icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha_aad,
						sa->spi, sqc, IS_ESN(sa));
		}
		break;
	}
}
25299a2dd95SBruce Richardson 
25399a2dd95SBruce Richardson static inline int
25499a2dd95SBruce Richardson inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
25599a2dd95SBruce Richardson 	struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
25699a2dd95SBruce Richardson {
25799a2dd95SBruce Richardson 	int32_t rc;
25899a2dd95SBruce Richardson 	uint64_t sqn;
25999a2dd95SBruce Richardson 	struct rte_esp_hdr *esph;
26099a2dd95SBruce Richardson 
26199a2dd95SBruce Richardson 	esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
26299a2dd95SBruce Richardson 
26399a2dd95SBruce Richardson 	/*
26499a2dd95SBruce Richardson 	 * retrieve and reconstruct SQN, then check it, then
26599a2dd95SBruce Richardson 	 * convert it back into network byte order.
26699a2dd95SBruce Richardson 	 */
26799a2dd95SBruce Richardson 	sqn = rte_be_to_cpu_32(esph->seq);
26899a2dd95SBruce Richardson 	if (IS_ESN(sa))
26999a2dd95SBruce Richardson 		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
27099a2dd95SBruce Richardson 	*sqc = rte_cpu_to_be_64(sqn);
27199a2dd95SBruce Richardson 
27299a2dd95SBruce Richardson 	/* check IPsec window */
27399a2dd95SBruce Richardson 	rc = esn_inb_check_sqn(rsn, sa, sqn);
27499a2dd95SBruce Richardson 
27599a2dd95SBruce Richardson 	return rc;
27699a2dd95SBruce Richardson }
27799a2dd95SBruce Richardson 
/*
 * prepare packet for upcoming processing:
 * - validate padded payload length against SA parameters;
 * - locate the ICV, consolidating it into one segment if needed;
 * - make sure there is tailroom for SQN.hi and AAD;
 * - extend packet length by sqh_len so auth length/offset stay
 *   consistent (undone in post-crypto processing).
 * Returns payload length (packet minus L2/L3 headers) on success,
 * negative errno otherwise.
 */
static inline int32_t
inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t hlen, union sym_op_data *icv)
{
	uint32_t clen, icv_len, icv_ofs, plen;
	struct rte_mbuf *ml;

	/* start packet manipulation */
	plen = mb->pkt_len;
	plen = plen - hlen;

	/*
	 * check that packet has a valid length:
	 * ciphertext must be non-negative and a multiple of pad_align
	 */
	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
		return -EBADMSG;

	/* find ICV location: last icv_len bytes of the packet */
	icv_len = sa->icv_len;
	icv_ofs = mb->pkt_len - icv_len;

	/* ml becomes the segment containing icv_ofs; icv_ofs becomes
	 * the offset within that segment */
	ml = mbuf_get_seg_ofs(mb, &icv_ofs);

	/*
	 * if ICV is spread by two segments, then try to
	 * move ICV completely into the last segment.
	 */
	if (ml->data_len < icv_ofs + icv_len) {

		ml = move_icv(ml, icv_ofs);
		if (ml == NULL)
			return -ENOSPC;

		/* new ICV location */
		icv_ofs = 0;
	}

	/* reserve room before the ICV for SQN.hi (ESN case) */
	icv_ofs += sa->sqh_len;

	/*
	 * we have to allocate space for AAD somewhere,
	 * right now - just use free trailing space at the last segment.
	 * Would probably be more convenient to reserve space for AAD
	 * inside rte_crypto_op itself
	 * (again for IV space is already reserved inside cop).
	 */
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

	/*
	 * if esn is used then high-order 32 bits are also used in ICV
	 * calculation but are not transmitted, update packet length
	 * to be consistent with auth data length and offset, this will
	 * be subtracted from packet length in post crypto processing
	 */
	mb->pkt_len += sa->sqh_len;
	ml->data_len += sa->sqh_len;

	return plen;
}
34199a2dd95SBruce Richardson 
34299a2dd95SBruce Richardson static inline int32_t
34399a2dd95SBruce Richardson inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
34499a2dd95SBruce Richardson 	struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
34599a2dd95SBruce Richardson {
34699a2dd95SBruce Richardson 	int rc;
34799a2dd95SBruce Richardson 	rte_be64_t sqn;
34899a2dd95SBruce Richardson 
34999a2dd95SBruce Richardson 	rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
35099a2dd95SBruce Richardson 	if (rc != 0)
35199a2dd95SBruce Richardson 		return rc;
35299a2dd95SBruce Richardson 
35399a2dd95SBruce Richardson 	rc = inb_prepare(sa, mb, hlen, icv);
35499a2dd95SBruce Richardson 	if (rc < 0)
35599a2dd95SBruce Richardson 		return rc;
35699a2dd95SBruce Richardson 
35799a2dd95SBruce Richardson 	inb_pkt_xprepare(sa, sqn, icv);
35899a2dd95SBruce Richardson 	return rc;
35999a2dd95SBruce Richardson }
36099a2dd95SBruce Richardson 
/*
 * setup/update packets and crypto ops for ESP inbound case.
 * For each input mbuf: validate SQN against the replay window, prepare
 * packet layout (ICV/AAD/SQN.hi) and fill the matching crypto op.
 * Failed packets are compacted to the tail of mb[] and rte_errno is set
 * to the last failure reason. Returns the number of successfully
 * prepared packets (they occupy mb[0..k-1] / cop[0..k-1]).
 */
uint16_t
esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, hl;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	/* dr[] collects indices of packets that failed preparation */
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;
	/* take reader ownership of the replay-window state */
	rsn = rsn_acquire(sa);

	k = 0;
	for (i = 0; i != num; i++) {

		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
		if (rc >= 0) {
			/* rc is the payload length on success */
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
			k++;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	rsn_release(sa, rsn);

	/* copy not prepared mbufs beyond good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	return k;
}
40399a2dd95SBruce Richardson 
40499a2dd95SBruce Richardson /*
40599a2dd95SBruce Richardson  * Start with processing inbound packet.
40699a2dd95SBruce Richardson  * This is common part for both tunnel and transport mode.
40799a2dd95SBruce Richardson  * Extract information that will be needed later from mbuf metadata and
40899a2dd95SBruce Richardson  * actual packet data:
40999a2dd95SBruce Richardson  * - mbuf for packet's last segment
41099a2dd95SBruce Richardson  * - length of the L2/L3 headers
41199a2dd95SBruce Richardson  * - esp tail structure
41299a2dd95SBruce Richardson  */
41399a2dd95SBruce Richardson static inline void
41499a2dd95SBruce Richardson process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
41599a2dd95SBruce Richardson 	struct rte_esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
41699a2dd95SBruce Richardson {
41799a2dd95SBruce Richardson 	const struct rte_esp_tail *pt;
41899a2dd95SBruce Richardson 	uint32_t ofs;
41999a2dd95SBruce Richardson 
42099a2dd95SBruce Richardson 	ofs = mb->pkt_len - tlen;
42199a2dd95SBruce Richardson 	hlen[0] = mb->l2_len + mb->l3_len;
42299a2dd95SBruce Richardson 	ml[0] = mbuf_get_seg_ofs(mb, &ofs);
42399a2dd95SBruce Richardson 	pt = rte_pktmbuf_mtod_offset(ml[0], const struct rte_esp_tail *, ofs);
42499a2dd95SBruce Richardson 	tofs[0] = ofs;
42599a2dd95SBruce Richardson 	espt[0] = pt[0];
42699a2dd95SBruce Richardson }
42799a2dd95SBruce Richardson 
42899a2dd95SBruce Richardson /*
42999a2dd95SBruce Richardson  * Helper function to check pad bytes values.
43099a2dd95SBruce Richardson  * Note that pad bytes can be spread across multiple segments.
43199a2dd95SBruce Richardson  */
43299a2dd95SBruce Richardson static inline int
43399a2dd95SBruce Richardson check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
43499a2dd95SBruce Richardson {
43599a2dd95SBruce Richardson 	const uint8_t *pd;
43699a2dd95SBruce Richardson 	uint32_t k, n;
43799a2dd95SBruce Richardson 
43899a2dd95SBruce Richardson 	for (n = 0; n != len; n += k, mb = mb->next) {
43999a2dd95SBruce Richardson 		k = mb->data_len - ofs;
44099a2dd95SBruce Richardson 		k = RTE_MIN(k, len - n);
44199a2dd95SBruce Richardson 		pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
44299a2dd95SBruce Richardson 		if (memcmp(pd, esp_pad_bytes + n, k) != 0)
44399a2dd95SBruce Richardson 			break;
44499a2dd95SBruce Richardson 		ofs = 0;
44599a2dd95SBruce Richardson 	}
44699a2dd95SBruce Richardson 
44799a2dd95SBruce Richardson 	return len - n;
44899a2dd95SBruce Richardson }
44999a2dd95SBruce Richardson 
/*
 * packet checks for transport mode:
 * - no reported IPsec related failures in ol_flags
 * - tail and header lengths are valid
 * - padding bytes are valid
 * apart from checks, function also updates tail offset (and segment)
 * by taking into account pad length.
 * Returns 0 on success, non-zero if any check fails.
 */
static inline int32_t
trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
	uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen)
{
	/* HW already flagged a failure, or lengths exceed the packet */
	if ((mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) != 0 ||
			tlen + hlen > mb->pkt_len)
		return -EBADMSG;

	/* padding bytes are spread over multiple segments:
	 * re-derive the segment/offset where padding starts */
	if (tofs[0] < espt.pad_len) {
		tofs[0] = mb->pkt_len - tlen;
		ml[0] = mbuf_get_seg_ofs(mb, tofs);
	} else
		tofs[0] -= espt.pad_len;

	return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
}
47599a2dd95SBruce Richardson 
47699a2dd95SBruce Richardson /*
47799a2dd95SBruce Richardson  * packet checks for tunnel mode:
47899a2dd95SBruce Richardson  * - same as for trasnport mode
47999a2dd95SBruce Richardson  * - esp tail next proto contains expected for that SA value
48099a2dd95SBruce Richardson  */
48199a2dd95SBruce Richardson static inline int32_t
48299a2dd95SBruce Richardson tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
48399a2dd95SBruce Richardson 	uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen,
48499a2dd95SBruce Richardson 	uint8_t proto)
48599a2dd95SBruce Richardson {
48699a2dd95SBruce Richardson 	return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
48799a2dd95SBruce Richardson 		espt.next_proto != proto);
48899a2dd95SBruce Richardson }
48999a2dd95SBruce Richardson 
49099a2dd95SBruce Richardson /*
49199a2dd95SBruce Richardson  * step two for tunnel mode:
49299a2dd95SBruce Richardson  * - read SQN value (for future use)
49399a2dd95SBruce Richardson  * - cut of ICV, ESP tail and padding bytes
49499a2dd95SBruce Richardson  * - cut of ESP header and IV, also if needed - L2/L3 headers
49599a2dd95SBruce Richardson  *   (controlled by *adj* value)
49699a2dd95SBruce Richardson  */
49799a2dd95SBruce Richardson static inline void *
49899a2dd95SBruce Richardson tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
49999a2dd95SBruce Richardson 	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
50099a2dd95SBruce Richardson {
50199a2dd95SBruce Richardson 	const struct rte_esp_hdr *ph;
50299a2dd95SBruce Richardson 
50399a2dd95SBruce Richardson 	/* read SQN value */
50499a2dd95SBruce Richardson 	ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
50599a2dd95SBruce Richardson 	sqn[0] = ph->seq;
50699a2dd95SBruce Richardson 
50799a2dd95SBruce Richardson 	/* cut of ICV, ESP tail and padding bytes */
50899a2dd95SBruce Richardson 	mbuf_cut_seg_ofs(mb, ml, tofs, tlen);
50999a2dd95SBruce Richardson 
51099a2dd95SBruce Richardson 	/* cut of L2/L3 headers, ESP header and IV */
51199a2dd95SBruce Richardson 	return rte_pktmbuf_adj(mb, adj);
51299a2dd95SBruce Richardson }
51399a2dd95SBruce Richardson 
51499a2dd95SBruce Richardson /*
51599a2dd95SBruce Richardson  * step two for transport mode:
51699a2dd95SBruce Richardson  * - read SQN value (for future use)
51799a2dd95SBruce Richardson  * - cut of ICV, ESP tail and padding bytes
51899a2dd95SBruce Richardson  * - cut of ESP header and IV
51999a2dd95SBruce Richardson  * - move L2/L3 header to fill the gap after ESP header removal
52099a2dd95SBruce Richardson  */
52199a2dd95SBruce Richardson static inline void *
52299a2dd95SBruce Richardson trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
52399a2dd95SBruce Richardson 	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
52499a2dd95SBruce Richardson {
52599a2dd95SBruce Richardson 	char *np, *op;
52699a2dd95SBruce Richardson 
52799a2dd95SBruce Richardson 	/* get start of the packet before modifications */
52899a2dd95SBruce Richardson 	op = rte_pktmbuf_mtod(mb, char *);
52999a2dd95SBruce Richardson 
53099a2dd95SBruce Richardson 	/* cut off ESP header and IV */
53199a2dd95SBruce Richardson 	np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);
53299a2dd95SBruce Richardson 
53399a2dd95SBruce Richardson 	/* move header bytes to fill the gap after ESP header removal */
53499a2dd95SBruce Richardson 	remove_esph(np, op, hlen);
53599a2dd95SBruce Richardson 	return np;
53699a2dd95SBruce Richardson }
53799a2dd95SBruce Richardson 
53899a2dd95SBruce Richardson /*
53999a2dd95SBruce Richardson  * step three for transport mode:
54099a2dd95SBruce Richardson  * update mbuf metadata:
54199a2dd95SBruce Richardson  * - packet_type
54299a2dd95SBruce Richardson  * - ol_flags
54399a2dd95SBruce Richardson  */
54499a2dd95SBruce Richardson static inline void
54599a2dd95SBruce Richardson trs_process_step3(struct rte_mbuf *mb)
54699a2dd95SBruce Richardson {
54799a2dd95SBruce Richardson 	/* reset mbuf packet type */
54899a2dd95SBruce Richardson 	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
54999a2dd95SBruce Richardson 
550*daa02b5cSOlivier Matz 	/* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
551*daa02b5cSOlivier Matz 	mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
55299a2dd95SBruce Richardson }
55399a2dd95SBruce Richardson 
55499a2dd95SBruce Richardson /*
55599a2dd95SBruce Richardson  * step three for tunnel mode:
55699a2dd95SBruce Richardson  * update mbuf metadata:
55799a2dd95SBruce Richardson  * - packet_type
55899a2dd95SBruce Richardson  * - ol_flags
55999a2dd95SBruce Richardson  * - tx_offload
56099a2dd95SBruce Richardson  */
56199a2dd95SBruce Richardson static inline void
56299a2dd95SBruce Richardson tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
56399a2dd95SBruce Richardson {
56499a2dd95SBruce Richardson 	/* reset mbuf metatdata: L2/L3 len, packet type */
56599a2dd95SBruce Richardson 	mb->packet_type = RTE_PTYPE_UNKNOWN;
56699a2dd95SBruce Richardson 	mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
56799a2dd95SBruce Richardson 
568*daa02b5cSOlivier Matz 	/* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
569*daa02b5cSOlivier Matz 	mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
57099a2dd95SBruce Richardson }
57199a2dd95SBruce Richardson 
57299a2dd95SBruce Richardson /*
57399a2dd95SBruce Richardson  * *process* function for tunnel packets
57499a2dd95SBruce Richardson  */
57599a2dd95SBruce Richardson static inline uint16_t
57668977baaSRadu Nicolau tun_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
57799a2dd95SBruce Richardson 	    uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
57899a2dd95SBruce Richardson {
57968977baaSRadu Nicolau 	uint32_t adj, i, k, tl, bytes;
58099a2dd95SBruce Richardson 	uint32_t hl[num], to[num];
58199a2dd95SBruce Richardson 	struct rte_esp_tail espt[num];
58299a2dd95SBruce Richardson 	struct rte_mbuf *ml[num];
58399a2dd95SBruce Richardson 	const void *outh;
58499a2dd95SBruce Richardson 	void *inh;
58599a2dd95SBruce Richardson 
58699a2dd95SBruce Richardson 	/*
58799a2dd95SBruce Richardson 	 * remove icv, esp trailer and high-order
58899a2dd95SBruce Richardson 	 * 32 bits of esn from packet length
58999a2dd95SBruce Richardson 	 */
59099a2dd95SBruce Richardson 	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
59199a2dd95SBruce Richardson 	const uint32_t cofs = sa->ctp.cipher.offset;
59299a2dd95SBruce Richardson 
59399a2dd95SBruce Richardson 	/*
59499a2dd95SBruce Richardson 	 * to minimize stalls due to load latency,
59599a2dd95SBruce Richardson 	 * read mbufs metadata and esp tail first.
59699a2dd95SBruce Richardson 	 */
59799a2dd95SBruce Richardson 	for (i = 0; i != num; i++)
59899a2dd95SBruce Richardson 		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
59999a2dd95SBruce Richardson 
60099a2dd95SBruce Richardson 	k = 0;
60168977baaSRadu Nicolau 	bytes = 0;
60299a2dd95SBruce Richardson 	for (i = 0; i != num; i++) {
60399a2dd95SBruce Richardson 
60499a2dd95SBruce Richardson 		adj = hl[i] + cofs;
60599a2dd95SBruce Richardson 		tl = tlen + espt[i].pad_len;
60699a2dd95SBruce Richardson 
60799a2dd95SBruce Richardson 		/* check that packet is valid */
60899a2dd95SBruce Richardson 		if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
60999a2dd95SBruce Richardson 					sa->proto) == 0) {
61099a2dd95SBruce Richardson 
61199a2dd95SBruce Richardson 			outh = rte_pktmbuf_mtod_offset(mb[i], uint8_t *,
61299a2dd95SBruce Richardson 					mb[i]->l2_len);
61399a2dd95SBruce Richardson 
61499a2dd95SBruce Richardson 			/* modify packet's layout */
61599a2dd95SBruce Richardson 			inh = tun_process_step2(mb[i], ml[i], hl[i], adj,
61699a2dd95SBruce Richardson 					to[i], tl, sqn + k);
61799a2dd95SBruce Richardson 
61899a2dd95SBruce Richardson 			/* update inner ip header */
61999a2dd95SBruce Richardson 			update_tun_inb_l3hdr(sa, outh, inh);
62099a2dd95SBruce Richardson 
62199a2dd95SBruce Richardson 			/* update mbuf's metadata */
62299a2dd95SBruce Richardson 			tun_process_step3(mb[i], sa->tx_offload.msk,
62399a2dd95SBruce Richardson 				sa->tx_offload.val);
62499a2dd95SBruce Richardson 			k++;
62568977baaSRadu Nicolau 			bytes += mb[i]->pkt_len;
62699a2dd95SBruce Richardson 		} else
62799a2dd95SBruce Richardson 			dr[i - k] = i;
62899a2dd95SBruce Richardson 	}
62999a2dd95SBruce Richardson 
63068977baaSRadu Nicolau 	sa->statistics.count += k;
63168977baaSRadu Nicolau 	sa->statistics.bytes += bytes;
63299a2dd95SBruce Richardson 	return k;
63399a2dd95SBruce Richardson }
63499a2dd95SBruce Richardson 
63599a2dd95SBruce Richardson /*
63699a2dd95SBruce Richardson  * *process* function for tunnel packets
63799a2dd95SBruce Richardson  */
63899a2dd95SBruce Richardson static inline uint16_t
63968977baaSRadu Nicolau trs_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
64099a2dd95SBruce Richardson 	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
64199a2dd95SBruce Richardson {
64299a2dd95SBruce Richardson 	char *np;
64368977baaSRadu Nicolau 	uint32_t i, k, l2, tl, bytes;
64499a2dd95SBruce Richardson 	uint32_t hl[num], to[num];
64599a2dd95SBruce Richardson 	struct rte_esp_tail espt[num];
64699a2dd95SBruce Richardson 	struct rte_mbuf *ml[num];
64799a2dd95SBruce Richardson 
64899a2dd95SBruce Richardson 	/*
64999a2dd95SBruce Richardson 	 * remove icv, esp trailer and high-order
65099a2dd95SBruce Richardson 	 * 32 bits of esn from packet length
65199a2dd95SBruce Richardson 	 */
65299a2dd95SBruce Richardson 	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
65399a2dd95SBruce Richardson 	const uint32_t cofs = sa->ctp.cipher.offset;
65499a2dd95SBruce Richardson 
65599a2dd95SBruce Richardson 	/*
65699a2dd95SBruce Richardson 	 * to minimize stalls due to load latency,
65799a2dd95SBruce Richardson 	 * read mbufs metadata and esp tail first.
65899a2dd95SBruce Richardson 	 */
65999a2dd95SBruce Richardson 	for (i = 0; i != num; i++)
66099a2dd95SBruce Richardson 		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
66199a2dd95SBruce Richardson 
66299a2dd95SBruce Richardson 	k = 0;
66368977baaSRadu Nicolau 	bytes = 0;
66499a2dd95SBruce Richardson 	for (i = 0; i != num; i++) {
66599a2dd95SBruce Richardson 
66699a2dd95SBruce Richardson 		tl = tlen + espt[i].pad_len;
66799a2dd95SBruce Richardson 		l2 = mb[i]->l2_len;
66899a2dd95SBruce Richardson 
66999a2dd95SBruce Richardson 		/* check that packet is valid */
67099a2dd95SBruce Richardson 		if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
67199a2dd95SBruce Richardson 				hl[i] + cofs, tl) == 0) {
67299a2dd95SBruce Richardson 
67399a2dd95SBruce Richardson 			/* modify packet's layout */
67499a2dd95SBruce Richardson 			np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
67599a2dd95SBruce Richardson 				to[i], tl, sqn + k);
67699a2dd95SBruce Richardson 			update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
67799a2dd95SBruce Richardson 				l2, hl[i] - l2, espt[i].next_proto);
67899a2dd95SBruce Richardson 
67999a2dd95SBruce Richardson 			/* update mbuf's metadata */
68099a2dd95SBruce Richardson 			trs_process_step3(mb[i]);
68199a2dd95SBruce Richardson 			k++;
68268977baaSRadu Nicolau 			bytes += mb[i]->pkt_len;
68399a2dd95SBruce Richardson 		} else
68499a2dd95SBruce Richardson 			dr[i - k] = i;
68599a2dd95SBruce Richardson 	}
68699a2dd95SBruce Richardson 
68768977baaSRadu Nicolau 	sa->statistics.count += k;
68868977baaSRadu Nicolau 	sa->statistics.bytes += bytes;
68999a2dd95SBruce Richardson 	return k;
69099a2dd95SBruce Richardson }
69199a2dd95SBruce Richardson 
69299a2dd95SBruce Richardson /*
69399a2dd95SBruce Richardson  * for group of ESP inbound packets perform SQN check and update.
69499a2dd95SBruce Richardson  */
69599a2dd95SBruce Richardson static inline uint16_t
69699a2dd95SBruce Richardson esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
69799a2dd95SBruce Richardson 	uint32_t dr[], uint16_t num)
69899a2dd95SBruce Richardson {
69999a2dd95SBruce Richardson 	uint32_t i, k;
70099a2dd95SBruce Richardson 	struct replay_sqn *rsn;
70199a2dd95SBruce Richardson 
70299a2dd95SBruce Richardson 	/* replay not enabled */
70399a2dd95SBruce Richardson 	if (sa->replay.win_sz == 0)
70499a2dd95SBruce Richardson 		return num;
70599a2dd95SBruce Richardson 
70699a2dd95SBruce Richardson 	rsn = rsn_update_start(sa);
70799a2dd95SBruce Richardson 
70899a2dd95SBruce Richardson 	k = 0;
70999a2dd95SBruce Richardson 	for (i = 0; i != num; i++) {
71099a2dd95SBruce Richardson 		if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
71199a2dd95SBruce Richardson 			k++;
71299a2dd95SBruce Richardson 		else
71399a2dd95SBruce Richardson 			dr[i - k] = i;
71499a2dd95SBruce Richardson 	}
71599a2dd95SBruce Richardson 
71699a2dd95SBruce Richardson 	rsn_update_finish(sa, rsn);
71799a2dd95SBruce Richardson 	return k;
71899a2dd95SBruce Richardson }
71999a2dd95SBruce Richardson 
72099a2dd95SBruce Richardson /*
72199a2dd95SBruce Richardson  * process group of ESP inbound packets.
72299a2dd95SBruce Richardson  */
72399a2dd95SBruce Richardson static inline uint16_t
72499a2dd95SBruce Richardson esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
72599a2dd95SBruce Richardson 	uint16_t num, uint8_t sqh_len, esp_inb_process_t process)
72699a2dd95SBruce Richardson {
72799a2dd95SBruce Richardson 	uint32_t k, n;
72899a2dd95SBruce Richardson 	uint32_t sqn[num];
72999a2dd95SBruce Richardson 	uint32_t dr[num];
73099a2dd95SBruce Richardson 
73199a2dd95SBruce Richardson 	/* process packets, extract seq numbers */
73299a2dd95SBruce Richardson 	k = process(sa, mb, sqn, dr, num, sqh_len);
73399a2dd95SBruce Richardson 
73499a2dd95SBruce Richardson 	/* handle unprocessed mbufs */
73599a2dd95SBruce Richardson 	if (k != num && k != 0)
73699a2dd95SBruce Richardson 		move_bad_mbufs(mb, dr, num, num - k);
73799a2dd95SBruce Richardson 
73899a2dd95SBruce Richardson 	/* update SQN and replay window */
73999a2dd95SBruce Richardson 	n = esp_inb_rsn_update(sa, sqn, dr, k);
74099a2dd95SBruce Richardson 
74199a2dd95SBruce Richardson 	/* handle mbufs with wrong SQN */
74299a2dd95SBruce Richardson 	if (n != k && n != 0)
74399a2dd95SBruce Richardson 		move_bad_mbufs(mb, dr, k, k - n);
74499a2dd95SBruce Richardson 
74599a2dd95SBruce Richardson 	if (n != num)
74699a2dd95SBruce Richardson 		rte_errno = EBADMSG;
74799a2dd95SBruce Richardson 
74899a2dd95SBruce Richardson 	return n;
74999a2dd95SBruce Richardson }
75099a2dd95SBruce Richardson 
75199a2dd95SBruce Richardson /*
75299a2dd95SBruce Richardson  * Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO
75399a2dd95SBruce Richardson  * (synchronous mode).
75499a2dd95SBruce Richardson  */
75599a2dd95SBruce Richardson uint16_t
75699a2dd95SBruce Richardson cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
75799a2dd95SBruce Richardson 	struct rte_mbuf *mb[], uint16_t num)
75899a2dd95SBruce Richardson {
75999a2dd95SBruce Richardson 	int32_t rc;
76099a2dd95SBruce Richardson 	uint32_t i, k;
76199a2dd95SBruce Richardson 	struct rte_ipsec_sa *sa;
76299a2dd95SBruce Richardson 	struct replay_sqn *rsn;
76399a2dd95SBruce Richardson 	union sym_op_data icv;
76499a2dd95SBruce Richardson 	struct rte_crypto_va_iova_ptr iv[num];
76599a2dd95SBruce Richardson 	struct rte_crypto_va_iova_ptr aad[num];
76699a2dd95SBruce Richardson 	struct rte_crypto_va_iova_ptr dgst[num];
76799a2dd95SBruce Richardson 	uint32_t dr[num];
76899a2dd95SBruce Richardson 	uint32_t l4ofs[num];
76999a2dd95SBruce Richardson 	uint32_t clen[num];
77099a2dd95SBruce Richardson 	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
77199a2dd95SBruce Richardson 
77299a2dd95SBruce Richardson 	sa = ss->sa;
77399a2dd95SBruce Richardson 
77499a2dd95SBruce Richardson 	/* grab rsn lock */
77599a2dd95SBruce Richardson 	rsn = rsn_acquire(sa);
77699a2dd95SBruce Richardson 
77799a2dd95SBruce Richardson 	/* do preparation for all packets */
77899a2dd95SBruce Richardson 	for (i = 0, k = 0; i != num; i++) {
77999a2dd95SBruce Richardson 
78099a2dd95SBruce Richardson 		/* calculate ESP header offset */
78199a2dd95SBruce Richardson 		l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;
78299a2dd95SBruce Richardson 
78399a2dd95SBruce Richardson 		/* prepare ESP packet for processing */
78499a2dd95SBruce Richardson 		rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
78599a2dd95SBruce Richardson 		if (rc >= 0) {
78699a2dd95SBruce Richardson 			/* get encrypted data offset and length */
78799a2dd95SBruce Richardson 			clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
78899a2dd95SBruce Richardson 				l4ofs + k, rc, ivbuf[k]);
78999a2dd95SBruce Richardson 
79099a2dd95SBruce Richardson 			/* fill iv, digest and aad */
79199a2dd95SBruce Richardson 			iv[k].va = ivbuf[k];
79299a2dd95SBruce Richardson 			aad[k].va = icv.va + sa->icv_len;
79399a2dd95SBruce Richardson 			dgst[k++].va = icv.va;
79499a2dd95SBruce Richardson 		} else {
79599a2dd95SBruce Richardson 			dr[i - k] = i;
79699a2dd95SBruce Richardson 			rte_errno = -rc;
79799a2dd95SBruce Richardson 		}
79899a2dd95SBruce Richardson 	}
79999a2dd95SBruce Richardson 
80099a2dd95SBruce Richardson 	/* release rsn lock */
80199a2dd95SBruce Richardson 	rsn_release(sa, rsn);
80299a2dd95SBruce Richardson 
80399a2dd95SBruce Richardson 	/* copy not prepared mbufs beyond good ones */
80499a2dd95SBruce Richardson 	if (k != num && k != 0)
80599a2dd95SBruce Richardson 		move_bad_mbufs(mb, dr, num, num - k);
80699a2dd95SBruce Richardson 
80799a2dd95SBruce Richardson 	/* convert mbufs to iovecs and do actual crypto/auth processing */
80899a2dd95SBruce Richardson 	if (k != 0)
80999a2dd95SBruce Richardson 		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
81099a2dd95SBruce Richardson 			l4ofs, clen, k);
81199a2dd95SBruce Richardson 	return k;
81299a2dd95SBruce Richardson }
81399a2dd95SBruce Richardson 
81499a2dd95SBruce Richardson /*
81599a2dd95SBruce Richardson  * process group of ESP inbound tunnel packets.
81699a2dd95SBruce Richardson  */
81799a2dd95SBruce Richardson uint16_t
81899a2dd95SBruce Richardson esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
81999a2dd95SBruce Richardson 	struct rte_mbuf *mb[], uint16_t num)
82099a2dd95SBruce Richardson {
82199a2dd95SBruce Richardson 	struct rte_ipsec_sa *sa = ss->sa;
82299a2dd95SBruce Richardson 
82399a2dd95SBruce Richardson 	return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process);
82499a2dd95SBruce Richardson }
82599a2dd95SBruce Richardson 
82699a2dd95SBruce Richardson uint16_t
82799a2dd95SBruce Richardson inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
82899a2dd95SBruce Richardson 	struct rte_mbuf *mb[], uint16_t num)
82999a2dd95SBruce Richardson {
83099a2dd95SBruce Richardson 	return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process);
83199a2dd95SBruce Richardson }
83299a2dd95SBruce Richardson 
83399a2dd95SBruce Richardson /*
83499a2dd95SBruce Richardson  * process group of ESP inbound transport packets.
83599a2dd95SBruce Richardson  */
83699a2dd95SBruce Richardson uint16_t
83799a2dd95SBruce Richardson esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
83899a2dd95SBruce Richardson 	struct rte_mbuf *mb[], uint16_t num)
83999a2dd95SBruce Richardson {
84099a2dd95SBruce Richardson 	struct rte_ipsec_sa *sa = ss->sa;
84199a2dd95SBruce Richardson 
84299a2dd95SBruce Richardson 	return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process);
84399a2dd95SBruce Richardson }
84499a2dd95SBruce Richardson 
84599a2dd95SBruce Richardson uint16_t
84699a2dd95SBruce Richardson inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
84799a2dd95SBruce Richardson 	struct rte_mbuf *mb[], uint16_t num)
84899a2dd95SBruce Richardson {
84999a2dd95SBruce Richardson 	return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process);
85099a2dd95SBruce Richardson }
851